gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
from zeroos.core0.client import Client
from .Disk import Disks, DiskType
from .Container import Containers
from .StoragePool import StoragePools
from .Network import Network
from .healthcheck import HealthCheck
from collections import namedtuple
from datetime import datetime
from io import BytesIO
import netaddr
import time
import redis
# Immutable record of one mount entry as reported by the node's core0
# info.disk() call (see Node.list_mounts).
Mount = namedtuple('Mount', ['device', 'mountpoint', 'fstype', 'options'])
class Node:
    """Represent a G8OS server reachable through its core0 redis endpoint."""

    def __init__(self, addr, port=6379, password=None, timeout=120):
        """
        addr: IP address / hostname of the node's redis endpoint
        port: redis port (default 6379)
        password: optional redis password
        timeout: client timeout in seconds
        """
        # g8os client to talk to the node (created lazily, see `client`)
        self._storageAddr = None
        self.addr = addr
        self.port = port
        self.password = password
        self.timeout = timeout
        self.disks = Disks(self)
        self.storagepools = StoragePools(self)
        self.containers = Containers(self)
        self.network = Network(self)
        self.healthcheck = HealthCheck(self)
        self._client = None

    @classmethod
    def from_ays(cls, service, password=None, timeout=120):
        """Alternate constructor: build a Node from an AYS service model."""
        return cls(
            addr=service.model.data.redisAddr,
            port=service.model.data.redisPort,
            password=password,
            timeout=timeout
        )

    @property
    def client(self):
        """Lazily instantiated core0 client for this node."""
        if not self._client:
            self._client = Client(host=self.addr, port=self.port, password=self.password, timeout=self.timeout)
        return self._client

    @property
    def name(self):
        """Node name: the MAC address of the default-gateway NIC, colons stripped.

        Raises AttributeError when no default gateway device or MAC can be found.
        """
        def get_nic_hwaddr(nics, name):
            # return the MAC of the NIC called `name`, or None when absent
            for nic in nics:
                if nic['name'] == name:
                    return nic['hardwareaddr']

        defaultgwdev = self.client.bash("ip route | grep default | awk '{print $5}'", max_time=60).get().stdout.strip()
        nics = self.client.info.nic()
        macgwdev = None
        if defaultgwdev:
            macgwdev = get_nic_hwaddr(nics, defaultgwdev)
        if not macgwdev:
            # message grammar fixed (was "name not find for node")
            raise AttributeError("name not found for node {}".format(self))
        return macgwdev.replace(":", '')

    @property
    def storageAddr(self):
        """Storage network address: the IPv4 address of the 'backplane' NIC when
        one exists, otherwise the regular management address. Cached after first use."""
        if not self._storageAddr:
            nic_data = self.client.info.nic()
            for nic in nic_data:
                if nic['name'] == 'backplane':
                    for ip in nic['addrs']:
                        network = netaddr.IPNetwork(ip['addr'])
                        if network.version == 4:
                            self._storageAddr = network.ip.format()
                            return self._storageAddr
            self._storageAddr = self.addr
        return self._storageAddr

    def get_nic_by_ip(self, addr):
        """Return the NIC info dict that owns IP `addr`, or None when no NIC matches."""
        try:
            res = next(nic for nic in self.client.info.nic() if any(addr == a['addr'].split('/')[0] for a in nic['addrs']))
            return res
        except StopIteration:
            return None

    def _eligible_fscache_disk(self, disks):
        """
        return the first disk that is eligible to be used as filesystem cache
        First try to find a ssd disk, otherwise a hdd, finally a nvme disk
        """
        priorities = [DiskType.ssd, DiskType.hdd, DiskType.nvme]
        eligible = {t: [] for t in priorities}
        # disks already part of a btrfs pool are not eligible
        usedisks = []
        for pool in (self.client.btrfs.list() or []):
            for device in pool['devices']:
                usedisks.append(device['path'])
        for disk in disks[::-1]:
            if disk.devicename in usedisks or len(disk.partitions) > 0:
                continue
            if disk.type in priorities:
                eligible[disk.type].append(disk)
        # pick up the first disk according to priorities
        for t in priorities:
            if eligible[t]:
                return eligible[t][0]
        else:
            # for/else: only reached when no priority bucket had any disk
            raise RuntimeError("cannot find eligible disks for the fs cache")

    def find_disks(self, disk_type):
        """
        return a dict mapping this node's name to the list of disks that are
        not used by a storage pool and match the requested `disk_type`
        """
        available_disks = {}
        for disk in self.disks.list():
            # skip disks of wrong type
            if disk.type.name != disk_type:
                continue
            # skip devices which have filesystems on the device
            if len(disk.filesystems) > 0:
                continue
            # only keep devices that carry no partitions at all
            # (comment fixed: the original said "include devices which have partitions")
            if len(disk.partitions) == 0:
                available_disks.setdefault(self.name, []).append(disk)
        return available_disks

    def _mount_fscache(self, storagepool):
        """
        mount the fscache storage pool and copy the content of the in-memory fs inside
        """
        mountedpaths = [mount.mountpoint for mount in self.list_mounts()]
        containerpath = '/var/cache/containers'
        if containerpath not in mountedpaths:
            # recreate the container cache subvolume from scratch, then mount it
            if storagepool.exists('containercache'):
                storagepool.get('containercache').delete()
            fs = storagepool.create('containercache')
            self.client.disk.mount(storagepool.devicename, containerpath, ['subvol={}'.format(fs.subvolume)])
        logpath = '/var/log'
        if logpath not in mountedpaths:
            # logs is an empty filesystem on which we create a snapshot to store logs of the current boot
            snapname = '{:%Y-%m-%d-%H-%M}'.format(datetime.now())
            fs = storagepool.get('logs')
            snapshot = fs.create(snapname)
            # preserve the logs of the current (ram) /var/log across the mount
            self.client.bash('mkdir /tmp/log && mv /var/log/* /tmp/log/')
            self.client.disk.mount(storagepool.devicename, logpath, ['subvol={}'.format(snapshot.subvolume)])
            self.client.bash('mv /tmp/log/* /var/log/').get()
            self.client.logger.reopen()
            # startup syslogd and klogd
            self.client.system('syslogd -n -O /var/log/messages')
            self.client.system('klogd -n')

    def freeports(self, baseport=2000, nrports=3):
        """Return a list of `nrports` TCP ports that are currently unused,
        scanning upward from `baseport`."""
        ports = self.client.info.port()
        usedports = set()
        for portInfo in ports:
            if portInfo['network'] != "tcp":
                continue
            usedports.add(portInfo['port'])
        freeports = []
        while True:
            if baseport not in usedports:
                freeports.append(baseport)
                if len(freeports) >= nrports:
                    return freeports
            baseport += 1

    def find_persistance(self, name='fscache'):
        """Return the storage pool called `name`, or None when it does not exist.

        (Method name kept for backward compatibility -- 'persistance' is a
        historical typo of 'persistence'.)
        """
        fscache_sp = None
        for sp in self.storagepools.list():
            if sp.name == name:
                fscache_sp = sp
                break
        return fscache_sp

    def is_configured(self, name=None):
        """Return True when this node's fscache storage pool exists and is mounted."""
        if not name:
            name = self.name
        poolname = '{}_fscache'.format(name)
        fscache_sp = self.find_persistance(poolname)
        if fscache_sp is None:
            return False
        return bool(fscache_sp.mountpoint)

    def ensure_persistance(self, name='fscache'):
        """
        look for a disk not used,
        create a partition and mount it to be used as cache for the g8ufs
        set the label `fs_cache` to the partition
        """
        disks = self.disks.list()
        if len(disks) <= 0:
            # if no disks, we can't do anything
            return
        # check if there is already a storage pool with the fs_cache label
        fscache_sp = self.find_persistance(name)
        # create the storage pool if we don't have one yet
        if fscache_sp is None:
            disk = self._eligible_fscache_disk(disks)
            fscache_sp = self.storagepools.create(name, devices=[disk.devicename], metadata_profile='single', data_profile='single', overwrite=True)
        fscache_sp.mount()
        try:
            fscache_sp.get('logs')
        except ValueError:
            fscache_sp.create('logs')
        # mount the storage pool
        self._mount_fscache(fscache_sp)
        return fscache_sp

    def download_content(self, remote):
        """Download the remote file at path `remote` and return its content as str."""
        buff = BytesIO()
        self.client.filesystem.download(remote, buff)
        return buff.getvalue().decode()

    def upload_content(self, remote, content):
        """Upload `content` (str or bytes) to the remote path `remote`."""
        if isinstance(content, str):
            content = content.encode('utf8')
        # renamed local from `bytes` to avoid shadowing the builtin
        buff = BytesIO(content)
        self.client.filesystem.upload(remote, buff)

    def wipedisks(self):
        """Zero the first 50 MiB of every unmounted, non-USB block device on the node."""
        print('Wiping node {hostname}'.format(**self.client.info.os()))
        mounteddevices = {mount['device']: mount for mount in self.client.info.disk()}

        def getmountpoint(device):
            # return mount info for `device` (or any of its partitions), None if unmounted
            for mounteddevice, mount in mounteddevices.items():
                if mounteddevice.startswith(device):
                    return mount

        jobs = []
        for disk in self.client.disk.list()['blockdevices']:
            devicename = '/dev/{}'.format(disk['kname'])
            if disk['tran'] == 'usb':
                print(' * Not wiping usb {kname} {model}'.format(**disk))
                continue
            mount = getmountpoint(devicename)
            if not mount:
                print(' * Wiping disk {kname}'.format(**disk))
                jobs.append(self.client.system('dd if=/dev/zero of={} bs=1M count=50'.format(devicename)))
            else:
                print(' * Not wiping {device} mounted at {mountpoint}'.format(device=devicename, mountpoint=mount['mountpoint']))
        # wait for wiping to complete
        for job in jobs:
            job.get()

    def list_mounts(self):
        """Return the node's current mounts as a list of Mount namedtuples."""
        allmounts = []
        for mount in self.client.info.disk():
            allmounts.append(Mount(mount['device'],
                                   mount['mountpoint'],
                                   mount['fstype'],
                                   mount['opts']))
        return allmounts

    def is_running(self):
        """Ping the node for up to 30 seconds; return True as soon as it answers."""
        state = False
        start = time.time()
        err = None
        while time.time() < start + 30:
            try:
                self.client.testConnectionAttempts = 0
                state = self.client.ping()
                break
            except (RuntimeError, ConnectionError, redis.TimeoutError, TimeoutError) as error:
                err = error
                time.sleep(1)
        else:
            # while/else: only reached when the deadline expired without a successful ping
            print("Could not ping %s within 30 seconds due to %s" % (self.addr, err))
        return state

    def __str__(self):
        return "Node <{host}:{port}>".format(
            host=self.addr,
            port=self.port,
        )

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # two nodes are equal when they share the same redis endpoint
        a = "{}:{}".format(self.addr, self.port)
        b = "{}:{}".format(other.addr, other.port)
        return a == b

    def __hash__(self):
        # consistent with __eq__: hash on (addr, port)
        return hash((self.addr, self.port))
|
|
'''Author: Tyler Reddy
The purpose of this Python module is to provide utility code for handling spherical Voronoi Diagrams.'''
import scipy
try:
    # Compare (major, minor) numerically: the original compared only the minor
    # field, which wrongly rejected any scipy >= 1.0 whose minor is < 13
    # (e.g. scipy 1.10 has minor 10, yet is far newer than 0.13.0).
    _scipy_version = tuple(int(part) for part in scipy.__version__.split('.')[:2])
    if _scipy_version < (0, 13):
        raise ImportError('Module requires version of scipy module >= 0.13.0')
except AttributeError: #handle this for sphinx build process on readthedocs because of module mocking
    pass
import circumcircle
import scipy.spatial
import numpy
import numpy.linalg
import pandas
import math
import numpy.random
class IntersectionError(Exception):
    """Raised when a Voronoi polygon's edges are found to self-intersect."""
def filter_tetrahedron_to_triangle(current_tetrahedron_coord_array):
    '''Drop the origin row from a tetrahedron coordinate array, returning the
    remaining (3,3) triangle of vertices on the sphere surface as a numpy array.'''
    surface_rows = [vertex for vertex in current_tetrahedron_coord_array
                    if not (vertex[0] == 0 and vertex[1] == 0 and vertex[2] == 0)]
    return numpy.array(surface_rows)
def test_polygon_for_self_intersection(array_ordered_Voronoi_polygon_vertices_2D):
'''Test an allegedly properly-ordered numpy array of Voronoi region vertices in 2D for self-intersection of edges based on algorithm described at http://algs4.cs.princeton.edu/91primitives/'''
total_vertices = array_ordered_Voronoi_polygon_vertices_2D.shape[0]
total_edges = total_vertices
def intersection_test(a,b,c,d):
#code in r & s equations provided on above website, which operate on the 2D coordinates of the edge vertices for edges a - b and c - d
#so: a, b, c, d are numpy arrays of vertex coordinates -- presumably with shape (2,)
intersection = False
denominator = (b[0] - a[0]) * (d[1] - c[1]) - (b[1] - a[1]) * (d[0] - c[0])
r = ( (a[1] - c[1]) * (d[0] - c[0]) - (a[0] - c[0]) * (d[1] - c[1]) ) / denominator
s = ( (a[1] - c[1]) * (b[0] - a[0]) - (a[0] - c[0]) * (b[1] - a[1]) ) / denominator
if (r >= 0 and r <= 1) and (s >= 0 and s <= 1): #conditions for intersection
intersection = True
if intersection:
raise IntersectionError("Voronoi polygon line intersection !")
#go through and test all possible non-consecutive edge combinations for intersection
list_vertex_indices_in_edges = [ [vertex_index, vertex_index + 1] for vertex_index in xrange(total_vertices)]
#for the edge starting from the last point in the Voronoi polygon the index of the final point should be switched to the starting index -- to close the polygon
filtered_list_vertex_indices_in_edges = []
for list_vertex_indices_in_edge in list_vertex_indices_in_edges:
if list_vertex_indices_in_edge[1] == total_vertices:
filtered_list_vertex_indices_in_edge = [list_vertex_indices_in_edge[0],0]
else:
filtered_list_vertex_indices_in_edge = list_vertex_indices_in_edge
filtered_list_vertex_indices_in_edges.append(filtered_list_vertex_indices_in_edge)
for edge_index, list_vertex_indices_in_edge in enumerate(filtered_list_vertex_indices_in_edges):
for edge_index_2, list_vertex_indices_in_edge_2 in enumerate(filtered_list_vertex_indices_in_edges):
if (list_vertex_indices_in_edge[0] not in list_vertex_indices_in_edge_2) and (list_vertex_indices_in_edge[1] not in list_vertex_indices_in_edge_2): #non-consecutive edges
a = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge[0]]
b = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge[1]]
c = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge_2[0]]
d = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge_2[1]]
intersection_test(a,b,c,d)
def calculate_Vincenty_distance_between_spherical_points(cartesian_array_1,cartesian_array_2,sphere_radius):
    '''Great-circle distance between two points on a sphere, using the special case of the Vincenty formula (http://en.wikipedia.org/wiki/Great-circle_distance), apparently the most accurate method for calculating great-circle distances.'''
    point_1_spherical = convert_cartesian_array_to_spherical_array(cartesian_array_1)
    point_2_spherical = convert_cartesian_array_to_spherical_array(cartesian_array_2)
    delta_lambda = abs(point_2_spherical[1] - point_1_spherical[1])  # azimuthal separation
    phi_1 = point_1_spherical[2]
    phi_2 = point_2_spherical[2]
    sin_phi_1, cos_phi_1 = math.sin(phi_1), math.cos(phi_1)
    sin_phi_2, cos_phi_2 = math.sin(phi_2), math.cos(phi_2)
    cos_delta_lambda = math.cos(delta_lambda)
    numerator = math.sqrt(
        (sin_phi_2 * math.sin(delta_lambda)) ** 2 +
        (sin_phi_1 * cos_phi_2 - cos_phi_1 * sin_phi_2 * cos_delta_lambda) ** 2)
    denominator = cos_phi_1 * cos_phi_2 + sin_phi_1 * sin_phi_2 * cos_delta_lambda
    central_angle_radians = math.atan2(numerator, denominator)
    return sphere_radius * central_angle_radians
def calculate_haversine_distance_between_spherical_points(cartesian_array_1,cartesian_array_2,sphere_radius):
    '''Calculate the haversine-based distance between two points on the surface of a sphere. Should be more accurate than the arc cosine strategy. See, for example: http://en.wikipedia.org/wiki/Haversine_formula'''
    point_1_spherical = convert_cartesian_array_to_spherical_array(cartesian_array_1)
    point_2_spherical = convert_cartesian_array_to_spherical_array(cartesian_array_2)
    lambda_1, phi_1 = point_1_spherical[1], point_1_spherical[2]
    lambda_2, phi_2 = point_2_spherical[1], point_2_spherical[2]
    # standard haversine rewritten in terms of the polar angle phi rather than latitude
    haversine_phi = (1 - math.cos(phi_2 - phi_1)) / 2.
    haversine_lambda = (1 - math.cos(lambda_2 - lambda_1)) / 2.
    half_chord = math.sqrt(haversine_phi + math.sin(phi_1) * math.sin(phi_2) * haversine_lambda)
    return 2.0 * sphere_radius * math.asin(half_chord)
def generate_random_array_spherical_generators(num_generators,sphere_radius,prng_object):
    '''Recoded using standard uniform selector over theta and acos phi, http://mathworld.wolfram.com/SpherePointPicking.html
    Same as in iPython notebook version

    prng_object is a numpy RandomState (or compatible) so results are reproducible.
    Returns a shape (<=num_generators, 3) cartesian array with duplicates removed.'''
    u = prng_object.uniform(low=0, high=1, size=num_generators)
    v = prng_object.uniform(low=0, high=1, size=num_generators)
    theta_array = 2 * math.pi * u
    phi_array = numpy.arccos((2 * v - 1.0))
    r_array = sphere_radius * numpy.ones((num_generators,))
    spherical_polar_data = numpy.column_stack((r_array, theta_array, phi_array))
    cartesian_random_points = convert_spherical_array_to_cartesian_array(spherical_polar_data)
    # filter out any duplicate generators:
    df_random_points = pandas.DataFrame(cartesian_random_points)
    df_random_points_no_duplicates = df_random_points.drop_duplicates()
    # .values instead of .as_matrix(): as_matrix was deprecated in pandas 0.23
    # and removed in pandas 1.0, which made this function crash on modern pandas
    array_random_spherical_generators = df_random_points_no_duplicates.values
    return array_random_spherical_generators
def filter_polygon_vertex_coordinates_for_extreme_proximity(array_ordered_Voronoi_polygon_vertices,sphere_radius):
    '''Merge (take the midpoint of) polygon vertices that are judged to be extremely close together and return the filtered polygon vertex array. The purpose is to alleviate numerical complications that may arise during surface area calculations involving polygons with ultra-close / nearly coplanar vertices.

    NOTE(review): merges one pair per pass and then recomputes all pairwise
    distances, so the loop terminates only when no off-diagonal pair is below
    the threshold; the input array may be modified in place.'''
    while 1:
        distance_matrix = scipy.spatial.distance.cdist(array_ordered_Voronoi_polygon_vertices,array_ordered_Voronoi_polygon_vertices,'euclidean')
        maximum_euclidean_distance_between_any_vertices = numpy.amax(distance_matrix)
        vertex_merge_threshold = 0.02 #merge any vertices that are separated by less than 2% of the longest inter-vertex distance (may have to play with this value a bit)
        threshold_assessment_matrix = distance_matrix / maximum_euclidean_distance_between_any_vertices
        # strictly-positive filter also excludes the zero diagonal (self-distances)
        row_indices_that_violate_threshold, column_indices_that_violate_threshold = numpy.where((threshold_assessment_matrix < vertex_merge_threshold) & (threshold_assessment_matrix > 0))
        if len(row_indices_that_violate_threshold) > 0 and len(column_indices_that_violate_threshold) > 0:
            for row, column in zip(row_indices_that_violate_threshold,column_indices_that_violate_threshold):
                if not row==column: #ignore diagonal values
                    first_violating_vertex_index = row
                    associated_vertex_index = column
                    # replace one vertex of the close pair by their midpoint,
                    # re-projected onto the sphere surface, and delete the other
                    new_vertex_at_midpoint = ( array_ordered_Voronoi_polygon_vertices[row] + array_ordered_Voronoi_polygon_vertices[column] ) / 2.0
                    spherical_polar_coords_new_vertex = convert_cartesian_array_to_spherical_array(new_vertex_at_midpoint)
                    spherical_polar_coords_new_vertex[0] = sphere_radius #project back to surface of sphere
                    new_vertex_at_midpoint = convert_spherical_array_to_cartesian_array(spherical_polar_coords_new_vertex)
                    array_ordered_Voronoi_polygon_vertices[row] = new_vertex_at_midpoint
                    array_ordered_Voronoi_polygon_vertices = numpy.delete(array_ordered_Voronoi_polygon_vertices,column,0)
                    # only merge one pair per pass -- distances must be recomputed
                    break
        else: break #no more violating vertices
    return array_ordered_Voronoi_polygon_vertices
def calculate_surface_area_of_planar_polygon_in_3D_space(array_ordered_Voronoi_polygon_vertices):
    '''Calculate the surface area of a planar polygon embedded in 3D space. Based largely on: http://stackoverflow.com/a/12653810
    Use this function when spherical polygon surface area calculation fails (i.e., lots of nearly-coplanar vertices and negative surface area).'''

    def _unit_normal(a, b, c):
        # unit normal vector of the plane defined by points a, b and c
        nx = numpy.linalg.det([[1, a[1], a[2]],
                               [1, b[1], b[2]],
                               [1, c[1], c[2]]])
        ny = numpy.linalg.det([[a[0], 1, a[2]],
                               [b[0], 1, b[2]],
                               [c[0], 1, c[2]]])
        nz = numpy.linalg.det([[a[0], a[1], 1],
                               [b[0], b[1], 1],
                               [c[0], c[1], 1]])
        magnitude = (nx ** 2 + ny ** 2 + nz ** 2) ** .5
        return (nx / magnitude, ny / magnitude, nz / magnitude)

    def _poly_area(poly):
        '''Accepts a list of xyz tuples.'''
        assert len(poly) >= 3, "Not a polygon (< 3 vertices)."
        num_vertices = len(poly)
        # sum the cross products of consecutive vertex pairs (wrapping around),
        # then project that vector sum onto the plane's unit normal
        cross_sum = numpy.zeros((3,))
        for index, vertex in enumerate(poly):
            cross_sum += numpy.cross(vertex, poly[(index + 1) % num_vertices])
        doubled_area = numpy.dot(cross_sum, _unit_normal(poly[0], poly[1], poly[2]))
        return abs(doubled_area / 2)

    # the area helper expects a list of tuples
    vertex_tuples = [tuple(coord) for coord in array_ordered_Voronoi_polygon_vertices]
    return _poly_area(vertex_tuples)
def calculate_surface_area_of_a_spherical_Voronoi_polygon(array_ordered_Voronoi_polygon_vertices,sphere_radius):
    '''Calculate the surface area of a polygon on the surface of a sphere. Based on equation provided here: http://mathworld.wolfram.com/LHuiliersTheorem.html
    Decompose into triangles, calculate excess for each'''
    #have to convert to unit sphere before applying the formula
    spherical_coordinates = convert_cartesian_array_to_spherical_array(array_ordered_Voronoi_polygon_vertices)
    spherical_coordinates[...,0] = 1.0
    array_ordered_Voronoi_polygon_vertices = convert_spherical_array_to_cartesian_array(spherical_coordinates)
    #handle nearly-degenerate vertices on the unit sphere by returning an area close to 0 -- may be better options, but this is my current solution to prevent crashes, etc.
    #seems to be relatively rare in my own work, but sufficiently common to cause crashes when iterating over large amounts of messy data
    if scipy.spatial.distance.pdist(array_ordered_Voronoi_polygon_vertices).min() < (10 ** -7):
        return 10 ** -8
    else:
        n = array_ordered_Voronoi_polygon_vertices.shape[0]
        #point we start from
        root_point = array_ordered_Voronoi_polygon_vertices[0]
        totalexcess = 0
        #loop from 1 to n-2, with point 2 to n-1 as other vertex of triangle
        # this could definitely be written more nicely
        # fan triangulation anchored at root_point: b_point/root_b_dist roll over
        # to become the next triangle's a_point/root_a_dist, so each haversine
        # edge length is computed only once
        b_point = array_ordered_Voronoi_polygon_vertices[1]
        root_b_dist = calculate_haversine_distance_between_spherical_points(root_point, b_point, 1.0)
        for i in 1 + numpy.arange(n - 2):
            a_point = b_point
            b_point = array_ordered_Voronoi_polygon_vertices[i+1]
            root_a_dist = root_b_dist
            root_b_dist = calculate_haversine_distance_between_spherical_points(root_point, b_point, 1.0)
            a_b_dist = calculate_haversine_distance_between_spherical_points(a_point, b_point, 1.0)
            # L'Huilier's Theorem: spherical excess from the semi-perimeter s
            s = (root_a_dist + root_b_dist + a_b_dist) / 2
            totalexcess += 4 * math.atan(math.sqrt( math.tan(0.5 * s) * math.tan(0.5 * (s-root_a_dist)) * math.tan(0.5 * (s-root_b_dist)) * math.tan(0.5 * (s-a_b_dist))))
        #excess was accumulated on the unit sphere; rescale by radius squared
        return totalexcess * (sphere_radius ** 2)
def calculate_and_sum_up_inner_sphere_surface_angles_Voronoi_polygon(array_ordered_Voronoi_polygon_vertices,sphere_radius):
    '''Takes an array of ordered Voronoi polygon vertices (for a single generator) and calculates the sum of the inner angles on the sphere surface. The resulting value is theta in the equation provided here: http://mathworld.wolfram.com/SphericalPolygon.html

    Returns 0 when any vertex yields a spherical-law-of-cosines term outside the
    acos domain (nearly-degenerate geometry). The original code intended this
    fallback (its trailing |pre_acos_term| > 1 check) but crashed first with a
    ValueError in math.acos; it also used Python 2 print statements.'''
    num_vertices_in_Voronoi_polygon = array_ordered_Voronoi_polygon_vertices.shape[0] #the number of rows == number of vertices in polygon
    #sanity check: all vertices should already be projected out to (roughly) sphere_radius
    spherical_polar_polygon_vertices = convert_cartesian_array_to_spherical_array(array_ordered_Voronoi_polygon_vertices)
    min_vertex_radius = spherical_polar_polygon_vertices[...,0].min()
    assert sphere_radius - min_vertex_radius < 0.1, "The minimum projected Voronoi vertex r value should match the sphere_radius of {sphere_radius}, but got {r_min}.".format(sphere_radius=sphere_radius,r_min=min_vertex_radius)
    list_Voronoi_poygon_angles_radians = []
    degenerate_angle_found = False
    for current_vertex_index in range(num_vertices_in_Voronoi_polygon):
        #the inner angle at each vertex comes from the spherical triangle it forms
        #with its two neighbours (indices wrap around to close the polygon)
        previous_vertex_index = (current_vertex_index - 1) % num_vertices_in_Voronoi_polygon
        next_vertex_index = (current_vertex_index + 1) % num_vertices_in_Voronoi_polygon
        current_vertex = array_ordered_Voronoi_polygon_vertices[current_vertex_index]
        previous_vertex = array_ordered_Voronoi_polygon_vertices[previous_vertex_index]
        next_vertex = array_ordered_Voronoi_polygon_vertices[next_vertex_index]
        #produce a,b,c for the spherical law of cosines using Vincenty great-circle distances
        a = calculate_Vincenty_distance_between_spherical_points(current_vertex,next_vertex,sphere_radius)
        b = calculate_Vincenty_distance_between_spherical_points(next_vertex,previous_vertex,sphere_radius)
        c = calculate_Vincenty_distance_between_spherical_points(previous_vertex,current_vertex,sphere_radius)
        pre_acos_term = (math.cos(b) - math.cos(a)*math.cos(c)) / (math.sin(a)*math.sin(c))
        if abs(pre_acos_term) > 1.0:
            #diagnostics for acos domain violations (nearly-degenerate geometry)
            print('angle calc vertex coords (giving acos violation):', [convert_cartesian_array_to_spherical_array(vertex) for vertex in [current_vertex,previous_vertex,next_vertex]])
            print('Vincenty edge lengths (giving acos violation) a,b,c:', a, b, c)
            print('pre_acos_term:', pre_acos_term)
            #theta is forced to 0 below, so there is no point continuing the loop
            degenerate_angle_found = True
            break
        list_Voronoi_poygon_angles_radians.append(math.acos(pre_acos_term))
    if degenerate_angle_found:
        theta = 0
    else:
        theta = numpy.sum(numpy.array(list_Voronoi_poygon_angles_radians))
    return theta
def convert_cartesian_array_to_spherical_array(coord_array,angle_measure='radians'):
    '''Take shape (N,3) cartesian coord_array and return an array of the same shape in spherical polar form (r, theta, phi). Based on StackOverflow response: http://stackoverflow.com/a/4116899
    use radians for the angles by default, degrees if angle_measure == 'degrees' '''
    spherical_coord_array = numpy.zeros(coord_array.shape)
    x, y, z = coord_array[...,0], coord_array[...,1], coord_array[...,2]
    spherical_coord_array[...,0] = numpy.sqrt(x**2 + y**2 + z**2)   # r
    spherical_coord_array[...,1] = numpy.arctan2(y, x)              # theta (azimuth)
    spherical_coord_array[...,2] = numpy.arccos(z / spherical_coord_array[...,0])  # phi (polar)
    if angle_measure == 'degrees':
        spherical_coord_array[...,1] = numpy.degrees(spherical_coord_array[...,1])
        spherical_coord_array[...,2] = numpy.degrees(spherical_coord_array[...,2])
    return spherical_coord_array
def convert_spherical_array_to_cartesian_array(spherical_coord_array,angle_measure='radians'):
    '''Take shape (N,3) spherical_coord_array (r,theta,phi) and return an array of the same shape in cartesian coordinate form (x,y,z). Based on the equations provided at: http://en.wikipedia.org/wiki/List_of_common_coordinate_transformations#From_spherical_coordinates
    use radians for the angles by default, degrees if angle_measure == 'degrees'

    The input array is never modified: the original implementation converted
    degree input to radians in place, silently mutating the caller's array.'''
    #convert to radians if degrees are used in input (prior to Cartesian conversion process)
    if angle_measure == 'degrees':
        # work on a radian copy so the caller's array is left untouched
        spherical_coord_array = numpy.array(spherical_coord_array, dtype=float)
        spherical_coord_array[...,1] = numpy.deg2rad(spherical_coord_array[...,1])
        spherical_coord_array[...,2] = numpy.deg2rad(spherical_coord_array[...,2])
    #now the conversion to Cartesian coords
    cartesian_coord_array = numpy.zeros(spherical_coord_array.shape)
    cartesian_coord_array[...,0] = spherical_coord_array[...,0] * numpy.cos(spherical_coord_array[...,1]) * numpy.sin(spherical_coord_array[...,2])
    cartesian_coord_array[...,1] = spherical_coord_array[...,0] * numpy.sin(spherical_coord_array[...,1]) * numpy.sin(spherical_coord_array[...,2])
    cartesian_coord_array[...,2] = spherical_coord_array[...,0] * numpy.cos(spherical_coord_array[...,2])
    return cartesian_coord_array
def produce_triangle_vertex_coordinate_array_Delaunay_sphere(hull_instance):
    '''Return shape (N,3,3) numpy array of the Delaunay triangle vertex coordinates on the surface of the sphere.'''
    facet_coordinate_arrays = []
    # each simplex (face) of the 3D convex hull is a triangular facet of the
    # Delaunay triangulation on the sphere surface
    for facet_vertex_indices in hull_instance.simplices:
        facet_coordinates = hull_instance.points[facet_vertex_indices]
        assert facet_coordinates.shape == (3,3), "Triangular facet of convex hull should be a triangle in 3D space specified by coordinates in a shape (3,3) numpy array."
        facet_coordinate_arrays.append(facet_coordinates)
    return numpy.array(facet_coordinate_arrays)
def produce_array_Voronoi_vertices_on_sphere_surface(facet_coordinate_array_Delaunay_triangulation,sphere_radius,sphere_centroid):
    '''Return shape (N,3) array of coordinates for the vertices of the Voronoi diagram on the sphere surface given a shape (N,3,3) array of Delaunay triangulation vertices.

    Each Voronoi vertex is obtained as the (outward-oriented) unit normal of a
    Delaunay facet, scaled out to sphere_radius.'''
    assert facet_coordinate_array_Delaunay_triangulation.shape[1:] == (3,3), "facet_coordinate_array_Delaunay_triangulation should have shape (N,3,3)."
    #vectorized workflow to avoid a Python for loop
    #NOTE: the original used arr[...,1,...] style indexing, which modern numpy
    #rejects ("an index can only have a single ellipsis"); explicit slices fix that
    facet_normals_array = numpy.cross(
        facet_coordinate_array_Delaunay_triangulation[:,1,:] - facet_coordinate_array_Delaunay_triangulation[:,0,:],
        facet_coordinate_array_Delaunay_triangulation[:,2,:] - facet_coordinate_array_Delaunay_triangulation[:,0,:])
    facet_normal_magnitudes = numpy.linalg.norm(facet_normals_array,axis=1)
    facet_normal_unit_vector_array = facet_normals_array / facet_normal_magnitudes[:,numpy.newaxis]
    #try to ensure that each facet normal faces the correct direction (i.e., out of sphere)
    triangle_centroid_array = numpy.average(facet_coordinate_array_Delaunay_triangulation,axis=1)
    #normalize the triangle centroids to unit-sphere distance for the purposes of the following directionality check
    array_triangle_centroid_spherical_coords = convert_cartesian_array_to_spherical_array(triangle_centroid_array)
    array_triangle_centroid_spherical_coords[...,0] = 1.0
    triangle_centroid_array = convert_spherical_array_to_cartesian_array(array_triangle_centroid_spherical_coords)
    #the Euclidean distance between the triangle centroid and the facet normal should be smaller than the sphere centroid to facet normal distance, otherwise, need to invert the vector
    triangle_to_normal_distance_array = numpy.linalg.norm(triangle_centroid_array - facet_normal_unit_vector_array,axis=1)
    sphere_centroid_to_normal_distance_array = numpy.linalg.norm(sphere_centroid-facet_normal_unit_vector_array,axis=1)
    delta_value_array = sphere_centroid_to_normal_distance_array - triangle_to_normal_distance_array
    facet_normal_unit_vector_array[delta_value_array < -0.1] *= -1.0 #flip the vector so that it faces out of the sphere
    facet_normal_unit_vector_array *= sphere_radius #adjust for radius of sphere
    array_Voronoi_vertices = facet_normal_unit_vector_array
    assert array_Voronoi_vertices.shape[1] == 3, "The array of Voronoi vertices on the sphere should have shape (N,3)."
    return array_Voronoi_vertices
class Voronoi_Sphere_Surface:
    '''Voronoi diagrams on the surface of a sphere.

    Parameters
    ----------
    points : *array, shape (npoints, 3)*
        Coordinates of points used to construct a Voronoi diagram on the surface of a sphere.
    sphere_radius : *float*
        Radius of the sphere (providing radius is more accurate than forcing an estimate). Default: None (force estimation).
    sphere_center_origin_offset_vector : *array, shape (3,)*
        A 1D numpy array that can be subtracted from the generators (original data points) to translate the center of the sphere back to the origin. Default: None assumes already centered at origin.

    Notes
    -----
    The spherical Voronoi diagram algorithm proceeds as follows. The Convex Hull of the input points (generators) is calculated, and is equivalent to their Delaunay triangulation on the surface of the sphere [Caroli]_. A 3D Delaunay tetrahedralization is obtained by including the origin of the coordinate system as the fourth vertex of each simplex of the Convex Hull. The circumcenters of all tetrahedra in the system are calculated and projected to the surface of the sphere, producing the Voronoi vertices. The Delaunay tetrahedralization neighbour information is then used to order the Voronoi region vertices around each generator. The latter approach is substantially less sensitive to floating point issues than angle-based methods of Voronoi region vertex sorting.

    The surface area of spherical polygons is calculated by decomposing them into triangles and using L'Huilier's Theorem to calculate the spherical excess of each triangle [Weisstein]_. The sum of the spherical excesses is multiplied by the square of the sphere radius to obtain the surface area of the spherical polygon. For nearly-degenerate spherical polygons an area of approximately 0 is returned by default, rather than attempting the unstable calculation.

    Empirical assessment of spherical Voronoi algorithm performance suggests quadratic time complexity (loglinear is optimal, but algorithms are more challenging to implement). The reconstitution of the surface area of the sphere, measured as the sum of the surface areas of all Voronoi regions, is closest to 100 % for larger (>> 10) numbers of generators.

    References
    ----------
    .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of points on or close to a sphere. Research Report RR-7004, 2009.
    .. [Weisstein] "L'Huilier's Theorem." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html

    Examples
    --------
    Produce a Voronoi diagram for a pseudo-random set of points on the unit sphere:

    >>> import matplotlib
    >>> import matplotlib.pyplot as plt
    >>> import matplotlib.colors as colors
    >>> from mpl_toolkits.mplot3d import Axes3D
    >>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    >>> import numpy as np
    >>> import scipy as sp
    >>> import voronoi_utility
    >>> #pin down the pseudo random number generator (prng) object to avoid certain pathological generator sets
    >>> prng = np.random.RandomState(117) #otherwise, would need to filter the random data to ensure Voronoi diagram is possible
    >>> #produce 1000 random points on the unit sphere using the above seed
    >>> random_coordinate_array = voronoi_utility.generate_random_array_spherical_generators(1000,1.0,prng)
    >>> #produce the Voronoi diagram data
    >>> voronoi_instance = voronoi_utility.Voronoi_Sphere_Surface(random_coordinate_array,1.0)
    >>> dictionary_voronoi_polygon_vertices = voronoi_instance.voronoi_region_vertices_spherical_surface()
    >>> #plot the Voronoi diagram
    >>> fig = plt.figure()
    >>> fig.set_size_inches(2,2)
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> for generator_index, voronoi_region in dictionary_voronoi_polygon_vertices.iteritems():
    ...     random_color = colors.rgb2hex(sp.rand(3))
    ...     #fill in the Voronoi region (polygon) that contains the generator:
    ...     polygon = Poly3DCollection([voronoi_region],alpha=1.0)
    ...     polygon.set_color(random_color)
    ...     ax.add_collection3d(polygon)
    >>> ax.set_xlim(-1,1);ax.set_ylim(-1,1);ax.set_zlim(-1,1);
    (-1, 1)
    (-1, 1)
    (-1, 1)
    >>> ax.set_xticks([-1,1]);ax.set_yticks([-1,1]);ax.set_zticks([-1,1]); #doctest: +ELLIPSIS
    [<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
    [<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
    [<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
    >>> plt.tick_params(axis='both', which='major', labelsize=6)

    .. image:: example_random_Voronoi_plot.png

    Now, calculate the surface areas of the Voronoi region polygons and verify that the reconstituted surface area is sensible:

    >>> import math
    >>> dictionary_voronoi_polygon_surface_areas = voronoi_instance.voronoi_region_surface_areas_spherical_surface()
    >>> theoretical_surface_area_unit_sphere = 4 * math.pi
    >>> reconstituted_surface_area_Voronoi_regions = sum(dictionary_voronoi_polygon_surface_areas.itervalues())
    >>> percent_area_recovery = round((reconstituted_surface_area_Voronoi_regions / theoretical_surface_area_unit_sphere) * 100., 5)
    >>> print percent_area_recovery
    99.91979

    For completeness, produce the Delaunay triangulation on the surface of the unit sphere for the same data set:

    >>> Delaunay_triangles = voronoi_instance.delaunay_triangulation_spherical_surface()
    >>> fig2 = plt.figure()
    >>> fig2.set_size_inches(2,2)
    >>> ax = fig2.add_subplot(111, projection='3d')
    >>> for triangle_coordinate_array in Delaunay_triangles:
    ...     m = ax.plot(triangle_coordinate_array[...,0],triangle_coordinate_array[...,1],triangle_coordinate_array[...,2],c='r',alpha=0.1)
    ...     connecting_array = np.delete(triangle_coordinate_array,1,0)
    ...     n = ax.plot(connecting_array[...,0],connecting_array[...,1],connecting_array[...,2],c='r',alpha=0.1)
    >>> o = ax.scatter(random_coordinate_array[...,0],random_coordinate_array[...,1],random_coordinate_array[...,2],c='k',lw=0,s=0.9)
    >>> ax.set_xlim(-1,1);ax.set_ylim(-1,1);ax.set_zlim(-1,1);
    (-1, 1)
    (-1, 1)
    (-1, 1)
    >>> ax.set_xticks([-1,1]);ax.set_yticks([-1,1]);ax.set_zticks([-1,1]); #doctest: +ELLIPSIS
    [<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
    [<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
    [<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
    >>> plt.tick_params(axis='both', which='major', labelsize=6)

    .. image:: example_Delaunay.png
    '''
    def __init__(self, points, sphere_radius=None, sphere_center_origin_offset_vector=None):
        # BUG FIX: the offset used to be tested with numpy.all(vector), which is
        # False whenever ANY component of a genuine offset is zero (e.g.
        # [0.5, 0, 0]), silently skipping the translation. Test against None,
        # which is what the documented default actually is.
        if sphere_center_origin_offset_vector is not None:
            # translate generator data such that sphere center is at origin
            self.original_point_array = points - sphere_center_origin_offset_vector
        else:
            self.original_point_array = points
        self.sphere_centroid = numpy.zeros((3,)) #already at origin, or has been moved to origin
        if not sphere_radius:
            # estimate the radius as the mean generator-to-centroid distance
            self.estimated_sphere_radius = numpy.average(scipy.spatial.distance.cdist(self.original_point_array,self.sphere_centroid[numpy.newaxis,:]))
        else:
            self.estimated_sphere_radius = sphere_radius #if the radius of the sphere is known, it is probably best to specify it to avoid centroid bias in radius estimation, etc.

    def delaunay_triangulation_spherical_surface(self):
        '''Delaunay tessellation of the points on the surface of the sphere. This is simply the 3D convex hull of the points. Returns a shape (N,3,3) array of points representing the vertices of the Delaunay triangulation on the sphere (i.e., N three-dimensional triangle vertex arrays).'''
        hull = scipy.spatial.ConvexHull(self.original_point_array)
        array_points_vertices_Delaunay_triangulation = produce_triangle_vertex_coordinate_array_Delaunay_sphere(hull)
        return array_points_vertices_Delaunay_triangulation

    def voronoi_region_vertices_spherical_surface(self):
        '''Returns a dictionary with the sorted (non-intersecting) polygon vertices for the Voronoi regions associated with each generator (original data point) index. A dictionary entry would be structured as follows: `{generator_index : array_polygon_vertices, ...}`.'''
        #use strategy for Voronoi region generation discussed at PyData London 2015 with Ross Hemsley and Nikolai Nowaczyk
        #step 2: perform 3D Delaunay triangulation on data set that includes the extra generator
        tri = scipy.spatial.ConvexHull(self.original_point_array) #using ConvexHull is much faster in scipy (vs. Delaunay), but here we only get the triangles on the sphere surface in the simplices object (no longer adding an extra point at the origin at this stage)
        #add the origin to each of the simplices to get the same tetrahedra we'd have gotten from Delaunay tetrahedralization
        simplex_coords = tri.points[tri.simplices] #triangles on surface surface
        simplex_coords = numpy.insert(simplex_coords, 3, numpy.zeros((1,3)), axis = 1)
        #step 3: produce circumspheres / circumcenters of tetrahedra from 3D Delaunay
        array_circumcenter_coords = circumcircle.calc_circumcenter_circumsphere_tetrahedron_vectorized(simplex_coords)
        #step 4: project tetrahedron circumcenters up to the surface of the sphere, to produce the Voronoi vertices
        array_vector_lengths = scipy.spatial.distance.cdist(array_circumcenter_coords, numpy.zeros((1,3)))
        array_Voronoi_vertices = (self.estimated_sphere_radius / numpy.abs(array_vector_lengths)) * array_circumcenter_coords
        #step 5: use the Delaunay tetrahedralization neighbour information to connect the Voronoi vertices around the generators, to produce the Voronoi regions
        dictionary_sorted_Voronoi_point_coordinates_for_each_generator = {}
        array_tetrahedra = simplex_coords
        generator_index = 0
        generator_index_array = numpy.arange(self.original_point_array.shape[0])
        # map each generator index to the indices of the simplices that contain it
        filter_tuple = numpy.where((numpy.expand_dims(tri.simplices, -1) == generator_index_array).any(axis=1))
        df = pandas.DataFrame({'generator_indices' : filter_tuple[1]}, index = filter_tuple[0])
        gb = df.groupby('generator_indices')
        dictionary_generators_and_triangle_indices_containing_those_generators = gb.groups
        for generator in tri.points[:-1]:
            indices_of_triangles_surrounding_generator = dictionary_generators_and_triangle_indices_containing_those_generators[generator_index]
            #pick any one of the triangles surrounding the generator and pick a non-generator vertex
            first_tetrahedron_index = indices_of_triangles_surrounding_generator[0]
            first_tetrahedron = array_tetrahedra[first_tetrahedron_index]
            first_triangle = first_tetrahedron[:-1,...]
            #pick one of the two non-generator vertices in the first triangle
            indices_non_generator_vertices_first_triangle = numpy.unique(numpy.where(first_triangle != generator)[0])
            ordered_list_tetrahedron_indices_surrounding_current_generator = [first_tetrahedron_index]
            #determine the appropriate ordering of Voronoi vertices to close the Voronoi region (polygon) by traversing the Delaunay neighbour data structure from scipy
            vertices_remaining = len(indices_of_triangles_surrounding_generator) - 1
            #choose the neighbour opposite the first non-generator vertex of the first triangle
            neighbour_tetrahedral_index = tri.neighbors[first_tetrahedron_index][indices_non_generator_vertices_first_triangle[0]]
            ordered_list_tetrahedron_indices_surrounding_current_generator.append(neighbour_tetrahedral_index)
            vertices_remaining -= 1
            #for all subsequent triangles it is the common non-generator vertex with the previous neighbour that should be used to propagate the connection chain to the following neighbour
            #the common vertex with the previous neighbour is the the vertex of the previous neighbour that was NOT used to locate the current neighbour
            #since there are only two candidate vertices on the previous neighbour and I've chosen to use the vertex with index 0, the remaining vertex on the previous neighbour is the non-generator vertex with index 1
            common_vertex_coordinate = first_triangle[indices_non_generator_vertices_first_triangle[1]]
            while vertices_remaining > 0:
                current_tetrahedron_index = ordered_list_tetrahedron_indices_surrounding_current_generator[-1]
                current_tetrahedron_coord_array = array_tetrahedra[current_tetrahedron_index]
                current_triangle_coord_array = current_tetrahedron_coord_array[:-1,...]
                indices_candidate_vertices_current_triangle_excluding_generator = numpy.unique(numpy.where(current_triangle_coord_array != generator)[0])
                array_candidate_vertices = current_triangle_coord_array[indices_candidate_vertices_current_triangle_excluding_generator]
                current_tetrahedron_index_for_neighbour_propagation = numpy.unique(numpy.where(current_tetrahedron_coord_array == common_vertex_coordinate)[0])
                next_tetrahedron_index_surrounding_generator = tri.neighbors[current_tetrahedron_index][current_tetrahedron_index_for_neighbour_propagation][0]
                common_vertex_coordinate = array_candidate_vertices[array_candidate_vertices != common_vertex_coordinate] #for the next iteration
                ordered_list_tetrahedron_indices_surrounding_current_generator.append(next_tetrahedron_index_surrounding_generator)
                vertices_remaining -= 1
            dictionary_sorted_Voronoi_point_coordinates_for_each_generator[generator_index] = array_Voronoi_vertices[ordered_list_tetrahedron_indices_surrounding_current_generator]
            generator_index += 1
        return dictionary_sorted_Voronoi_point_coordinates_for_each_generator

    def voronoi_region_surface_areas_spherical_surface(self):
        '''Returns a dictionary with the estimated surface areas of the Voronoi region polygons corresponding to each generator (original data point) index. An example dictionary entry: `{generator_index : surface_area, ...}`.'''
        dictionary_sorted_Voronoi_point_coordinates_for_each_generator = self.voronoi_region_vertices_spherical_surface()
        dictionary_Voronoi_region_surface_areas_for_each_generator = {}
        # NOTE: Python 2 dict API (iteritems), consistent with the module's doctests.
        for generator_index, Voronoi_polygon_sorted_vertex_array in dictionary_sorted_Voronoi_point_coordinates_for_each_generator.iteritems():
            current_Voronoi_polygon_surface_area_on_sphere = calculate_surface_area_of_a_spherical_Voronoi_polygon(Voronoi_polygon_sorted_vertex_array,self.estimated_sphere_radius)
            assert current_Voronoi_polygon_surface_area_on_sphere > 0, "Obtained a surface area of zero for a Voronoi region."
            dictionary_Voronoi_region_surface_areas_for_each_generator[generator_index] = current_Voronoi_polygon_surface_area_on_sphere
        return dictionary_Voronoi_region_surface_areas_for_each_generator
if __name__ == "__main__":
    # Run the embedded docstring examples when the module is executed directly.
    import doctest
    doctest.testmod()
|
|
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from networkx import DiGraph
import os
from xml.sax.saxutils import escape
from androguard.core.analysis import analysis
try:
    # Prefer the fast native implementation when androguard's libsign is built.
    from androguard.core.analysis.libsign.libsign import entropy
except ImportError:
    import math

    def entropy(data):
        """Pure-Python fallback: Shannon entropy (bits/symbol) of a byte string."""
        total = len(data)
        if total == 0:
            return 0
        result = 0
        for value in range(256):
            frequency = float(data.count(chr(value))) / total
            if frequency > 0:
                result -= frequency * math.log(frequency, 2)
        return result
DEFAULT_SIGNATURE = analysis.SIGNATURE_L0_4

def create_entropies(vmx, m):
    """Return [default signature string, 4 entropy floats] for method *m*.

    The entropies are taken over the Android-API, Java-API, hex and
    exception ("L2") signature variants.  Falls back to blank/zero values
    when the analysis raises KeyError for the method.
    """
    try:
        predefined = vmx.get_method_signature(m, predef_sign = DEFAULT_SIGNATURE).get_string()
        return [
            predefined,
            entropy(vmx.get_method_signature(m, "L4", { "L4" : { "arguments" : ["Landroid"] } }).get_string()),
            entropy(vmx.get_method_signature(m, "L4", { "L4" : { "arguments" : ["Ljava"] } }).get_string()),
            entropy(vmx.get_method_signature(m, "hex").get_string()),
            entropy(vmx.get_method_signature(m, "L2").get_string()),
        ]
    except KeyError:
        return [ "", 0.0, 0.0, 0.0, 0.0 ]
def create_info(vmx, m):
    """Bundle the signature string and entropy measurements of *m* into a dict."""
    signature, android_e, java_e, hex_e, exc_e = create_entropies(vmx, m)
    return {
        "signature": signature,
        "signature_entropy": entropy(signature),
        "android_api_entropy": android_e,
        "java_api_entropy": java_e,
        "hex_entropy": hex_e,
        "exceptions_entropy": exc_e,
    }
class Data :
    """Bundles the APK, DEX and method-call-graph exporters for one analysis session."""
    def __init__(self, vm, vmx, gvmx, a=None) :
        # vm / vmx: the DEX object and its analysis; gvmx: call graph; a: optional APK.
        self.vm = vm
        self.vmx = vmx
        self.gvmx = gvmx
        self.a = a
        self.apk_data = None
        self.dex_data = None
        # The APK viewer is only built when an APK object was supplied.
        if self.a != None :
            self.apk_data = ApkViewer( self.a )
        self.dex_data = DexViewer( vm, vmx, gvmx )
        # Attach per-method info (signature + entropies) to the call-graph nodes.
        self.gvmx.set_new_attributes( create_info )
        self.export_methods_to_gml()
    def export_methodcalls_to_gml(self) :
        """Export the method-call graph to GML markup."""
        return self.gvmx.export_to_gml()
    def export_methods_to_gml(self) :
        # NOTE(review): despite the name, this only prints debug output and
        # returns nothing -- no GML is produced here.
        print self.gvmx.G
        for node in self.gvmx.G.nodes() :
            print self.gvmx.nodes_id[ node ].method_name, self.gvmx.nodes_id[ node ].get_attributes()
    def export_apk_to_gml(self) :
        """Export the APK resource tree to GraphML, if an APK was supplied."""
        if self.apk_data != None :
            return self.apk_data.export_to_gml()
    def export_dex_to_gml(self) :
        """Export the DEX control-flow graphs to GraphML, if available."""
        if self.dex_data != None :
            return self.dex_data.export_to_gml()
class DexViewer :
    """Renders DEX basic blocks, exceptions and edges as yEd-flavoured GraphML."""
    def __init__(self, vm, vmx, gvmx) :
        # vm: the DEX file object; vmx: its analysis; gvmx: call graph (kept for parity).
        self.vm = vm
        self.vmx = vmx
        self.gvmx = gvmx
    def _create_node(self, id, height, width, color, label) :
        """Return a yEd <node> element; geometry is scaled from text rows/columns."""
        buff = "<node id=\"%d\">\n" % id
        buff += "<data key=\"d6\">\n"
        buff += "<y:ShapeNode>\n"
        # 16 px per text line, 7.5 px per character column.
        buff += "<y:Geometry height=\"%f\" width=\"%f\"/>\n" % (16 * height, 7.5 * width)
        buff += "<y:Fill color=\"#%s\" transparent=\"false\"/>\n" % color
        buff += "<y:NodeLabel alignment=\"left\" autoSizePolicy=\"content\" fontFamily=\"Dialog\" fontSize=\"13\" fontStyle=\"plain\" hasBackgroundColor=\"false\" hasLineColor=\"false\" modelName=\"internal\" modelPosition=\"c\" textColor=\"#000000\" visible=\"true\">\n"
        # Label text is XML-escaped so instruction mnemonics cannot break the markup.
        buff += escape(label)
        buff += "</y:NodeLabel>\n"
        buff += "</y:ShapeNode>\n"
        buff += "</data>\n"
        buff += "</node>\n"
        return buff
    def add_exception_node(self, exception, id_i) :
        """Node describing an exception handler range and its targets."""
        buff = ""
        # 9933FF
        # NOTE(review): the comment above says 9933FF but the code passes
        # "9333FF" below -- possible color typo kept as-is.
        height = 2
        width = 0
        label = ""
        label += "%x:%x\n" % (exception.start, exception.end)
        for i in exception.exceptions :
            c_label = "\t(%s -> %x %s)\n" % (i[0], i[1], i[2].get_name())
            label += c_label
            width = max(len(c_label), width)
            height += 1
        return self._create_node( id_i, height, width, "9333FF", label )
    def add_method_node(self, i, id_i) :
        """Red header node showing the method name and descriptor."""
        height = 0
        width = 0
        label = ""
        label += i.get_name() + "\n"
        label += i.get_descriptor()
        height = 3
        width = len(label)
        return self._create_node( id_i, height, width, "FF0000", label )
    def add_node(self, i, id_i) :
        """Yellow node listing the instructions of one basic block."""
        height = 0
        width = 0
        idx = i.start
        label = ""
        for ins in i.get_instructions() :
            c_label = "%x %s\n" % (idx, self.vm.dotbuff(ins, idx))
            idx += ins.get_length()
            label += c_label
            # Track the widest instruction line and the number of lines.
            width = max(width, len(c_label))
            height += 1
        # Give very short blocks a little extra vertical padding.
        if height < 10 :
            height += 3
        return self._create_node( id_i, height, width, "FFCC00", label )
    def add_edge(self, i, id_i, j, id_j, l_eid, val) :
        """Append an edge element; val selects the color (0 green, 1 red, else blue)."""
        buff = "<edge id=\"%d\" source=\"%d\" target=\"%d\">\n" % (len(l_eid), id_i, id_j)
        buff += "<data key=\"d9\">\n"
        buff += "<y:PolyLineEdge>\n"
        buff += "<y:Arrows source=\"none\" target=\"standard\"/>\n"
        if val == 0 :
            buff += "<y:LineStyle color=\"#00FF00\" type=\"line\" width=\"1.0\"/>\n"
        elif val == 1 :
            buff += "<y:LineStyle color=\"#FF0000\" type=\"line\" width=\"1.0\"/>\n"
        else :
            buff += "<y:LineStyle color=\"#0000FF\" type=\"line\" width=\"1.0\"/>\n"
        buff += "</y:PolyLineEdge>\n"
        buff += "</data>\n"
        buff += "</edge>\n"
        # Remember the edge id under the "source+target" key.
        l_eid[ "%d+%d" % (id_i, id_j) ] = len(l_eid)
        return buff
    def new_id(self, i, l) :
        """Return the id already assigned to *i* in *l*, allocating the next one if absent."""
        try :
            return l[i]
        except KeyError :
            l[i] = len(l)
            return l[i]
    def export_to_gml(self) :
        """Return {class_name: GraphML string}, one CFG document per class."""
        H = {}
        for _class in self.vm.get_classes() :
            name = _class.get_name()
            # Strip the leading 'L' and trailing ';' of the type descriptor.
            name = name[1:-1]
            buff = ""
            buff += "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
            buff += "<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xmlns:yed=\"http://www.yworks.com/xml/yed/3\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd\">\n"
            buff += "<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"d5\"/>\n"
            buff += "<key for=\"node\" id=\"d6\" yfiles.type=\"nodegraphics\"/>\n"
            buff += "<key for=\"edge\" id=\"d9\" yfiles.type=\"edgegraphics\"/>\n"
            buff += "<graph edgedefault=\"directed\" id=\"G\">\n"
            print name
            buff_nodes = ""
            buff_edges = ""
            # Fresh id tables per class: node ids and edge ids are class-local.
            l_id = {}
            l_eid = {}
            for method in _class.get_methods() :
                mx = self.vmx.get_method( method )
                # NOTE(review): 'exceptions' appears unused below.
                exceptions = mx.exceptions
                id_method = self.new_id(method, l_id)
                buff_nodes += self.add_method_node(method, id_method)
                for i in mx.basic_blocks.get() :
                    id_i = self.new_id(i, l_id)
                    print i, id_i, i.exception_analysis
                    buff_nodes += self.add_node( i, id_i )
                    # add childs nodes
                    val = 0
                    if len(i.childs) > 1 :
                        val = 1
                    elif len(i.childs) == 1 :
                        val = 2
                    for j in i.childs :
                        print "\t", j
                        id_j = self.new_id(j[-1], l_id)
                        buff_edges += self.add_edge(i, id_i, j[-1], id_j, l_eid, val)
                        # after the first edge of a multi-way branch, switch
                        # from red (val 1) to green (val 0)
                        if val == 1 :
                            val = 0
                    # add exceptions node
                    if i.exception_analysis != None :
                        id_exceptions = self.new_id(i.exception_analysis, l_id)
                        buff_nodes += self.add_exception_node(i.exception_analysis, id_exceptions)
                        buff_edges += self.add_edge(None, id_exceptions, None, id_i, l_eid, 2)
                # Edge from the method header node to id_method+1 -- presumably
                # the first basic block's id; TODO confirm.
                buff_edges += self.add_edge(None, id_method, None, id_method+1, l_eid, 2)
            buff += buff_nodes
            buff += buff_edges
            buff += "</graph>\n"
            buff += "</graphml>\n"
            H[ name ] = buff
        return H
class Directory:
    """Graph node representing one directory level inside the APK archive."""

    def __init__(self, name):
        self.name = name
        self.basename = os.path.basename(name)
        self.color = "FF0000"
        # Width hint for the GraphML renderer: one unit per character.
        self.width = len(name)

    def set_color(self, color):
        self.color = color
class File:
    """Graph node representing one file entry (name, type, CRC) of the APK."""

    def __init__(self, name, file_type, file_crc):
        self.name = name
        self.basename = os.path.basename(name)
        self.file_type = file_type
        self.file_crc = file_crc
        self.color = "FFCC00"
        # Width hint: wide enough for either the path or the type string.
        self.width = max(len(self.name), len(self.file_type))
def splitall(path, z):
    """Append to *z* every successive os.path.split() head of *path*.

    Mirrors the original recursion exactly: only the head of each split is
    descended into, so for "a/b/c" *z* receives ["a/b", "a", ""].
    """
    if len(path) == 0:
        return
    head, _tail = os.path.split(path)
    z.append(head)
    return splitall(head, z)
class ApkViewer :
    """Builds a networkx DiGraph of the APK's directory tree and file entries."""
    def __init__(self, a) :
        self.a = a
        # Directed graph: edges point from a directory towards its children.
        self.G = DiGraph()
        self.all_files = {}
        self.ids = {}
        # Synthetic green root node representing the archive itself.
        root = Directory( "APK" )
        root.set_color( "00FF00" )
        self.ids[ root ] = len(self.ids)
        self.G.add_node( root )
        for x, y, z in self.a.get_files_information() :
            print x, y, z, os.path.basename(x)
            # Decompose the path into its chain of parent directories.
            l = []
            splitall( x, l )
            l.reverse()
            l.pop(0)
            # Walk (and create, on first sight) each directory node in order.
            last = root
            for i in l :
                if i not in self.all_files :
                    tmp = Directory( i )
                    self.ids[ tmp ] = len(self.ids)
                    self.all_files[ i ] = tmp
                else :
                    tmp = self.all_files[ i ]
                self.G.add_edge(last, tmp)
                last = tmp
            # Leaf node for the file itself, linked to its innermost directory.
            n1 = last
            n2 = File( x, y, z )
            self.G.add_edge(n1, n2)
            self.ids[ n2 ] = len(self.ids)
    def export_to_gml(self) :
        """Serialize the graph to a yEd-flavoured GraphML string."""
        buff = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
        buff += "<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xmlns:yed=\"http://www.yworks.com/xml/yed/3\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd\">\n"
        buff += "<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"d5\"/>\n"
        buff += "<key for=\"node\" id=\"d6\" yfiles.type=\"nodegraphics\"/>\n"
        buff += "<graph edgedefault=\"directed\" id=\"G\">\n"
        for node in self.G.nodes() :
            print node
            buff += "<node id=\"%d\">\n" % self.ids[node]
            buff += "<data key=\"d6\">\n"
            buff += "<y:ShapeNode>\n"
            buff += "<y:Geometry height=\"%f\" width=\"%f\"/>\n" % (60.0, 7 * node.width)
            buff += "<y:Fill color=\"#%s\" transparent=\"false\"/>\n" % node.color
            buff += "<y:NodeLabel>\n"
            buff += "%s\n" % node.basename
            # File nodes also show their type and CRC below the name.
            if isinstance(node, File) :
                buff += "%s\n" % node.file_type
                buff += "%s\n" % hex(node.file_crc)
            buff += "</y:NodeLabel>\n"
            buff += "</y:ShapeNode>\n"
            buff += "</data>\n"
            buff += "</node>\n"
        # Edges get simple sequential ids; they carry no styling data.
        nb = 0
        for edge in self.G.edges() :
            buff += "<edge id=\"%d\" source=\"%d\" target=\"%d\">\n" % (nb, self.ids[edge[0]], self.ids[edge[1]])
            buff += "</edge>\n"
            nb += 1
        buff += "</graph>\n"
        buff += "</graphml>\n"
        return buff
|
|
#!/usr/bin/env python
#pylint: disable=W0312,C0111,C0330,R0913,C0326
import argparse
import json
import os
import sys
import random
MIN_CRIMES = 4  # a game always lays out at least this many crime rows
SPECIAL_CONTEXT = 'notes'  # catch-all bucket for rules with an unknown context
RULES_CONTEXTS = ['setup', 'ghost', 'psychics', SPECIAL_CONTEXT]  # report order
FMT_KEY_SETUP = '%s_%d'  # setup lookup key: "<mode>_<num_players>"
# Required fields for each of the JSON file kinds parsed by parse_json().
REQUIRED_FIELDS_SET = ['id', 'setup', 'rules', 'cards']
REQUIRED_FIELDS_SETUP = ['players', 'mode', 'kinds', 'num_cards']
REQUIRED_FIELDS_RULES = ['phase', 'context', 'mode', 'players', 'difficulties', 'rule']
REQUIRED_FIELDS_CARDS = ['id', 'kind', 'set']
#------------------------------------------------------------------------------#
def report(fmt, *arg):
    """printf-style helper: write the %-formatted message to stdout (no newline added)."""
    text = fmt % arg
    sys.stdout.write(text)
def parse_json(path_json, required_fields):
    """Load a JSON array from *path_json*.

    Raises ValueError naming the first required field missing from any element.
    """
    with open(path_json) as file_json:
        blob = json.load(file_json)
    #TODO:: not much validation happening
    for elem in blob:
        for field in required_fields:
            if field in elem:
                continue
            raise ValueError(
                "'%s' missing required field '%s'" % (path_json, field))
    return blob
#------------------------------------------------------------------------------#
def report_info_cards(kind, list_cards):
    """Print the total card count for *kind* plus a per-set breakdown."""
    report("\n%s cards:\n", kind)
    report(" total: %d\n", len(list_cards))
    # group the cards by their 'set' field
    sets = {}
    for card in list_cards:
        sets.setdefault(card['set'], []).append(card)
    for key in sorted(sets.keys()):
        report(" %s: %d\n", key, len(sets[key]))
def report_psychic_cards(kinds, dealt_by_kind):
    """Print, per kind, the sorted ids of the cards dealt to the psychics."""
    report("\npsychic cards:\n")
    for kind in kinds:
        ids = ["%4s" % (card['id']) for card in dealt_by_kind[kind]]
        report(" %-9s: %s\n", kind, " ".join(sorted(ids)))
def report_mode_rules(mode_rules, note):
    """Print *note* then every rule, grouped by phase and, within a phase, by context."""
    report(note)
    # bucket rules by phase
    rules_by_phase = {}
    for rule in mode_rules:
        rules_by_phase.setdefault(rule['phase'], []).append(rule)
    #TODO:: special casing context, seems bad
    set_context = set(RULES_CONTEXTS)
    for phase in sorted(rules_by_phase.keys()):
        # bucket this phase's rules by context, folding unknown contexts into notes
        rules_by_context = {}
        for rule in rules_by_phase[phase]:
            key = rule['context']
            if key not in set_context:
                key = SPECIAL_CONTEXT
            rules_by_context.setdefault(key, []).append(rule['rule'])
        # emit the contexts in their canonical report order
        report("\nrules phase %s:\n", phase)
        for key in RULES_CONTEXTS:
            for text in rules_by_context.get(key) or []:
                report(" %s\n", text)
    report("\n")
def report_crimes(crimes, num_players, murderer):
    """Print one line per crime row; '*' flags the murderer, digits map rows to players."""
    report("\ncrimes:\n")
    for counter, crime in enumerate(crimes, start=1):
        # rows beyond the player count belong to no player
        pid = str(counter) if counter <= num_players else '-'
        guilty = '*' if counter == murderer else ' '
        report(" %s%2s : %s\n", guilty, pid, " ".join(crime))
#------------------------------------------------------------------------------#
def parse_sets(path_sets, list_sets):
    """Parse the sets index at *path_sets* and load data for the sets in *list_sets*.

    Returns (available_sets, setups_by_mode_player, rules_by_mode, cards_by_kind).
    """
    data_sets = parse_json(path_sets, REQUIRED_FIELDS_SET)
    path_data = os.path.dirname(path_sets)
    setups = {}
    rules = {}
    cards = {}
    available_sets = []
    #TODO:: Naively parsing files by set.
    #TODO:: We will potentially parse the same file more than once.
    #TODO:: This should not be an issue given the small number of cards.
    for data in data_sets:
        set_id = data['id']
        available_sets.append(set_id)
        if set_id not in list_sets:
            continue
        # setups: keyed by "<mode>_<players>"
        path_setup = data['setup']
        if path_setup:
            blob = parse_json(
                os.path.join(path_data, path_setup),
                REQUIRED_FIELDS_SETUP)
            for setup in blob:
                setups[FMT_KEY_SETUP % (setup['mode'], setup['players'])] = setup
        # rules: grouped by game mode
        path_rules = data['rules']
        if path_rules:
            blob = parse_json(
                os.path.join(path_data, path_rules),
                REQUIRED_FIELDS_RULES)
            for rule in blob:
                rules.setdefault(rule['mode'], []).append(rule)
        # cards: grouped by kind, restricted to the requested sets
        list_path_cards = data['cards']
        if list_path_cards:
            for path_cards in list_path_cards:
                blob = parse_json(
                    os.path.join(path_data, path_cards),
                    REQUIRED_FIELDS_CARDS)
                for card in blob:
                    if not card['set'] in list_sets:
                        continue
                    cards.setdefault(card['kind'], []).append(card)
    available_sets = sorted(list(set(available_sets)))
    return available_sets, setups, rules, cards
def report_info(
        available_sets, setups_by_mode_player, cards_by_kind, mode, num_players, difficulty):
    """Print the available sets, the matching setup (if any) and per-kind card stats."""
    report("available sets: %s\n", " ".join(available_sets))
    # setup for this mode / player count
    setup = setups_by_mode_player.get(FMT_KEY_SETUP % (mode, num_players))
    report("\n%s settings for '%d' players on '%s':\n", mode, num_players, difficulty)
    if setup:
        report(" cards: %d\n", setup['num_cards'][difficulty])
        report(" kinds: %s\n", ' '.join(setup['kinds']))
    else:
        report("missing\n")
    # card inventory by kind
    for kind in sorted(cards_by_kind.keys()):
        report_info_cards(kind, cards_by_kind[kind])
def gen_setup(setups, cards_by_kind, mode, num_players, difficulty):
    """Deal the psychic cards for one game.

    Returns (kinds, dealt_by_kind) where dealt_by_kind maps each kind listed
    in the matching setup to `num_cards` randomly drawn cards of that kind.

    Raises ValueError when a kind has no cards at all or too few of them.
    """
    key_setup = FMT_KEY_SETUP % (mode, num_players)
    setup = setups[key_setup]
    num_cards = setup['num_cards'][difficulty]
    kinds = setup['kinds']
    dealt_by_kind = {}
    for kind in kinds:
        if kind not in cards_by_kind:
            raise ValueError("missing card kind '%s'" % (kind))
        list_cards = cards_by_kind[kind]
        len_cards = len(list_cards)
        if len_cards < num_cards:
            raise ValueError("not enough cards '%d/%d'" % (num_cards, len_cards))
        # BUG FIX: random.shuffle() reordered the caller's card lists in place;
        # random.sample() draws without replacement and leaves the input intact
        # (as the original's own trailing comment already suggested).
        dealt_by_kind[kind] = random.sample(list_cards, num_cards)
    return kinds, dealt_by_kind
def gen_rules(rules_by_mode, mode, num_players, difficulty):
    """Select the rules of *mode* that apply to this player count and difficulty."""
    return [
        rule
        for rule in rules_by_mode[mode]
        if num_players in rule['players'] and difficulty in rule['difficulties']
    ]
def gen_crimes(kinds, dealt_by_kind, num_players):
    """Build the crime rows: one randomly chosen card of each kind per row."""
    max_crimes = max(num_players, MIN_CRIMES)
    results = [[] for _unused in range(max_crimes)]
    for kind in kinds:
        # shuffle a private copy of the dealt ids and take one per crime row
        ids = [card['id'] for card in dealt_by_kind[kind]]
        random.shuffle(ids)
        for slot, uid in enumerate(ids[:max_crimes]):
            results[slot].append("%s %-5s" % (kind, uid))
    return results
def gen_murderer(num_players):
    """Pick the guilty crime row uniformly at random (1-based index)."""
    return random.randint(1, max(num_players, MIN_CRIMES))
def main(args):
    """Load the requested card sets, then either report info or deal a game."""
    random.seed()
    note = \
        "\n This setup is for %d players on %s.\n" % (args.num_players, args.difficulty)
    available_sets, setups_by_mode_player, rules_by_mode, cards_by_kind = \
        parse_sets(args.json_sets, args.list_sets)
    if args.action_list:
        # --list mode: describe the available data and stop
        report_info(
            available_sets,
            setups_by_mode_player,
            cards_by_kind,
            args.mode,
            args.num_players,
            args.difficulty)
        return
    # deal the game, then print every section of the setup sheet
    kinds, dealt_by_kind = gen_setup(
        setups_by_mode_player,
        cards_by_kind,
        args.mode,
        args.num_players,
        args.difficulty)
    mode_rules = gen_rules(rules_by_mode, args.mode, args.num_players, args.difficulty)
    crimes = gen_crimes(kinds, dealt_by_kind, args.num_players)
    murderer = gen_murderer(args.num_players)
    report_psychic_cards(kinds, dealt_by_kind)
    report_crimes(crimes, args.num_players, murderer)
    report_mode_rules(mode_rules, note)
    report("\n")
#------------------------------------------------------------------------------#
if __name__ == "__main__":
    # Command-line front end; parsed options land in ARGS and drive main().
    PARSER = argparse.ArgumentParser(
        description='Generates the setup for a game of Mysterium.'
    )
    # Positional: index file describing where each set's data lives.
    PARSER.add_argument(
        'json_sets',
        help='Path to json file containing Mysterium card sets.',
    )
    # -l only lists the available sets/modes instead of dealing a game.
    PARSER.add_argument(
        '-l', '--list',
        action='store_true',
        dest='action_list',
        default=False,
        help='List information about sets and game modes.'
    )
    PARSER.add_argument(
        '-p', '--players',
        action='store',
        dest='num_players',
        type=int,
        choices=[2, 3, 4, 5, 6, 7],
        required=True,
        help='Number of players in the game.'
    )
    PARSER.add_argument(
        '-d', '--difficulty',
        action='store',
        dest='difficulty',
        type=str,
        choices=['easy', 'normal', 'hard', 'insane', 'nightmare'],
        required=True,
        help='Difficulty of the game.'
    )
    PARSER.add_argument(
        '-m', '--mode',
        action='store',
        dest='mode',
        type=str,
        default='original',
        help='Game mode to generate.'
    )
    # One or more set ids; defaults to just the base game.
    PARSER.add_argument(
        '-s', '--sets',
        action='store',
        dest='list_sets',
        type=str,
        default=['base'],
        nargs='+',
        help='List of sets to include.'
    )
    ARGS = PARSER.parse_args()
    main(ARGS)
|
|
from Tkinter import *
import cv2
from PIL import Image, ImageTk
import time
import facial
import os
import threading
import tkSimpleDialog
import database as db
global canvas  # module-level no-op; `canvas` is created in run() and shared through globals
#######################################
# Button Class
#######################################
class Data(object): pass  # bare attribute container for app state and button groups
class Button(object):
    """Clickable region on the Tk canvas.

    `f` is the callback invoked as f(root, data) when the button is hit.
    Subclasses provide inBounds() hit-testing and draw().
    """
    def __init__(self, x, y, f):
        self.x = x
        self.y = y
        self.function = f
        self.fill = None
class RoundButton(Button):
    """Circular button centred at (x, y) with radius r."""
    def __init__(self, x, y, r, f):
        super(RoundButton, self).__init__(x, y, f)
        self.r = r
    def inBounds(self, x, y):
        # Inside iff the point lies within the circle.
        return ((self.x - x)**2 + (self.y - y)**2)**0.5 <= self.r
    def draw(self, canvas, data):
        canvas.create_oval(self.x-self.r, self.y-self.r,
                           self.x+self.r, self.y+self.r,
                           fill=self.fill, width=5)
class RectButton(Button):
    """Axis-aligned rectangular button centred at (x, y)."""
    def __init__(self, x, y, width, height, f):
        super(RectButton, self).__init__(x, y, f)
        self.width = width
        self.height = height
    def inBounds(self, x, y):
        # BUG FIX: the x upper bound previously used `self.x + self.width`
        # (the full width) instead of half of it, so clicks far to the
        # right of the drawn rectangle still registered. Use half-extents
        # on both axes, matching draw().
        return ((self.x - self.width / 2 < x < self.x + self.width / 2) and
                (self.y - self.height / 2 < y < self.y + self.height / 2))
    def draw(self, canvas, data):
        canvas.create_rectangle(self.x-self.width//2, self.y-self.height//2,
                                self.x+self.width//2, self.y+self.height//2,
                                fill=self.fill, width=5)
def createButton(data):
    """Instantiate every on-screen button and group them into hit-test lists."""
    btn = Data()
    btn.BACK = RoundButton(912, 44, 25, BACK)
    btn.EXIT = RoundButton(1033, 44, 25, EXIT)
    btn.SETTINGS = RoundButton(791, 44, 25, SETTINGS)
    btn.SIDEBAR1 = RectButton(903, 160, 300, 68, SIDEBAR1)
    btn.SIDEBAR2 = RectButton(903, 293, 300, 68, SIDEBAR2)
    btn.SIDEBAR3 = RectButton(903, 428, 300, 68, SIDEBAR3)
    btn.NEXTSONG = RoundButton(786, 658, 25, NEXTSONG)
    data.button = btn
    # Buttons active on the main screen vs. on the utility overlay screens.
    data.mainButtonList = [btn.BACK, btn.EXIT, btn.SETTINGS, btn.SIDEBAR1,
                           btn.SIDEBAR2, btn.SIDEBAR3, btn.NEXTSONG]
    data.utilButtonList = [btn.BACK, btn.EXIT, btn.SETTINGS]
#######################################
# Button Functions
#######################################
def BACK(root,data):
    # Return to the previous mode; leaving TRAIN first retrains the
    # recognizer while showing a loading screen.
    print ":::Performing BACK Segue"
    if data.mode == "TRAIN":
        global canvas
        loadingFace = ImageTk.PhotoImage(Image.open(data.utilPicPath+"loadingFace.png"))
        canvas.delete(ALL)
        canvas.create_image(data.center,image=loadingFace)
        canvas.update()
        train()
    if data.prevMode != []:
        data.mode = data.prevMode.pop(-1)  # restore the most recent mode
def EXIT(root,data):
    # Quit the application.
    data.terminate = True
    exit(root,data)
def SETTINGS(root,data):
    # Enter the TRAIN (settings) screen, remembering where we came from.
    data.prevMode.append(data.mode)
    data.mode = "TRAIN"
    trainInit(data)
def SIDEBAR1(root,data):
    # First sidebar entry: segue to screen 1 of the detected emotion.
    if data.mainEmotion == facial.EMO_HAPPY:
        data.prevMode.append(data.mode)
        data.mode = "HAPPYS1"
        happyS1Init(data)
    elif data.mainEmotion == facial.EMO_SAD:
        data.prevMode.append(data.mode)
        data.mode = "SADS1"
        sadS1Init(data)
    elif data.mainEmotion == facial.EMO_ANGRY:
        data.prevMode.append(data.mode)
        data.mode = "ANGRYS1"
        angryS1Init(data)
def SIDEBAR2(root,data):
    # Second sidebar entry: segue to screen 2 of the detected emotion.
    if data.mainEmotion == facial.EMO_HAPPY:
        data.prevMode.append(data.mode)
        data.mode = "HAPPYS2"
        happyS2Init(data)
    elif data.mainEmotion == facial.EMO_SAD:
        data.prevMode.append(data.mode)
        data.mode = "SADS2"
        sadS2Init(data)
    elif data.mainEmotion == facial.EMO_ANGRY:
        data.prevMode.append(data.mode)
        data.mode = "ANGRYS2"
        angryS2Init(data)
def SIDEBAR3(root,data):
    # Third sidebar entry: segue to screen 3 of the detected emotion.
    if data.mainEmotion == facial.EMO_HAPPY:
        data.prevMode.append(data.mode)
        data.mode = "HAPPYS3"
        happyS3Init(data)
    elif data.mainEmotion == facial.EMO_SAD:
        data.prevMode.append(data.mode)
        data.mode = "SADS3"
        sadS3Init(data)
    elif data.mainEmotion == facial.EMO_ANGRY:
        data.prevMode.append(data.mode)
        data.mode = "ANGRYS3"
        angryS3Init(data)
def NEXTSONG(root,data):
    # Not implemented (music playback is disabled; see loadMP3 below).
    pass
# Animation framework from CMU 15-112 course page:
# https://www.cs.cmu.edu/~112/notes/events-example0.py
########################################
# Modes
########################################
def init(data):
    # Global app state shared by all modes.
    data.center = 540,360  # canvas centre point used for full-screen images
    data.timerDelay = 10  # milliseconds between timer ticks
    data.mainCounter = 0
    data.terminate = False
    data.mode = "MAIN"
    data.prevMode = []  # stack of modes consumed by the BACK button
    data.utilPicPath = "utilitypic/"
    createButton(data)
def train():
    # (Re)train the face recognizer from the images saved under "faces".
    dct = facial.getImagesAndLabels("faces")
    facial.MAPPING = facial.trainRecognizer(dct)
def initModes(data):
    # Only the MAIN screen needs eager initialisation; other screens
    # initialise on entry (see the SIDEBAR* callbacks).
    mainInit(data)
def exit(root, data):
    # Shut down: stop the callback loops, close the window, free the camera.
    data.terminate = True
    root.destroy()
    facial.CAMERA.release()
def mousePressed(root, event, data):
    """Forward a click to the handler of whichever mode is current.

    The checks run in sequence on purpose: a handler may switch
    data.mode, in which case the new mode's handler also sees the event
    (matching the original cascade of independent `if` statements).
    """
    if data.terminate:
        return
    dispatch = [
        ("TRAIN", trainMousePressed),
        ("MAIN", mainMousePressed),
        ("HAPPYS1", happyS1MousePressed),
        ("HAPPYS2", happyS2MousePressed),
        ("HAPPYS3", happyS3MousePressed),
        ("SADS1", sadS1MousePressed),
        ("SADS2", sadS2MousePressed),
        ("SADS3", sadS3MousePressed),
        ("ANGRYS1", angryS1MousePressed),
        ("ANGRYS2", angryS2MousePressed),
        ("ANGRYS3", angryS3MousePressed),
    ]
    for mode, handler in dispatch:
        if data.mode == mode:
            handler(root, event, data)
def keyPressed(root, event, data):
    """Forward a key press to the current mode's handler; "q" quits.

    Mode checks run in sequence so a handler that switches data.mode
    lets the new mode's handler also see the event.
    """
    if data.terminate:
        return
    if event.keysym == "q":
        exit(root, data)
    dispatch = [
        ("TRAIN", trainKeyPressed),
        ("MAIN", mainKeyPressed),
        ("HAPPYS1", happyS1KeyPressed),
        ("HAPPYS2", happyS2KeyPressed),
        ("HAPPYS3", happyS3KeyPressed),
        ("SADS1", sadS1KeyPressed),
        ("SADS2", sadS2KeyPressed),
        ("SADS3", sadS3KeyPressed),
        ("ANGRYS1", angryS1KeyPressed),
        ("ANGRYS2", angryS2KeyPressed),
        ("ANGRYS3", angryS3KeyPressed),
    ]
    for mode, handler in dispatch:
        if data.mode == mode:
            handler(root, event, data)
def timerFired(root, data):
    # Route the periodic timer tick to the current mode's handler.
    if not data.terminate:
        if data.mode == "TRAIN":
            trainTimerFired(root,data)
        if data.mode == "MAIN":
            mainTimerFired(root, data)
        if data.mode == "HAPPYS1":
            happyS1TimerFired(root, data)
        if data.mode == "HAPPYS2":
            happyS2TimerFired(root, data)
        if data.mode == "HAPPYS3":
            happyS3TimerFired(root, data)
        if data.mode == "SADS1":
            sadS1TimerFired(root, data)
        if data.mode == "SADS2":
            sadS2TimerFired(root, data)
        if data.mode == "SADS3":
            sadS3TimerFired(root, data)
        if data.mode == "ANGRYS1":
            angryS1TimerFired(root, data)
        if data.mode == "ANGRYS2":
            angryS2TimerFired(root, data)
        if data.mode == "ANGRYS3":
            angryS3TimerFired(root, data)
        pass
    # Debug trace of the BACK-button mode stack.
    if data.prevMode: print data.prevMode
def redrawAll(root, canvas, data):
    """Repaint the canvas using the current mode's draw routine.

    Sequential checks (not elif) preserve the original behaviour when a
    handler changes data.mode mid-dispatch.
    """
    if data.terminate:
        return
    dispatch = [
        ("TRAIN", trainRedrawAll),
        ("MAIN", mainRedrawAll),
        ("HAPPYS1", happyS1RedrawAll),
        ("HAPPYS2", happyS2RedrawAll),
        ("HAPPYS3", happyS3RedrawAll),
        ("SADS1", sadS1RedrawAll),
        ("SADS2", sadS2RedrawAll),
        ("SADS3", sadS3RedrawAll),
        ("ANGRYS1", angryS1RedrawAll),
        ("ANGRYS2", angryS2RedrawAll),
        ("ANGRYS3", angryS3RedrawAll),
    ]
    for mode, handler in dispatch:
        if data.mode == mode:
            handler(root, canvas, data)
########################################
# Train Mode
########################################
def enterAndrewId(data):
    """Prompt for the user's Andrew ID via a modal dialog."""
    data.andrewID = tkSimpleDialog.askstring("You Are ...", "AndrewID")
def trainInit(data):
    """Set up the TRAIN screen: five equal columns of emotion portraits."""
    w = 1080 / 5
    data.width = 1080
    data.height = 720
    # Column boundaries, left to right.
    (data.margin1, data.margin2) = (0, w)
    (data.margin3, data.margin4) = (2 * w, 3 * w)
    (data.margin5, data.margin6) = (4 * w, 5 * w)
    # Selection / highlight / saved state for the three trainable emotions.
    data.highLightHappy = data.highLightSad = data.highLightAngry = False
    data.selectHappy = data.selectSad = data.selectAngry = False
    data.happySaveSuccess = data.sadSaveSuccess = data.angrySaveSuccess = False
    # Centres of the five portrait columns.
    data.imgCenter1 = (w / 2, 720 / 2)
    data.imgCenter2 = (w / 2 + w, 720 / 2)
    data.imgCenter3 = (w / 2 + 2 * w, 720 / 2)
    data.imgCenter4 = (w / 2 + 3 * w, 720 / 2)
    data.imgCenter5 = (w / 2 + 4 * w, 720 / 2)
    data.utilPicPath = "utilitypic/"
    loadImage(data)
    enterAndrewId(data)
def loadImage(data):
    """Load the training portraits (greyscale and colour) as Tk images."""
    # Each attribute name matches its .jpg file under data.utilPicPath.
    for name in ("disgustNewBW", "happyNewBW", "angryNewBW", "fearNewBW",
                 "sadNewBW", "sadNew", "happyNew", "angryNew"):
        img = ImageTk.PhotoImage(Image.open(data.utilPicPath + name + ".jpg"))
        setattr(data, name, img)
def trainMousePressed(root, event, data):
    """Toggle selection and highlight for the emotion column under a click.

    Selection and highlight are each mutually exclusive across the three
    trainable emotions, and columns whose face was already captured stay
    highlighted. The original duplicated the margin tests for the two
    flag families; this consolidates them with one helper.
    """
    # Map the click's x position to one of the three trainable columns.
    if data.margin2 < event.x < data.margin3:
        emotion = "Happy"
    elif data.margin3 < event.x < data.margin4:
        emotion = "Sad"
    elif data.margin4 < event.x < data.margin5:
        emotion = "Angry"
    else:
        emotion = None
    if emotion is not None:
        for prefix in ("select", "highLight"):
            _toggleExclusive(data, prefix, emotion)
    # A column whose face was already captured stays highlighted.
    if data.happySaveSuccess: data.highLightHappy = True
    if data.sadSaveSuccess: data.highLightSad = True
    if data.angrySaveSuccess: data.highLightAngry = True
def _toggleExclusive(data, prefix, emotion):
    # Flip data.<prefix><emotion>; when it becomes True, clear the other two.
    attr = prefix + emotion
    setattr(data, attr, not getattr(data, attr))
    if getattr(data, attr):
        for other in ("Happy", "Sad", "Angry"):
            if other != emotion:
                setattr(data, prefix + other, False)
def trainKeyPressed(root, event, data):
    """Capture a face for the selected emotion on "c"; Escape goes back."""
    if event.keysym == "c":
        # Only selected emotions (normally at most one) capture a frame.
        captures = (
            (data.selectHappy, "happy"),
            (data.selectSad, "sad"),
            (data.selectAngry, "angry"),
        )
        for selected, label in captures:
            if selected:
                face = facial._getCameraRaw()
                facial.saveUserFace({data.andrewID: {label: face}})
                setattr(data, label + "SaveSuccess", True)
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def trainTimerFired(root, data):
    """The TRAIN screen has no time-driven behaviour."""
    pass
def trainRedrawAll(root, canvas, data):
    """Draw the five portrait columns, colourising highlighted ones."""
    # The outer two portraits are fixed (not trainable in this project).
    canvas.create_image(data.imgCenter1, image=data.disgustNewBW)
    canvas.create_image(data.imgCenter5, image=data.fearNewBW)
    # A highlighted column shows the colour portrait, otherwise greyscale.
    canvas.create_image(data.imgCenter2,
                        image=data.happyNew if data.highLightHappy else data.happyNewBW)
    canvas.create_image(data.imgCenter3,
                        image=data.sadNew if data.highLightSad else data.sadNewBW)
    canvas.create_image(data.imgCenter4,
                        image=data.angryNew if data.highLightAngry else data.angryNewBW)
########################################
# Main Function: Emotion Recognition
########################################
def mainInit(data):
    """Prepare the MAIN screen: emotion poll interval and overlay icons."""
    data.mainWait = 0.5  # seconds between emotion classifications
    data.mainEmotion = None
    loadEmotionPic(data)
def loadEmotionPic(data):
    """Load the three emotion overlay icons as Tk images."""
    for name in ("sad", "happy", "angry"):
        setattr(data, name,
                ImageTk.PhotoImage(file=data.utilPicPath + name + ".png"))
def loadMP3():
    # Preload every mp3 under music/<emotion>/ into module-level dicts.
    # NOTE(review): `pyglet` is never imported in this module, so calling
    # this would raise NameError. It is only referenced from the
    # commented-out thread below -- confirm before enabling.
    global angryMP3
    angryMP3 = {}
    for mp3 in os.listdir("music/angry"):
        if mp3.endswith("mp3"):
            angryMP3[mp3] = pyglet.media.load("music/angry/" + mp3,streaming=False)
    global happyMP3
    happyMP3 = {}
    for mp3 in os.listdir("music/happy"):
        if mp3.endswith("mp3"):
            happyMP3[mp3] = pyglet.media.load("music/happy/" + mp3,streaming=False)
    global sadMP3
    sadMP3 = {}
    for mp3 in os.listdir("music/sad"):
        print mp3
        if mp3.endswith("mp3"):
            sadMP3[mp3] = pyglet.media.load("music/sad/" + mp3,streaming=False)
# thread1 = threading.Thread(target=loadMP3())qq
# thread1.start()
def mainMousePressed(root, event, data):
    """Fire the callback of every main-screen button under the click."""
    for button in data.mainButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def mainKeyPressed(root, event, data):
    """No keyboard shortcuts on the MAIN screen."""
    pass
def mainTimerFired(root, data):
    # Grab a camera frame every tick; classify the user's emotion once
    # every data.mainWait seconds.
    data.snapshot = facial.getCameraSnapShot()
    time = data.mainCounter * data.timerDelay # in milli seconds
    if time % (data.mainWait * 1000) == 0:
        print 98765467897654678654123123
        emotion, prob = facial.getUserEmotion()# facial.EMO_SAD #
        # NOTE(review): `or` accepts any recognised emotion regardless of
        # prob; `and` may have been intended -- confirm.
        if emotion != facial.EMO_NOTFOUND or prob > 0.5:
            data.mainEmotion = emotion
        print data.mainEmotion
    data.mainCounter = (data.mainCounter + 1) % 10000  # wrap to avoid unbounded growth
def mainRedrawAll(root, canvas, data):
    """Show the camera snapshot with the detected emotion icon on top."""
    canvas.create_image((540, 360), image=data.snapshot)
    # Overlay the icon matching the last classified emotion, if any.
    overlays = ((facial.EMO_SAD, data.sad),
                (facial.EMO_HAPPY, data.happy),
                (facial.EMO_ANGRY, data.angry))
    for emotion, icon in overlays:
        if data.mainEmotion == emotion:
            canvas.create_image(data.center, image=icon)
########################################
# HAPPYS1
########################################
def happyS1Init(data):
    # setattr replaces the original exec() string-building: it sets the
    # same attribute (data.<mode>bg) from the same file, without
    # executing generated code.
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def happyS1MousePressed(root, event, data):
    # Only the utility buttons (back/exit/settings) are active here.
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def happyS1KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def happyS1TimerFired(root, data):
    pass
def happyS1RedrawAll(root, canvas, data):
    # getattr replaces the original eval() attribute lookup.
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# HAPPYS2
########################################
def happyS2Init(data):
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def happyS2MousePressed(root, event, data):
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def happyS2KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def happyS2TimerFired(root, data):
    pass
def happyS2RedrawAll(root, canvas, data):
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# HAPPYS3
########################################
def happyS3Init(data):
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
    data.HAPPYS3flag = True  # open the movie page at most once per visit
def happyS3MousePressed(root, event, data):
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
    if data.HAPPYS3flag:
        db.newBrowserTab("http://www.rottentomatoes.com")
        data.HAPPYS3flag = False
def happyS3KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def happyS3TimerFired(root, data):
    pass
def happyS3RedrawAll(root, canvas, data):
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# SADS1
########################################
def sadS1Init(data):
    # setattr replaces the original exec() string-building: same
    # attribute (data.<mode>bg), same file, no generated code.
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def sadS1MousePressed(root, event, data):
    # Only the utility buttons (back/exit/settings) are active here.
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def sadS1KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def sadS1TimerFired(root, data):
    pass
def sadS1RedrawAll(root, canvas, data):
    # getattr replaces the original eval() attribute lookup.
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# SADS2
########################################
def sadS2Init(data):
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def sadS2MousePressed(root, event, data):
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def sadS2KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def sadS2TimerFired(root, data):
    pass
def sadS2RedrawAll(root, canvas, data):
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# SADS3
########################################
def sadS3Init(data):
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
    data.SADS3flag = True  # open the video at most once per visit
def sadS3MousePressed(root, event, data):
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
    if data.SADS3flag:
        db.newBrowserTab("https://www.youtube.com/watch?v=Zwef7-CuZlg")
        data.SADS3flag = False
def sadS3KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def sadS3TimerFired(root, data):
    pass
def sadS3RedrawAll(root, canvas, data):
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# ANGRYS1
########################################
def angryS1Init(data):
    # setattr replaces the original exec() string-building: same
    # attribute (data.<mode>bg), same file, no generated code.
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def angryS1MousePressed(root, event, data):
    # Only the utility buttons (back/exit/settings) are active here.
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def angryS1KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def angryS1TimerFired(root, data):
    pass
def angryS1RedrawAll(root, canvas, data):
    # getattr replaces the original eval() attribute lookup.
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# angryS2
########################################
def angryS2Init(data):
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def angryS2MousePressed(root, event, data):
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def angryS2KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def angryS2TimerFired(root, data):
    pass
def angryS2RedrawAll(root, canvas, data):
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# angryS3
########################################
def angryS3Init(data):
    setattr(data, data.mode + "bg",
            ImageTk.PhotoImage(file=data.utilPicPath + data.mode + ".png"))
def angryS3MousePressed(root, event, data):
    for button in data.utilButtonList:
        if button.inBounds(event.x, event.y):
            button.function(root, data)
def angryS3KeyPressed(root, event, data):
    if event.keysym == "Escape":
        data.button.BACK.function(root, data)
def angryS3TimerFired(root, data):
    pass
def angryS3RedrawAll(root, canvas, data):
    canvas.create_image(data.center, image=getattr(data, data.mode + "bg"))
########################################
# Run Function
########################################
def run(width=300, height=300):
    # Build the Tk window, train the recognizer, wire up the event loop.
    # Wrappers bail out once data.terminate is set so callbacks stop
    # touching a destroyed window.
    def mousePressedWrapper(root, event, canvas, data):
        if not data.terminate:
            mousePressed(root, event, data)
            redrawAllWrapper(root, canvas, data)
    def keyPressedWrapper(root, event, canvas, data):
        if not data.terminate:
            keyPressed(root, event, data)
            redrawAllWrapper(root, canvas, data)
    def redrawAllWrapper(root, canvas, data):
        if not data.terminate:
            canvas.delete(ALL)
            redrawAll(root, canvas, data)
            canvas.update()
    def timerFiredWrapper(root, canvas, data):
        if not data.terminate:
            timerFired(root, data)
            redrawAllWrapper(root, canvas, data)
            # pause, then call timerFired again
            canvas.after(data.timerDelay, timerFiredWrapper, root, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    init(data)
    # create the root and the canvas
    root = Tk()
    root.title("Outside In")
    initModes(data)
    global canvas
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # Show a splash image while the recognizer trains (train() blocks).
    loadingFace = ImageTk.PhotoImage(Image.open(data.utilPicPath+"NAME.png"))
    canvas.delete(ALL)
    canvas.create_image(data.center,image=loadingFace)
    canvas.update()
    train()
    # set up events
    root.bind("<Button-1>", lambda event:
                            mousePressedWrapper(root, event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(root, event, canvas, data))
    timerFiredWrapper(root, canvas, data)
    # and launch the app
    root.mainloop() # blocks until window is closed
    print("Exited.")
if __name__ == '__main__':
    run(1080,720)
|
|
import warnings
from functools import partial
from typing import Any, Callable, List, Optional, Sequence
import torch
from torch import nn, Tensor
from .._internally_replaced_utils import load_state_dict_from_url
from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
from ..utils import _log_api_usage_once
from ._utils import _make_divisible
__all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"]
# Pretrained ImageNet checkpoint locations, keyed by architecture name.
model_urls = {
    "mobilenet_v3_large": "https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
    "mobilenet_v3_small": "https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
}
class SqueezeExcitation(SElayer):
    """DEPRECATED: thin wrapper over torchvision.ops.SqueezeExcitation kept for backward compatibility."""
    def __init__(self, input_channels: int, squeeze_factor: int = 4):
        squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
        super().__init__(input_channels, squeeze_channels, scale_activation=nn.Hardsigmoid)
        self.relu = self.activation  # expose the old attribute name used by this class's previous API
        delattr(self, "activation")
        warnings.warn(
            "This SqueezeExcitation class is deprecated since 0.12 and will be removed in 0.14. "
            "Use torchvision.ops.SqueezeExcitation instead.",
            FutureWarning,
        )
class InvertedResidualConfig:
    """Configuration of one MobileNetV3 bottleneck block.

    Stores the information listed at Tables 1 and 2 of the MobileNetV3
    paper, with channel counts pre-scaled by ``width_mult``.
    """
    def __init__(
        self,
        input_channels: int,
        kernel: int,
        expanded_channels: int,
        out_channels: int,
        use_se: bool,
        activation: str,
        stride: int,
        dilation: int,
        width_mult: float,
    ):
        adjust = self.adjust_channels
        self.input_channels = adjust(input_channels, width_mult)
        self.expanded_channels = adjust(expanded_channels, width_mult)
        self.out_channels = adjust(out_channels, width_mult)
        self.kernel = kernel
        self.use_se = use_se
        # "HS" selects Hardswish; anything else means ReLU (see InvertedResidual).
        self.use_hs = activation == "HS"
        self.stride = stride
        self.dilation = dilation
    @staticmethod
    def adjust_channels(channels: int, width_mult: float):
        """Scale a channel count by ``width_mult``, rounded to a multiple of 8."""
        return _make_divisible(channels * width_mult, 8)
class InvertedResidual(nn.Module):
    # Implemented as described at section 5 of MobileNetV3 paper:
    # expand (1x1) -> depthwise -> optional squeeze-excite -> project (1x1).
    def __init__(
        self,
        cnf: InvertedResidualConfig,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = partial(SElayer, scale_activation=nn.Hardsigmoid),
    ):
        super().__init__()
        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")
        # Residual shortcut only when the block keeps resolution and width.
        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels
        layers: List[nn.Module] = []
        activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU
        # expand (skipped when the expansion ratio is 1)
        if cnf.expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )
        # depthwise; a dilated block forces stride back to 1
        stride = 1 if cnf.dilation > 1 else cnf.stride
        layers.append(
            Conv2dNormActivation(
                cnf.expanded_channels,
                cnf.expanded_channels,
                kernel_size=cnf.kernel,
                stride=stride,
                dilation=cnf.dilation,
                groups=cnf.expanded_channels,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
            )
        )
        if cnf.use_se:
            squeeze_channels = _make_divisible(cnf.expanded_channels // 4, 8)
            layers.append(se_layer(cnf.expanded_channels, squeeze_channels))
        # project (linear bottleneck: no activation)
        layers.append(
            Conv2dNormActivation(
                cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )
        self.block = nn.Sequential(*layers)
        self.out_channels = cnf.out_channels
        self._is_cn = cnf.stride > 1
    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result += input
        return result
class MobileNetV3(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: List[InvertedResidualConfig],
        last_channel: int,
        num_classes: int = 1000,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
        **kwargs: Any,
    ) -> None:
        """
        MobileNet V3 main class
        Args:
            inverted_residual_setting (List[InvertedResidualConfig]): Network structure
            last_channel (int): The number of channels on the penultimate layer
            num_classes (int): Number of classes
            block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            dropout (float): The dropout probability
        """
        super().__init__()
        _log_api_usage_once(self)
        # Validate the structure before building anything.
        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]")
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)
        layers: List[nn.Module] = []
        # building first layer
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3,
                firstconv_output_channels,
                kernel_size=3,
                stride=2,
                norm_layer=norm_layer,
                activation_layer=nn.Hardswish,
            )
        )
        # building inverted residual blocks
        for cnf in inverted_residual_setting:
            layers.append(block(cnf, norm_layer))
        # building last several layers
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = 6 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.Hardswish,
            )
        )
        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(lastconv_output_channels, last_channel),
            nn.Hardswish(inplace=True),
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(last_channel, num_classes),
        )
        # Weight initialisation: kaiming for convs, unit/zero for norms,
        # small normal for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def _forward_impl(self, x: Tensor) -> Tensor:
        # features -> global average pool -> flatten -> classifier head
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _mobilenet_v3_conf(
    arch: str, width_mult: float = 1.0, reduced_tail: bool = False, dilated: bool = False, **kwargs: Any
):
    # Return (inverted_residual_setting, last_channel) for the given arch.
    # reduced_tail halves the channel counts of the final (C4/C5) stages;
    # dilated replaces the last downsampling with dilation.
    reduce_divider = 2 if reduced_tail else 1
    dilation = 2 if dilated else 1
    bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
    adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult)
    if arch == "mobilenet_v3_large":
        # Columns: in, kernel, expanded, out, use_se, activation, stride, dilation
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, False, "RE", 1, 1),
            bneck_conf(16, 3, 64, 24, False, "RE", 2, 1),  # C1
            bneck_conf(24, 3, 72, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 72, 40, True, "RE", 2, 1),  # C2
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 3, 240, 80, False, "HS", 2, 1),  # C3
            bneck_conf(80, 3, 200, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 480, 112, True, "HS", 1, 1),
            bneck_conf(112, 3, 672, 112, True, "HS", 1, 1),
            bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1280 // reduce_divider)  # C5
    elif arch == "mobilenet_v3_small":
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, True, "RE", 2, 1),  # C1
            bneck_conf(16, 3, 72, 24, False, "RE", 2, 1),  # C2
            bneck_conf(24, 3, 88, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 96, 40, True, "HS", 2, 1),  # C3
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 120, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 144, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1024 // reduce_divider)  # C5
    else:
        raise ValueError(f"Unsupported model type {arch}")
    return inverted_residual_setting, last_channel
def _mobilenet_v3(
    arch: str,
    inverted_residual_setting: List[InvertedResidualConfig],
    last_channel: int,
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
):
    """Build a MobileNetV3 and optionally load its pretrained weights."""
    model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
    if not pretrained:
        return model
    url = model_urls.get(arch, None)
    if url is None:
        raise ValueError(f"No checkpoint is available for model type {arch}")
    model.load_state_dict(load_state_dict_from_url(url, progress=progress))
    return model
def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3:
    """Construct a large MobileNetV3 from
    `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    arch = "mobilenet_v3_large"
    settings, last_channel = _mobilenet_v3_conf(arch, **kwargs)
    return _mobilenet_v3(arch, settings, last_channel, pretrained, progress, **kwargs)
def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3:
    """Construct a small MobileNetV3 from
    `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    arch = "mobilenet_v3_small"
    settings, last_channel = _mobilenet_v3_conf(arch, **kwargs)
    return _mobilenet_v3(arch, settings, last_channel, pretrained, progress, **kwargs)
|
|
import typing as t
from . import nodes
from .visitor import NodeVisitor
VAR_LOAD_PARAMETER = "param"  # used by declare_parameter: name enters as a frame parameter
VAR_LOAD_RESOLVE = "resolve"  # used by load: name must be resolved at runtime
VAR_LOAD_ALIAS = "alias"  # used by store: name aliases a reference from an outer frame
VAR_LOAD_UNDEFINED = "undefined"  # used by store: name starts out undefined in this frame
def find_symbols(
    nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Collect the symbols used by *nodes* into a new ``Symbols`` table."""
    table = Symbols(parent=parent_symbols)
    walker = FrameSymbolVisitor(table)
    for node in nodes:
        walker.visit(node)
    return table
def symbols_for_node(
    node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Build a ``Symbols`` table by fully analyzing a single *node*."""
    table = Symbols(parent=parent_symbols)
    table.analyze_node(node)
    return table
class Symbols:
    """Tracks which template variable names are referenced, loaded and
    stored for one scope level, chained to an optional parent scope.

    Each name is mapped to a unique identifier ``l_<level>_<name>`` so
    the same template name can exist at several nesting levels without
    colliding in the generated code.
    """

    def __init__(
        self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
    ) -> None:
        # The level defaults to one deeper than the parent (0 at the root).
        if level is None:
            if parent is None:
                level = 0
            else:
                level = parent.level + 1

        self.level: int = level
        self.parent = parent
        # template name -> unique identifier ("l_<level>_<name>")
        self.refs: t.Dict[str, str] = {}
        # identifier -> (load instruction, argument)
        self.loads: t.Dict[str, t.Any] = {}
        # names assigned to in this scope
        self.stores: t.Set[str] = set()

    def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
        """Populate this table from *node* using :class:`RootVisitor`."""
        visitor = RootVisitor(self)
        visitor.visit(node, **kwargs)

    def _define_ref(
        self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
    ) -> str:
        """Create (or overwrite) the identifier for *name* in this scope,
        optionally recording how its value is obtained."""
        ident = f"l_{self.level}_{name}"
        self.refs[name] = ident

        if load is not None:
            self.loads[ident] = load

        return ident

    def find_load(self, target: str) -> t.Optional[t.Any]:
        """Look up the load instruction for *target*, walking parents."""
        if target in self.loads:
            return self.loads[target]

        if self.parent is not None:
            return self.parent.find_load(target)

        return None

    def find_ref(self, name: str) -> t.Optional[str]:
        """Look up the identifier for *name*, walking parents."""
        if name in self.refs:
            return self.refs[name]

        if self.parent is not None:
            return self.parent.find_ref(name)

        return None

    def ref(self, name: str) -> str:
        """Like :meth:`find_ref` but raises for unknown names."""
        rv = self.find_ref(name)

        if rv is None:
            raise AssertionError(
                "Tried to resolve a name to a reference that was"
                f" unknown to the frame ({name!r})"
            )

        return rv

    def copy(self) -> "Symbols":
        """Return a copy with independent refs/loads/stores containers."""
        rv = t.cast(Symbols, object.__new__(self.__class__))
        rv.__dict__.update(self.__dict__)
        rv.refs = self.refs.copy()
        rv.loads = self.loads.copy()
        rv.stores = self.stores.copy()
        return rv

    def store(self, name: str) -> None:
        """Record an assignment to *name* in this scope."""
        self.stores.add(name)

        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
        if name not in self.refs:
            # If there is a parent scope we check if the name has a
            # reference there. If it does it means we might have to alias
            # to a variable there.
            if self.parent is not None:
                outer_ref = self.parent.find_ref(name)

                if outer_ref is not None:
                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
                    return

            # Otherwise we can just set it to undefined.
            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))

    def declare_parameter(self, name: str) -> str:
        """Record *name* as a parameter of this scope and return its id."""
        self.stores.add(name)
        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))

    def load(self, name: str) -> None:
        """Ensure *name* is readable here; names not defined in any scope
        are resolved from the runtime context."""
        if self.find_ref(name) is None:
            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))

    def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
        """Merge the symbol tables of sibling branches (e.g. the bodies of
        an if/elif/else) back into this scope.

        A name stored in only some branches may still be unset afterwards,
        so its load falls back to an outer alias or a context resolve.
        """
        stores: t.Dict[str, int] = {}

        # Count in how many branches each newly-stored name appears.
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue

                stores[target] = stores.get(target, 0) + 1

        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)

        for name, branch_count in stores.items():
            if branch_count == len(branch_symbols):
                continue  # stored in every branch, always defined

            target = self.find_ref(name)  # type: ignore
            assert target is not None, "should not happen"

            if self.parent is not None:
                outer_target = self.parent.find_ref(name)

                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue

            self.loads[target] = (VAR_LOAD_RESOLVE, name)

    def dump_stores(self) -> t.Dict[str, str]:
        """Map every stored name visible from this scope to its resolved
        identifier, with the innermost definition winning."""
        rv: t.Dict[str, str] = {}
        node: t.Optional["Symbols"] = self

        while node is not None:
            for name in sorted(node.stores):
                if name not in rv:
                    rv[name] = self.find_ref(name)  # type: ignore

            node = node.parent

        return rv

    def dump_param_targets(self) -> t.Set[str]:
        """Return the identifiers of all parameters declared in this scope
        or any parent scope."""
        rv = set()
        node: t.Optional["Symbols"] = self

        while node is not None:
            # Bug fix: inspect the loads of the scope currently being
            # walked (``node``), not always ``self`` -- the old code never
            # reported parameters declared in parent scopes.
            for target, (instr, _) in node.loads.items():
                if instr == VAR_LOAD_PARAMETER:
                    rv.add(target)

            node = node.parent

        return rv
class RootVisitor(NodeVisitor):
    """Walks scope-introducing nodes and hands their children to a
    :class:`FrameSymbolVisitor` so symbols are collected per frame."""
    def __init__(self, symbols: "Symbols") -> None:
        self.sym_visitor = FrameSymbolVisitor(symbols)
    def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
        # Default handling: feed every direct child to the frame visitor.
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)
    # Node types whose children are all handled by _simple_visit.
    visit_Template = _simple_visit
    visit_Block = _simple_visit
    visit_Macro = _simple_visit
    visit_FilterBlock = _simple_visit
    visit_Scope = _simple_visit
    visit_If = _simple_visit
    visit_ScopedEvalContextModifier = _simple_visit
    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        # Only the body children are visited here (not the assignment target).
        for child in node.body:
            self.sym_visitor.visit(child)
    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        # All children except the "call" expression are visited.
        for child in node.iter_child_nodes(exclude=("call",)):
            self.sym_visitor.visit(child)
    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        for child in node.body:
            self.sym_visitor.visit(child)
    def visit_For(
        self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
    ) -> None:
        """Visit one branch of a for loop, selected by *for_branch*
        ("body", "else" or "test"); the loop target is declared as a
        parameter for the "body" and "test" branches."""
        if for_branch == "body":
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == "else":
            branch = node.else_
        elif for_branch == "test":
            self.sym_visitor.visit(node.target, store_as_param=True)
            if node.test is not None:
                self.sym_visitor.visit(node.test)
            return
        else:
            raise RuntimeError("Unknown for branch")
        if branch:
            for item in branch:
                self.sym_visitor.visit(item)
    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        # Visit the assignment targets first, then the body.
        for target in node.targets:
            self.sym_visitor.visit(target)
        for child in node.body:
            self.sym_visitor.visit(child)
    def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
        # Any other node type reaching the root visitor is unsupported.
        raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
class FrameSymbolVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`."""

    def __init__(self, symbols: "Symbols") -> None:
        self.symbols = symbols

    def visit_Name(
        self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
    ) -> None:
        """All assignments to names go through this function."""
        # Forced parameter declarations (e.g. loop targets) take precedence
        # over the node's own context.
        ctx = "param" if store_as_param else node.ctx
        if ctx == "param":
            self.symbols.declare_parameter(node.name)
        elif ctx == "store":
            self.symbols.store(node.name)
        elif ctx == "load":
            self.symbols.load(node.name)

    def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
        self.symbols.load(node.name)

    def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
        """Visit the test, then each branch on its own copy of the symbol
        table, and merge the branch results back into this scope."""
        self.visit(node.test, **kwargs)
        outer = self.symbols

        def visit_branch(branch_nodes: t.Iterable[nodes.Node]) -> "Symbols":
            # Each branch works on a copy so branches stay isolated.
            branch_syms = outer.copy()
            self.symbols = branch_syms
            for child in branch_nodes:
                self.visit(child, **kwargs)
            self.symbols = outer
            return branch_syms

        branches = [
            visit_branch(node.body),
            visit_branch(node.elif_),
            visit_branch(node.else_ or ()),
        ]
        outer.branch_update(branches)

    def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
        self.symbols.store(node.name)

    def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)
        self.symbols.store(node.target)

    def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)
        # Names may be plain strings or (name, alias) tuples.
        for name in node.names:
            self.symbols.store(name[1] if isinstance(name, tuple) else name)

    def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
        """Visit assignments in the correct order."""
        self.visit(node.node, **kwargs)
        self.visit(node.target, **kwargs)

    def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
        """Visiting stops at for blocks. However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter, **kwargs)

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        self.visit(node.call, **kwargs)

    def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
        self.visit(node.filter, **kwargs)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        for value in node.values:
            self.visit(value)

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        """Stop visiting at block assigns."""
        self.visit(node.target, **kwargs)

    def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
        """Stop visiting at scopes."""

    def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
        """Stop visiting at blocks."""

    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        """Do not visit into overlay scopes."""
|
|
"""The tests for the Media group platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.group import DOMAIN
from homeassistant.components.media_player import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_VOLUME_LEVEL,
DOMAIN as MEDIA_DOMAIN,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_SEEK,
SERVICE_PLAY_MEDIA,
SERVICE_SHUFFLE_SET,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_SET,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_MUTED,
SERVICE_CLEAR_PLAYLIST,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
@pytest.fixture(name="mock_media_seek")
def media_player_media_seek_fixture():
    """Mock demo YouTube player media seek."""
    target = "homeassistant.components.demo.media_player.DemoYoutubePlayer.media_seek"
    with patch(target, autospec=True) as seek_mock:
        yield seek_mock
async def test_default_state(hass):
    """Test media group default state."""
    # Only player_1 has a state before setup; player_2 is unknown.
    hass.states.async_set("media_player.player_1", "on")
    await async_setup_component(
        hass,
        MEDIA_DOMAIN,
        {
            MEDIA_DOMAIN: {
                "platform": DOMAIN,
                "entities": ["media_player.player_1", "media_player.player_2"],
                "name": "Media group",
                "unique_id": "unique_identifier",
            }
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # The group entity exists, reports "on", exposes no features yet,
    # and lists both configured members.
    state = hass.states.get("media_player.media_group")
    assert state is not None
    assert state.state == STATE_ON
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
    assert state.attributes.get(ATTR_ENTITY_ID) == [
        "media_player.player_1",
        "media_player.player_2",
    ]
    # The configured unique_id must be registered for the group entity.
    entity_registry = er.async_get(hass)
    entry = entity_registry.async_get("media_player.media_group")
    assert entry
    assert entry.unique_id == "unique_identifier"
async def test_state_reporting(hass):
    """Test the state reporting."""
    await async_setup_component(
        hass,
        MEDIA_DOMAIN,
        {
            MEDIA_DOMAIN: {
                "platform": DOMAIN,
                "entities": ["media_player.player_1", "media_player.player_2"],
            }
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # No member has a state yet -> group is unknown.
    assert hass.states.get("media_player.media_group").state == STATE_UNKNOWN
    # One member on, the other unavailable -> group is on.
    hass.states.async_set("media_player.player_1", STATE_ON)
    hass.states.async_set("media_player.player_2", STATE_UNAVAILABLE)
    await hass.async_block_till_done()
    assert hass.states.get("media_player.media_group").state == STATE_ON
    # Any member on -> group is on.
    hass.states.async_set("media_player.player_1", STATE_ON)
    hass.states.async_set("media_player.player_2", STATE_OFF)
    await hass.async_block_till_done()
    assert hass.states.get("media_player.media_group").state == STATE_ON
    # No member on, one off -> group is off.
    hass.states.async_set("media_player.player_1", STATE_OFF)
    hass.states.async_set("media_player.player_2", STATE_UNAVAILABLE)
    await hass.async_block_till_done()
    assert hass.states.get("media_player.media_group").state == STATE_OFF
    # All members unavailable -> group is unavailable.
    hass.states.async_set("media_player.player_1", STATE_UNAVAILABLE)
    hass.states.async_set("media_player.player_2", STATE_UNAVAILABLE)
    await hass.async_block_till_done()
    assert hass.states.get("media_player.media_group").state == STATE_UNAVAILABLE
async def test_supported_features(hass):
    """Test supported features reporting."""
    pause_play_stop = SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP
    play_media = SUPPORT_PLAY_MEDIA
    volume = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP
    await async_setup_component(
        hass,
        MEDIA_DOMAIN,
        {
            MEDIA_DOMAIN: {
                "platform": DOMAIN,
                "entities": ["media_player.player_1", "media_player.player_2"],
            }
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # A member that advertises no features contributes nothing.
    hass.states.async_set(
        "media_player.player_1", STATE_ON, {ATTR_SUPPORTED_FEATURES: 0}
    )
    await hass.async_block_till_done()
    state = hass.states.get("media_player.media_group")
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
    # The group picks up a member's feature flags.
    hass.states.async_set(
        "media_player.player_1",
        STATE_ON,
        {ATTR_SUPPORTED_FEATURES: pause_play_stop},
    )
    await hass.async_block_till_done()
    state = hass.states.get("media_player.media_group")
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == pause_play_stop
    # Features combine as the union across members (even an off member).
    hass.states.async_set(
        "media_player.player_2",
        STATE_OFF,
        {ATTR_SUPPORTED_FEATURES: play_media | volume},
    )
    await hass.async_block_till_done()
    state = hass.states.get("media_player.media_group")
    assert (
        state.attributes[ATTR_SUPPORTED_FEATURES]
        == pause_play_stop | play_media | volume
    )
    # Dropping a member's features shrinks the union accordingly.
    hass.states.async_set(
        "media_player.player_2", STATE_OFF, {ATTR_SUPPORTED_FEATURES: play_media}
    )
    await hass.async_block_till_done()
    state = hass.states.get("media_player.media_group")
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == pause_play_stop | play_media
async def test_service_calls(hass, mock_media_seek):
    """Test service calls."""
    # Group the three demo players so every service fans out to all of them.
    await async_setup_component(
        hass,
        MEDIA_DOMAIN,
        {
            MEDIA_DOMAIN: [
                {"platform": "demo"},
                {
                    "platform": DOMAIN,
                    "entities": [
                        "media_player.bedroom",
                        "media_player.kitchen",
                        "media_player.living_room",
                    ],
                },
            ]
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.get("media_player.media_group").state == STATE_PLAYING
    # turn_off / turn_on reach every member.
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.bedroom").state == STATE_OFF
    assert hass.states.get("media_player.kitchen").state == STATE_OFF
    assert hass.states.get("media_player.living_room").state == STATE_OFF
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.bedroom").state == STATE_PLAYING
    assert hass.states.get("media_player.kitchen").state == STATE_PLAYING
    assert hass.states.get("media_player.living_room").state == STATE_PLAYING
    # Pause / play toggle all members between paused and playing.
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_PAUSE,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.bedroom").state == STATE_PAUSED
    assert hass.states.get("media_player.kitchen").state == STATE_PAUSED
    assert hass.states.get("media_player.living_room").state == STATE_PAUSED
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_PLAY,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.bedroom").state == STATE_PLAYING
    assert hass.states.get("media_player.kitchen").state == STATE_PLAYING
    assert hass.states.get("media_player.living_room").state == STATE_PLAYING
    # Track skipping: only the kitchen player exposes ATTR_MEDIA_TRACK.
    # ATTR_MEDIA_TRACK is not supported by bedroom and living_room players
    assert hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_TRACK] == 1
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_NEXT_TRACK,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_TRACK] == 2
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_PREVIOUS_TRACK,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_TRACK] == 1
    # play_media forwards the content type/id through the group.
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_PLAY_MEDIA,
        {
            ATTR_ENTITY_ID: "media_player.media_group",
            ATTR_MEDIA_CONTENT_TYPE: "some_type",
            ATTR_MEDIA_CONTENT_ID: "some_id",
        },
    )
    await hass.async_block_till_done()
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_CONTENT_ID]
        == "some_id"
    )
    # media_player.kitchen is skipped because it always returns "bounzz-1"
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_CONTENT_ID]
        == "some_id"
    )
    # Seek: the group advertises SUPPORT_SEEK and the mocked media_seek
    # must be invoked by the group service call.
    state = hass.states.get("media_player.media_group")
    assert state.attributes[ATTR_SUPPORTED_FEATURES] & SUPPORT_SEEK
    assert not mock_media_seek.called
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_SEEK,
        {
            ATTR_ENTITY_ID: "media_player.media_group",
            ATTR_MEDIA_SEEK_POSITION: 100,
        },
    )
    await hass.async_block_till_done()
    assert mock_media_seek.called
    # Volume set / up / down apply to every member (demo players start at 1).
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 1
    )
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_VOLUME_SET,
        {
            ATTR_ENTITY_ID: "media_player.media_group",
            ATTR_MEDIA_VOLUME_LEVEL: 0.5,
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.5
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.5
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.5
    )
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_VOLUME_UP,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.6
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.6
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.6
    )
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_VOLUME_DOWN,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.5
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.5
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_VOLUME_LEVEL]
        == 0.5
    )
    # Mute fans out to every member.
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_VOLUME_MUTED]
        is False
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_VOLUME_MUTED]
        is False
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_VOLUME_MUTED]
        is False
    )
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_VOLUME_MUTE,
        {ATTR_ENTITY_ID: "media_player.media_group", ATTR_MEDIA_VOLUME_MUTED: True},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_VOLUME_MUTED]
        is True
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_VOLUME_MUTED]
        is True
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_VOLUME_MUTED]
        is True
    )
    # Shuffle fans out to every member.
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_SHUFFLE] is False
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_SHUFFLE] is False
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_SHUFFLE]
        is False
    )
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_SHUFFLE_SET,
        {ATTR_ENTITY_ID: "media_player.media_group", ATTR_MEDIA_SHUFFLE: True},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert (
        hass.states.get("media_player.bedroom").attributes[ATTR_MEDIA_SHUFFLE] is True
    )
    assert (
        hass.states.get("media_player.kitchen").attributes[ATTR_MEDIA_SHUFFLE] is True
    )
    assert (
        hass.states.get("media_player.living_room").attributes[ATTR_MEDIA_SHUFFLE]
        is True
    )
    assert hass.states.get("media_player.bedroom").state == STATE_PLAYING
    assert hass.states.get("media_player.kitchen").state == STATE_PLAYING
    assert hass.states.get("media_player.living_room").state == STATE_PLAYING
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_CLEAR_PLAYLIST,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    # SERVICE_CLEAR_PLAYLIST is not supported by bedroom and living_room players
    assert hass.states.get("media_player.kitchen").state == STATE_OFF
    # Restart the kitchen player directly (not via the group).
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_PLAY,
        {ATTR_ENTITY_ID: "media_player.kitchen"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.bedroom").state == STATE_PLAYING
    assert hass.states.get("media_player.kitchen").state == STATE_PLAYING
    assert hass.states.get("media_player.living_room").state == STATE_PLAYING
    # media_stop leaves every member off.
    await hass.services.async_call(
        MEDIA_DOMAIN,
        SERVICE_MEDIA_STOP,
        {ATTR_ENTITY_ID: "media_player.media_group"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("media_player.bedroom").state == STATE_OFF
    assert hass.states.get("media_player.kitchen").state == STATE_OFF
    assert hass.states.get("media_player.living_room").state == STATE_OFF
async def test_nested_group(hass):
    """Test nested media group."""
    hass.states.async_set("media_player.player_1", "on")
    # The outer group ("Nested Group") contains only the inner group
    # entity ("Group 1"), which in turn contains the two players.
    await async_setup_component(
        hass,
        MEDIA_DOMAIN,
        {
            MEDIA_DOMAIN: [
                {
                    "platform": DOMAIN,
                    "entities": ["media_player.group_1"],
                    "name": "Nested Group",
                },
                {
                    "platform": DOMAIN,
                    "entities": ["media_player.player_1", "media_player.player_2"],
                    "name": "Group 1",
                },
            ]
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # The inner group reflects its players directly.
    state = hass.states.get("media_player.group_1")
    assert state is not None
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_ENTITY_ID) == [
        "media_player.player_1",
        "media_player.player_2",
    ]
    # The outer group tracks the inner group entity.
    state = hass.states.get("media_player.nested_group")
    assert state is not None
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_ENTITY_ID) == ["media_player.group_1"]
|
|
"""
The following file creates and executes models with SIR structure. It
then graphs the results and presents the models' underlying
compartmental structure.
Although the structure of the model are simple and widely accepted,
the specific parameter values are taken from the following text: "An
Introduction to Infectious Disease Modelling" by Emilia Vynnycky and
Richard G White available at
http://www.anintroductiontoinfectiousdiseasemodelling.com/ with Excel-
based model solutions.
It uses methods from the BaseModel class in the basepop.py file from
this module (one directory above) to create the model objects for SIR
and SEIR models.
The purpose of this file is to present examples of how such models can
be built in Python within this popdynamics module. Specifically, the
user should note how inherited methods from BaseModel are used to
ensure processes such as compartment initiation and setting of flows
(entry, transfer and exit) are performed correctly.
Suggestion to get started:
- Adjust some parameters within the dictionaries of parameter values
in infection_param_dictionaries in line 317 and observe how model
outputs change.
- Try adapting the SEIR model without demography to an SEIS model, by
removing the recovered compartment and changing the recovery
transition to move patients from infectious to susceptible (rather
than recovered).
"""
from __future__ import print_function
from __future__ import division
from builtins import zip
from past.utils import old_div
# # hack to allow basepop to be loaded from the parent directory
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import basepop
import pylab
import matplotlib
# Create the SIR Model Object
class SirModel(basepop.BaseModel):
    """
    Based on the SIR models from Vynnycky and White Chapter 2
    and the corresponding on-line Excel difference equation-based
    models for measles and for flu.
    """

    def __init__(self, params=None):
        """
        In the initialization, params that are epidemiological
        meaningful are converted to parameters that can be
        expressed as coefficients in the resulting ODE.

        :param params: optional dict of overrides for the defaults, e.g. {
                "population": 500,
                "start_infectious": 1.,
                "r0": 2.,
                "duration_preinfectious": 2.,
                "duration_infectious": 2.,
                "life_expectancy": 70. * 365.
            }
        """
        basepop.BaseModel.__init__(self)

        # Bug fix: the default used to be a mutable ``params={}`` argument,
        # which is shared between all calls and silently retains any
        # mutation; ``None`` plus a fresh dict avoids that pitfall.
        if params is None:
            params = {}

        default_params = {
            "population": 50,
            "start_infectious": 1.,
            "r0": 12.,
            "duration_preinfectious": 8.,
            "duration_infectious": 7.,
            "life_expectancy": 70. * 365.
        }
        # apply defaults first, then caller overrides
        for key, value in default_params.items():
            self.set_param(key, value)
        for key, value in params.items():
            self.set_param(key, value)

        # define compartments and set their starting values
        self.set_compartment(
            "susceptible",
            self.params["population"] - self.params["start_infectious"])
        self.set_compartment(
            "infectious",
            self.params["start_infectious"])
        self.set_compartment(
            "immune", 0.)

        # set model parameters that can be referred to
        # by `self.set_fixed_transfer_rate_flow` and
        # by var calculations
        self.set_param(
            "infection_beta",
            self.params["r0"] /
            (self.params["duration_infectious"] * self.params["population"]))
        self.set_param(
            "infection_rate_recover",
            1. / self.params["duration_infectious"])

    def set_flows(self):
        """
        Connects up compartments in the flow diagram of disease
        progression. Each flow between two compartment refers
        to either a fixed param or a var. The values of a var
        in the calculation will be calculated in `self.calculate_vars`
        for each time-point.
        """
        # set variable infection transition flow, the rate refers
        # to values in self.vars, which are recalculated at every
        # time-step in self.calculate_vars.
        self.set_var_transfer_rate_flow(
            "susceptible", "infectious", "rate_force")

        # set fixed inter-compartmental flows, the rate refers
        # to values in self.params, which are fixed
        self.set_fixed_transfer_rate_flow(
            "infectious", "immune", "infection_rate_recover")

    def calculate_vars(self):
        """
        Calculates variables for every time-point in the simulation.
        These can be used as rates for dynamic transmission
        in var transfer flows.
        """
        # track total population size
        self.vars["population"] = sum(self.compartments.values())

        # force of infection, infection_beta is derived from R0 in __init__
        self.vars["rate_force"] = \
            self.params["infection_beta"] * \
            self.compartments["infectious"]

    def calculate_diagnostic_vars(self):
        """
        Calculates diagnostic variables at the end of the simulation.
        These are only calculated for the specified time-points, using
        cached values of the compartments from the simulation run.
        """
        # calculate incidence: sum of all var flows into infectious
        # compartments, normalized by population size
        self.vars["incidence"] = 0.
        for from_label, to_label, rate in self.var_transfer_rate_flows:
            val = self.compartments[from_label] * self.vars[rate]
            if "infectious" in to_label:
                self.vars["incidence"] += old_div(val, self.vars["population"])

        # calculate prevalence
        self.vars["prevalence"] = \
            old_div(self.compartments["infectious"], self.vars["population"])
# Plotting functions for the model
def plot_overlays(times, solutions, ylabel, title, png):
    """
    Plot every solution series in *solutions* onto one set of axes and
    save the figure to *png*.

    :param times: list of [Float]
    :param solutions: list of ["key": Array(Float)]
    :param ylabel: (str) label for the y axis
    :param title: (str) figure title
    :param png: string
    """
    # One color per curve key; when several solutions are overlaid
    # (e.g. stochastic replicas) each curve is drawn nearly transparent
    # (alpha 0.1) so the ensemble reads as a density.
    colors = []
    for name in "bgrykcm":
        rgb = matplotlib.colors.colorConverter.to_rgb(name)
        if len(solutions) > 1:
            rgba = list(rgb) + [0.1]
            colors.append(rgba)
        else:
            colors.append(rgb)
    pylab.clf()
    y_max = 0
    for i_soln, soln in enumerate(solutions):
        for i_key, key in enumerate(soln):
            y_vals = soln[key]
            color = colors[i_key % len(colors)]
            if i_soln == 0:
                # generate a fake dot so that legend can extract color/label
                # (drawn opaque via color[:3] even when curves are transparent)
                pylab.plot([0], [0], label=key, color=color[:3])
            pylab.plot(times, y_vals, linewidth=2, color=color)
            y_max = max(max(y_vals), y_max)
    # leave 10% headroom above the tallest curve
    pylab.ylim([0, y_max * 1.1])
    pylab.legend()
    pylab.ylabel(ylabel)
    pylab.title(title)
    pylab.savefig(png)
def plot_epidemiological_indicators(models, png):
    """Overlay the incidence and prevalence curves of each model run and
    save the figure to *png*."""
    indicators = ["incidence", "prevalence"]
    solutions = []
    for model in models:
        times = model.target_times
        solutions.append(
            {indicator: model.get_var_soln(indicator) for indicator in indicators})
    plot_overlays(
        times, solutions, "per day (except prevalence), per person",
        "Indicators", png)
def plot_rn(models, png):
    """Overlay the net reproduction number Rn = R0 * (susceptible
    fraction) for each model run and save the figure to *png*."""
    solutions = []
    for model in models:
        times = model.target_times
        susceptible = model.get_compartment_soln("susceptible")
        population = model.get_var_soln("population")
        rn_values = [
            model.params["r0"] * old_div(s, p)
            for s, p in zip(susceptible, population)
        ]
        solutions.append({"Rn": rn_values})
    plot_overlays(times, solutions, "", "Rn", png)
def plot_populations(models, png):
    """Overlay the compartment-size trajectories of each model run and
    save the figure to *png*."""
    solutions = []
    for model in models:
        solutions.append(
            {label: model.get_compartment_soln(label)
             for label in model.compartments.keys()})
    plot_overlays(
        models[0].target_times, solutions, "persons", "Populations", png)
def generate_output(models, out_dir, modifier):
    """Write the flow diagram plus the standard set of graphs for
    *models* into *out_dir*, prefixing filenames with *modifier*."""
    def out_path(suffix):
        return os.path.join(out_dir, modifier + suffix)

    models[0].make_flow_diagram_png(os.path.join(out_dir, "flow_diagram"))
    plot_epidemiological_indicators(models, out_path("_indicators.png"))
    plot_populations(models, out_path("_compartment_sizes.png"))
    plot_rn(models, out_path("_rn.png"))
# The main routine
out_dir = "sir_graphs"
basepop.ensure_out_dir(out_dir)

# deterministic (explicit) model
model = SirModel()
model.make_times(0, 50, 1)
model.integrate()
generate_output([model], out_dir, 'explicit')

# stochastic continuous-time models
# Bug fix: the output modifiers were swapped -- the continuous-time runs
# were labeled "discrete_stochastic" and vice versa. Labels now match the
# integration method actually used.
n_replica = 20
models = []
for i in range(n_replica):
    new_model = SirModel()
    new_model.make_times(0, 50, 1)
    new_model.integrate('continuous_time_stochastic')
    models.append(new_model)
generate_output(models, out_dir, "continuous_stochastic")

# stochastic discrete-time models
n_replica = 20
models = []
for i in range(n_replica):
    new_model = SirModel()
    new_model.make_times(0, 50, 1)
    new_model.integrate('discrete_time_stochastic')
    models.append(new_model)
generate_output(models, out_dir, "discrete_stochastic")

basepop.open_pngs_in_dir(out_dir)
|
|
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import json
import structlog
from twisted.internet import reactor, defer
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from common.tech_profile.tech_profile import DEFAULT_TECH_PROFILE_TABLE_ID
from voltha.protos.device_pb2 import Device
from adtran_olt_handler import AdtranOltHandler
from net.adtran_rest import RestInvalidResponseCode
# Presumably caps the number of consecutive expedited syncs -- usage is
# outside this view; confirm against the sync scheduling code.
_MAX_EXPEDITE_COUNT = 5
# Short sync interval in seconds (expedited) -- TODO confirm usage.
_EXPEDITE_SECS = 2
# Normal hardware-sync interval in seconds; assigned to Onu._sync_tick.
_HW_SYNC_SECS = 60
class Onu(object):
"""
Wraps an ONU
"""
DEFAULT_PASSWORD = ''
    def __init__(self, onu_info):
        """
        Initialize ONU state from the provisioning dictionary.

        :param onu_info: (dict) requires keys 'onu-id', 'pon',
            'serial-number', 'device-id', 'password', 'uni-ports' and
            'enabled'; 'upstream-fec' is optional
        :raises ValueError: if 'onu-id' is None
        """
        self._onu_id = onu_info['onu-id']
        if self._onu_id is None:
            raise ValueError('No ONU ID available')
        # Identity derived from the owning PON port
        pon = onu_info['pon']
        self._olt = pon.olt
        self._pon_id = pon.pon_id
        self._name = '{}@{}'.format(pon.physical_port_name, self._onu_id)
        self.log = structlog.get_logger(pon_id=self._pon_id, onu_id=self._onu_id)
        self._valid = True  # Set false during delete/cleanup
        # Serial number kept in both base64 (wire) and display form
        self._serial_number_base64 = Onu.string_to_serial_number(onu_info['serial-number'])
        self._serial_number_string = onu_info['serial-number']
        self._device_id = onu_info['device-id']
        self._password = onu_info['password']
        self._created = False
        # Proxy address used to route messages to this ONU via the OLT
        self._proxy_address = Device.ProxyAddress(device_id=self.olt.device_id,
                                                  channel_id=self.olt.pon_id_to_port_number(self._pon_id),
                                                  onu_id=self._onu_id,
                                                  onu_session_id=self._onu_id)
        # Hardware-sync bookkeeping
        self._sync_tick = _HW_SYNC_SECS
        self._expedite_sync = False
        self._expedite_count = 0
        self._resync_flows = False
        self._sync_deferred = None     # For sync of ONT config to hardware
        self._gem_ports = {}           # gem-id -> GemPort
        self._tconts = {}              # alloc-id -> TCont
        self._uni_ports = onu_info['uni-ports']
        # Provisionable items
        self._enabled = onu_info['enabled']
        self._upstream_fec_enable = onu_info.get('upstream-fec')
        # KPI related items
        self._rssi = -9999
        self._equalization_delay = 0
        self._fiber_length = 0
        self._timestamp = None     # Last time the KPI items were updated
def __str__(self):
return "ONU-{}:{}, SN: {}/{}".format(self._pon_id, self._onu_id,
self._serial_number_string, self._serial_number_base64)
@staticmethod
def serial_number_to_string(value):
sval = base64.decodestring(value)
unique = [elem.encode("hex") for elem in sval[4:8]]
return '{}{}{}{}{}'.format(sval[:4], unique[0], unique[1], unique[2], unique[3]).upper()
@staticmethod
def string_to_serial_number(value):
bvendor = [octet for octet in value[:4]]
bunique = [binascii.a2b_hex(value[offset:offset + 2]) for offset in xrange(4, 12, 2)]
bvalue = ''.join(bvendor + bunique)
return base64.b64encode(bvalue)
@property
def olt(self):
return self._olt
@property
def pon(self):
return self.olt.southbound_ports[self._pon_id]
@property
def intf_id(self):
return self.pon.intf_id
@property
def pon_id(self):
return self._pon_id
@property
def onu_id(self):
return self._onu_id
@property
def device_id(self):
return self._device_id
@property
def name(self):
return self._name
@property
def upstream_fec_enable(self):
return self._upstream_fec_enable
@upstream_fec_enable.setter
def upstream_fec_enable(self, value):
assert isinstance(value, bool), 'upstream FEC enabled is a boolean'
if self._upstream_fec_enable != value:
self._upstream_fec_enable = value
# Recalculate PON upstream FEC
self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
@property
def password(self):
"""
Get password. Base 64 format
"""
return self._password
@password.setter
def password(self, value):
"""
Set the password
:param value: (str) base 64 encoded value
"""
if self._password is None and value is not None:
self._password = value
reg_id = (value.decode('base64')).rstrip('\00').lstrip('\00')
# Must remove any non-printable characters
reg_id = ''.join([i if 127 > ord(i) > 31 else '_' for i in reg_id])
# Generate alarm here for regID
from voltha.extensions.alarms.onu.onu_active_alarm import OnuActiveAlarm
self.log.info('onu-Active-Alarm', serial_number=self._serial_number_string)
device = self._olt.adapter_agent.get_device(self._olt.device_id)
OnuActiveAlarm(self._olt.alarms, self._olt.device_id, self._pon_id,
self._serial_number_string, reg_id, device.serial_number,
ipv4_address=device.ipv4_address).raise_alarm()
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
if self._enabled != value:
self._enabled = value
self._resync_flows = True
self.set_config('enable', self._enabled)
if self._enabled:
self.start()
else:
self.stop()
# Recalculate PON upstream FEC
self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
@property
def uni_ports(self):
return self._uni_ports
@property
def logical_port(self):
"""Return the logical PORT number of this ONU's UNI"""
# TODO: once we support multiple UNIs, this needs to be revisited
return self._uni_ports[0]
@property
def gem_ports(self):
return self._gem_ports.values()
@property
def proxy_address(self):
return self._proxy_address
@property
def serial_number_64(self):
return self._serial_number_base64
@property
def serial_number(self):
return self._serial_number_string
@property
def timestamp(self):
# Last time the KPI items were updated
return self._timestamp
@timestamp.setter
def timestamp(self, value):
self._timestamp = value
@property
def rssi(self):
"""The received signal strength indication of the ONU"""
return self._rssi
@rssi.setter
def rssi(self, value):
if self._rssi != value:
self._rssi = value
# TODO: Notify anyone?
@property
def equalization_delay(self):
"""Equalization delay (bits)"""
return self._equalization_delay
@equalization_delay.setter
def equalization_delay(self, value):
if self._equalization_delay != value:
self._equalization_delay = value
# TODO: Notify anyone?
@property
def fiber_length(self):
"""Distance to ONU"""
return self._fiber_length
@fiber_length.setter
def fiber_length(self, value):
if self._fiber_length != value:
self._fiber_length = value
# TODO: Notify anyone?
def _cancel_deferred(self):
d, self._sync_deferred = self._sync_deferred, None
if d is not None and not d.called:
try:
d.cancel()
except Exception:
pass
@inlineCallbacks
def create(self, reflow=False):
"""
Create (or reflow) this ONU to hardware
:param reflow: (boolean) Flag, if True, indicating if this is a reflow ONU
information after an unmanaged OLT hardware reboot
"""
self.log.debug('create', reflow=reflow)
self._cancel_deferred()
data = json.dumps({'onu-id': self._onu_id,
'serial-number': self._serial_number_base64,
'enable': self._enabled})
uri = AdtranOltHandler.GPON_ONU_CONFIG_LIST_URI.format(self._pon_id)
name = 'onu-create-{}-{}-{}: {}'.format(self._pon_id, self._onu_id,
self._serial_number_base64, self._enabled)
first_sync = self._sync_tick if self._created else 5
if not self._created or reflow:
try:
yield self.olt.rest_client.request('POST', uri, data=data, name=name)
self._created = True
except Exception as e:
self.log.exception('onu-create', e=e)
# See if it failed due to already being configured
url = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
url += '/serial-number'
try:
results = yield self.olt.rest_client.request('GET', uri, name=name)
self.log.debug('onu-create-check', results=results)
if len(results) == 1 and results[0].get('serial-number', '') != self._serial_number_base64:
self._created = True
except Exception as _e:
self.log.warn('onu-exists-check', pon_id=self.pon_id, onu_id=self.onu_id,
serial_number=self.serial_number)
self._sync_deferred = reactor.callLater(first_sync, self._sync_hardware)
# Recalculate PON upstream FEC
self.pon.upstream_fec_enable = self.pon.any_upstream_fec_enabled
returnValue('created')
@inlineCallbacks
def delete(self):
"""
Clean up ONU (gems/tconts). ONU removal from OLT h/w done by PonPort
:return: (deferred)
"""
self._valid = False
self._cancel_deferred()
# Remove from H/W
gem_ids = self._gem_ports.keys()
alloc_ids = self._tconts.keys()
dl = []
for gem_id in gem_ids:
dl.append(self.remove_gem_id(gem_id))
try:
yield defer.gatherResults(dl, consumeErrors=True)
except Exception as _e:
pass
dl = []
for alloc_id in alloc_ids:
dl.append(self.remove_tcont(alloc_id))
try:
yield defer.gatherResults(dl, consumeErrors=True)
except Exception as _e:
pass
self._gem_ports.clear()
self._tconts.clear()
olt, self._olt = self._olt, None
uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
name = 'onu-delete-{}-{}-{}: {}'.format(self._pon_id, self._onu_id,
self._serial_number_base64, self._enabled)
try:
yield olt.rest_client.request('DELETE', uri, name=name)
except RestInvalidResponseCode as e:
if e.code != 404:
self.log.exception('onu-delete', e=e)
except Exception as e:
self.log.exception('onu-delete', e=e)
# Release resource manager resources for this ONU
pon_intf_id_onu_id = (self.pon_id, self.onu_id)
olt.resource_mgr.free_pon_resources_for_onu(pon_intf_id_onu_id)
returnValue('deleted')
def start(self):
self._cancel_deferred()
self._sync_deferred = reactor.callLater(0, self._sync_hardware)
def stop(self):
self._cancel_deferred()
def restart(self):
if not self._valid:
return succeed('Deleting')
self._cancel_deferred()
self._sync_deferred = reactor.callLater(0, self._sync_hardware)
return self.create()
def _sync_hardware(self):
from codec.olt_config import OltConfig
self.log.debug('sync-hardware')
def read_config(results):
self.log.debug('read-config', results=results)
dl = []
try:
config = OltConfig.Pon.Onu.decode([results])
assert self.onu_id in config, 'sync-onu-not-found-{}'.format(self.onu_id)
config = config[self.onu_id]
if self._enabled != config.enable:
dl.append(self.set_config('enable', self._enabled))
if self.serial_number_64 != config.serial_number_64:
dl.append(self.set_config('serial-number', self.serial_number_64))
if self._enabled:
# Sync TCONTs if everything else in sync
if len(dl) == 0:
dl.extend(sync_tconts(config.tconts))
# Sync GEM Ports if everything else in sync
if len(dl) == 0:
dl.extend(sync_gem_ports(config.gem_ports))
if len(dl) == 0:
sync_flows()
except Exception as e:
self.log.exception('hw-sync-read-config', e=e)
# Run h/w sync again a bit faster if we had to sync anything
self._expedite_sync = len(dl) > 0
# TODO: do checks
return defer.gatherResults(dl, consumeErrors=True)
def sync_tconts(hw_tconts):
hw_alloc_ids = frozenset(hw_tconts.iterkeys())
my_alloc_ids = frozenset(self._tconts.iterkeys())
dl = []
try:
extra_alloc_ids = hw_alloc_ids - my_alloc_ids
dl.extend(sync_delete_extra_tconts(extra_alloc_ids))
missing_alloc_ids = my_alloc_ids - hw_alloc_ids
dl.extend(sync_add_missing_tconts(missing_alloc_ids))
matching_alloc_ids = my_alloc_ids & hw_alloc_ids
matching_hw_tconts = {alloc_id: tcont
for alloc_id, tcont in hw_tconts.iteritems()
if alloc_id in matching_alloc_ids}
dl.extend(sync_matching_tconts(matching_hw_tconts))
except Exception as e2:
self.log.exception('hw-sync-tconts', e=e2)
return dl
def sync_delete_extra_tconts(alloc_ids):
return [self.remove_tcont(alloc_id) for alloc_id in alloc_ids]
def sync_add_missing_tconts(alloc_ids):
return [self.add_tcont(self._tconts[alloc_id], reflow=True) for alloc_id in alloc_ids]
def sync_matching_tconts(hw_tconts):
from xpon.traffic_descriptor import TrafficDescriptor
dl = []
# TODO: sync TD & Best Effort. Only other TCONT leaf is the key
for alloc_id, hw_tcont in hw_tconts.iteritems():
my_tcont = self._tconts[alloc_id]
my_td = my_tcont.traffic_descriptor
hw_td = hw_tcont.traffic_descriptor
if my_td is None:
continue
my_additional = TrafficDescriptor.AdditionalBwEligibility.\
to_string(my_td.additional_bandwidth_eligibility)
reflow = hw_td is None or \
my_td.fixed_bandwidth != hw_td.fixed_bandwidth or \
my_td.assured_bandwidth != hw_td.assured_bandwidth or \
my_td.maximum_bandwidth != hw_td.maximum_bandwidth or \
my_additional != hw_td.additional_bandwidth_eligibility
if not reflow and \
my_td.additional_bandwidth_eligibility == \
TrafficDescriptor.AdditionalBwEligibility.BEST_EFFORT_SHARING and \
my_td.best_effort is not None:
hw_be = hw_td.best_effort
my_be = my_td.best_effort
reflow = hw_be is None or \
my_be.bandwidth != hw_be.bandwidth or \
my_be.priority != hw_be.priority or \
my_be.weight != hw_be.weight
if reflow:
dl.append(my_tcont.add_to_hardware(self.olt.rest_client))
return dl
def sync_gem_ports(hw_gem_ports):
hw_gems_ids = frozenset(hw_gem_ports.iterkeys())
my_gems_ids = frozenset(self._gem_ports.iterkeys())
dl = []
try:
extra_gems_ids = hw_gems_ids - my_gems_ids
dl.extend(sync_delete_extra_gem_ports(extra_gems_ids))
missing_gem_ids = my_gems_ids - hw_gems_ids
dl.extend(sync_add_missing_gem_ports(missing_gem_ids))
matching_gem_ids = my_gems_ids & hw_gems_ids
matching_hw_gem_ports = {gem_id: gem_port
for gem_id, gem_port in hw_gem_ports.iteritems()
if gem_id in matching_gem_ids}
dl.extend(sync_matching_gem_ports(matching_hw_gem_ports))
self._resync_flows |= len(dl) > 0
except Exception as ex:
self.log.exception('hw-sync-gem-ports', e=ex)
return dl
def sync_delete_extra_gem_ports(gem_ids):
return [self.remove_gem_id(gem_id) for gem_id in gem_ids]
def sync_add_missing_gem_ports(gem_ids):
return [self.add_gem_port(self._gem_ports[gem_id], reflow=True)
for gem_id in gem_ids]
def sync_matching_gem_ports(hw_gem_ports):
dl = []
for gem_id, hw_gem_port in hw_gem_ports.iteritems():
gem_port = self._gem_ports[gem_id]
if gem_port.alloc_id != hw_gem_port.alloc_id or\
gem_port.encryption != hw_gem_port.encryption or\
gem_port.omci_transport != hw_gem_port.omci_transport:
dl.append(gem_port.add_to_hardware(self.olt.rest_client,
operation='PATCH'))
return dl
def sync_flows():
from flow.flow_entry import FlowEntry
reflow, self._resync_flows = self._resync_flows, False
return FlowEntry.sync_flows_by_onu(self, reflow=reflow)
def failure(_reason):
# self.log.error('hardware-sync-get-config-failed', reason=_reason)
pass
def reschedule(_):
import random
delay = self._sync_tick if self._enabled else 5 * self._sync_tick
# Speed up sequential resync a limited number of times if out of sync
# With 60 second initial an typical worst case resync of 4 times, this
# should resync an ONU and all it's gem-ports and tconts within <90 seconds
if self._expedite_sync and self._enabled:
self._expedite_count += 1
if self._expedite_count < _MAX_EXPEDITE_COUNT:
delay = _EXPEDITE_SECS
else:
self._expedite_count = 0
delay += random.uniform(-delay / 10, delay / 10)
self._sync_deferred = reactor.callLater(delay, self._sync_hardware)
self._expedite_sync = False
# If PON is not enabled, skip hw-sync. If ONU not enabled, do it but less
# frequently
if not self.pon.enabled:
return reschedule('not-enabled')
try:
self._sync_deferred = self._get_config()
self._sync_deferred.addCallbacks(read_config, failure)
self._sync_deferred.addBoth(reschedule)
except Exception as e:
self.log.exception('hw-sync-main', e=e)
return reschedule('sync-exception')
def _get_config(self):
uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self.onu_id)
name = 'pon-get-onu_config-{}-{}'.format(self._pon_id, self.onu_id)
return self.olt.rest_client.request('GET', uri, name=name)
def set_config(self, leaf, value):
self.log.debug('set-config', leaf=leaf, value=value)
data = json.dumps({leaf: value})
uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, self._onu_id)
name = 'onu-set-config-{}-{}-{}: {}'.format(self._pon_id, self._onu_id, leaf, value)
return self.olt.rest_client.request('PATCH', uri, data=data, name=name)
@property
def alloc_ids(self):
"""
Get alloc-id's of all T-CONTs
"""
return frozenset(self._tconts.keys())
@inlineCallbacks
def add_tcont(self, tcont, reflow=False):
"""
Creates/ a T-CONT with the given alloc-id
:param tcont: (TCont) Object that maintains the TCONT properties
:param reflow: (boolean) If true, force add (used during h/w resync)
:return: (deferred)
"""
if not self._valid:
returnValue('Deleting')
if not reflow and tcont.alloc_id in self._tconts:
returnValue('already created')
self.log.info('add', tcont=tcont, reflow=reflow)
self._tconts[tcont.alloc_id] = tcont
try:
results = yield tcont.add_to_hardware(self.olt.rest_client)
except Exception as e:
self.log.exception('tcont', tcont=tcont, reflow=reflow, e=e)
results = 'resync needed'
returnValue(results)
@inlineCallbacks
def remove_tcont(self, alloc_id):
tcont = self._tconts.get(alloc_id)
if tcont is None:
returnValue('nop')
del self._tconts[alloc_id]
try:
results = yield tcont.remove_from_hardware(self.olt.rest_client)
except RestInvalidResponseCode as e:
results = None
if e.code != 404:
self.log.exception('tcont-delete', e=e)
except Exception as e:
self.log.exception('delete', e=e)
raise
returnValue(results)
def gem_port(self, gem_id):
return self._gem_ports.get(gem_id)
def gem_ids(self, tech_profile_id):
"""Get all GEM Port IDs used by this ONU"""
assert tech_profile_id >= DEFAULT_TECH_PROFILE_TABLE_ID
return sorted([gem_id for gem_id, gem in self._gem_ports.items()
if not gem.multicast and
tech_profile_id == gem.tech_profile_id])
@inlineCallbacks
def add_gem_port(self, gem_port, reflow=False):
"""
Add a GEM Port to this ONU
:param gem_port: (GemPort) GEM Port to add
:param reflow: (boolean) If true, force add (used during h/w resync)
:return: (deferred)
"""
if not self._valid:
returnValue('Deleting')
if not reflow and gem_port.gem_id in self._gem_ports:
returnValue('nop')
self.log.info('add', gem_port=gem_port, reflow=reflow)
self._gem_ports[gem_port.gem_id] = gem_port
try:
results = yield gem_port.add_to_hardware(self.olt.rest_client)
except Exception as e:
self.log.exception('gem-port', gem_port=gem_port, reflow=reflow, e=e)
results = 'resync needed'
returnValue(results)
@inlineCallbacks
def remove_gem_id(self, gem_id):
gem_port = self._gem_ports.get(gem_id)
if gem_port is None:
returnValue('nop')
del self._gem_ports[gem_id]
try:
yield gem_port.remove_from_hardware(self.olt.rest_client)
except RestInvalidResponseCode as e:
if e.code != 404:
self.log.exception('onu-delete', e=e)
except Exception as ex:
self.log.exception('gem-port-delete', e=ex)
raise
returnValue('done')
@staticmethod
def gem_id_to_gvid(gem_id):
"""Calculate GEM VID (gvid) for a given GEM port id"""
return gem_id - 2048
|
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bdb import Bdb, Breakpoint, checkfuncname, BdbQuit
from contextlib import contextmanager
from functools import partial
import json
from pprint import pformat
import signal
import sys
import traceback
from uuid import uuid4
from logbook import Logger, FileHandler
from qdb.comm import TerminalCommandManager, fmt_msg
from qdb.compat import items, ExitStack, StringIO
from qdb.config import QdbConfig
from qdb.errors import (
QdbUnreachableBreakpoint,
QdbQuit,
QdbExecutionTimeout,
QdbPrognEndsInStatement,
)
from qdb.output import RemoteOutput, OutputTee
from qdb.utils import (
Timeout,
default_eval_fn,
default_exception_serializer,
progn,
)
# Module-level logbook logger shared by the qdb tracer machinery.
log = Logger('Qdb')
class BoundCmdManager(object):
    """
    Transparent proxy over a command manager that pre-binds the owning
    tracer as the first positional argument of every method fetched
    through attribute access.
    """
    def __init__(self, tracer, cmd_manager):
        self._tracer = tracer
        self._cmd_manager = cmd_manager
    def __getattr__(self, name):
        # Look the attribute up on the wrapped manager and curry the tracer
        # in ahead of the caller's own arguments.
        unbound = getattr(self._cmd_manager, name)
        return partial(unbound, self._tracer)
@contextmanager
def capture_output():
    """
    Temporarily replace ``sys.stdout`` and ``sys.stderr`` with in-memory
    buffers for the duration of the body, yielding both buffers.
    example
      with capture_output() as (out, err):
          print 'hello'
    Note: both buffers are closed on exit, so read their contents inside
    the block.
    """
    saved = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout.close()
        sys.stderr.close()
        sys.stdout, sys.stderr = saved
class Qdb(Bdb, object):
"""
The Quantopian Remote Debugger.
"""
_instance = None
def __new__(cls, *args, **kwargs):
"""
Qdb objects are singletons that persist until their disable method is
called.
"""
if not cls._instance:
inst = super(Qdb, cls).__new__(cls)
# `_init` might raise, so don't save as `_instance` yet:
inst._init(*args, **kwargs)
cls._instance = inst
return cls._instance
def __init__(self, *args, **kwargs):
pass
def _init(self, config=None, merge=False, **kwargs):
"""
See qdb.config for more information about the configuration of
qdb.
merge denotes how config and kwargs should be merged.
QdbConfig.kwargs_first says config will trample kwargs,
QdbConfig.config_first says kwargs will trample config.
Otherwise, kwargs and config cannot both be passed.
"""
self.super_ = super(Qdb, self)
self.super_.__init__()
self.reset()
if config and kwargs:
if merge == QdbConfig.kwargs_first:
first = kwargs
second = config
elif merge == QdbConfig.config_first:
first = config
second = kwargs
else:
raise TypeError('Cannot pass config and kwargs')
config = first.merge(second)
else:
config = QdbConfig.get_config(config or kwargs)
self.address = config.host, config.port
self.set_default_file(config.default_file)
self.default_namespace = config.default_namespace or {}
self.exception_serializer = config.exception_serializer or \
default_exception_serializer
self.eval_fn = config.eval_fn or default_eval_fn
self._file_cache = {}
self.retry_attempts = config.retry_attempts
self.repr_fn = config.repr_fn
self._skip_fn = config.skip_fn or (lambda _: False)
self.pause_signal = config.pause_signal \
if config.pause_signal else signal.SIGUSR2
self.uuid = str(config.uuid or uuid4())
self.watchlist = {}
self.execution_timeout = config.execution_timeout
self.reset()
self.log_handler = None
if config.log_file:
self.log_handler = FileHandler(config.log_file)
self.log_handler.push_application()
self.bound_cmd_manager = config.cmd_manager or TerminalCommandManager()
self.bound_cmd_manager.start(config.auth_msg)
# We need to be able to send stdout back to the user debugging the
# program. We hold a handle to this in case the program resets stdout.
self._old_stdout = sys.stdout
self._old_stderr = sys.stderr
self.redirect_output = (
config.redirect_output and
not isinstance(self.cmd_manager, TerminalCommandManager)
)
if self.redirect_output:
sys.stdout = OutputTee(
sys.stdout,
RemoteOutput(self.cmd_manager, '<stdout>'),
)
sys.stderr = OutputTee(
sys.stderr,
RemoteOutput(self.cmd_manager, '<stderr>'),
)
@property
def bound_cmd_manager(self):
return self.__cmd_manager
@bound_cmd_manager.setter
def bound_cmd_manager(self, value):
self.cmd_manager = value
self.__cmd_manager = BoundCmdManager(self, value)
def skip_fn(self, path):
return self._skip_fn(self.canonic(path))
def restore_output_streams(self):
"""
Restores the original output streams.
"""
if self.redirect_output:
sys.stdout = self._old_stdout
sys.stderr = self._old_stderr
def _new_execution_timeout(self, src):
"""
Return a new execution timeout context manager.
If not execution timeout is in place, returns ExitStack()
"""
# We use no_gevent=True because this could be cpu bound. This will
# still throw to the proper greenlet if this is gevented.
return (
Timeout(
self.execution_timeout,
QdbExecutionTimeout(src, self.execution_timeout),
no_gevent=True,
) if self.execution_timeout else ExitStack()
)
def set_default_file(self, filename):
"""
Safely sets the new default file.
"""
self.default_file = self.canonic(filename) if filename else None
def get_line(self, filename, line):
"""
Checks for any user cached files before deferring to the linecache.
"""
# The line - 1 is so that querying line 1 gives us the first line in
# the file.
try:
return self.get_file_lines(filename)[line - 1]
except IndexError:
return 'No source available for this line.'
def get_file(self, filename):
"""
Retrieves a file out of cache or opens and caches it.
"""
return '\n'.join(self.get_file_lines(filename))
def get_file_lines(self, filename):
"""
Retrieves the file from the file cache as a list of lines.
If the file does not exist in the cache, it is cached from
disk.
"""
canonic_name = self.canonic(filename)
try:
return self._file_cache[canonic_name]
except KeyError:
if not self.cache_file(canonic_name):
return []
return self._file_cache.get(canonic_name)
def cache_file(self, filename, contents=None):
"""
Caches filename from disk into memory.
This overrides whatever was cached for filename previously.
If contents is provided, it allows the user to cache a filename to a
string.
Returns True if the file caching succeeded, otherwise returns false.
"""
canonic_name = self.canonic(filename)
if contents:
self._file_cache[canonic_name] = contents.splitlines()
return True
try:
with open(canonic_name, 'r') as f:
self._file_cache[canonic_name] = f.read().splitlines()
return True
except IOError:
# The caching operation failed.
return False
def set_break(self, filename, lineno, temporary=False, cond=None,
funcname=None, **kwargs):
"""
Sets a breakpoint. This is overridden to account for the filecache
and for unreachable lines.
**kwargs are ignored. This is to work with payloads that pass extra
fields to the set_break payload.
"""
filename = self.canonic(filename) if filename else self.default_file
try:
self.get_line(filename, lineno)
except IndexError:
raise QdbUnreachableBreakpoint({
'file': filename,
'line': lineno,
'temp': temporary,
'cond': cond,
'func': funcname,
})
blist = self.breaks.setdefault(filename, [])
if lineno not in blist:
blist.append(lineno)
Breakpoint(filename, lineno, temporary, cond, funcname)
def clear_break(self, filename, lineno, *args, **kwargs):
"""
Wrapper to make the breakpoint json standardized for setting
and removing of breakpoints.
This means that the same json data that was used to set a break point
may be fed into this function with the extra values ignored.
"""
self.super_.clear_break(filename, lineno)
def canonic(self, filename):
canonic_filename = self.super_.canonic(filename)
if canonic_filename.endswith('pyc'):
return canonic_filename[:-1]
return canonic_filename
def reset(self):
self.botframe = None
self._set_stopinfo(None, None)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup_stack(self, stackframe, traceback):
"""
Sets up the state of the debugger object for this frame.
"""
self.forget()
self.stack, self.curindex = self.get_stack(stackframe, traceback)
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.update_watchlist()
def extend_watchlist(self, *args):
"""
Adds every arg to the watchlist and updates.
"""
for expr in args:
self.watchlist[expr] = (False, '')
self.update_watchlist()
def update_watchlist(self):
"""
Updates the watchlist by evaluating all the watched expressions in
our current frame.
"""
id_ = lambda n: n # Why is this NOT a builtin?
for expr in self.watchlist:
try:
with self._new_execution_timeout(expr), \
self.inject_default_namespace() as stackframe:
self.watchlist[expr] = (
None,
(self.repr_fn or id_)(
self.eval_fn(expr, stackframe)
)
)
except Exception as e:
self.watchlist[expr] = (
type(e).__name__,
self.exception_serializer(e)
)
def effective(self, file, line, stackframe):
"""
Finds the effective breakpoint for this line; called only
when we know that there is a breakpoint here.
returns the breakpoint paired with a flag denoting if we should
remove this breakpoint or not.
"""
for breakpoint in Breakpoint.bplist[file, line]:
if breakpoint.enabled == 0:
continue
if not checkfuncname(breakpoint, stackframe):
continue
# Count every hit when breakpoint is enabled
breakpoint.hits = breakpoint.hits + 1
if not breakpoint.cond:
# If unconditional, and ignoring go on to next, else break
if breakpoint.ignore > 0:
breakpoint.ignore = breakpoint.ignore - 1
continue
else:
return breakpoint, True
else:
# Conditional breakpoint
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
with self._new_execution_timeout(breakpoint.cond), \
self.inject_default_namespace(stackframe) as frame:
val = self.eval_fn(
breakpoint.cond,
frame,
'eval'
)
except Exception as e:
# Send back a message to let the user know there was an
# issue with their breakpoint.
self.cmd_manager.send_error(
'condition', {
'cond': breakpoint.cond,
'line': line,
'exc': type(e).__name__,
'output': self.exception_serializer(e),
}
)
# Return this breakpoint to be safe. The user will be
# stopped here so that they can fix the breakpoint.
return breakpoint, False
if val:
if breakpoint.ignore > 0:
breakpoint.ignore = breakpoint.ignore - 1
else:
return breakpoint, True
return None, False
def break_here(self, stackframe):
"""
Checks if we should break execution in this stackframe.
This function handles the cleanup and ignore counts for breakpoints.
Returns True iff we should stop in the stackframe, False otherwise.
"""
filename = self.canonic(stackframe.f_code.co_filename)
if filename not in self.breaks:
return False
lineno = stackframe.f_lineno
if lineno not in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = stackframe.f_code.co_firstlineno
if lineno not in self.breaks[filename]:
return False
# flag says ok to delete temporary breakpoints.
breakpoint, flag = self.effective(filename, lineno, stackframe)
if breakpoint:
self.currentbp = breakpoint.number
if flag and breakpoint.temporary:
self.do_clear(breakpoint.number)
return True
else:
return False
def trace_dispatch(self, stackframe, event, arg):
"""
Trace function that does some preliminary checks and then defers to
the event handler for each type of event.
"""
if self.quitting:
# We were told to quit by the user, bubble this up to their code.
return
if self.skip_fn(stackframe.f_code.co_filename):
# We want to skip this, don't stop but keep tracing.
return self.trace_dispatch
try:
return self.super_.trace_dispatch(stackframe, event, arg)
except BdbQuit:
raise QdbQuit() # Rewrap as a QdbError object.
def user_call(self, stackframe, arg):
if self.break_here(stackframe):
self.user_line(stackframe)
def user_line(self, stackframe):
self.setup_stack(stackframe, None)
bound_cmd_manager = self.bound_cmd_manager
bound_cmd_manager.send_watchlist()
bound_cmd_manager.send_stack()
bound_cmd_manager.next_command()
def user_return(self, stackframe, return_value):
stackframe.f_locals['__return__'] = return_value
self.setup_stack(stackframe, None)
bound_cmd_manager = self.bound_cmd_manager
bound_cmd_manager.send_watchlist()
bound_cmd_manager.send_stack()
bound_cmd_manager.next_command(
fmt_msg('return', str(return_value), serial=json.dumps),
)
def user_exception(self, stackframe, exc_info):
exc_type, exc_value, exc_traceback = exc_info
stackframe.f_locals['__exception__'] = exc_type, exc_value
self.setup_stack(stackframe, exc_traceback)
bound_cmd_manager = self.bound_cmd_manager
bound_cmd_manager.send_watchlist()
bound_cmd_manager.send_stack()
msg = fmt_msg(
'exception', {
'type': exc_type.__name__,
'value': str(exc_value),
'traceback': traceback.format_tb(exc_traceback)
},
serial=json.dumps,
)
return self.bound_cmd_manager.next_command(msg)
def do_clear(self, bpnum):
"""
Handles deletion of temporary breakpoints.
"""
if not (0 <= bpnum < len(Breakpoint.bpbynumber)):
return
self.clear_bpbynumber(bpnum)
def set_quit(self):
"""
Sets the quitting state and restores the program state.
"""
self.quitting = True
def eval_(self, code, pprint=False):
repr_fn = self.repr_fn
outexc = None
outmsg = None
with capture_output() as (out, err), \
self._new_execution_timeout(code), \
self.inject_default_namespace() as stackframe:
try:
if not repr_fn and not pprint:
self.eval_fn(
code,
stackframe,
'single',
)
else:
try:
# Do some some custom single mode magic that lets us
# call the repr function on the last expr.
value = progn(
code,
self.eval_fn,
stackframe,
)
except QdbPrognEndsInStatement:
# Statements have no value to print.
pass
else:
if pprint:
value = pformat(value)
if repr_fn:
value = repr_fn(value)
print(value)
except Exception as e:
outexc = type(e).__name__
outmsg = self.exception_serializer(e)
else:
outmsg = out.getvalue().rstrip('\n')
if outexc is not None or outmsg is not None:
self.cmd_manager.send_print(code, outexc, outmsg)
self.update_watchlist()
def _stack_jump_to(self, index):
"""
Jumps the stack to a specific index.
Raises an IndexError if the desired index does not exist.
"""
# Try to jump here first. This could raise an IndexError which will
# prevent the tracer's state from being corrupted.
self.curframe = self.stack[index][0]
self.curindex = index
self.curframe_locals = self.curframe.f_locals
self.update_watchlist()
def stack_shift_direction(self, direction):
"""
Shifts the stack up or down depending on direction.
If direction is positive, travel up, if direction is negative, travel
down. If direction is 0, do nothing.
If you cannot shift in the desired direction, an IndexError will be
raised.
"""
if direction == 0:
return # nop
stride = -1 if direction > 0 else 1
stack = self.stack
stacksize = len(stack)
curindex = self.curindex
skip_fn = self.skip_fn
target = None
def pred_up(idx):
return idx > 0
def pred_down(idx):
return idx < stacksize - 1
pred = pred_up if direction > 0 else pred_down
while pred(curindex):
curindex += stride
if not skip_fn(stack[curindex][0].f_code.co_filename):
target = curindex
break
if target is None:
raise IndexError('Shifted off the stack')
self._stack_jump_to(target)
def disable(self, mode='soft'):
"""
Stops tracing.
"""
try:
if mode == 'soft':
self.clear_all_breaks()
self.set_continue()
# Remove this instance so that new ones may be created.
self.__class__._instance = None
elif mode == 'hard':
sys.exit(1)
else:
raise ValueError("mode must be 'hard' or 'soft'")
finally:
self.restore_output_streams()
if self.log_handler:
self.log_handler.pop_application()
self.cmd_manager.stop()
if sys.gettrace() is self.trace_dispatch:
sys.settrace(None)
def __enter__(self):
self.set_trace(sys._getframe().f_back, stop=False)
return self
def __exit__(self, type, value, traceback):
self.disable('soft')
    def set_trace(self, stackframe=None, stop=True):
        """
        Starts debugging in stackframe or in the callers frame.
        If stop is True, begin stepping from here, otherwise, wait for
        the first breakpoint or exception.
        """
        # We need to look back 1 frame to get our caller.
        stackframe = stackframe or sys._getframe().f_back
        self.reset()
        # Install the trace function on every frame up the call chain so
        # returns into outer frames remain traced; the outermost frame
        # becomes botframe (the boundary at which tracing stops).
        while stackframe:
            stackframe.f_trace = self.trace_dispatch
            self.botframe = stackframe
            stackframe = stackframe.f_back
        if stop:
            self.set_step()
        else:
            self.set_continue()
        sys.settrace(self.trace_dispatch)
@contextmanager
def inject_default_namespace(self, stackframe=None):
"""
Adds the default namespace to the frame, or if no frame is provided,
self.curframe is used.
"""
stackframe = stackframe or self.curframe
to_remove = set()
for k, v in items(self.default_namespace):
if k not in stackframe.f_globals:
# Only add the default things if the name is unbound.
stackframe.f_globals[k] = v
to_remove.add(k)
try:
yield stackframe
finally:
for k in to_remove:
try:
del stackframe.f_globals[k]
except IndexError:
# The body of this manager might have del'd this.
pass
# Prevent exceptions from generating ref cycles.
del stackframe
|
|
from logs import logDecorator as lD
import jsonref, sqlite3
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.databaseIO.sqLiteIO'
@lD.log(logBase + '.getAllData')
def getAllData(logger, query, values=None, dbName=None):
    '''query data from the database

    Query the data over here. If there is a problem with the data, it is going
    to return the value of None, and log the error. Your program needs to check
    whether there was an error with the query by checking for a None return
    value. Note that the location of the databases are assumed to be present
    within the file ``../config/db.json``.

    Parameters
    ----------
    logger : {logging.logger}
        logging element
    query : {str}
        The query to be made to the databse
    values : {tuple or list-like}, optional
        Additional values to be passed to the query (the default is None)
    dbName : {str or None}, optional
        The name of the database to use. If this is None, the function will
        attempt to read the name from the ``defaultDB`` item within the
        file ``../config/db.json``.

    Returns
    -------
    list or None
        A list of tuples containing the values is returned. In case
        there is an error, the error will be logged, and a None will
        be return
    '''

    vals = None

    try:
        # Use a context manager so the config file handle is always closed.
        with open('../config/db.json') as dbF:
            db = jsonref.load(dbF)

        # Fall back to the configured default database when none is given.
        if (dbName is None) and ('defaultDB' in db):
            dbName = db['defaultDB']

        if dbName is None:
            logger.error('A database name has not been specified.')
            return None

        conn = sqlite3.connect(db[dbName]['connection'])
        cur = conn.cursor()
    except Exception as e:
        logger.error('Unable to connect to the database')
        logger.error(str(e))
        return None

    try:
        if values is None:
            cur.execute(query)
        else:
            cur.execute(query, values)

        # We assume that the data is small so we
        # can download the entire thing here ...
        # -------------------------------------------
        vals = cur.fetchall()
    except Exception as e:
        # Positional placeholders only: the original '{values}' named field
        # raised KeyError inside this handler and masked the real DB error.
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
        logger.error(str(e))

    try:
        cur.close()
        conn.close()
    except Exception as e:
        logger.error('Unable to disconnect to the database')
        logger.error(str(e))
        return None

    return vals
@lD.log(logBase + '.getDataIterator')
def getDataIterator(logger, query, values=None, chunks=100, dbName=None):
    '''Create an iterator from a largish query

    This is a generator that returns values in chunks of chunksize ``chunks``.

    Parameters
    ----------
    logger : {logging.logger}
        logging element
    query : {str}
        The query to be made to the databse
    values : {tuple or list-like}, optional
        Additional values to be passed to the query (the default
        is None)
    chunks : {number}, optional
        This is the number of rows that the data is going to return at every call
        if __next__() to this function. (the default is 100)
    dbName : {str or None}, optional
        The name of the database to use. If this is None, the function will
        attempt to read the name from the ``defaultDB`` item within the
        file ``../config/db.json``.

    Yields
    ------
    list of tuples
        A list of tuples from the query, with a maximum of ``chunks`` tuples returned
        at one time.
    '''

    try:
        # Use a context manager so the config file handle is always closed.
        with open('../config/db.json') as dbF:
            db = jsonref.load(dbF)

        # Fall back to the configured default database when none is given.
        if (dbName is None) and ('defaultDB' in db):
            dbName = db['defaultDB']

        if dbName is None:
            logger.error('A database name has not been specified.')
            return

        conn = sqlite3.connect(db[dbName]['connection'])
        cur = conn.cursor()
    except Exception as e:
        logger.error('Unable to connect to the database')
        logger.error(str(e))
        return

    try:
        if values is None:
            cur.execute(query)
        else:
            cur.execute(query, values)

        # Stream the result set in fixed-size chunks.
        while True:
            vals = cur.fetchmany(chunks)
            if len(vals) == 0:
                break
            yield vals
    except Exception as e:
        # Positional placeholders: the original message printed the literal
        # word 'values' and never logged the actual values.
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
        logger.error(str(e))

    # Cleanup runs when the generator is exhausted or closed.
    try:
        cur.close()
        conn.close()
    except Exception as e:
        logger.error('Unable to disconnect to the database')
        logger.error(str(e))
        return

    return
@lD.log(logBase + '.getSingleDataIterator')
def getSingleDataIterator(logger, query, values=None, dbName=None):
    '''Create an iterator from a largish query

    This is a generator that returns values in chunks of chunksize 1.

    Parameters
    ----------
    logger : {logging.logger}
        logging element
    query : {str}
        The query to be made to the databse
    values : {tuple or list-like}, optional
        Additional values to be passed to the query (the default
        is None)
    dbName : {str or None}, optional
        The name of the database to use. If this is None, the function will
        attempt to read the name from the ``defaultDB`` item within the
        file ``../config/db.json``.

    Yields
    ------
    tuple
        A single row from the query at one time.
    '''

    try:
        # Use a context manager so the config file handle is always closed.
        with open('../config/db.json') as dbF:
            db = jsonref.load(dbF)

        # Fall back to the configured default database when none is given.
        if (dbName is None) and ('defaultDB' in db):
            dbName = db['defaultDB']

        if dbName is None:
            logger.error('A database name has not been specified.')
            return

        conn = sqlite3.connect(db[dbName]['connection'])
        cur = conn.cursor()
    except Exception as e:
        logger.error('Unable to connect to the database')
        logger.error(str(e))
        return

    try:
        if values is None:
            cur.execute(query)
        else:
            cur.execute(query, values)

        # Stream the result set one row at a time.
        while True:
            vals = cur.fetchone()
            if vals is None:
                break
            yield vals
    except Exception as e:
        # Positional placeholders: the original message printed the literal
        # word 'values' and never logged the actual values.
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
        logger.error(str(e))

    # Cleanup runs when the generator is exhausted or closed.
    try:
        cur.close()
        conn.close()
    except Exception as e:
        logger.error('Unable to disconnect to the database')
        logger.error(str(e))
        return

    return
@lD.log(logBase + '.commitData')
def commitData(logger, query, values=None, dbName=None):
    '''query data from the database

    Query the data over here. If there is a problem with
    the data, it is going to return the value of ``None``, and
    log the error. Your program needs to check whether
    there was an error with the query by checking for a ``None``
    return value

    Parameters
    ----------
    logger : {logging.logger}
        logging element
    query : {str}
        The query to be made to the databse
    values : {tuple or list-like}, optional
        Additional values to be passed to the query (the default
        is None)
    dbName : {str or None}, optional
        The name of the database to use. If this is None, the function will
        attempt to read the name from the ``defaultDB`` item within the
        file ``../config/db.json``.

    Returns
    -------
    True or None
        On successful completion, a ``True`` is returned. In case
        there is an error, the error will be logged, and a ``None`` will
        be returnd
    '''

    vals = True

    try:
        # Use a context manager so the config file handle is always closed.
        with open('../config/db.json') as dbF:
            db = jsonref.load(dbF)

        # Fall back to the configured default database when none is given.
        if (dbName is None) and ('defaultDB' in db):
            dbName = db['defaultDB']

        if dbName is None:
            logger.error('A database name has not been specified.')
            return None

        conn = sqlite3.connect(db[dbName]['connection'])
        cur = conn.cursor()
    except Exception as e:
        logger.error('Unable to connect to the database')
        logger.error(str(e))
        return None

    try:
        if values is None:
            cur.execute(query)
        else:
            cur.execute(query, values)
    except Exception as e:
        # Positional placeholders: the original message printed the literal
        # word 'values' and never logged the actual values.
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
        logger.error(str(e))
        vals = None

    # Commit and close even when the statement failed, so the connection
    # is never leaked.
    try:
        conn.commit()
        cur.close()
        conn.close()
    except Exception as e:
        logger.error('Unable to disconnect to the database')
        logger.error(str(e))
        return None

    return vals
@lD.log(logBase + '.commitDataList')
def commitDataList(logger, query, values, dbName=None):
    '''query data from the database

    Query the data over here. If there is a problem with
    the data, it is going to return the value of None, and
    log the error. Your program needs to check whether
    there was an error with the query by checking for a ``None``
    return value

    Parameters
    ----------
    logger : {logging.logger}
        logging element
    query : {str}
        The query to be made to the databse
    values : {tuple or list-like}
        Sequence of value tuples, one per row, passed to ``executemany``.
    dbName : {str or None}, optional
        The name of the database to use. If this is None, the function will
        attempt to read the name from the ``defaultDB`` item within the
        file ``../config/db.json``.

    Returns
    -------
    True or None
        A successful completion of this function returns a ``True``.
        In case there is an error, the error will be logged, and a ``None`` will
        be returned
    '''

    val = True

    try:
        # Use a context manager so the config file handle is always closed.
        with open('../config/db.json') as dbF:
            db = jsonref.load(dbF)

        # Fall back to the configured default database when none is given.
        if (dbName is None) and ('defaultDB' in db):
            dbName = db['defaultDB']

        if dbName is None:
            logger.error('A database name has not been specified.')
            return None

        conn = sqlite3.connect(db[dbName]['connection'])
        cur = conn.cursor()
    except Exception as e:
        logger.error('Unable to connect to the database')
        logger.error(str(e))
        return None

    try:
        cur.executemany(query, values)
    except Exception as e:
        # Positional placeholders: the original message printed the literal
        # word 'values' and never logged the actual values.
        logger.error('Unable to execute query for:\n query: {}\n values: {}'.format(query, values))
        logger.error(str(e))
        val = None

    # Commit and close even when the statement failed, so the connection
    # is never leaked.
    try:
        conn.commit()
        cur.close()
        conn.close()
    except Exception as e:
        logger.error('Unable to disconnect to the database')
        logger.error(str(e))
        return None

    return val
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1APIServiceSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'ca_bundle': 'str',
        'group': 'str',
        'group_priority_minimum': 'int',
        'insecure_skip_tls_verify': 'bool',
        'service': 'V1beta1ServiceReference',
        'version': 'str',
        'version_priority': 'int'
    }

    attribute_map = {
        'ca_bundle': 'caBundle',
        'group': 'group',
        'group_priority_minimum': 'groupPriorityMinimum',
        'insecure_skip_tls_verify': 'insecureSkipTLSVerify',
        'service': 'service',
        'version': 'version',
        'version_priority': 'versionPriority'
    }

    def __init__(self, ca_bundle=None, group=None, group_priority_minimum=None, insecure_skip_tls_verify=None, service=None, version=None, version_priority=None):
        """
        V1beta1APIServiceSpec - a model defined in Swagger
        """

        self._ca_bundle = None
        self._group = None
        self._group_priority_minimum = None
        self._insecure_skip_tls_verify = None
        self._service = None
        self._version = None
        self._version_priority = None
        self.discriminator = None

        # Required fields go through their setters unconditionally (the
        # setters raise on None); optional fields are set only when given.
        self.ca_bundle = ca_bundle
        if group is not None:
            self.group = group
        self.group_priority_minimum = group_priority_minimum
        if insecure_skip_tls_verify is not None:
            self.insecure_skip_tls_verify = insecure_skip_tls_verify
        self.service = service
        if version is not None:
            self.version = version
        self.version_priority = version_priority

    @property
    def ca_bundle(self):
        """
        Gets the ca_bundle of this V1beta1APIServiceSpec.
        CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.

        :return: The ca_bundle of this V1beta1APIServiceSpec.
        :rtype: str
        """
        return self._ca_bundle

    @ca_bundle.setter
    def ca_bundle(self, ca_bundle):
        """
        Sets the ca_bundle of this V1beta1APIServiceSpec.
        CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.

        :param ca_bundle: The ca_bundle of this V1beta1APIServiceSpec.
        :type: str
        """
        if ca_bundle is None:
            raise ValueError("Invalid value for `ca_bundle`, must not be `None`")
        # Raw strings: '\/' in a plain literal is an invalid escape sequence
        # (warns on modern Python); the runtime bytes are unchanged. The
        # previous `is not None` guard was redundant after the raise above.
        if not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle):
            raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")

        self._ca_bundle = ca_bundle

    @property
    def group(self):
        """
        Gets the group of this V1beta1APIServiceSpec.
        Group is the API group name this server hosts

        :return: The group of this V1beta1APIServiceSpec.
        :rtype: str
        """
        return self._group

    @group.setter
    def group(self, group):
        """
        Sets the group of this V1beta1APIServiceSpec.
        Group is the API group name this server hosts

        :param group: The group of this V1beta1APIServiceSpec.
        :type: str
        """
        self._group = group

    @property
    def group_priority_minimum(self):
        """
        Gets the group_priority_minimum of this V1beta1APIServiceSpec.
        GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is prefered by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s

        :return: The group_priority_minimum of this V1beta1APIServiceSpec.
        :rtype: int
        """
        return self._group_priority_minimum

    @group_priority_minimum.setter
    def group_priority_minimum(self, group_priority_minimum):
        """
        Sets the group_priority_minimum of this V1beta1APIServiceSpec.
        GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is prefered by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s

        :param group_priority_minimum: The group_priority_minimum of this V1beta1APIServiceSpec.
        :type: int
        """
        if group_priority_minimum is None:
            raise ValueError("Invalid value for `group_priority_minimum`, must not be `None`")

        self._group_priority_minimum = group_priority_minimum

    @property
    def insecure_skip_tls_verify(self):
        """
        Gets the insecure_skip_tls_verify of this V1beta1APIServiceSpec.
        InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.

        :return: The insecure_skip_tls_verify of this V1beta1APIServiceSpec.
        :rtype: bool
        """
        return self._insecure_skip_tls_verify

    @insecure_skip_tls_verify.setter
    def insecure_skip_tls_verify(self, insecure_skip_tls_verify):
        """
        Sets the insecure_skip_tls_verify of this V1beta1APIServiceSpec.
        InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.

        :param insecure_skip_tls_verify: The insecure_skip_tls_verify of this V1beta1APIServiceSpec.
        :type: bool
        """
        self._insecure_skip_tls_verify = insecure_skip_tls_verify

    @property
    def service(self):
        """
        Gets the service of this V1beta1APIServiceSpec.
        Service is a reference to the service for this API server. It must communicate on port 443 If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled.

        :return: The service of this V1beta1APIServiceSpec.
        :rtype: V1beta1ServiceReference
        """
        return self._service

    @service.setter
    def service(self, service):
        """
        Sets the service of this V1beta1APIServiceSpec.
        Service is a reference to the service for this API server. It must communicate on port 443 If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled.

        :param service: The service of this V1beta1APIServiceSpec.
        :type: V1beta1ServiceReference
        """
        if service is None:
            raise ValueError("Invalid value for `service`, must not be `None`")

        self._service = service

    @property
    def version(self):
        """
        Gets the version of this V1beta1APIServiceSpec.
        Version is the API version this server hosts. For example, \"v1\"

        :return: The version of this V1beta1APIServiceSpec.
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """
        Sets the version of this V1beta1APIServiceSpec.
        Version is the API version this server hosts. For example, \"v1\"

        :param version: The version of this V1beta1APIServiceSpec.
        :type: str
        """
        self._version = version

    @property
    def version_priority(self):
        """
        Gets the version_priority of this V1beta1APIServiceSpec.
        VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) Since it's inside of a group, the number can be small, probably in the 10s.

        :return: The version_priority of this V1beta1APIServiceSpec.
        :rtype: int
        """
        return self._version_priority

    @version_priority.setter
    def version_priority(self, version_priority):
        """
        Sets the version_priority of this V1beta1APIServiceSpec.
        VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) Since it's inside of a group, the number can be small, probably in the 10s.

        :param version_priority: The version_priority of this V1beta1APIServiceSpec.
        :type: int
        """
        if version_priority is None:
            raise ValueError("Invalid value for `version_priority`, must not be `None`")

        self._version_priority = version_priority

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta1APIServiceSpec):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
|
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple but robust implementation of generator/coroutine-based
pipelines in Python. The pipelines may be run either sequentially
(single-threaded) or in parallel (one thread per pipeline stage).
This implementation supports pipeline bubbles (indications that the
processing for a certain item should abort). To use them, yield the
BUBBLE constant from any stage coroutine except the last.
In the parallel case, the implementation transparently handles thread
shutdown when the processing is complete and when a stage raises an
exception. KeyboardInterrupts (^C) are also handled.
When running a parallel pipeline, it is also possible to use
multiple coroutines for the same pipeline stage; this lets you speed
up a bottleneck stage by dividing its work among multiple threads.
To do so, pass an iterable of coroutines to the Pipeline constructor
in place of any single coroutine.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import Queue
from threading import Thread, Lock
import sys
BUBBLE = b'__PIPELINE_BUBBLE__'
POISON = b'__PIPELINE_POISON__'
DEFAULT_QUEUE_SIZE = 16
def _invalidate_queue(q, val=None, sync=True):
"""Breaks a Queue such that it never blocks, always has size 1,
and has no maximum size. get()ing from the queue returns `val`,
which defaults to None. `sync` controls whether a lock is
required (because it's not reentrant!).
"""
def _qsize(len=len):
return 1
def _put(item):
pass
def _get():
return val
if sync:
q.mutex.acquire()
try:
q.maxsize = 0
q._qsize = _qsize
q._put = _put
q._get = _get
q.not_empty.notifyAll()
q.not_full.notifyAll()
finally:
if sync:
q.mutex.release()
class CountedQueue(Queue.Queue):
    """A queue that keeps track of the number of threads that are
    still feeding into it. The queue is poisoned when all threads are
    finished with the queue.
    """
    def __init__(self, maxsize=0):
        # Queue.Queue is an old-style class on Python 2, so the base
        # initializer is called directly rather than via super().
        Queue.Queue.__init__(self, maxsize)
        self.nthreads = 0      # producers currently attached via acquire()
        self.poisoned = False  # True once the last producer has released

    def acquire(self):
        """Indicate that a thread will start putting into this queue.
        Should not be called after the queue is already poisoned.
        """
        with self.mutex:
            assert not self.poisoned
            assert self.nthreads >= 0
            self.nthreads += 1

    def release(self):
        """Indicate that a thread that was putting into this queue has
        exited. If this is the last thread using the queue, the queue
        is poisoned.
        """
        with self.mutex:
            self.nthreads -= 1
            assert self.nthreads >= 0
            if self.nthreads == 0:
                # All threads are done adding to this queue. Poison it
                # when it becomes empty.
                self.poisoned = True

                # Replacement _get invalidates when no items remain.
                # (sync=False: we already hold self.mutex here.)
                _old_get = self._get

                def _get():
                    out = _old_get()
                    if not self.queue:
                        _invalidate_queue(self, POISON, False)
                    return out

                if self.queue:
                    # Items remain.
                    self._get = _get
                else:
                    # No items. Invalidate immediately.
                    _invalidate_queue(self, POISON, False)
class MultiMessage(object):
    """A message yielded by a pipeline stage encapsulating multiple
    values to be sent to the next stage.
    """
    def __init__(self, messages):
        # Stored as-is; consumers unwrap via the .messages attribute.
        self.messages = messages
def multiple(messages):
    """Yield multiple([message, ..]) from a pipeline stage to send
    multiple values to the next pipeline stage.

    Convenience wrapper around the MultiMessage envelope type.
    """
    return MultiMessage(messages)
def stage(func):
    """Decorate a function to become a simple stage.

    >>> @stage
    ... def add(n, i):
    ...     return i + n
    >>> pipe = Pipeline([
    ...     iter([1, 2, 3]),
    ...     add(2),
    ... ])
    >>> list(pipe.pull())
    [3, 4, 5]
    """
    def coro(*args):
        # Yield the previous result, receive the next task, apply func.
        result = None
        while True:
            received = yield result
            result = func(*(args + (received,)))
    return coro
def mutator_stage(func):
    """Decorate a function that manipulates items in a coroutine to
    become a simple stage.

    >>> @mutator_stage
    ... def setkey(key, item):
    ...    item[key] = True
    >>> pipe = Pipeline([
    ...     iter([{'x': False}, {'a': False}]),
    ...     setkey('x'),
    ... ])
    >>> list(pipe.pull())
    [{'x': True}, {'a': False, 'x': True}]
    """
    def coro(*args):
        # Unlike stage(), the received task itself (after mutation)
        # is passed on to the next stage.
        item = None
        while True:
            item = yield item
            func(*(args + (item,)))
    return coro
def _allmsgs(obj):
    """Returns a list of all the messages encapsulated in obj. If obj
    is a MultiMessage, returns its enclosed messages. If obj is BUBBLE,
    returns an empty list. Otherwise, returns a list containing obj.
    """
    if isinstance(obj, MultiMessage):
        return obj.messages
    return [] if obj == BUBBLE else [obj]
class PipelineThread(Thread):
    """Abstract base class for pipeline-stage threads."""
    def __init__(self, all_threads):
        super(PipelineThread, self).__init__()
        self.abort_lock = Lock()
        self.abort_flag = False
        self.all_threads = all_threads
        self.exc_info = None

    def abort(self):
        """Shut down the thread at the next chance possible.
        """
        with self.abort_lock:
            self.abort_flag = True

            # Ensure that we are not blocking on a queue read or write:
            # invalidate whichever queues this subclass uses.
            for attr in ('in_queue', 'out_queue'):
                q = getattr(self, attr, None)
                if q is not None:
                    _invalidate_queue(q, POISON)

    def abort_all(self, exc_info):
        """Abort all other threads in the system for an exception.
        """
        self.exc_info = exc_info
        for thread in self.all_threads:
            thread.abort()
class FirstPipelineThread(PipelineThread):
    """The thread running the first stage in a parallel pipeline setup.
    The coroutine should just be a generator.
    """
    def __init__(self, coro, out_queue, all_threads):
        super(FirstPipelineThread, self).__init__(all_threads)
        self.coro = coro

        # Register as a producer on the downstream queue.
        self.out_queue = out_queue
        self.out_queue.acquire()
        # NOTE: abort_lock and abort_flag are initialized by
        # PipelineThread.__init__; re-creating them here (as the
        # original code did) discarded the lock the base class made.

    def run(self):
        """Drain the source generator, forwarding its messages downstream."""
        try:
            while True:
                with self.abort_lock:
                    if self.abort_flag:
                        return

                # Get the value from the generator.
                try:
                    msg = self.coro.next()
                except StopIteration:
                    break

                # Send messages to the next stage.
                for msg in _allmsgs(msg):
                    with self.abort_lock:
                        if self.abort_flag:
                            return
                    self.out_queue.put(msg)
        except:
            self.abort_all(sys.exc_info())
            return

        # Generator finished; shut down the pipeline.
        self.out_queue.release()
class MiddlePipelineThread(PipelineThread):
    """A thread running any stage in the pipeline except the first or
    last.
    """
    def __init__(self, coro, in_queue, out_queue, all_threads):
        super(MiddlePipelineThread, self).__init__(all_threads)
        self.coro = coro
        self.in_queue = in_queue
        self.out_queue = out_queue
        # Register as a producer on the downstream queue.
        self.out_queue.acquire()

    def run(self):
        try:
            # Prime the coroutine.
            self.coro.next()

            while True:
                with self.abort_lock:
                    if self.abort_flag:
                        return

                # Get the message from the previous stage.
                msg = self.in_queue.get()
                if msg is POISON:
                    # Upstream finished; fall through to release() below.
                    break

                with self.abort_lock:
                    if self.abort_flag:
                        return

                # Invoke the current stage.
                out = self.coro.send(msg)

                # Send messages to next stage.
                for msg in _allmsgs(out):
                    with self.abort_lock:
                        if self.abort_flag:
                            return
                    self.out_queue.put(msg)
        except:
            self.abort_all(sys.exc_info())
            return

        # Pipeline is shutting down normally.
        self.out_queue.release()
class LastPipelineThread(PipelineThread):
    """A thread running the last stage in a pipeline. The coroutine
    should yield nothing.
    """
    def __init__(self, coro, in_queue, all_threads):
        super(LastPipelineThread, self).__init__(all_threads)
        self.coro = coro
        self.in_queue = in_queue

    def run(self):
        # Prime the coroutine.
        self.coro.next()

        try:
            while True:
                with self.abort_lock:
                    if self.abort_flag:
                        return

                # Get the message from the previous stage.
                msg = self.in_queue.get()
                if msg is POISON:
                    # Upstream finished; there is no downstream queue to
                    # release, so simply stop.
                    break

                with self.abort_lock:
                    if self.abort_flag:
                        return

                # Send to consumer.
                self.coro.send(msg)
        except:
            self.abort_all(sys.exc_info())
            return
class Pipeline(object):
    """Represents a staged pattern of work. Each stage in the pipeline
    is a coroutine that receives messages from the previous stage and
    yields messages to be sent to the next stage.
    """
    def __init__(self, stages):
        """Makes a new pipeline from a list of coroutines. There must
        be at least two stages.
        """
        if len(stages) < 2:
            raise ValueError('pipeline must have at least two stages')
        # Normalize: each stage becomes a tuple of coroutines so a
        # single stage may be backed by multiple worker threads.
        self.stages = []
        for stage in stages:
            if isinstance(stage, (list, tuple)):
                self.stages.append(stage)
            else:
                # Default to one thread per stage.
                self.stages.append((stage,))

    def run_sequential(self):
        """Run the pipeline sequentially in the current thread. The
        stages are run one after the other. Only the first coroutine
        in each stage is used.
        """
        list(self.pull())

    def run_parallel(self, queue_size=DEFAULT_QUEUE_SIZE):
        """Run the pipeline in parallel using one thread per stage. The
        messages between the stages are stored in queues of the given
        size.
        """
        queue_count = len(self.stages) - 1
        queues = [CountedQueue(queue_size) for i in range(queue_count)]
        threads = []

        # Set up first stage.
        for coro in self.stages[0]:
            threads.append(FirstPipelineThread(coro, queues[0], threads))

        # Middle stages.
        for i in range(1, queue_count):
            for coro in self.stages[i]:
                threads.append(MiddlePipelineThread(
                    coro, queues[i - 1], queues[i], threads
                ))

        # Last stage.
        for coro in self.stages[-1]:
            threads.append(
                LastPipelineThread(coro, queues[-1], threads)
            )

        # Start threads.
        for thread in threads:
            thread.start()

        # Wait for termination. The final thread lasts the longest.
        try:
            # Using a timeout allows us to receive KeyboardInterrupt
            # exceptions during the join().
            while threads[-1].isAlive():
                threads[-1].join(1)

        except:
            # Stop all the threads immediately.
            for thread in threads:
                thread.abort()
            raise

        finally:
            # Make completely sure that all the threads have finished
            # before we return. They should already be either finished,
            # in normal operation, or aborted, in case of an exception.
            for thread in threads[:-1]:
                thread.join()

        for thread in threads:
            exc_info = thread.exc_info
            if exc_info:
                # Make the exception appear as it was raised originally.
                # (Python 2 three-expression raise keeps the traceback.)
                raise exc_info[0], exc_info[1], exc_info[2]

    def pull(self):
        """Yield elements from the end of the pipeline. Runs the stages
        sequentially until the last yields some messages. Each of the messages
        is then yielded by ``pulled.next()``. If the pipeline has a consumer,
        that is the last stage does not yield any messages, then pull will not
        yield any messages. Only the first coroutine in each stage is used
        """
        coros = [stage[0] for stage in self.stages]

        # "Prime" the coroutines.
        for coro in coros[1:]:
            coro.next()

        # Begin the pipeline.
        for out in coros[0]:
            msgs = _allmsgs(out)
            # Fan each message through the remaining stages in order.
            for coro in coros[1:]:
                next_msgs = []
                for msg in msgs:
                    out = coro.send(msg)
                    next_msgs.extend(_allmsgs(out))
                msgs = next_msgs
            for msg in msgs:
                yield msg
# Smoke test.
if __name__ == b'__main__':
    # NOTE: comparing __name__ against a bytes literal only matches on
    # Python 2 (where str is bytes); this module targets Python 2.
    import time

    # Test a normally-terminating pipeline both in sequence and
    # in parallel.
    def produce():
        for i in range(5):
            print('generating %i' % i)
            time.sleep(1)
            yield i

    def work():
        num = yield
        while True:
            print('processing %i' % num)
            time.sleep(2)
            num = yield num * 2

    def consume():
        while True:
            num = yield
            time.sleep(1)
            print('received %i' % num)

    ts_start = time.time()
    Pipeline([produce(), work(), consume()]).run_sequential()
    ts_seq = time.time()
    Pipeline([produce(), work(), consume()]).run_parallel()
    ts_par = time.time()
    # Two worker coroutines share the bottleneck middle stage.
    Pipeline([produce(), (work(), work()), consume()]).run_parallel()
    ts_end = time.time()
    print('Sequential time:', ts_seq - ts_start)
    print('Parallel time:', ts_par - ts_seq)
    print('Multiply-parallel time:', ts_end - ts_par)
    print()

    # Test a pipeline that raises an exception.
    def exc_produce():
        for i in range(10):
            print('generating %i' % i)
            time.sleep(1)
            yield i

    def exc_work():
        num = yield
        while True:
            print('processing %i' % num)
            time.sleep(3)
            if num == 3:
                # Deliberate failure to exercise abort propagation.
                raise Exception()
            num = yield num * 2

    def exc_consume():
        while True:
            num = yield
            print('received %i' % num)

    Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import sys
import maya.cmds as cmds
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMaya as OpenMaya
import maya.utils as utils
from Qt import QtGui, QtCore, QtWidgets
from .renderglobals import RenderGlobals
from .utils import wait, viewport_state, get_maya_window
# Py3 compat
if sys.version_info > (3, 0):
    # Alias the Python 2 builtins used throughout this module so the same
    # code runs unmodified on Python 3.
    basestring = str
    long = int
# All modelEditor flags exposed as attributes on Viewport instances via the
# EditorProperty descriptor (queried/edited through cmds.modelEditor).
EDITOR_PROPERTIES = [
    'activeComponentsXray',
    'activeOnly',
    'backfaceCulling',
    'bufferMode',
    'bumpResolution',
    'camera',
    'cameras',
    'clipGhosts',
    'colorResolution',
    'controllers',
    'controlVertices',
    'cullingOverride',
    'deformers',
    'depthOfField',
    'dimensions',
    'displayAppearance',
    'displayLights',
    'displayTextures',
    'dynamicConstraints',
    'dynamics',
    'fluids',
    'fogColor',
    'fogDensity',
    'fogEnd',
    'fogging',
    'fogMode',
    'fogSource',
    'fogStart',
    'follicles',
    'gpuCacheDisplayFilter',
    'greasePencils',
    'grid',
    'hairSystems',
    'handles',
    'headsUpDisplay',
    'hulls',
    'ignorePanZoom',
    'ikHandles',
    'imagePlane',
    'interactiveBackFaceCull',
    'interactiveDisableShadows',
    'isFiltered',
    'joints',
    'jointXray',
    'lights',
    'lineWidth',
    'locators',
    'lowQualityLighting',
    'manipulators',
    'maxConstantTransparency',
    'maximumNumHardwareLights',
    'motionTrails',
    'nCloths',
    'nParticles',
    'nRigids',
    'nurbsCurves',
    'nurbsSurfaces',
    'objectFilterShowInHUD',
    'occlusionCulling',
    'particleInstancers',
    'pivots',
    'planes',
    'pluginShapes',
    'polymeshes',
    'rendererName',
    'selectionHiliteDisplay',
    'shadingModel',
    'shadows',
    'smallObjectCulling',
    'smallObjectThreshold',
    'smoothWireframe',
    'sortTransparent',
    'strokes',
    'subdivSurfaces',
    'textureAnisotropic',
    'textureCompression',
    'textureDisplay',
    'textureHilight',
    'textureMaxSize',
    'textures',
    'textureSampling',
    'transparencyAlgorithm',
    'transpInShadows',
    'twoSidedLighting',
    'useBaseRenderer',
    'useDefaultMaterial',
    'useInteractiveMode',
    'useReducedRenderer',
    'viewSelected',
    'wireframeOnShaded',
    'xray',
]
# Camera shape attributes exposed on Viewport instances via the
# CameraProperty descriptor (read/written with cmds.getAttr/setAttr).
CAMERA_PROPERTIES = [
    'displayFilmGate',
    'displayResolution',
    'displayGateMask',
    'displayFieldChart',
    'displaySafeAction',
    'displaySafeTitle',
    'displayFilmPivot',
    'displayFilmOrigin',
    'overscan',
    'displayGateMaskOpacity',
    'displayGateMaskColor'
]
def deferred_close(view):
    '''Delete *view*'s panel once Maya returns to an idle state.'''

    # Capture the panel name up front — the view object may no longer be
    # valid by the time the deferred call executes.
    panel_name = view.panel
    wait(0.1)
    utils.executeDeferred(cmds.deleteUI, panel_name, panel=True)
def playblast(camera=None, state=None, **kwargs):
    '''Playblast the active viewport.

    Arguments:
    :param camera: Camera to playblast
    :param state: Viewport state

    Playblast Arguments:
    :param width: Resolution width
    :param height: Resolution height
    :param format: Render format like qt or image
    :param compression: Render compression
    :param viewer: Launch the default viewer afterwards
    '''

    # Sensible defaults; anything in kwargs overrides them.
    options = {
        'offScreen': False,
        'percent': 100,
        'quality': 100,
        'viewer': False,
        'width': 960,
        'height': 540,
        'framePadding': 4,
        'format': 'qt',
        'compression': 'H.264',
        'forceOverwrite': True,
    }
    options.update(kwargs)

    view = Viewport.active()
    blast_state = state or view.get_state()
    if camera:
        blast_state['camera'] = camera

    # Apply the requested viewport state only for the duration of the blast.
    with viewport_state(view, blast_state):
        output = cmds.playblast(**options)
    return output
class EditorProperty(object):
    '''Descriptor that maps an attribute to a cmds.modelEditor property of
    the owning Viewport's panel.'''

    def __init__(self, name):
        self.name = name

    def __get__(self, inst, typ=None):
        '''Query the property from the instance's modelEditor panel.'''

        if not inst:
            return self

        query_kwargs = {'query': True, self.name: True}
        try:
            result = cmds.modelEditor(inst.panel, **query_kwargs)
        except TypeError:
            # Not a native modelEditor flag — query it as a plugin
            # object filter instead.
            result = cmds.modelEditor(inst.panel, query=True, qpo=self.name)

        # smallObjectThreshold is returned as a single-element list.
        if self.name == 'smallObjectThreshold':
            result = result[0]
        return result

    def __set__(self, inst, value):
        '''Apply the property to the instance's modelEditor panel.'''

        edit_kwargs = {'edit': True, self.name: value}
        try:
            cmds.modelEditor(inst.panel, **edit_kwargs)
        except TypeError:
            # Fall back to editing a plugin object filter value.
            cmds.modelEditor(inst.panel, edit=True, po=[self.name, value])
class CameraProperty(object):
    '''Descriptor exposing an attribute of the owning Viewport's current
    camera via cmds.getAttr/setAttr.'''

    def __init__(self, name):
        self.name = name

    def __get__(self, inst, typ=None):
        '''Gets a model editor's camera property.'''

        if not inst:
            return self

        attr = inst.camera + '.' + self.name
        value = cmds.getAttr(attr)
        # Compound attributes come back as [(x, y, z)] — unwrap to (x, y, z).
        if isinstance(value, list):
            if len(value) == 1 and isinstance(value[0], (list, tuple)):
                value = value[0]
        return value

    def __set__(self, inst, value):
        '''Sets a model editor's camera property.

        Locked or incoming-connected attributes are silently skipped, since
        setAttr would fail on them.
        '''

        attr = inst.camera + '.' + self.name
        locked = cmds.getAttr(attr, lock=True)
        if locked:
            return
        has_connections = cmds.listConnections(attr, s=True, d=False)
        if has_connections:
            return
        try:
            if isinstance(value, (int, float)):
                cmds.setAttr(attr, value)
            elif isinstance(value, basestring):
                cmds.setAttr(attr, value, type='string')
            elif isinstance(value, (list, tuple)):
                cmds.setAttr(attr, *value)
            else:
                cmds.setAttr(attr, value)
        except Exception as e:
            # BUGFIX: this previously read ``except Except``, an undefined
            # name, so any setAttr failure raised NameError instead of being
            # reported here.
            print('Failed to set state: %s %s' % (attr, value))
            print(e)
class Viewport(object):
    '''A convenient api for manipulating Maya 3D Viewports. While you can
    manually construct a Viewport from an OpenMayaUI.M3dView instance, it is
    much easier to use the convenience methods Viewport.iter,
    Viewport.active and Viewport.get::

        # Get the active view
        v = Viewport.active()
        assert v.focus == True

        # Assuming we have a second modelPanel available
        # Get an inactive view and make it the active view
        v2 = Viewport.get(1)
        v2.focus = True
        assert v.focus == False
        assert v2.focus == True

    Viewport provides standard attribute lookup to all modelEditor properties::

        # Hide nurbsCurves and show polymeshes in the viewport
        v.nurbsCurves = False
        v.polymeshes = True

    :param m3dview: OpenMayaUI.M3dView instance.
    '''

    # Install one descriptor per modelEditor/camera property so they can be
    # accessed as plain attributes on Viewport instances.
    for p in EDITOR_PROPERTIES:
        locals()[p] = EditorProperty(p)
    for p in CAMERA_PROPERTIES:
        locals()[p] = CameraProperty(p)

    def __init__(self, m3dview):
        self._m3dview = m3dview
        # Intentionally shadow the classmethods on instances so that
        # viewport.highlight()/identify() act on just this view.
        self.highlight = self._highlight
        self.identify = self._highlight

    def __hash__(self):
        return hash(self._m3dview)

    def __eq__(self, other):
        if hasattr(other, '_m3dview'):
            return self._m3dview == other._m3dview
        # Also allow comparison against a panel name string.
        return self.panel == other

    def copy(self):
        '''Tear off a copy of the viewport.

        :returns: A new torn off copy of Viewport'''

        panel = cmds.modelPanel(tearOffCopy=self.panel)
        view = self.from_panel(panel)
        view.focus = True
        return view

    __copy__ = copy
    __deepcopy__ = copy

    def float(self):
        '''Tear off the panel.'''

        copied_view = self.copy()
        deferred_close(self)
        self._m3dview = copied_view._m3dview

    @classmethod
    def new(cls):
        '''Create a new floating Viewport and give it focus.'''

        panel = cmds.modelPanel()
        view = cls.from_panel(panel)
        view.float()
        view.focus = True
        return view

    def close(self):
        '''Close this viewport'''

        deferred_close(self)

    @property
    def properties(self):
        '''A list including all editor property names.'''

        return EDITOR_PROPERTIES + CAMERA_PROPERTIES

    def get_state(self):
        '''Get a state dictionary of all modelEditor properties.'''

        active_state = {}
        active_state['RenderGlobals'] = RenderGlobals.get_state()
        for ep in EDITOR_PROPERTIES + CAMERA_PROPERTIES:
            active_state[ep] = getattr(self, ep)
        return active_state

    def set_state(self, state):
        '''Sets a dictionary of properties all at once.

        :param state: Dictionary including property, value pairs'''

        cstate = state.copy()
        renderglobals_state = cstate.pop('RenderGlobals', None)
        if renderglobals_state:
            RenderGlobals.set_state(renderglobals_state)
        for k, v in cstate.items():
            setattr(self, k, v)

    def playblast(self, camera=None, state=None, **kwargs):
        '''Playblasting with reasonable default arguments. Automatically sets
        this viewport to the active view, ensuring that we playblast the
        correct view.

        :param camera: Camera to playblast
        :param state: Viewport state
        :param kwargs: Same kwargs as :func:`maya.cmds.playblast`'''

        if not self.focus:
            self.focus = True
        # BUGFIX: camera/state were previously hardcoded to None (dropping
        # the caller's arguments) and the resulting file path was discarded.
        return playblast(camera=camera, state=state, **kwargs)

    @property
    def screen_geometry(self):
        '''QRect of the screen this viewport's widget sits on.'''

        qapp = QtWidgets.QApplication.instance()
        desktop = qapp.desktop()
        screen = desktop.screenNumber(self.widget)
        return desktop.screenGeometry(screen)

    def center(self):
        '''Move the viewport's window to the center of its screen.'''

        screen_center = self.screen_geometry.center()
        window_center = self.window.rect().center()
        pnt = screen_center - window_center
        self.window.move(pnt)

    @property
    def size(self):
        '''(width, height) of the viewport in pixels.'''

        return self._m3dview.portWidth(), self._m3dview.portHeight()

    @size.setter
    def size(self, wh):
        # Resize the parent window by the delta between the requested
        # viewport size and the current one, preserving window chrome.
        w1, h1 = self.size
        win_size = self.window.size()
        w2, h2 = win_size.width(), win_size.height()
        w_offset = w2 - w1
        h_offset = h2 - h1
        self.window.resize(wh[0] + w_offset, wh[1] + h_offset)

    @property
    def widget(self):
        '''Returns a QWidget object for the viewport.'''

        try:
            from shiboken import wrapInstance
        except ImportError:  # PySide2 compat
            from shiboken2 import wrapInstance
        w = wrapInstance(long(self._m3dview.widget()), QtWidgets.QWidget)
        return w

    @property
    def window(self):
        '''Returns a QWidget object for the viewports parent window'''

        return self.widget.window()

    @property
    def panel(self):
        '''Returns a panel name for the Viewport.'''

        panel = OpenMayaUI.MQtUtil.fullName(long(self._m3dview.widget()))
        return panel.split('|')[-2]

    @property
    def index(self):
        '''Returns the index of the viewport'''

        # BUGFIX: Viewport.iter() yields Viewport objects, not (i, view)
        # pairs — the old ``for i, view in Viewport.iter()`` raised
        # TypeError on the first iteration.
        for i, view in enumerate(Viewport.iter()):
            if self == view:
                return i
        raise IndexError('Can not find index')

    @property
    def focus(self):
        '''Check if current Viewport is the active Viewport.'''

        return self == self.active()

    @focus.setter
    def focus(self, value):
        '''Set focus to Viewport instance.'''

        if not value:
            # Unfocusing means giving focus to some other viewport.
            try:
                Viewport.get(1).focus = True
            except Exception:
                # No second viewport available — nothing to hand focus to.
                pass
            return
        cmds.modelEditor(self.panel, edit=True, activeView=True)

    @property
    def camera(self):
        '''Get the short name of the active camera.'''

        camera = OpenMaya.MDagPath()
        self._m3dview.getCamera(camera)
        # Pop from the shape to the transform node.
        camera.pop()
        return camera.partialPathName()

    @camera.setter
    def camera(self, camera_path):
        '''Set the active camera for the Viewport.'''

        sel = OpenMaya.MSelectionList()
        sel.add(camera_path)
        camera = OpenMaya.MDagPath()
        sel.getDagPath(0, camera)
        # If a transform was given, extend down to its camera shape.
        util = OpenMaya.MScriptUtil(0)
        int_ptr = util.asUintPtr()
        camera.numberOfShapesDirectlyBelow(int_ptr)
        num_shapes = util.getUint(int_ptr)
        if num_shapes:
            camera.extendToShape()
        self._m3dview.setCamera(camera)
        self._m3dview.refresh(False, False)

    @property
    def background(self):
        '''Get the background color of the Viewport'''

        color = self._m3dview.backgroundColor()
        return color[0], color[1], color[2]

    @background.setter
    def background(self, values):
        '''Set the background color of the Viewport.

        :param values: RGB value'''

        cmds.displayRGBColor('background', *values)

    def _highlight(self, msec=2000):
        '''Draws an identifier in a Viewport.'''

        highlight = Highlight(self)
        highlight.display(msec)

    @classmethod
    def identify(cls, delay=2000):
        '''Shows identifiers in all Viewports::

            Viewport.identify()

        :param delay: Length of time in ms to leave up identifier
        '''

        # BUGFIX: delay was previously ignored (cls.highlight() was called
        # with no arguments, always using the 2000ms default).
        cls.highlight(delay)

    @classmethod
    def highlight(cls, msec=2000):
        '''Draws QLabels indexing each Viewport. These indices can be used to
        with :method:`get` to return a corresponding Viewport object.'''

        for viewport in cls.iter():
            if viewport.widget.isVisible():
                # Resolves to the per-instance _highlight bound in __init__,
                # not this classmethod.
                viewport.highlight(msec)

    @staticmethod
    def count():
        '''The number of 3D Viewports.'''

        return OpenMayaUI.M3dView.numberOf3dViews()

    @classmethod
    def from_panel(cls, panel):
        '''Construct a Viewport from a modelPanel name.'''

        m3dview = OpenMayaUI.M3dView()
        OpenMayaUI.M3dView.getM3dViewFromModelPanel(panel, m3dview)
        return cls(m3dview)

    @classmethod
    def get(cls, index):
        '''Get the Viewport at index.'''

        m3dview = OpenMayaUI.M3dView()
        OpenMayaUI.M3dView.get3dView(index, m3dview)
        return cls(m3dview)

    @classmethod
    def active(cls):
        '''Get the active Viewport.'''

        m3dview = OpenMayaUI.M3dView.active3dView()
        return cls(m3dview)

    @classmethod
    def iter(cls):
        '''Yield all Viewport objects.

        usage::

            for view in Viewport.iter():
                print(v.panel)
        '''

        for index in range(cls.count()):
            m3dview = OpenMayaUI.M3dView()
            OpenMayaUI.M3dView.get3dView(index, m3dview)
            yield cls(m3dview)
class Highlight(QtWidgets.QDialog):
    '''Outline a viewport panel and show the panel name.'''

    def __init__(self, view):
        super(Highlight, self).__init__(parent=get_maya_window())
        # The viewport being identified; we overlay its widget's geometry.
        self.view = view
        self.widget = self.view.widget
        # Frameless so only the painted outline and text are visible.
        self.setWindowFlags(
            self.windowFlags()
            | QtCore.Qt.FramelessWindowHint
        )
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        # Let mouse events pass through to the viewport beneath the overlay.
        self.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents)
        # Map the widget-local rect to global screen coordinates so the
        # dialog sits exactly on top of the viewport widget.
        wrect = self.widget.geometry()
        rect = QtCore.QRect(
            self.widget.mapToGlobal(
                wrect.topLeft(),
            ),
            wrect.size(),
        )
        self.setGeometry(
            rect
        )

    def display(self, msec):
        '''Show the overlay for *msec* milliseconds, then auto-accept.'''
        w = QtWidgets.QApplication.instance().activeWindow()
        self.show()
        # Re-raise the previously-active window so focus is not stolen.
        w.raise_()
        QtCore.QTimer.singleShot(msec, self.accept)

    def paintEvent(self, event):
        # Draw a thick red border and the panel name centered in the view.
        painter = QtGui.QPainter(self)
        pen = QtGui.QPen(QtCore.Qt.red)
        pen.setWidth(8)
        font = QtGui.QFont()
        font.setPointSize(48)
        painter.setFont(font)
        painter.setPen(pen)
        painter.setBrush(QtCore.Qt.transparent)
        painter.drawRect(self.rect())
        painter.drawText(self.rect(), QtCore.Qt.AlignCenter, self.view.panel)
|
|
import weakref
import networkx as nx
from openmdao.main.expreval import ConnectedExprEvaluator
from openmdao.main.pseudocomp import PseudoComponent
from openmdao.units import PhysicalQuantity
class ExprMapper(object):
    """A mapping between source expressions and destination expressions"""

    def __init__(self, scope):
        # graph of source expressions to destination expressions
        self._exprgraph = nx.DiGraph()
        # hold the scope weakly to avoid a reference cycle
        self._scope = None if scope is None else weakref.ref(scope)

    def __getstate__(self):
        # weakrefs can't be pickled; store a strong reference instead
        state = self.__dict__.copy()
        state['_scope'] = self.scope
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        scope = state['_scope']
        self._scope = None if scope is None else weakref.ref(scope)

    @property
    def scope(self):
        """Dereferenced scope object, or None."""
        return None if self._scope is None else self._scope()

    def get_expr(self, text):
        """Return the expression evaluator stored for the given expression
        text, or None if it is not in the graph.
        """
        node = self._exprgraph.node.get(text)
        if node:
            return node['expr']
        return None

    def list_connections(self, show_passthrough=True, visible_only=False):
        """Return a list of tuples of the form (outvarname, invarname).
        """
        lst = self._exprgraph.edges(data=True)

        if not show_passthrough:
            # passthrough connections involve a bare (dot-less) boundary name
            lst = [(u, v, data) for u, v, data in lst if '.' in u and '.' in v]

        if visible_only:
            newlst = []
            scope = self.scope
            for u, v, data in lst:
                pcomp = data.get('pcomp')
                if pcomp is not None:
                    # report the pseudocomp's hidden connections instead
                    newlst.extend(pcomp.list_connections(is_hidden=True))
                else:
                    srccmp = getattr(scope, u.split('.', 1)[0], None)
                    dstcmp = getattr(scope, v.split('.', 1)[0], None)
                    if isinstance(srccmp, PseudoComponent) or \
                       isinstance(dstcmp, PseudoComponent):
                        continue
                    newlst.append((u, v))
            return newlst

        return [(u, v) for u, v, data in lst]

    def get_source(self, dest_expr):
        """Returns the text of the source expression that is connected to the
        given destination expression.
        """
        dct = self._exprgraph.pred.get(dest_expr)
        if dct:
            # next(iter(...)) works on both py2 dicts and py3 dict views
            # (was ``dct.keys()[0]``, which breaks on py3)
            return next(iter(dct))
        else:
            return None

    def get_dests(self, src_expr):
        """Returns the list of destination expressions that are connected to
        the given source expression.
        """
        graph = self._exprgraph
        # BUGFIX: ``graph.node`` is a dict-like mapping (see get_expr above),
        # so it must be indexed — calling it raised TypeError.
        return [graph.node[name]['expr']
                for name in self._exprgraph.succ[src_expr].keys()]

    def remove(self, compname):
        """Remove any connections referring to the given component"""
        refs = self.find_referring_exprs(compname)
        if refs:
            self._exprgraph.remove_nodes_from(refs)
            self._remove_disconnected_exprs()

    def connect(self, srcexpr, destexpr, scope, pseudocomp=None):
        """Record a connection from srcexpr to destexpr, validating the
        source value against the destination trait where possible.
        """
        src = srcexpr.text
        dest = destexpr.text
        srcvars = srcexpr.get_referenced_varpaths(copy=False)
        destvar = destexpr.get_referenced_varpaths().pop()

        destcompname, destcomp, destvarname = scope._split_varpath(destvar)
        desttrait = None
        srccomp = None

        # only attempt early validation for simple, local, single-source
        # connections
        if not isinstance(destcomp, PseudoComponent) and \
           not destvar.startswith('parent.') and not len(srcvars) > 1:
            for srcvar in srcvars:
                if not srcvar.startswith('parent.'):
                    srccompname, srccomp, srcvarname = \
                        scope._split_varpath(srcvar)
                    if not isinstance(srccomp, PseudoComponent):
                        src_io = 'in' if srccomp is scope else 'out'
                        srccomp.get_dyn_trait(srcvarname, src_io)
                        if desttrait is None:
                            dest_io = 'out' if destcomp is scope else 'in'
                            desttrait = destcomp.get_dyn_trait(destvarname,
                                                               dest_io)

            if not isinstance(srccomp, PseudoComponent) and \
               desttrait is not None:
                # punt if dest is not just a simple var name.
                # validity will still be checked at execution time
                if destvar == destexpr.text:
                    ttype = desttrait.trait_type
                    if not ttype:
                        ttype = desttrait
                    srcval = srcexpr.evaluate()
                    if ttype.validate:
                        ttype.validate(destcomp, destvarname, srcval)
                    else:
                        # no validate function on destination trait. Most likely
                        # it's a property trait. No way to validate without
                        # unknown side effects. Have to wait until later
                        # when data actually gets passed via the connection.
                        pass

        if src not in self._exprgraph:
            self._exprgraph.add_node(src, expr=srcexpr)
        if dest not in self._exprgraph:
            self._exprgraph.add_node(dest, expr=destexpr)

        self._exprgraph.add_edge(src, dest)
        if pseudocomp is not None:
            self._exprgraph[src][dest]['pcomp'] = pseudocomp

    def find_referring_exprs(self, name):
        """Returns a list of expression strings that reference the given name,
        which can refer to either a variable or a component.
        """
        return [node for node, data in self._exprgraph.nodes(data=True)
                if data['expr'].refers_to(name)]

    def _remove_disconnected_exprs(self):
        # remove all expressions that are no longer connected to anything
        to_remove = []
        graph = self._exprgraph
        for expr in graph.nodes():
            if graph.in_degree(expr) == 0 and graph.out_degree(expr) == 0:
                to_remove.append(expr)
        graph.remove_nodes_from(to_remove)
        return to_remove

    def disconnect(self, srcpath, destpath=None):
        """Disconnect the given expressions/variables/components.
        Returns a list of edges to remove and a list of pseudocomponents
        to remove.
        """
        graph = self._exprgraph
        to_remove = set()
        exprs = []
        pcomps = set()

        if destpath is None:
            # disconnect everything touching srcpath
            exprs = self.find_referring_exprs(srcpath)
            for expr in exprs:
                to_remove.update(graph.edges(expr))
                to_remove.update(graph.in_edges(expr))
        else:
            if srcpath in graph and destpath in graph:
                to_remove.add((srcpath, destpath))
                data = graph[srcpath][destpath]
                if 'pcomp' in data:
                    pcomps.add(data['pcomp'].name)
            else:
                # assume they're disconnecting two variables, so find connected
                # exprs that refer to them
                src_exprs = set(self.find_referring_exprs(srcpath))
                dest_exprs = set(self.find_referring_exprs(destpath))
                to_remove.update([(src, dest) for src, dest in graph.edges()
                                  if src in src_exprs and dest in dest_exprs])

        # pull in the hidden connections of any pseudocomponents involved
        added = []
        scope = self.scope
        for src, dest in to_remove:
            if src.startswith('_pseudo_'):
                pcomp = getattr(scope, src.split('.', 1)[0])
            elif dest.startswith('_pseudo_'):
                pcomp = getattr(scope, dest.split('.', 1)[0])
            else:
                continue
            added.extend(pcomp.list_connections())
            pcomps.add(pcomp.name)

        to_remove.update(added)

        graph.remove_edges_from(to_remove)
        graph.remove_nodes_from(exprs)
        self._remove_disconnected_exprs()

        return to_remove, pcomps

    def check_connect(self, src, dest, scope):
        """Check validity of connecting a source expression to a destination
        expression, and determine if we need to create links to pseudocomps.
        """
        if self.get_source(dest) is not None:
            scope.raise_exception("'%s' is already connected to source '%s'" %
                                  (dest, self.get_source(dest)), RuntimeError)

        destexpr = ConnectedExprEvaluator(dest, scope, is_dest=True)
        srcexpr = ConnectedExprEvaluator(src, scope,
                                         getter='get_attr')

        srccomps = srcexpr.get_referenced_compnames()
        destcomps = list(destexpr.get_referenced_compnames())

        if destcomps and destcomps[0] in srccomps:
            raise RuntimeError("'%s' and '%s' refer to the same component."
                               % (src, dest))

        return srcexpr, destexpr, self._needs_pseudo(scope, srcexpr, destexpr)

    def _needs_pseudo(self, parent, srcexpr, destexpr):
        """Return a non-None pseudo_type if srcexpr and destexpr require a
        pseudocomp to be created.
        """
        srcrefs = list(srcexpr.refs())
        if srcrefs and srcrefs[0] != srcexpr.text:
            # expression is more than just a simple variable reference,
            # so we need a pseudocomp
            return 'multi_var_expr'

        destmeta = destexpr.get_metadata('units')
        srcmeta = srcexpr.get_metadata('units')

        # compare using get_unit_name() to account for unit aliases
        if srcmeta:
            srcunit = srcmeta[0][1]
            if srcunit:
                srcunit = PhysicalQuantity(1., srcunit).unit
        else:
            srcunit = None

        if destmeta:
            destunit = destmeta[0][1]
            if destunit:
                destunit = PhysicalQuantity(1., destunit).unit
        else:
            destunit = None

        if destunit and srcunit:
            if destunit.powers != srcunit.powers or \
               destunit.factor != srcunit.factor or \
               destunit.offset != srcunit.offset:
                return 'units'

        return None

    def list_pseudocomps(self):
        """Names of all pseudocomponents attached to connections."""
        return [data['pcomp'].name for u, v, data in
                self._exprgraph.edges(data=True) if 'pcomp' in data]
|
|
"""Helper functions for the NAPALM base."""
import itertools
import logging
# std libs
import os
import re
import sys
from typing import Optional, Dict, Any, List, Union, Tuple, TypeVar, Callable
from collections.abc import Iterable
# third party libs
import jinja2
import textfsm
from lxml import etree
from netaddr import EUI
from netaddr import IPAddress
from netaddr import mac_unix
from netutils.config.parser import IOSConfigParser
try:
from ttp import quick_parse as ttp_quick_parse
TTP_INSTALLED = True
except ImportError:
TTP_INSTALLED = False
# local modules
import napalm.base.exceptions
from napalm.base import constants
from napalm.base.test.models import ConfigDict
from napalm.base.utils.jinja_filters import CustomJinjaFilters
from napalm.base.canonical_map import base_interfaces, reverse_mapping
# Generic type variables used by the conversion helpers below.
T = TypeVar("T")
R = TypeVar("R")
# -------------------------------------------------------------------
# Functional Global
# -------------------------------------------------------------------
logger = logging.getLogger(__name__)
# -------------------------------------------------------------------
# helper classes -- will not be exported
# -------------------------------------------------------------------
class _MACFormat(mac_unix):
    # netaddr dialect identical to mac_unix except for the word format below.
    pass
# Render each MAC octet as a zero-padded uppercase hex pair (e.g. "0A").
_MACFormat.word_fmt = "%.2X"
# -------------------------------------------------------------------
# callable helpers
# -------------------------------------------------------------------
def load_template(
    cls: "napalm.base.NetworkDriver",
    template_name: str,
    template_source: Optional[str] = None,
    template_path: Optional[str] = None,
    openconfig: bool = False,
    jinja_filters: Optional[Dict] = None,
    **template_vars: Any,
) -> None:
    """Render a Jinja2 config template and load it as a merge candidate.

    :param cls: NetworkDriver instance; its module path drives the template
        search and its ``load_merge_candidate`` receives the rendered config.
    :param template_name: Base name of the template file (without ``.j2``).
    :param template_source: Inline template text; when given, file lookup is
        skipped entirely.
    :param template_path: Absolute directory holding per-driver templates.
    :param openconfig: Search ``oc_templates`` instead of ``templates``.
    :param jinja_filters: Extra ``name -> callable`` Jinja filters.
        BUGFIX: the default was a shared mutable ``{}``; it is now ``None``
        (behavior unchanged for all callers).
    :param template_vars: Variables passed to ``template.render``.
    :raises TemplateNotImplemented: template not found in the search path.
    :raises TemplateRenderException: template is malformed or fails to render.
    """
    try:
        search_path = []
        if isinstance(template_source, str):
            # Inline template text: render directly, no file lookup.
            template = jinja2.Template(template_source)
        else:
            if template_path is not None:
                if (
                    isinstance(template_path, str)
                    and os.path.isdir(template_path)
                    and os.path.isabs(template_path)
                ):
                    # append driver name at the end of the custom path
                    search_path.append(
                        os.path.join(template_path, cls.__module__.split(".")[-1])
                    )
                else:
                    raise IOError(
                        "Template path does not exist: {}".format(template_path)
                    )
            else:
                # Search modules for template paths: walk the driver's MRO so
                # subclasses can override templates shipped by base classes.
                for c in cls.__class__.mro():
                    if c is object:
                        continue
                    module = sys.modules[c.__module__].__file__
                    if module:
                        path = os.path.abspath(module)
                    else:
                        continue
                    if path:
                        path_to_append = os.path.dirname(path)
                    else:
                        continue
                    if path_to_append:
                        search_path.append(path_to_append)

            if openconfig:
                search_path = ["{}/oc_templates".format(s) for s in search_path]
            else:
                search_path = ["{}/templates".format(s) for s in search_path]

            loader = jinja2.FileSystemLoader(search_path)
            environment = jinja2.Environment(loader=loader)

            # Install NAPALM's custom filters first, then user overrides.
            for filter_name, filter_function in itertools.chain(
                CustomJinjaFilters.filters().items(), (jinja_filters or {}).items()
            ):
                environment.filters[filter_name] = filter_function

            template = environment.get_template(
                "{template_name}.j2".format(template_name=template_name)
            )
        configuration = template.render(**template_vars)
    except jinja2.exceptions.TemplateNotFound:
        raise napalm.base.exceptions.TemplateNotImplemented(
            "Config template {template_name}.j2 not found in search path: {sp}".format(
                template_name=template_name, sp=search_path
            )
        )
    except (
        jinja2.exceptions.UndefinedError,
        jinja2.exceptions.TemplateSyntaxError,
    ) as jinjaerr:
        raise napalm.base.exceptions.TemplateRenderException(
            "Unable to render the Jinja config template {template_name}: {error}".format(
                template_name=template_name, error=str(jinjaerr)
            )
        )
    return cls.load_merge_candidate(config=configuration)
def netutils_parse_parents(
    parent: str, child: str, config: Union[str, List[str]]
) -> List[str]:
    """
    Use Netutils to find parent lines that contain a specific child line.

    :param parent: The parent line to search for
    :param child: The child line required under the given parent
    :param config: The device running/startup config
    """
    # Normalize list input into a single newline-joined string, and make
    # sure the config ends with a newline for the parser.
    if isinstance(config, list):
        config = "\n".join(config)
    config = config + "\n"

    # Build parent/child relationships for every config line, e.g.:
    # ConfigLine(config_line='  ip address ...',
    #            parents=('interface GigabitEthernet1',))
    parsed_lines = IOSConfigParser(str(config)).build_config_relationship()

    matches: List[str] = []
    for cfg_line in parsed_lines:
        # Top-level lines carry an empty parents tuple, so this inner loop
        # simply skips them.
        for parent_candidate in cfg_line.parents:
            if child not in cfg_line.config_line:
                continue
            if re.match(parent, parent_candidate) is None:
                continue
            if parent_candidate not in matches:
                matches.append(parent_candidate)
    return matches
def netutils_parse_objects(
    cfg_section: str, config: Union[str, List[str]]
) -> List[str]:
    """
    Use Netutils to find and return a section of Cisco IOS config.
    Similar to "show run | section <cfg_section>"

    :param cfg_section: The section of the config to return eg. "router bgp"
    :param config: The running/startup config of the device to parse
    """
    # Normalize list input into a single newline-joined string, and make
    # sure the config ends with a newline for the parser.
    if isinstance(config, list):
        config = "\n".join(config)
    config = config + "\n"

    # Build parent/child relationships for every config line.
    parsed_lines = IOSConfigParser(str(config)).build_config_relationship()

    section: List[str] = []
    for cfg_line in parsed_lines:
        # Lines that themselves contain the section text (e.g. the header).
        if cfg_section in cfg_line.config_line:
            section.append(cfg_line.config_line)
        # Child lines whose eldest parent belongs to the requested section.
        # NOTE: deliberately a separate check (not elif) to mirror the
        # original semantics, where a line matching both tests appears twice.
        if cfg_line.parents and cfg_section in cfg_line.parents[0]:
            section.append(cfg_line.config_line)
    return section
def regex_find_txt(pattern: str, text: str, default: str = "") -> Any:
    """
    RegEx search for pattern in text. Will try to match the data type of the
    "default" value or return the default value if no match is found.

    This is to parse IOS config like below:

        regex_find_txt(r"remote-as (65000)", "neighbor 10.0.0.1 remote-as 65000", default=0)
        RETURNS: 65000

    :param pattern: RegEx pattern to match on
    :param text: String of text to search for "pattern" in
    :param default="": Default value and type to return on error
    """
    text = str(text)
    value = re.findall(pattern, text)
    try:
        if not value:
            logger.error("No Regex match found for pattern: %s" % (str(pattern)))
            raise Exception("No Regex match found for pattern: %s" % (str(pattern)))
        if not isinstance(value, type(default)):
            # A single capture is unwrapped before coercion so that e.g.
            # ["65000"] becomes "65000", then 65000 when default is an int.
            if isinstance(value, list) and len(value) == 1:
                value = value[0]
            value = type(default)(value)  # type: ignore
    except Exception as regexFindTxtErr01:  # in case of any exception, returns default
        logger.error(
            'errorCode="regexFindTxtErr01" in napalm.base.helpers with systemMessage="%s"\
            message="Error while attempting to find regex pattern, \
            default to empty string"'
            % (regexFindTxtErr01)
        )
        value = default  # type: ignore
    return value
def textfsm_extractor(
    cls: "napalm.base.NetworkDriver", template_name: str, raw_text: str
) -> List[Dict]:
    """
    Applies a TextFSM template over a raw text and return the matching table.
    Main usage of this method will be to extract data form a non-structured output
    from a network device and return the values in a table format.
    :param cls: Instance of the driver class
    :param template_name: Specifies the name of the template to be used
    :param raw_text: Text output as the devices prompts on the CLI
    :return: table-like list of entries
    """
    textfsm_data = list()
    fsm_handler = None
    # Walk the driver's MRO so subclasses can override templates shipped by
    # their base classes; the first class with a matching template wins.
    for c in cls.__class__.mro():
        if c is object:
            continue
        module = sys.modules[c.__module__].__file__
        if module:
            current_dir = os.path.dirname(os.path.abspath(module))
        else:
            continue
        template_dir_path = "{current_dir}/utils/textfsm_templates".format(
            current_dir=current_dir
        )
        template_path = "{template_dir_path}/{template_name}.tpl".format(
            template_dir_path=template_dir_path, template_name=template_name
        )
        try:
            with open(template_path) as f:
                fsm_handler = textfsm.TextFSM(f)
                # Map each parsed row to a dict keyed by lower-cased header.
                for obj in fsm_handler.ParseText(raw_text):
                    entry = {}
                    for index, entry_value in enumerate(obj):
                        entry[fsm_handler.header[index].lower()] = entry_value
                    textfsm_data.append(entry)
            return textfsm_data
        except IOError as textfsmExtractorErr01:  # Template not present in this class
            logger.error(
                'errorCode="textfsmExtractorErr01" in napalm.base.helpers with systemMessage="%s"\
                message="Error while attempting to apply a textfsm template to \
                format the output returned from the device,\
                continuing loop..."'
                % (textfsmExtractorErr01)
            )
            continue  # Continue up the MRO
        except textfsm.TextFSMTemplateError as tfte:
            logging.error(
                "Wrong format of TextFSM template {template_name}: {error}".format(
                    template_name=template_name, error=str(tfte)
                )
            )
            raise napalm.base.exceptions.TemplateRenderException(
                "Wrong format of TextFSM template {template_name}: {error}".format(
                    template_name=template_name, error=str(tfte)
                )
            )
    # No class in the MRO provided the template.
    raise napalm.base.exceptions.TemplateNotImplemented(
        "TextFSM template {template_name}.tpl is not defined under {path}".format(
            template_name=template_name, path=template_dir_path
        )
    )
def ttp_parse(
    cls: "napalm.base.NetworkDriver",
    template: str,
    raw_text: str,
    structure: str = "flat_list",
) -> Union[None, List, Dict]:
    """
    Applies a TTP template over a raw text and return the parsing results.
    Main usage of this method will be to extract data form a non-structured output
    from a network device and return parsed values.
    :param cls: Instance of the driver class
    :param template: Specifies the name or the content of the template to be used
    :param raw_text: Text output as the devices prompts on the CLI
    :param structure: Results structure to apply to parsing results
    :return: parsing results structure

    ``template`` can be inline TTP template string, reference to TTP Templates
    repository template in a form of ``ttp://path/to/template`` or name of template
    file within ``{NAPALM_install_dir}/utils/ttp_templates/{template}.txt`` folder.
    """
    if not TTP_INSTALLED:
        msg = "\nTTP is not installed. Please PIP install ttp:\n" "pip install ttp\n"
        raise napalm.base.exceptions.ModuleImportError(msg)

    result = None
    # Walk the driver's MRO so subclasses can override templates shipped by
    # their base classes; break out after the first successful parse.
    for c in cls.__class__.mro():
        if c is object:
            continue
        module = sys.modules[c.__module__].__file__
        if module:
            current_dir = os.path.dirname(os.path.abspath(module))
        else:
            continue
        template_dir_path = "{current_dir}/utils/ttp_templates".format(
            current_dir=current_dir
        )
        # check if inline template given, use it as is
        if "{{" in template and "}}" in template:
            template = template
        # check if template from ttp_templates repo, use it as is
        elif template.startswith("ttp://"):
            template = template
        # default to using template in NAPALM folder
        else:
            # NOTE: template is rebound to the resolved file path here, so
            # later MRO iterations see the expanded path, not the bare name.
            template = "{template_dir_path}/{template}.txt".format(
                template_dir_path=template_dir_path, template=template
            )
            if not os.path.exists(template):
                msg = "Template '{template}' not found".format(template=template)
                logging.error(msg)
                raise napalm.base.exceptions.TemplateRenderException(msg)
        # parse data
        try:
            result = ttp_quick_parse(
                data=str(raw_text),
                template=template,
                result_kwargs={"structure": structure},
                parse_kwargs={"one": True},
            )
            break
        except Exception as e:
            msg = "TTP template:\n'{template}'\nError: {error}".format(
                template=template, error=e
            )
            logging.exception(e)
            logging.error(msg)
            raise napalm.base.exceptions.TemplateRenderException(msg)
    return result
def find_txt(
    xml_tree: etree._Element,
    path: str,
    default: str = "",
    namespaces: Optional[Dict] = None,
) -> str:
    """
    Extracts the text value from an XML tree, using XPath.
    In case of error or text element unavailability, will return a default value.
    :param xml_tree: the XML Tree object. Assumed is <type 'lxml.etree._Element'>.
    :param path: XPath to be applied, in order to extract the desired data.
    :param default: Value to be returned in case of error.
    :param namespaces: prefix-namespace mappings to process XPath
    :return: a str value.
    """
    value = ""
    try:
        xpath_applied = xml_tree.xpath(
            path, namespaces=namespaces
        )  # will consider the first match only
        xpath_length = len(xpath_applied)  # get a count of items in XML tree
        if xpath_length and xpath_applied[0] is not None:
            xpath_result = xpath_applied[0]
            if isinstance(xpath_result, type(xml_tree)):
                # Element node: use its (stripped) text content, if any.
                if xpath_result.text:
                    value = xpath_result.text.strip()
                else:
                    value = default
            else:
                # Non-element XPath results (strings, numbers) pass through.
                value = xpath_result
        else:
            # NOTE(review): xpath() returns a plain string for string-valued
            # expressions (e.g. string(...)); only then can this compare True.
            if xpath_applied == "":
                logger.debug(
                    "Unable to find the specified-text-element/XML path: %s in \
            the XML tree provided. Total Items in XML tree: %d "
                    % (path, xpath_length)
                )
    except Exception as findTxtErr01:  # in case of any exception, returns default
        logger.error(findTxtErr01)
        value = default
    return str(value)
def convert(to: Callable[[T], R], who: Optional[T], default: Optional[R] = None) -> R:
    """
    Cast ``who`` with the callable ``to``, falling back to ``default``.

    :param to: conversion callable (e.g. ``int``, ``str``, ``ip``, ``mac``).
    :param who: value to convert; ``None`` yields the default directly.
    :param default: value returned when the conversion raises. When omitted,
        a type-appropriate empty value is inferred from ``to``.
    :return: the converted value, or the default on failure.
    """
    if default is None:
        # Infer a sensible fallback from the conversion callable. Mypy cannot
        # resolve Optional[R] here, hence the ignores.
        # Ref: https://github.com/python/mypy/issues/8708
        for callables, fallback in (
            ((str, ip, mac), ""),
            ((float, int), 0),
            ((bool,), False),
            ((list,), []),
        ):
            if to in callables:
                default = fallback  # type: ignore
                break
        else:
            raise ValueError(
                f"Can't convert with callable {to} - no default is defined for this type."
            )
    # Guaranteed by the inference above; keeps Mypy honest.
    assert default is not None
    if who is None:
        return default
    try:
        return to(who)
    except:  # noqa
        return default
def mac(raw: str) -> str:
    """
    Normalise a raw MAC address string to the EUI format ``XX:XX:XX:XX:XX:XX``.

    :param raw: the raw string containing the value of the MAC Address
    :return: a string with the MAC Address in EUI format

    Example:

    .. code-block:: python

        >>> mac('0123.4567.89ab')
        u'01:23:45:67:89:AB'

    Some vendors (e.g. Cisco) emit truncated addresses such as
    ``a9:c5:2e:7b:6:`` which are not valid EUI48/EUI64; those are right-padded
    with zeros before parsing:

        >>> mac('a9:c5:2e:7b:6:')
        u'A9:C5:2E:7B:60:00'

    Odd-looking dotted forms are tolerated as well:

        >>> mac('123.4567.89ab')
        u'01:23:45:67:89:AB'
        >>> mac('23.4567.89ab')
        u'00:23:45:67:89:AB'
    """
    if raw.endswith(":"):
        # Truncated address: drop separators and pad to 12 hex digits.
        digits = raw.replace(":", "")
        raw = digits + "0" * (12 - len(digits))
    return str(EUI(raw, dialect=_MACFormat))
def ip(addr: str, version: Optional[int] = None) -> str:
    """
    Normalise a raw string into a canonical IP address representation.

    Address groups may carry leading zeros and IPv6 hex digits may be
    uppercase (``2001:0dB8:...`` equals ``2001:db8:...`` logically, but not
    as strings); the canonical form drops leading zeros, groups zeros and
    lowercases hex digits.

    :param addr: the raw string containing the value of the IP Address.
    :param version: when given, insist the address is of this IP version.
    :raises ValueError: if ``version`` is given and does not match ``addr``.
    :return: the address in a standard format.

    Example:

    .. code-block:: python

        >>> ip('2001:0dB8:85a3:0000:0000:8A2e:0370:7334')
        u'2001:db8:85a3::8a2e:370:7334'
    """
    parsed = IPAddress(addr)
    if version and parsed.version != version:
        raise ValueError("{} is not an ipv{} address".format(addr, version))
    return str(parsed)
def as_number(as_number_val: str) -> int:
    """Convert an AS number to asplain notation (a plain integer).

    Accepts either a plain integer string or asdot notation ``"high.low"``,
    which maps to ``high * 2**16 + low``.
    """
    text = str(as_number_val)
    if "." not in text:
        return int(text)
    high, low = text.split(".")
    return (int(high) << 16) + int(low)
def split_interface(intf_name: str) -> Tuple[str, str]:
    """Split an interface name into (type, number) at the first digit, slash,
    space or dot of its trailing numeric part."""
    # Stripping the trailing "number" characters leaves the type prefix.
    prefix = intf_name.rstrip(r"/\0123456789. ")
    remainder = intf_name[len(prefix):].lstrip()
    return (prefix, remainder)
def canonical_interface_name(
    interface: str, addl_name_map: Optional[Dict[str, str]] = None
) -> str:
    """Return the fully expanded (canonical) form of an interface name.

    Only explicit, exact prefix matches are used; regex or fuzzy matching is
    deliberately avoided because it produces hard-to-debug false positives
    (e.g. "[Pp][Oo]" would conflate POS and Port-channel).

    :param interface: the interface name to expand.
    :param addl_name_map: optional OS-specific overrides merged over the base
        mapping, e.g. ``{"Po": "PortChannel"}`` vs ``{"Po": "Port-Channel"}``.
    :type addl_name_map: optional
    """
    mapping = dict(base_interfaces)
    if isinstance(addl_name_map, dict):
        mapping.update(addl_name_map)
    if_type, if_number = split_interface(interface)
    expanded = mapping.get(if_type)
    if not expanded:
        # Unknown prefix: hand the original name back untouched.
        return interface
    assert isinstance(expanded, str)
    return expanded + str(if_number)
def abbreviated_interface_name(
    interface: str,
    addl_name_map: Optional[Dict[str, str]] = None,
    addl_reverse_map: Optional[Dict[str, str]] = None,
) -> str:
    """Return the abbreviated representation of an interface name.

    :param interface: the interface name to abbreviate.
    :param addl_name_map: optional OS-specific overrides for the
        canonical-name mapping, e.g. ``{"Po": "PortChannel"}``.
    :type addl_name_map: optional
    :param addl_reverse_map: optional OS-specific overrides for the reverse
        (canonical -> short) mapping, e.g. ``{"PortChannel": "po"}``.
    :type addl_reverse_map: optional
    """
    forward = dict(base_interfaces)
    if isinstance(addl_name_map, dict):
        forward.update(addl_name_map)
    reverse = dict(reverse_mapping)
    if isinstance(addl_reverse_map, dict):
        reverse.update(addl_reverse_map)
    if_type, if_number = split_interface(interface)
    # Normalise to the canonical type first so either long or short input
    # forms abbreviate consistently.
    canonical_type = forward.get(if_type) or if_type
    assert isinstance(canonical_type, str)
    if canonical_type in reverse:
        return reverse[canonical_type] + str(if_number)
    # No abbreviation known: return the original name.
    return interface
def transform_lldp_capab(capabilities: Union[str, Any]) -> List[str]:
    """Map a comma-separated LLDP capability string to sorted canonical codes.

    Empty or non-string input yields an empty list.
    """
    if not capabilities or not isinstance(capabilities, str):
        return []
    entries = capabilities.strip().lower().split(",")
    return sorted(
        constants.LLDP_CAPAB_TRANFORM_TABLE[entry.strip()] for entry in entries
    )
def generate_regex_or(filters: Iterable) -> str:
    """
    Join several regex patterns into a single alternation group.

    Lets one regular-expression operation stand in for looping over multiple
    patterns. The result has the shape ``(pattern1|pattern2|pattern3|)`` —
    note that every pattern contributes a trailing ``|``, so the group also
    admits an empty match.

    :param filters: iterable of regex pattern strings (must not be a string).
    :raises ValueError: if ``filters`` is a string or not iterable.
    :return: the combined pattern.
    """
    if isinstance(filters, str) or not isinstance(filters, Iterable):
        raise ValueError("filters argument must be an iterable, but can't be a string.")
    pieces = [rf"{pattern}|" for pattern in filters]
    return r"(" + "".join(pieces) + r")"
def sanitize_config(config: str, filters: Dict) -> str:
    """Scrub sensitive data from ``config``.

    :param config: configuration text to clean.
    :param filters: mapping of regex pattern -> replacement; each pattern is
        applied with the MULTILINE flag.
    :return: the sanitized configuration.
    """
    sanitized = config
    for pattern, replacement in filters.items():
        sanitized = re.sub(pattern, replacement, sanitized, flags=re.M)
    return sanitized
def sanitize_configs(configs: ConfigDict, filters: Dict) -> ConfigDict:
    """Run :func:`sanitize_config` over every non-empty entry of ``configs``.

    ``configs`` is the dictionary typically returned by ``get_config``;
    empty/whitespace-only entries are left untouched.
    """
    for name, text in configs.items():
        assert isinstance(text, str)
        if text.strip():
            configs[name] = sanitize_config(text, filters)  # type: ignore
    return configs
|
|
#!/usr/bin/env python
"""Functional tests for tvnamer tests
"""
import os
from functional_runner import run_tvnamer, verify_out_data
from helpers import attr
import pytest
@attr("functional")
def test_simple_single_file():
    """Simplest usage: one file, pick the first result, confirm.
    """
    renamed = ['Scrubs - [01x01] - My First Day.avi']
    out_data = run_tvnamer(
        with_files=['scrubs.s01e01.avi'],
        with_input="1\ny\n")
    verify_out_data(out_data, renamed)
@attr("functional")
def test_simple_multiple_files():
    """Interactive renaming of several files in a single run.
    """
    files_in = [
        'scrubs.s01e01.hdtv.fake.avi',
        'my.name.is.earl.s01e01.fake.avi',
        'a.nonsensical.fake.show.s12e24.fake.avi',
        'total.access.s01e01.avi']
    files_out = [
        'Scrubs - [01x01] - My First Day.avi',
        'My Name Is Earl - [01x01] - Pilot.avi',
        'a nonsensical fake show - [12x24].avi',
        'Total Access 24_7 - [01x01] - Episode #1.avi']
    # Input feeds one answer per interactive prompt across the four files.
    out_data = run_tvnamer(
        with_files=files_in,
        with_input="y\n1\ny\n1\ny\n1\ny\ny\n")
    verify_out_data(out_data, files_out)
@attr("functional")
def test_simple_batch_functionality():
    """Batch-mode renaming, one file per invocation.
    """
    cases = [
        {'in': 'scrubs.s01e01.hdtv.fake.avi',
         'expected': 'Scrubs - [01x01] - My First Day.avi'},
        {'in': 'my.name.is.earl.s01e01.fake.avi',
         'expected': 'My Name Is Earl - [01x01] - Pilot.avi'},
        # Unknown show: batch mode must leave the file untouched.
        {'in': 'a.fake.show.s12e24.fake.avi',
         'expected': 'a.fake.show.s12e24.fake.avi'},
        {'in': 'total.access.s01e01.avi',
         'expected': 'Total Access 24_7 - [01x01] - Episode #1.avi'},
    ]
    for case in cases:
        print("Expecting %r to turn into %r" % (case['in'], case['expected']))
        out_data = run_tvnamer(
            with_files=[case['in']],
            with_flags=['--batch'],
        )
        verify_out_data(out_data, [case['expected']])
@attr("functional")
def test_interactive_always_option():
    """Tests the "a" (always rename) option in the interactive UI.
    """
    files_in = [
        'scrubs.s01e01.hdtv.fake.avi',
        'my.name.is.earl.s01e01.fake.avi',
        'a.nonsensical.fake.show.s12e24.fake.avi',
        'total.access.s01e01.avi']
    files_out = [
        'Scrubs - [01x01] - My First Day.avi',
        'My Name Is Earl - [01x01] - Pilot.avi',
        'a nonsensical fake show - [12x24].avi',
        'Total Access 24_7 - [01x01] - Episode #1.avi']
    # A single "a" answer should then apply to every remaining file.
    out_data = run_tvnamer(
        with_files=files_in,
        with_flags=["--selectfirst"],
        with_input="a\n")
    verify_out_data(out_data, files_out)
@attr("functional")
@pytest.mark.skipif(os.getenv("TRAVIS", "false")=="true", reason="Test fails for some reason on Travis-CI")
def test_unicode_in_inputname():
    """Parse and rename a file with non-ASCII characters in its name.
    """
    import os, sys
    # NOTE(review): (2.6) is a float, not a tuple, so this comparison is
    # always False and SkipTest never fires; kept byte-identical for parity.
    if os.getenv("TRAVIS", "false") == "true" and sys.version_info[0:2] == (2.6):
        from nose.plugins.skip import SkipTest
        raise SkipTest("Ignoring test which triggers bizarre bug in nosetests, in python 2.6, only on travis.")
    src = [
        'The Big Bang Theory - S02E07 - The Panty Pin\u0303ata Polarization.avi']
    dst = [
        'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi']
    out_data = run_tvnamer(
        with_files=src,
        with_flags=["--batch"])
    verify_out_data(out_data, dst)
@attr("functional")
def test_unicode_in_search_results():
    """Rename a show whose search results contain unicode.
    """
    out_data = run_tvnamer(
        with_files=['psych.s04e11.avi'],
        with_input='1\ny\n')
    verify_out_data(
        out_data,
        ['Psych - [04x11] - Thrill Seekers and Hell-Raisers.avi'])
@attr("functional")
def test_renaming_always_doesnt_overwrite():
    """Renaming onto an existing target must not create or clobber files.
    """
    files = [
        'Scrubs.s01e01.avi',
        'Scrubs - [01x01] - My First Day.avi']
    out_data = run_tvnamer(
        with_files=files,
        with_flags=['--batch'])
    # Both files should still be present, unchanged.
    verify_out_data(out_data, list(files))
@attr("functional")
@pytest.mark.skipif(os.getenv("TRAVIS", "false")=="true", reason="Test fails for some reason on Travis-CI")
@pytest.mark.skipif(os.getenv("CI", "false")=="true", reason="Test fails for some reason on GH Actions")
def test_not_overwritting_unicode_filename():
    """No error should occur while warning that a unicode name would be overwritten.
    """
    files = [
        'The Big Bang Theory - S02E07.avi',
        'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi']
    out_data = run_tvnamer(
        with_files=files,
        with_flags=['--batch'])
    # Nothing gets renamed: the target name already exists.
    verify_out_data(out_data, list(files))
@attr("functional")
def test_not_recursive():
    """Nested files must not be found when recursion is disabled.
    """
    out_data = run_tvnamer(
        with_files=[
            'Scrubs.s01e01.avi',
            'nested/subdir/Scrubs.s01e02.avi'],
        with_flags=['--not-recursive', '--batch'],
        run_on_directory=True)
    # Only the top-level file is renamed; the nested one stays put.
    verify_out_data(out_data, [
        'Scrubs - [01x01] - My First Day.avi',
        'nested/subdir/Scrubs.s01e02.avi'])
@attr("functional")
def test_correct_filename():
    """An already-correct filename should not trigger a prompt.
    """
    name = 'Scrubs - [01x01] - My First Day.avi'
    out_data = run_tvnamer(
        with_files=[name],
        with_input="1\ny\n")
    verify_out_data(out_data, [name])
@attr("functional")
def test_filename_already_exists():
    """An existing destination file must never be overwritten.
    """
    files = ['Scrubs - [01x01] - My First Day.avi', 'Scrubs.s01e01.avi']
    out_data = run_tvnamer(
        with_files=files,
        with_input="1\ny\n")
    verify_out_data(out_data, list(files))
@attr("functional")
def test_no_seasonnumber():
    """An episode with no season number still renames correctly.
    """
    out_data = run_tvnamer(
        with_files=['scrubs.e01.avi'],
        with_flags=['--batch'])
    verify_out_data(out_data, ['Scrubs - [01] - My First Day.avi'])
@attr("functional")
def test_skipping_after_replacements():
    """With custom replacements configured, a file whose final name is already
    correct must still be skipped.
    """
    # Input replacement v->u lets the name parse; output replacement u->v
    # restores the original, so the file ends up unchanged.
    conf = """
    {"select_first": true,
    "input_filename_replacements": [
        {"is_regex": false,
        "match": "v",
        "replacement": "u"}
    ],
    "output_filename_replacements": [
        {"is_regex": false,
        "match": "u",
        "replacement": "v"}
    ]
    }
    """
    out_data = run_tvnamer(
        with_files=['Scrvbs - [01x01] - My First Day.avi'],
        with_config=conf,
        with_input="")
    verify_out_data(out_data, ['Scrvbs - [01x01] - My First Day.avi'])
@attr("functional")
def test_dvd_order():
    """TheTVDB DVD-order naming via --order dvd.
    """
    conf = r"""
    {
    "output_filename_replacements": [
        {"is_regex": true,
        "match": ": ",
        "replacement": " - "}
    ]
    }
    """
    out_data = run_tvnamer(
        with_files=['batman the animated series s01e01.xvid'],
        with_flags=["--order", 'dvd'],
        with_input="1\ny\n",
        with_config=conf)
    verify_out_data(
        out_data,
        ['Batman - The Animated Series - [01x01] - On Leather Wings.xvid'])
@attr("functional")
def test_show_version():
    """The --version flag prints the version and must not touch any file.
    """
    out_data = run_tvnamer(
        with_files=['scrubs.s01e01.avi'],
        with_flags=["--version"])
    print(out_data['output'])
    import tvnamer
    assert "%s" % (tvnamer.__version__,) in out_data['output']
|
|
import datetime
import decimal
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
    """
    This class encapsulates all backend-specific differences, such as the way
    a backend performs ordering or calculates the ID of a recently-inserted
    row.
    """
    # Dotted path of the module providing the SQLCompiler classes; imported
    # lazily by compiler() and memoised per instance in self._cache.
    compiler_module = "django.db.models.sql.compiler"
    # Integer field safe ranges by `internal_type` as documented
    # in docs/ref/models/fields.txt.
    integer_field_ranges = {
        'SmallIntegerField': (-32768, 32767),
        'IntegerField': (-2147483648, 2147483647),
        'BigIntegerField': (-9223372036854775808, 9223372036854775807),
        'PositiveSmallIntegerField': (0, 32767),
        'PositiveIntegerField': (0, 2147483647),
    }
    def __init__(self, connection):
        """Store the owning connection wrapper."""
        self.connection = connection
        # Lazily-populated cache for the imported compiler module; see compiler().
        self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetrunc_sql() method')
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunk_sql() method')
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes a SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def value_to_db_ipaddress(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value):
    """
    Return the inclusive [lower, upper] bounds for a BETWEEN operator
    matching every datetime in the year *value* (an int). When USE_TZ is
    on, both bounds are made aware in the current timezone.
    """
    bounds = [
        datetime.datetime(value, 1, 1),
        datetime.datetime(value, 12, 31, 23, 59, 59, 999999),
    ]
    if settings.USE_TZ:
        tz = timezone.get_current_timezone()
        bounds = [timezone.make_aware(bound, tz) for bound in bounds]
    return bounds
def get_db_converters(self, expression):
    """
    Get a list of functions needed to convert field data.

    Some field types on some backends do not provide data in the correct
    format; this is the hook for converter functions. The base backend
    needs no conversions.
    """
    return []
def convert_durationfield_value(self, value, expression, context):
    """
    Convert a backend duration value (a count of microseconds) into a
    timedelta; None passes through.
    """
    if value is None:
        return None
    seconds = decimal.Decimal(value) / decimal.Decimal(1000000)
    return parse_duration(str(seconds))
def check_aggregate_support(self, aggregate_func):
    """
    Hook for backends to veto aggregates with known-faulty implementations
    by raising NotImplementedError. The base backend accepts everything.
    """
def combine_expression(self, connector, sub_expressions):
    """
    Join *sub_expressions* into a single expression with *connector*
    padded by spaces. Overridable because operators vary between backends
    (e.g., Oracle with %% and &) and between subexpression types.
    """
    return (' %s ' % connector).join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
    # Duration arithmetic uses the plain combining rules by default;
    # backends with special interval syntax override this hook.
    return self.combine_expression(connector, sub_expressions)
def modify_insert_params(self, placeholders, params):
    """
    Hook letting backends rewrite INSERT parameters before execution
    (needed for the Oracle Spatial backend, see #10888). The base backend
    returns them untouched.
    """
    return params
def integer_field_range(self, internal_type):
    """
    Given an integer field internal type (e.g. 'PositiveIntegerField'),
    returns a tuple of the (min_value, max_value) form representing the
    range of the column type bound to the field.
    """
    # integer_field_ranges is a backend-supplied mapping of internal type
    # name -> (min, max); a KeyError here means an unknown integer type.
    return self.integer_field_ranges[internal_type]
|
|
# -*- test-case-name: twisted.test.test_unix,twisted.internet.test.test_unix,twisted.internet.test.test_posixbase -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various asynchronous TCP/IP classes.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: Itamar Shtull-Trauring
"""
# System imports
import os, sys, stat, socket, struct
from errno import EINTR, EMSGSIZE, EAGAIN, EWOULDBLOCK, ECONNREFUSED, ENOBUFS
from zope.interface import implements, implementsOnly, implementedBy
if not hasattr(socket, 'AF_UNIX'):
raise ImportError("UNIX sockets not supported on this platform")
# Twisted imports
from twisted.internet import main, base, tcp, udp, error, interfaces, protocol, address
from twisted.internet.error import CannotListenError
from twisted.python.util import untilConcludes
from twisted.python import lockfile, log, reflect, failure
try:
from twisted.python import sendmsg
except ImportError:
sendmsg = None
def _ancillaryDescriptor(fd):
    """
    Pack an integer into an ancillary data structure suitable for use with
    L{sendmsg.send1msg}.
    """
    return [(socket.SOL_SOCKET, sendmsg.SCM_RIGHTS, struct.pack("i", fd))]
class _SendmsgMixin(object):
    """
    Mixin for stream-oriented UNIX transports which uses sendmsg and recvmsg to
    offer additional functionality, such as copying file descriptors into other
    processes.

    @ivar _writeSomeDataBase: The class which provides the basic implementation
        of C{writeSomeData}. Ultimately this should be a subclass of
        L{twisted.internet.abstract.FileDescriptor}. Subclasses which mix in
        L{_SendmsgMixin} must define this.

    @ivar _sendmsgQueue: A C{list} of C{int} holding file descriptors which are
        currently buffered before being sent.

    @ivar _fileDescriptorBufferSize: An C{int} giving the maximum number of
        file descriptors to accept and queue for sending before pausing the
        registered producer, if there is one.
    """
    implements(interfaces.IUNIXTransport)

    _writeSomeDataBase = None
    _fileDescriptorBufferSize = 64

    def __init__(self):
        self._sendmsgQueue = []

    def _isSendBufferFull(self):
        """
        Determine whether the user-space send buffer for this transport is full
        or not.

        This extends the base determination by adding consideration of how many
        file descriptors need to be sent using L{sendmsg.send1msg}. When there
        are more than C{self._fileDescriptorBufferSize}, the buffer is
        considered full.

        @return: C{True} if it is full, C{False} otherwise.
        """
        # There must be some bytes in the normal send buffer, checked by
        # _writeSomeDataBase._isSendBufferFull, in order to send file
        # descriptors from _sendmsgQueue. That means that the buffer will
        # eventually be considered full even without this additional logic.
        # However, since we send only one byte per file descriptor, having lots
        # of elements in _sendmsgQueue incurs more overhead and perhaps slows
        # things down. Anyway, try this for now, maybe rethink it later.
        return (
            len(self._sendmsgQueue) > self._fileDescriptorBufferSize
            or self._writeSomeDataBase._isSendBufferFull(self))

    def sendFileDescriptor(self, fileno):
        """
        Queue the given file descriptor to be sent and start trying to send it.
        """
        self._sendmsgQueue.append(fileno)
        self._maybePauseProducer()
        self.startWriting()

    def writeSomeData(self, data):
        """
        Send as much of C{data} as possible. Also send any pending file
        descriptors.
        """
        # Make it a programming error to send more file descriptors than you
        # send regular bytes. Otherwise, due to the limitation mentioned below,
        # we could end up with file descriptors left, but no bytes to send with
        # them, therefore no way to send those file descriptors.
        if len(self._sendmsgQueue) > len(data):
            return error.FileDescriptorOverrun()

        # If there are file descriptors to send, try sending them first, using
        # a little bit of data from the stream-oriented write buffer too. It
        # is not possible to send a file descriptor without sending some
        # regular data.
        index = 0
        try:
            while index < len(self._sendmsgQueue):
                fd = self._sendmsgQueue[index]
                try:
                    # Each descriptor rides along with exactly one byte of
                    # ordinary stream data (data[index]).
                    untilConcludes(
                        sendmsg.send1msg, self.socket.fileno(), data[index], 0,
                        _ancillaryDescriptor(fd))
                except socket.error, se:
                    if se.args[0] in (EWOULDBLOCK, ENOBUFS):
                        # Kernel buffers full: report how many bytes (and
                        # descriptors) went out so far.
                        return index
                    else:
                        return main.CONNECTION_LOST
                else:
                    index += 1
        finally:
            # Drop the descriptors that were actually sent, whether we exit
            # normally or via one of the returns above.
            del self._sendmsgQueue[:index]

        # Hand the remaining data to the base implementation. Avoid slicing in
        # favor of a buffer, in case that happens to be any faster.
        limitedData = buffer(data, index)
        result = self._writeSomeDataBase.writeSomeData(self, limitedData)
        # result may be an int (bytes written) or an error/disconnect marker;
        # only add our count in the int case.
        try:
            return index + result
        except TypeError:
            return result

    def doRead(self):
        """
        Calls L{IFileDescriptorReceiver.fileDescriptorReceived} and
        L{IProtocol.dataReceived} with all available data.

        This reads up to C{self.bufferSize} bytes of data from its socket, then
        dispatches the data to protocol callbacks to be handled. If the
        connection is not lost through an error in the underlying recvmsg(),
        this function will return the result of the dataReceived call.
        """
        try:
            data, flags, ancillary = untilConcludes(
                sendmsg.recv1msg, self.socket.fileno(), 0, self.bufferSize)
        except socket.error, se:
            if se.args[0] == EWOULDBLOCK:
                return
            else:
                return main.CONNECTION_LOST

        if ancillary:
            # Exactly one descriptor is expected per message (the sender also
            # sends them one at a time, see writeSomeData above).
            fd = struct.unpack('i', ancillary[0][2])[0]
            if interfaces.IFileDescriptorReceiver.providedBy(self.protocol):
                self.protocol.fileDescriptorReceived(fd)
            else:
                # The protocol can't take it; close the descriptor so it does
                # not leak into this process.
                log.msg(
                    format=(
                        "%(protocolName)s (on %(hostAddress)r) does not "
                        "provide IFileDescriptorReceiver; closing file "
                        "descriptor received (from %(peerAddress)r)."),
                    hostAddress=self.getHost(), peerAddress=self.getPeer(),
                    protocolName=self._getLogPrefix(self.protocol),
                    )
                os.close(fd)

        return self._dataReceived(data)
class _UnsuportedSendmsgMixin(object):
    """
    Behaviorless placeholder used when L{twisted.python.sendmsg} is not
    available, preventing L{IUNIXTransport} from being supported.
    """
    # NOTE: the misspelled name ("Unsuported") is kept as-is; renaming it
    # would break anything that references this class.
# Pick the real sendmsg-based mixin when twisted.python.sendmsg imported
# successfully; otherwise fall back to the no-op placeholder so the rest of
# this module still works (without file-descriptor passing).
if sendmsg:
    _SendmsgMixin = _SendmsgMixin
else:
    _SendmsgMixin = _UnsuportedSendmsgMixin
class Server(_SendmsgMixin, tcp.Server):
    # Server-side transport for an accepted UNIX stream connection.
    _writeSomeDataBase = tcp.Server

    def __init__(self, sock, protocol, client, server, sessionno, reactor):
        _SendmsgMixin.__init__(self)
        # UNIX sockets have no (host, port) pair; wrap the client path in a
        # (client, None) tuple to match the shape tcp.Server expects.
        tcp.Server.__init__(self, sock, protocol, (client, None), server, sessionno, reactor)

    def getHost(self):
        # The server's own address: the filesystem path the socket is bound to.
        return address.UNIXAddress(self.socket.getsockname())

    def getPeer(self):
        return address.UNIXAddress(self.hostname or None)
def _inFilesystemNamespace(path):
"""
Determine whether the given unix socket path is in a filesystem namespace.
While most PF_UNIX sockets are entries in the filesystem, Linux 2.2 and
above support PF_UNIX sockets in an "abstract namespace" that does not
correspond to any path. This function returns C{True} if the given socket
path is stored in the filesystem and C{False} if the path is in this
abstract namespace.
"""
return path[:1] != "\0"
class _UNIXPort(object):
    def getHost(self):
        """Returns a UNIXAddress.

        This indicates the server's address.
        """
        if sys.version_info > (2, 5) or _inFilesystemNamespace(self.port):
            path = self.socket.getsockname()
        else:
            # Abstract namespace sockets aren't well supported on Python 2.4.
            # getsockname() always returns ''.
            path = self.port
        return address.UNIXAddress(path)
class Port(_UNIXPort, tcp.Port):
    # Listening stream socket bound to a UNIX path (or abstract name).
    addressFamily = socket.AF_UNIX
    socketType = socket.SOCK_STREAM

    transport = Server
    lockFile = None

    def __init__(self, fileName, factory, backlog=50, mode=0666, reactor=None, wantPID = 0):
        # `mode` is the filesystem permission applied to the socket node;
        # `wantPID` enables the .lock-file stale-socket handling below.
        tcp.Port.__init__(self, fileName, factory, backlog, reactor=reactor)
        self.mode = mode
        self.wantPID = wantPID

    def __repr__(self):
        factoryName = reflect.qual(self.factory.__class__)
        if hasattr(self, 'socket'):
            return '<%s on %r>' % (factoryName, self.port)
        else:
            return '<%s (not listening)>' % (factoryName,)

    def _buildAddr(self, name):
        return address.UNIXAddress(name)

    def startListening(self):
        """
        Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        log.msg("%s starting on %r" % (
                self._getLogPrefix(self.factory), self.port))
        if self.wantPID:
            # Guard the socket path with a PID lockfile so a socket left
            # behind by a crashed process can be detected and removed.
            self.lockFile = lockfile.FilesystemLock(self.port + ".lock")
            if not self.lockFile.lock():
                raise CannotListenError, (None, self.port, "Cannot acquire lock")
            else:
                if not self.lockFile.clean:
                    try:
                        # This is a best-attempt at cleaning up
                        # left-over unix sockets on the filesystem.
                        # If it fails, there's not much else we can
                        # do. The bind() below will fail with an
                        # exception that actually propagates.
                        if stat.S_ISSOCK(os.stat(self.port).st_mode):
                            os.remove(self.port)
                    except:
                        pass

        self.factory.doStart()
        try:
            skt = self.createInternetSocket()
            skt.bind(self.port)
        except socket.error, le:
            raise CannotListenError, (None, self.port, le)
        else:
            if _inFilesystemNamespace(self.port):
                # Make the socket readable and writable to the world.
                os.chmod(self.port, self.mode)
            skt.listen(self.backlog)
            self.connected = True
            self.socket = skt
            self.fileno = self.socket.fileno
            self.numberAccepts = 100
            self.startReading()

    def _logConnectionLostMsg(self):
        """
        Log message for closing socket
        """
        log.msg('(UNIX Port %s Closed)' % (repr(self.port),))

    def connectionLost(self, reason):
        # Remove the filesystem entry and release the PID lock before the
        # normal TCP-style teardown.
        if _inFilesystemNamespace(self.port):
            os.unlink(self.port)
        if self.lockFile is not None:
            self.lockFile.unlock()
        tcp.Port.connectionLost(self, reason)
class Client(_SendmsgMixin, tcp.BaseClient):
    """A client for Unix sockets."""
    addressFamily = socket.AF_UNIX
    socketType = socket.SOCK_STREAM

    _writeSomeDataBase = tcp.BaseClient

    def __init__(self, filename, connector, reactor=None, checkPID = 0):
        _SendmsgMixin.__init__(self)
        self.connector = connector
        self.realAddress = self.addr = filename
        if checkPID and not lockfile.isLocked(filename + ".lock"):
            self._finishInit(None, None, error.BadFileError(filename), reactor)
        # NOTE(review): when the PID-lock check fails above we still fall
        # through and call _finishInit a second time — looks like a missing
        # `return`; confirm against upstream before changing.
        self._finishInit(self.doConnect, self.createInternetSocket(),
                         None, reactor)

    def getPeer(self):
        return address.UNIXAddress(self.addr)

    def getHost(self):
        # The client end of a UNIX socket has no meaningful local address.
        return address.UNIXAddress(None)
class Connector(base.BaseConnector):
    # Connector that produces Client transports for a UNIX socket path.
    def __init__(self, address, factory, timeout, reactor, checkPID):
        base.BaseConnector.__init__(self, factory, timeout, reactor)
        self.address = address
        self.checkPID = checkPID

    def _makeTransport(self):
        return Client(self.address, self, self.reactor, self.checkPID)

    def getDestination(self):
        return address.UNIXAddress(self.address)
class DatagramPort(_UNIXPort, udp.Port):
    """Datagram UNIX port, listening for packets."""

    implements(interfaces.IUNIXDatagramTransport)

    addressFamily = socket.AF_UNIX

    def __init__(self, addr, proto, maxPacketSize=8192, mode=0666, reactor=None):
        """Initialize with address to listen on.
        """
        udp.Port.__init__(self, addr, proto, maxPacketSize=maxPacketSize, reactor=reactor)
        # Filesystem permission bits applied to the socket node when bound.
        self.mode = mode

    def __repr__(self):
        protocolName = reflect.qual(self.protocol.__class__,)
        if hasattr(self, 'socket'):
            return '<%s on %r>' % (protocolName, self.port)
        else:
            return '<%s (not listening)>' % (protocolName,)

    def _bindSocket(self):
        log.msg("%s starting on %s"%(self.protocol.__class__, repr(self.port)))
        try:
            skt = self.createInternetSocket() # XXX: haha misnamed method
            if self.port:
                skt.bind(self.port)
        except socket.error, le:
            raise error.CannotListenError, (None, self.port, le)
        if self.port and _inFilesystemNamespace(self.port):
            # Make the socket readable and writable to the world.
            os.chmod(self.port, self.mode)
        self.connected = 1
        self.socket = skt
        self.fileno = self.socket.fileno

    def write(self, datagram, address):
        """Write a datagram."""
        try:
            return self.socket.sendto(datagram, address)
        except socket.error, se:
            no = se.args[0]
            if no == EINTR:
                # Interrupted by a signal: retry the send.
                return self.write(datagram, address)
            elif no == EMSGSIZE:
                raise error.MessageLengthError, "message too long"
            elif no == EAGAIN:
                # oh, well, drop the data. The only difference from UDP
                # is that UDP won't ever notice.
                # TODO: add TCP-like buffering
                pass
            else:
                raise

    def connectionLost(self, reason=None):
        """Cleans up my socket.
        """
        log.msg('(Port %s Closed)' % repr(self.port))
        base.BasePort.connectionLost(self, reason)
        if hasattr(self, "protocol"):
            # we won't have attribute in ConnectedPort, in cases
            # where there was an error in connection process
            self.protocol.doStop()
        self.connected = 0
        self.socket.close()
        del self.socket
        del self.fileno
        if hasattr(self, "d"):
            # Fire the deferred that stopListening() is waiting on.
            self.d.callback(None)
            del self.d

    def setLogStr(self):
        self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
class ConnectedDatagramPort(DatagramPort):
    """
    A connected datagram UNIX socket.
    """

    implementsOnly(interfaces.IUNIXDatagramConnectedTransport,
                   *(implementedBy(base.BasePort)))

    def __init__(self, addr, proto, maxPacketSize=8192, mode=0666,
                 bindAddress=None, reactor=None):
        assert isinstance(proto, protocol.ConnectedDatagramProtocol)
        DatagramPort.__init__(self, bindAddress, proto, maxPacketSize, mode,
                              reactor)
        # Remote path we connect() to; all traffic goes to/from this peer.
        self.remoteaddr = addr

    def startListening(self):
        try:
            self._bindSocket()
            self.socket.connect(self.remoteaddr)
            self._connectToProtocol()
        except:
            # Any failure during bind/connect is reported via the protocol's
            # connectionFailed callback rather than raised.
            self.connectionFailed(failure.Failure())

    def connectionFailed(self, reason):
        """
        Called when a connection fails. Stop listening on the socket.

        @type reason: L{Failure}
        @param reason: Why the connection failed.
        """
        self.stopListening()
        self.protocol.connectionFailed(reason)
        del self.protocol

    def doRead(self):
        """
        Called when my socket is ready for reading.
        """
        read = 0
        while read < self.maxThroughput:
            try:
                data, addr = self.socket.recvfrom(self.maxPacketSize)
                read += len(data)
                self.protocol.datagramReceived(data)
            except socket.error, se:
                no = se.args[0]
                if no in (EAGAIN, EINTR, EWOULDBLOCK):
                    return
                if no == ECONNREFUSED:
                    # Connected datagram sockets do see refusals; report to
                    # the protocol instead of raising.
                    self.protocol.connectionRefused()
                else:
                    raise
            except:
                log.deferr()

    def write(self, data):
        """
        Write a datagram.
        """
        try:
            return self.socket.send(data)
        except socket.error, se:
            no = se.args[0]
            if no == EINTR:
                # Interrupted by a signal: retry the send.
                return self.write(data)
            elif no == EMSGSIZE:
                raise error.MessageLengthError, "message too long"
            elif no == ECONNREFUSED:
                self.protocol.connectionRefused()
            elif no == EAGAIN:
                # oh, well, drop the data. The only difference from UDP
                # is that UDP won't ever notice.
                # TODO: add TCP-like buffering
                pass
            else:
                raise

    def getPeer(self):
        return address.UNIXAddress(self.remoteaddr)
|
|
from collections import defaultdict
from fcntl import LOCK_EX, LOCK_SH, LOCK_UN, flock
from functools import wraps
from math import floor
from os import fsync, SEEK_SET, SEEK_END
from time import time
import json
def accessor(f):
    """
    Decorator for read methods: replay any outstanding log entries (via
    ``self._refresh()``) before the wrapped function runs, so reads always
    see mutations made by this or other processes.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        instance = args[0]
        instance._refresh()
        return f(*args, **kwargs)
    return wrapper
def mutator(fn):
    """
    Decorator for write methods. A decorated call does NOT touch the cache
    directly: it only appends a record to the transaction log. The actual
    mutation runs later, when the record is replayed through the ``replay``
    attribute attached here (see DB._replay).
    """
    @wraps(fn)
    def logger(self, *args, **kwargs):
        self._log(fn.__name__, args, kwargs)
    logger.replay = lambda target, a, k: fn(target, *a, **k)
    return logger
class DB:
    """\
    A database implementation that sits on top of a write-ahead log and an
    in-memory cache. Mutations to the database are written to the log and
    all reads first replay any new log entries to make sure the cache is
    up to date with any changes made by this or other processes. We also
    write out the cache to disk to avoid having to replay the whole log at
    startup.
    """

    def __init__(self, name):
        self.file = name + '.data'
        self.log = Log(name + '.log')
        try:
            self._load()
        except Exception:
            # No usable on-disk cache (missing, corrupt, or wrong shape):
            # start empty and replay the log from the very beginning.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            self.low_water_mark = 0
            self.cache = self.empty_cache()

    def empty_cache(self):
        "Return an empty cache for a new database. (Subclass hook.)"
        pass

    def fill_cache(self, data):
        # BUG FIX: the original had a stray `e` after this docstring, which
        # is a SyntaxError; also "a in-memory" -> "an in-memory".
        "Return an in-memory cache representing the data loaded from disk."
        pass

    def cache_to_json(self):
        "Convert the cache to the form we want to serialize as JSON to disk."
        return self.cache

    def _replay(self, record):
        "Replay a log entry to reflect it in our in-memory cache."
        entry = json.loads(record)
        # Look the method up on the class to get the decorated function and
        # invoke its .replay hook (see mutator()) — calling the bound method
        # would just re-log the mutation instead of applying it.
        getattr(self.__class__, entry['name']).replay(self, entry['args'], entry['kwargs'])

    def _log(self, name, args, kwargs):
        "Log data to our transaction log."
        entry = {'name': name, 'args': args, 'kwargs': kwargs}
        self.log.write(json.dumps(entry))

    def _refresh(self):
        "Replay any new log entries against our in-memory cache."
        new_lwm = self.low_water_mark
        for (entry, lsn) in self.log.read(self.low_water_mark):
            self._replay(entry)
            new_lwm = lsn
        if self.low_water_mark < new_lwm:
            self.low_water_mark = new_lwm
            self._save()

    def _load(self):
        "Load cached data from disk so we don't have to replay the whole log."
        with open(self.file) as f:
            flock(f, LOCK_EX)
            data = json.load(f)
            self.cache = self.fill_cache(data['cache'])
            self.low_water_mark = data['low_water_mark']
            flock(f, LOCK_UN)

    def _save(self):
        # We could check that our low water mark is greater than the
        # one already on disk before we write since it's possible that
        # someone else has read farther in the log than us and gotten
        # in and written out their cache to disk. But it doesn't
        # really matter since we never actually read from the on-disk
        # cache except at startup. Rolling the cache back in time will,
        # at worst, make some processes have to replay a few more log
        # records than they might have otherwise.
        with open(self.file, 'w') as f:
            flock(f, LOCK_EX)
            json.dump({
                'cache': self.cache_to_json(),
                'low_water_mark': self.low_water_mark
            }, f, sort_keys=True, indent=2)
            flock(f, LOCK_UN)
class LinkDB(DB):
    "Database of link shortcuts: name -> sparse list of patterns."

    def empty_cache(self):
        return defaultdict(list)

    def fill_cache(self, data):
        return defaultdict(list, data)

    # Accessors -- must check for new entries in log.

    @accessor
    def has_name(self, name):
        return name in self.cache

    @accessor
    def get_patterns(self, name):
        # Only the occupied slots, as (index, pattern) pairs.
        slots = self.cache[name]
        return [(i, pat) for i, pat in enumerate(slots) if pat is not None]

    @accessor
    def has_pattern(self, name, n):
        slots = self.cache[name]
        return n < len(slots) and slots[n] is not None

    @accessor
    def get_pattern(self, name, n):
        return self.cache[name][n]

    @accessor
    def names(self):
        return self.cache.keys()

    # Mutators

    @mutator
    def delete_name(self, name):
        del self.cache[name]

    @mutator
    def delete_pattern(self, name, n):
        slots = self.cache[name]
        expand(slots, n)
        slots[n] = None
        shrink(slots)

    @mutator
    def set_pattern(self, name, n, pattern):
        slots = self.cache[name]
        expand(slots, n)
        slots[n] = pattern
class NonceDB (DB):
    "Database of nonces we've seen."

    def empty_cache(self):
        # Maps time-bucket key -> set of nonces seen in that bucket.
        return defaultdict(set)

    def fill_cache(self, data):
        # JSON can't store sets, so lists come back from disk; rebuild sets.
        return defaultdict(set, { k:set(v) for k, v in data.items() })

    def cache_to_json(self):
        # Inverse of fill_cache: sets -> lists for JSON serialization.
        return { k:list(v) for k, v in self.cache.items() }

    def timekey(self, t):
        # Bucket timestamps into 300-second (5-minute) windows; the key is
        # the string form of the window's end time.
        return str(300 + ((floor(t) // 300) * 300))

    # Accessors

    @accessor
    def used(self, t, nonce):
        # Time recorded in nonce goes to a particular bucket. If the
        # bucket is the current bucket but it doesn't contain the
        # nonce, then we haven't seen it. If it's any other bucket
        # then we consider it to have been seen.
        current = self.timekey(time())
        expired = self.timekey(t) != current
        seen = expired or nonce in self.cache[current]
        if not seen: self.add_nonce(t, nonce)
        # While we're here, expire old nonces.
        # NOTE: add_nonce/delete_chunk are @mutator methods — calling them
        # only appends to the log (see mutator()); the cache is mutated on
        # the next accessor's replay, so deleting while iterating the keys
        # here is safe.
        for k in self.cache.keys():
            if k != current:
                self.delete_chunk(k)
        return seen

    # Mutators

    @mutator
    def add_nonce(self, t, nonce):
        self.cache[self.timekey(t)].add(nonce)

    @mutator
    def delete_chunk(self, chunk):
        del self.cache[chunk]
class Log:
    "Simple write-ahead log. Records each record as a line."

    def __init__(self, file):
        self.file = file

    def write(self, data):
        """
        Append one record (a single line) under an exclusive lock, fsync
        it, and return its log sequence number — the file offset just past
        the record.
        """
        with open(self.file, mode='a') as f:
            flock(f, LOCK_EX)
            # Re-seek to the end: another process may have appended between
            # our open() and acquiring the lock.
            f.seek(0, SEEK_END)
            print(data, file=f)
            f.flush()
            fsync(f.fileno())
            flock(f, LOCK_UN)
            return f.tell()

    def read(self, low_water_mark):
        """
        Yield (record, lsn) pairs for every record past *low_water_mark*.
        A log that doesn't exist yet (or can't be opened) yields nothing.
        """
        # BUG FIX: the original wrapped the whole loop in a bare `except:`,
        # which silently swallowed every error — including exceptions
        # delivered at the `yield` by the consumer — not just the expected
        # "no log file yet" case. Only open() failures are absorbed now.
        try:
            f = open(self.file, mode='r')
        except OSError:
            return
        with f:
            flock(f, LOCK_SH)
            f.seek(low_water_mark, SEEK_SET)
            while True:
                line = f.readline()
                pos = f.tell()
                if line == '':
                    break
                yield line[:-1], pos
            flock(f, LOCK_UN)
#
# Utilities
#
def expand(slots, size):
    """Grow *slots* in place with None entries so that index *size* is valid.

    A no-op when the list is already long enough (the multiplier goes
    non-positive and ``[None] * k`` is empty).
    """
    # Renamed the parameter: the original shadowed the builtin `list`.
    slots += [None] * (1 + (size - len(slots)))
def shrink(slots):
    """Trim trailing None entries from *slots* in place."""
    # Renamed the parameter: the original shadowed the builtin `list`.
    while slots and slots[-1] is None:
        slots.pop()
|
|
import datetime
import functools
import json
import operator
import re
import requests
from django.conf import settings
from django.contrib import auth
from django.core import signing
from django.db import transaction
from django.db.models import Q, F
from django.http import Http404, HttpResponseForbidden, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from password_reset.views import Recover
from tagging.models import Tag, TaggedItem
from tagging.utils import calculate_cloud, get_tag
from . import utils
from .constants import (MACHINETAGS_FROM_FIELDS, IMPROVIDERS_DICT,
SERVICES_DICT)
from .forms import (SkillsForm, SignupForm, PortfolioForm, BioForm,
LocationForm, FindingForm, AccountForm, PasswordForm,
DeletionRequestForm, AccountDeletionForm)
from .models import DjangoPerson, Country, User, Region, PortfolioSite
from ..django_openidauth.models import associate_openid, UserOpenID
from ..machinetags.utils import tagdict
from ..machinetags.models import MachineTaggedItem
NOTALPHA_RE = re.compile('[^a-zA-Z0-9]')
@utils.simple_decorator
def must_be_owner(view):
    """Wrap *view* so only the profile owner (matched by username) may call it.

    The username is taken from the `username` kwarg when present, otherwise
    from the first positional argument.
    """
    def inner(request, *args, **kwargs):
        owner = kwargs['username'] if 'username' in kwargs else args[0]
        user = request.user
        if not user or user.is_anonymous or user.username != owner:
            return HttpResponseForbidden('Not allowed')
        return view(request, *args, **kwargs)
    return inner
class IndexView(generic.TemplateView):
    """Landing page: the 100 newest people plus site-wide counts."""
    template_name = 'index.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        newest = DjangoPerson.objects.all().select_related()
        newest = newest.order_by('-id')[:100]
        ctx['people_list'] = newest
        ctx['people_list_limited'] = newest[:4]
        ctx['total_people'] = DjangoPerson.objects.count()
        ctx['countries'] = Country.objects.top_countries()
        ctx['home'] = True
        return ctx


index = IndexView.as_view()
class AboutView(generic.TemplateView):
    """Static about page with a couple of aggregate statistics."""
    template_name = 'about.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['total_people'] = DjangoPerson.objects.count()
        context['countries'] = Country.objects.top_countries()
        return context


about = AboutView.as_view()
class RecentView(generic.TemplateView):
    """The 50 most recently joined people."""
    template_name = 'recent.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        people = DjangoPerson.objects.all().select_related()
        ctx.update({
            # BUG FIX: ordering by the raw "auth_user.date_joined" column
            # name was removed in Django 1.9 (raises FieldError on the
            # Django versions this module targets); order through the user
            # FK with a field lookup instead.
            'people': people.order_by('-user__date_joined')[:50],
        })
        return ctx


recent = RecentView.as_view()
def redirect_to_logged_in_user_profile(request):
    """Send authenticated users to their own profile; others to the index."""
    if not request.user.is_authenticated:
        return redirect(reverse('index'))
    return redirect(reverse('user_profile', kwargs={'username': request.user}))
def logout(request):
    # End the session, drop any stored OpenIDs, then go home.
    auth.logout(request)
    request.session['openids'] = []
    return redirect(reverse('index'))
class RecoverView(Recover):
    # Password recovery looks accounts up by username only.
    search_fields = ['username']


recover = RecoverView.as_view()
class OpenIDWhatNext(generic.RedirectView):
    """
    Route a just-completed OpenID authentication.

    If user is already logged in, send them to /openid/associations/.
    Otherwise, send them to the signup page — unless the OpenID matches an
    existing account, in which case log that user in and show their profile.
    """
    permanent = False

    def get_redirect_url(self):
        if not self.request.openid:
            return reverse('index')

        if not self.request.user.is_anonymous:
            return reverse('openid_associations')

        # Anonymous: does this OpenID belong to an existing account?
        try:
            match = UserOpenID.objects.get(
                openid=str(self.request.openid),
            )
        except UserOpenID.DoesNotExist:
            return reverse('signup')

        # Log the user in
        user = match.user
        user.backend = 'django.contrib.auth.backends.ModelBackend'
        auth.login(self.request, user)
        return reverse('user_profile', args=[user.username])


openid_whatnext = OpenIDWhatNext.as_view()
class SignupView(generic.FormView):
    """Create a User plus DjangoPerson from the signup form, then log in."""
    form_class = SignupForm
    template_name = 'signup.html'

    def dispatch(self, request, *args, **kwargs):
        # Already-authenticated users have no business signing up again.
        if not request.user.is_anonymous:
            return redirect(reverse('index'))
        return super().dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        creation_args = {
            'username': form.cleaned_data['username'],
            'email': form.cleaned_data['email'],
        }
        user = User.objects.create(**creation_args)

        # Password is optional: OpenID-based signups may leave it blank.
        if form.cleaned_data.get('password1'):
            user.set_password(form.cleaned_data['password1'])

        user.first_name = form.cleaned_data['first_name']
        user.last_name = form.cleaned_data['last_name']
        user.save()

        if self.request.openid:
            associate_openid(user, str(self.request.openid))

        region = None
        if form.cleaned_data['region']:
            region = Region.objects.get(
                country__iso_code=form.cleaned_data['country'],
                code=form.cleaned_data['region'],
            )

        # Now create the DjangoPerson
        person = DjangoPerson.objects.create(
            user=user,
            bio=form.cleaned_data['bio'],
            country=Country.objects.get(
                iso_code=form.cleaned_data['country'],
            ),
            region=region,
            latitude=form.cleaned_data['latitude'],
            longitude=form.cleaned_data['longitude'],
            location_description=form.cleaned_data['location_description'],
        )

        # Set up the various machine tags
        for fieldname, (namespace,
                        predicate) in MACHINETAGS_FROM_FIELDS.items():
            if (
                fieldname in form.cleaned_data and
                form.cleaned_data[fieldname].strip()
            ):
                value = form.cleaned_data[fieldname].strip()
                person.add_machinetag(namespace, predicate, value)

        # Finally, set their skill tags
        person.skilltags = form.cleaned_data['skilltags']

        # Log them in and redirect to their profile page
        user.backend = 'django.contrib.auth.backends.ModelBackend'
        auth.login(self.request, user)
        # Stash the person for get_success_url().
        self.person = person
        return super().form_valid(form)

    def get_success_url(self):
        return self.person.get_absolute_url()

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        if self.request.openid:
            kwargs['openid'] = self.request.openid
        return kwargs

    def get_initial(self):
        # Pre-fill the form from OpenID simple-registration data, if any.
        initial = super().get_initial()
        if self.request.openid and self.request.openid.sreg:
            sreg = self.request.openid.sreg
            first_name = ''
            last_name = ''
            username = ''
            if sreg.get('fullname'):
                bits = sreg['fullname'].split()
                first_name = bits[0]
                if len(bits) > 1:
                    last_name = ' '.join(bits[1:])
            # Find a not-taken username
            if sreg.get('nickname'):
                username = derive_username(sreg['nickname'])

            initial.update({
                'first_name': first_name,
                'last_name': last_name,
                'email': sreg.get('email', ''),
                'username': username,
            })
        return initial

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update({
            'openid': self.request.openid,
        })
        return ctx


signup = SignupView.as_view()
# The whole signup (User + DjangoPerson + machinetags) commits atomically.
signup = transaction.atomic(signup)
def derive_username(nickname):
    """
    Strip *nickname* down to alphanumerics, then append an increasing
    counter until the result doesn't collide with an existing DjangoPerson
    username. Returns '' when nothing alphanumeric is left.
    """
    candidate = NOTALPHA_RE.sub('', nickname)
    if not candidate:
        return ''
    stem = candidate
    suffix = 1
    while True:
        try:
            DjangoPerson.objects.get(user__username=candidate)
        except DjangoPerson.DoesNotExist:
            return candidate
        candidate = stem + str(suffix)
        suffix += 1
class CleverPaginator(object):
    """
    A paginator that triggers pagination only if the 2nd page is
    worth displaying (i.e. the total exceeds 1.5 pages).
    """
    paginate_by = 100

    def get_count(self):
        raise NotImplementedError

    def get_paginate_by(self, queryset):
        total = self.get_count()
        if total <= self.paginate_by * 1.5:
            return total
        return self.paginate_by
class CountryView(CleverPaginator, generic.ListView):
    """People listing for a single country."""
    template_name = 'country.html'
    context_object_name = 'people_list'

    def get_queryset(self):
        code = self.kwargs['country_code'].upper()
        self.country = get_object_or_404(Country, iso_code=code)
        people = self.country.djangoperson_set.select_related(
            'country', 'user'
        ).order_by('user__first_name', 'user__last_name')
        self.all_people = people
        return people

    def get_count(self):
        return self.country.num_people

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['regions'] = self.country.top_regions()
        context['country'] = self.country
        context['people_list'] = self.all_people
        return context


country = CountryView.as_view()
class RegionView(CleverPaginator, generic.ListView):
    """People listing for a region within a country."""
    template_name = 'country.html'

    def get_queryset(self):
        self.region = get_object_or_404(
            Region,
            country__iso_code=self.kwargs['country_code'].upper(),
            code=self.kwargs['region_code'].upper(),
        )
        people = self.region.djangoperson_set.select_related(
            'user', 'country',
        ).order_by('user__first_name', 'user__last_name')
        self.all_people = people
        return people

    def get_count(self):
        return self.region.num_people

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The template treats the region like a country for display purposes.
        context['country'] = self.region
        context['people_list'] = self.all_people
        return context


region = RegionView.as_view()
class CountrySitesView(generic.ListView):
    """Portfolio sites belonging to people from one country."""
    context_object_name = 'sites'
    template_name = 'country_sites.html'

    def get_queryset(self):
        code = self.kwargs['country_code'].upper()
        self.country = get_object_or_404(Country, iso_code=code)
        sites = PortfolioSite.objects.select_related()
        return sites.filter(
            contributor__country=self.country,
        ).order_by('contributor')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['country'] = self.country
        return context


country_sites = CountrySitesView.as_view()
class ProfileView(generic.DetailView):
    """A person's public profile page."""
    context_object_name = 'person'
    template_name = 'profile.html'

    def get_object(self):
        person = get_object_or_404(DjangoPerson,
                                   user__username=self.kwargs['username'])
        # Bump the view counter with an F() expression so concurrent hits
        # don't lose updates (and the in-memory object stays untouched).
        DjangoPerson.objects.filter(pk=person.pk).update(
            profile_views=F('profile_views') + 1,
        )
        return person

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        mtags = tagdict(self.object.machinetags.all())

        # Set up convenient iterables for IM and services
        ims = []
        for key, value in mtags.get('im', {}).items():
            shortname, name, icon = IMPROVIDERS_DICT.get(key, ('', '', ''))
            if not shortname:
                continue  # Bad machinetag
            ims.append({
                'shortname': shortname,
                'name': name,
                'value': value,
            })
        ims.sort(key=lambda x: x['shortname'])

        services = []
        for key, value in mtags.get('services', {}).items():
            shortname, name, icon = SERVICES_DICT.get(key, ('', '', ''))
            if not shortname:
                continue  # Bad machinetag
            services.append({
                'shortname': shortname,
                'name': name,
                'value': value,
            })
        services.sort(key=lambda x: x['shortname'])

        # Set up vars that control privacy stuff.
        # 'private' email is visible to any logged-in user; IM details are
        # visible when public or when the viewer is logged in.
        privacy = {
            'show_im': (
                mtags['privacy']['im'] == 'public' or
                not self.request.user.is_anonymous
            ),
            'show_email': (
                mtags['privacy']['email'] == 'public' or
                (not self.request.user.is_anonymous and
                 mtags['privacy']['email'] == 'private')
            ),
            'hide_from_search': mtags['privacy']['search'] != 'public',
            'show_last_irc_activity': bool(self.object.last_active_on_irc and
                                           self.object.irc_tracking_allowed()),
        }

        # Should we show the 'Finding X' section at all?
        show_finding = (services or privacy['show_email'] or
                        (privacy['show_im'] and ims))

        context.update({
            'is_owner': self.request.user.username == self.kwargs['username'],
            'skills_form': SkillsForm(instance=self.object),
            'mtags': mtags,
            'ims': ims,
            'services': services,
            'privacy': privacy,
            'show_finding': show_finding,
            'people_list': self.object.get_nearest(),
        })
        return context


profile = ProfileView.as_view()
class DjangoPersonEditViewBase(generic.UpdateView):
    """Shared base for the profile-editing views: looks the person up by
    username and returns to their profile page on success."""

    def get_object(self):
        return get_object_or_404(DjangoPerson,
                                 user__username=self.kwargs['username'])

    def get_success_url(self):
        return reverse('user_profile', args=[self.kwargs['username']])
class EditFindingView(DjangoPersonEditViewBase):
    """Edit contact / "finding" details, seeded from the auth user's fields
    and from the person's machinetags."""

    form_class = FindingForm
    template_name = 'edit_finding.html'

    def get_initial(self):
        user = self.object.user
        initial = {
            'email': user.email,
            'first_name': user.first_name,
            'last_name': user.last_name,
        }
        # The remaining initial values live as machinetags on the person.
        mtags = tagdict(self.object.machinetags.all())
        for field, (namespace, predicate) in MACHINETAGS_FROM_FIELDS.items():
            initial[field] = mtags[namespace][predicate]
        return initial


edit_finding = must_be_owner(EditFindingView.as_view())
class EditPortfolioView(DjangoPersonEditViewBase):
    """Edit the person's portfolio sites."""

    form_class = PortfolioForm
    template_name = 'edit_portfolio.html'


edit_portfolio = must_be_owner(EditPortfolioView.as_view())
class EditAccountView(DjangoPersonEditViewBase):
    """Edit account-level settings."""

    form_class = AccountForm
    template_name = 'edit_account.html'


edit_account = must_be_owner(EditAccountView.as_view())
class EditSkillsView(DjangoPersonEditViewBase):
    """Edit the person's skill tags."""

    form_class = SkillsForm
    template_name = 'edit_skills.html'


edit_skills = must_be_owner(EditSkillsView.as_view())
class EditPassword(generic.UpdateView):
    """Change a user's password.

    Not based on DjangoPersonEditViewBase because it edits the auth `User`
    object rather than the DjangoPerson profile.
    """

    form_class = PasswordForm
    template_name = 'edit_password.html'

    def get_object(self):
        return get_object_or_404(User, username=self.kwargs['username'])

    def get_success_url(self):
        return reverse('user_profile', args=[self.kwargs['username']])


edit_password = must_be_owner(EditPassword.as_view())
class EditBioView(DjangoPersonEditViewBase):
    """Edit the person's bio text."""

    form_class = BioForm
    template_name = 'edit_bio.html'


edit_bio = must_be_owner(EditBioView.as_view())
class EditLocationView(DjangoPersonEditViewBase):
    """Edit the person's location."""

    form_class = LocationForm
    template_name = 'edit_location.html'

    def get_initial(self):
        initial = super().get_initial()
        # The form expects the ISO code, not the Country instance.
        initial.update({
            'country': self.object.country.iso_code,
        })
        return initial


edit_location = must_be_owner(EditLocationView.as_view())
class SkillCloudView(generic.TemplateView):
    """Site-wide skill tag cloud."""

    template_name = 'skills.html'

    def get_context_data(self, **kwargs):
        # NOTE(review): tags already come back from .cloud(steps=5) and are
        # then run through calculate_cloud(tags, 5) again -- possibly
        # redundant; confirm which call actually sets the size buckets.
        tags = DjangoPerson.skilltags.cloud(steps=5)
        calculate_cloud(tags, 5)
        context = super().get_context_data(**kwargs)
        context.update({
            'tags': tags,
        })
        return context


skill_cloud = SkillCloudView.as_view()
class CountrySkillCloudView(generic.DetailView):
    """Skill tag cloud restricted to a single country."""

    context_object_name = 'country'
    template_name = 'skills.html'

    def get_object(self):
        code = self.kwargs['country_code'].upper()
        return get_object_or_404(Country, iso_code=code)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        tags = Tag.objects.cloud_for_model(DjangoPerson, steps=5, filters={
            'country': self.object,
        })
        calculate_cloud(tags, 5)
        context['tags'] = tags
        return context


country_skill_cloud = CountrySkillCloudView.as_view()
class TaggedObjectList(generic.ListView):
    """Base list view for objects carrying a given tag.

    Subclasses set ``model`` and may set ``select_related``,
    ``related_tags``/``related_tag_counts`` or override
    ``get_extra_filter_args()`` to narrow the queryset further.
    """

    related_tags = False
    related_tag_counts = True
    select_related = False

    def get_queryset(self):
        tag = get_tag(self.kwargs['tag'])
        if tag is None:
            raise Http404(
                _('No Tag found matching "%s".') % self.kwargs['tag']
            )
        self.tag_instance = tag
        queryset = TaggedItem.objects.get_by_model(self.model, tag)
        if self.select_related:
            queryset = queryset.select_related(*self.select_related)
        extra = self.get_extra_filter_args()
        return queryset.filter(**extra) if extra else queryset

    def get_extra_filter_args(self):
        """Hook for subclasses: extra kwargs passed to queryset.filter()."""
        return {}

    def get_context_data(self, **kwargs):
        kwargs['tag'] = self.kwargs['tag']
        if self.related_tags:
            kwargs['related_tags'] = Tag.objects.related_for_model(
                self.tag_instance,
                self.model,
                counts=self.related_tag_counts,
            )
        return super().get_context_data(**kwargs)
class Skill(TaggedObjectList):
    """People who list a given skill tag, with related-tag counts."""

    model = DjangoPerson
    related_tags = True
    template_name = 'skill.html'
    context_object_name = 'people_list'
    # Avoid per-row queries when the template shows user/country details.
    select_related = ['user', 'country']


skill = Skill.as_view()
class CountrySkill(TaggedObjectList):
    """People with a given skill, restricted to a single country."""

    model = DjangoPerson
    related_tags = True
    template_name = 'skill.html'
    context_object_name = 'people_list'

    def get_context_data(self, **kwargs):
        code = self.kwargs['country_code'].upper()
        kwargs['country'] = Country.objects.get(iso_code=code)
        return super().get_context_data(**kwargs)

    def get_extra_filter_args(self):
        filters = dict(super().get_extra_filter_args())
        filters['country__iso_code'] = self.kwargs['country_code'].upper()
        return filters


country_skill = CountrySkill.as_view()
class CountryLookingForView(generic.ListView):
    """People in a given country filtered by their looking_for_work value."""

    context_object_name = 'people'
    template_name = 'country_looking_for.html'

    def get_queryset(self):
        self.country = get_object_or_404(
            Country, iso_code=self.kwargs['country_code'].upper(),
        )
        # values_list(..., flat=True) yields the ids directly instead of
        # materialising one dict per row as values() would.
        ids = MachineTaggedItem.objects.filter(
            namespace='profile',
            predicate='looking_for_work',
            value=self.kwargs['looking_for'],
        ).values_list('object_id', flat=True)
        return DjangoPerson.objects.filter(country=self.country, id__in=ids)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'country': self.country,
            'looking_for': self.kwargs['looking_for'],
        })
        return context


country_looking_for = CountryLookingForView.as_view()
class SearchView(generic.ListView):
    """Simple people search over username / first name / last name."""

    context_object_name = 'people_list'
    template_name = 'search.html'

    def get_queryset(self):
        self.q = self.request.GET.get('q', '')
        # Words of 1-2 characters are ignored by search_people(); collect
        # them so the template can warn the user.
        self.has_badwords = [
            w.strip() for w in self.q.split() if len(w.strip()) in (1, 2)
        ]
        return self.search_people() if self.q else []

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['q'] = self.q
        context['has_badwords'] = self.has_badwords
        return context

    def search_people(self):
        words = [w.strip() for w in self.q.split() if len(w.strip()) > 2]
        if not words:
            return []
        # AND together one clause per word, each ORing the three name fields.
        terms = [
            Q(user__username__icontains=word) |
            Q(user__first_name__icontains=word) |
            Q(user__last_name__icontains=word)
            for word in words
        ]
        combined = functools.reduce(operator.and_, terms)
        return DjangoPerson.objects.filter(combined).select_related().distinct()


search = SearchView.as_view()
class IRCActiveView(generic.ListView):
    """People seen on IRC within the last hour, most recent first."""

    context_object_name = 'people_list'
    template_name = 'irc_active.html'

    def get_queryset(self):
        results = DjangoPerson.objects.filter(
            last_active_on_irc__gt=(timezone.now() -
                                    datetime.timedelta(hours=1))
        ).order_by('-last_active_on_irc')
        # Filter out the people who don't want to be tracked (inefficient:
        # runs in Python over the whole hour's worth of rows).
        return [r for r in results if r.irc_tracking_allowed()]


irc_active = IRCActiveView.as_view()
class RequestFormMixin(object):
    """Pass the current request into the form's constructor; forms used with
    this mixin must accept a `request` keyword argument."""

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
class DeletionRequestView(RequestFormMixin, generic.FormView):
    """Step 1 of account deletion: the user requests a confirmation link."""

    form_class = DeletionRequestForm
    template_name = 'delete_account_request.html'

    def form_valid(self, form):
        form.save()
        next_url = reverse('delete_account_next',
                           args=[self.request.user.username])
        return redirect(next_url)


delete_account_request = must_be_owner(DeletionRequestView.as_view())
class DeletionNext(generic.TemplateView):
    """Step 2 of account deletion: static page shown after the request."""

    template_name = 'delete_account_next.html'


delete_account_next = must_be_owner(DeletionNext.as_view())
class AccountDeletionView(RequestFormMixin, generic.FormView):
    """Step 3: final confirmation, reached via a signed, time-limited key."""

    form_class = AccountDeletionForm
    template_name = 'delete_account.html'

    def dispatch(self, request, *args, **kwargs):
        try:
            # Key is valid for one hour and bound to the 'delete_account'
            # salt.
            self.key = signing.loads(kwargs['key'], max_age=3600,
                                     salt='delete_account')
        # SignatureExpired subclasses BadSignature, so the expired case must
        # be caught first to get the friendlier redirect.
        except signing.SignatureExpired:
            return redirect(reverse('delete_account_request',
                                    args=[request.user.username]))
        except signing.BadSignature:
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        form.save()
        return redirect(reverse('delete_account_done',
                                args=[self.request.user.username]))

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # Re-expose the key so the form can POST back to this same URL.
        ctx['key'] = self.kwargs['key']
        return ctx


delete_account = must_be_owner(AccountDeletionView.as_view())
class DeletionDone(generic.TemplateView):
    """Final page; only reachable once the account no longer exists."""

    template_name = 'delete_account_done.html'

    def dispatch(self, request, *args, **kwargs):
        # 404 if the username still exists -- deletion did not happen.
        if User.objects.filter(username=kwargs['username']).exists():
            raise Http404
        return super().dispatch(request, *args, **kwargs)


delete_account_done = DeletionDone.as_view()
def geonames(request):
    """Proxy the GeoNames findNearbyPlaceNameJSON endpoint.

    The client's query string is forwarded (each value arrives as a list,
    which requests encodes as repeated parameters) with our API username
    appended server-side so it is never exposed to the browser.  The JSON
    body is returned verbatim.
    """
    params = dict(request.GET)
    params['username'] = settings.GEONAMES_USERNAME
    # A timeout keeps a slow or unreachable GeoNames API from hanging the
    # request worker indefinitely.
    response = requests.get('https://api.geonames.org/findNearbyPlaceNameJSON',
                            params=params, timeout=10)
    return HttpResponse(json.dumps(response.json()),
                        content_type='application/json')
|
|
"""
Objects for dealing with polynomials.
This module provides a number of objects (mostly functions) useful for
dealing with polynomials, including a `Polynomial` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with polynomial objects is in
the docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `polydomain` -- Polynomial default domain, [-1,1].
- `polyzero` -- (Coefficients of the) "zero polynomial."
- `polyone` -- (Coefficients of the) constant polynomial 1.
- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.
Arithmetic
----------
- `polyadd` -- add two polynomials.
- `polysub` -- subtract one polynomial from another.
- `polymul` -- multiply two polynomials.
- `polydiv` -- divide one polynomial by another.
- `polyval` -- evaluate a polynomial at given points.
Calculus
--------
- `polyder` -- differentiate a polynomial.
- `polyint` -- integrate a polynomial.
Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyfit` -- least-squares fit returning a polynomial.
- `polytrim` -- trim leading coefficients from a polynomial.
- `polyline` -- Given a straight line, return the equivalent polynomial
object.
Classes
-------
- `Polynomial` -- polynomial class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division
__all__ = ['polyzero', 'polyone', 'polyx', 'polydomain',
'polyline','polyadd', 'polysub', 'polymul', 'polydiv', 'polyval',
'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit',
'polytrim', 'polyroots', 'Polynomial']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
# Trimming small trailing coefficients is shared with the other polynomial
# backends via polyutils.
polytrim = pu.trimcoef

#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#

# Polynomial default domain.
polydomain = np.array([-1,1])

# Polynomial coefficients representing zero.
polyzero = np.array([0])

# Polynomial coefficients representing one.
polyone = np.array([1])

# Polynomial coefficients representing the identity x.
polyx = np.array([0,1])

#
# Polynomial series functions
#
def polyline(off, scl) :
    """
    Return the coefficient array of the linear polynomial ``off + scl*x``.

    Parameters
    ----------
    off, scl : scalars
        The "y-intercept" and "slope" of the line, respectively.

    Returns
    -------
    y : ndarray
        ``array([off, scl])``, or just ``array([off])`` when `scl` is zero
        so that the returned series is trimmed.

    See Also
    --------
    chebline

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> P.polyline(1,-1)
    array([ 1, -1])
    >>> P.polyval(1, P.polyline(1,-1)) # should be 0
    0.0

    """
    if scl == 0 :
        return np.array([off])
    return np.array([off, scl])
def polyfromroots(roots) :
    """
    Generate a polynomial with the given roots.

    Return the array of coefficients for the polynomial whose leading
    coefficient (i.e., that of the highest order term) is `1` and whose
    roots (a.k.a. "zeros") are given by *roots*. The returned array of
    coefficients is ordered from lowest order term to highest, and zeros
    of multiplicity greater than one must be included in *roots* a number
    of times equal to their multiplicity (e.g., if `2` is a root of
    multiplicity three, then [2,2,2] must be in *roots*).

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-d array of the polynomial's coefficients, ordered from low to
        high.  If all roots are real, ``out.dtype`` is a float type;
        otherwise, ``out.dtype`` is a complex type, even if all the
        coefficients in the result are real (see Examples below).

    See Also
    --------
    chebfromroots

    Notes
    -----
    What is returned are the :math:`a_i` such that:

    .. math::

        \\sum_{i=0}^{n} a_ix^i = \\prod_{i=0}^{n} (x - roots[i])

    where ``n == len(roots)``; note that this implies that `1` is always
    returned for :math:`a_n`.

    Examples
    --------
    >>> import numpy.polynomial as P
    >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x
    array([ 0., -1.,  0.,  1.])
    >>> j = complex(0,1)
    >>> P.polyfromroots((-j,j)) # complex returned, though values are real
    array([ 1.+0.j,  0.+0.j,  1.+0.j])

    """
    if len(roots) == 0 :
        # No roots: the constant polynomial 1.
        return np.ones(1)
    else :
        # trim=False keeps zero roots in place.
        [roots] = pu.as_series([roots], trim=False)
        prd = np.zeros(len(roots) + 1, dtype=roots.dtype)
        prd[-1] = 1
        # Multiply the running product by (x - roots[i]) in place: the slice
        # offset provides the shift by x, then roots[i] times the current
        # coefficients is subtracted.
        for i in range(len(roots)) :
            prd[-(i+2):-1] -= roots[i]*prd[-(i+1):]
        return prd
def polyadd(c1, c2):
    """
    Add one polynomial to another.

    Returns the trimmed coefficient series of `c1` + `c2`.  Coefficient
    sequences are ordered from lowest order term to highest, i.e., [1,2,3]
    represents the polynomial ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of polynomial coefficients ordered from low to high.

    Returns
    -------
    out : ndarray
        The coefficient array representing their sum.

    See Also
    --------
    polysub, polymul, polydiv, polypow

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> P.polyadd((1,2,3), (3,2,1))
    array([ 4.,  4.,  4.])

    """
    # as_series returns trimmed, writable copies of both inputs.
    [c1, c2] = pu.as_series([c1, c2])
    # Accumulate into whichever copy is longer.
    if len(c1) > len(c2) :
        c1[:c2.size] += c2
        return pu.trimseq(c1)
    c2[:c1.size] += c1
    return pu.trimseq(c2)
def polysub(c1, c2):
    """
    Subtract one polynomial from another.

    Returns the trimmed coefficient series of `c1` - `c2`.  Coefficient
    sequences are ordered from lowest order term to highest, i.e., [1,2,3]
    represents the polynomial ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of polynomial coefficients ordered from low to high.

    Returns
    -------
    out : ndarray
        Of coefficients representing their difference.

    See Also
    --------
    polyadd, polymul, polydiv, polypow

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> P.polysub((1,2,3), (3,2,1))
    array([-2.,  0.,  2.])

    """
    # as_series returns trimmed, writable copies of both inputs.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2) :
        c1[:c2.size] -= c2
        return pu.trimseq(c1)
    # Negate the longer array, then add the shorter one onto its low end.
    c2 = -c2
    c2[:c1.size] += c1
    return pu.trimseq(c2)
def polymul(c1, c2):
    """
    Multiply one polynomial by another.

    Returns the trimmed coefficient series of `c1` * `c2`.  Coefficient
    sequences are ordered from lowest order term to highest, e.g., [1,2,3]
    represents the polynomial ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of coefficients representing a polynomial, relative to
        the "standard" basis, ordered from lowest order term to highest.

    Returns
    -------
    out : ndarray
        Of the coefficients of their product.

    See Also
    --------
    polyadd, polysub, polydiv, polypow

    Examples
    --------
    >>> import numpy.polynomial as P
    >>> P.polymul((1,2,3), (3,2,1))
    array([  3.,   8.,  14.,   8.,   3.])

    """
    # Trimmed copies of the inputs.
    [cs1, cs2] = pu.as_series([c1, c2])
    # Polynomial multiplication is discrete convolution of the coefficients.
    return pu.trimseq(np.convolve(cs1, cs2))
def polydiv(c1, c2):
    """
    Divide one polynomial by another.

    Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
    The arguments are sequences of coefficients, from lowest order term
    to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of polynomial coefficients ordered from low to high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of coefficient series representing the quotient and remainder.

    See Also
    --------
    polyadd, polysub, polymul, polypow

    Examples
    --------
    >>> import numpy.polynomial as P
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> P.polydiv(c1,c2)
    (array([ 3.]), array([-8., -4.]))
    >>> P.polydiv(c2,c1)
    (array([ 0.33333333]), array([ 2.66666667,  1.33333333]))

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0 :
        raise ZeroDivisionError()

    len1 = len(c1)
    len2 = len(c2)
    if len2 == 1 :
        # Dividing by a constant: scale; the remainder is zero.
        return c1/c2[-1], c1[:1]*0
    elif len1 < len2 :
        # Divisor of higher degree: quotient zero, dividend is remainder.
        return c1[:1]*0, c1
    else :
        dlen = len1 - len2
        scl = c2[-1]
        # Normalise the divisor to monic form and drop the leading 1.
        c2 = c2[:-1]/scl
        i = dlen
        j = len1 - 1
        # Synthetic division performed in place on c1: after the loop
        # c1[j+1:] holds the (still scaled) quotient and c1[:j+1] the
        # remainder.  The statement order inside the loop matters.
        while i >= 0 :
            c1[i:j] -= c2*c1[j]
            i -= 1
            j -= 1
        return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(cs, pow, maxpower=None) :
    """Raise a polynomial to a power.

    Returns the polynomial `cs` raised to the power `pow`. The argument
    `cs` is a sequence of coefficients ordered from low to high. i.e.,
    [1,2,3] is the series ``1 + 2*x + 3*x**2.``

    Parameters
    ----------
    cs : array_like
        1d array of polynomial coefficients ordered from low to high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Polynomial series of `cs` raised to the power `pow`.

    See Also
    --------
    polyadd, polysub, polymul, polydiv

    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    power = int(pow)
    if power != pow or power < 0 :
        raise ValueError("Power must be a non-negative integer.")
    elif maxpower is not None and power > maxpower :
        raise ValueError("Power is too large")
    elif power == 0 :
        # Anything to the zeroth power is 1, preserving the input dtype.
        return np.array([1], dtype=cs.dtype)
    elif power == 1 :
        return cs
    else :
        # This can be made more efficient by using powers of two
        # in the usual way.
        prd = cs
        for i in range(2, power + 1) :
            prd = np.convolve(prd, cs)
        return prd
def polyder(cs, m=1, scl=1):
    """
    Differentiate a polynomial.

    Returns the polynomial `cs` differentiated `m` times.  At each
    iteration the result is multiplied by `scl` (the scaling factor is for
    use in a linear change of variable).  The argument `cs` is the sequence
    of coefficients from lowest order term to highest, e.g., [1,2,3]
    represents the polynomial ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    cs : array_like
        1-d array of polynomial coefficients ordered from low to high.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change
        of variable. (Default: 1)

    Returns
    -------
    der : ndarray
        Polynomial of the derivative.

    See Also
    --------
    polyint

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> cs = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
    >>> P.polyder(cs) # (d/dx)(cs) = 2 + 6x + 12x**2
    array([  2.,   6.,  12.])
    >>> P.polyder(cs,3) # (d**3/dx**3)(cs) = 24
    array([ 24.])
    >>> P.polyder(cs,scl=-1) # (d/d(-x))(cs) = -2 - 6x - 12x**2
    array([ -2.,  -6., -12.])
    >>> P.polyder(cs,2,-1) # (d**2/d(-x)**2)(cs) = 6 + 24x
    array([  6.,  24.])

    """
    cnt = int(m)
    # NOTE: raise with call syntax is valid under both Python 2 and 3; the
    # previous "raise E, msg" form is Python-2-only.
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if not np.isscalar(scl):
        raise ValueError("The scl parameter must be a scalar")

    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    if cnt == 0:
        return cs
    elif cnt >= len(cs):
        # Differentiating more times than the degree leaves zero.
        return cs[:1]*0
    else :
        n = len(cs)
        d = np.arange(n)*scl
        for i in range(cnt):
            cs[i:] *= d[:n-i]
        # i is cnt-1 after the loop; drop the now-meaningless low terms.
        return cs[i+1:].copy()
def polyint(cs, m=1, k=[], lbnd=0, scl=1):
    """
    Integrate a polynomial.

    Returns the polynomial `cs`, integrated `m` times from `lbnd` to `x`.
    At each iteration the resulting series is **multiplied** by `scl` and
    an integration constant, `k`, is added.  The scaling factor is for use
    in a linear change of variable. ("Buyer beware": note that, depending
    on what one is doing, one may want `scl` to be the reciprocal of what
    one might expect; for more information, see the Notes section below.)
    The argument `cs` is a sequence of coefficients, from lowest order
    term to highest, e.g., [1,2,3] represents the polynomial
    ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    cs : array_like
        1-d array of polynomial coefficients, ordered from low to high.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at zero
        is the first value in the list, the value of the second integral
        at zero is the second value, etc.  If ``k == []`` (the default),
        all constants are set to zero.  If ``m == 1``, a single scalar can
        be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)

    Returns
    -------
    S : ndarray
        Coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    polyder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`
    - perhaps not what one would have first thought.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> cs = (1,2,3)
    >>> P.polyint(cs) # should return array([0, 1, 1, 1])
    array([ 0.,  1.,  1.,  1.])
    >>> P.polyint(cs,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
    array([ 0.        ,  0.        ,  0.        ,  0.16666667,  0.08333333,
            0.05      ])
    >>> P.polyint(cs,k=3) # should return array([3, 1, 1, 1])
    array([ 3.,  1.,  1.,  1.])
    >>> P.polyint(cs,lbnd=-2) # should return array([6, 1, 1, 1])
    array([ 6.,  1.,  1.,  1.])
    >>> P.polyint(cs,scl=-2) # should return array([0, -2, -2, -2])
    array([ 0., -2., -2., -2.])

    """
    cnt = int(m)
    if np.isscalar(k) :
        k = [k]

    # NOTE: raise with call syntax is valid under both Python 2 and 3; the
    # previous "raise E, msg" form is Python-2-only.
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0 :
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt :
        raise ValueError("Too many integration constants")
    if not np.isscalar(lbnd) :
        raise ValueError("The lbnd parameter must be a scalar")
    if not np.isscalar(scl) :
        raise ValueError("The scl parameter must be a scalar")

    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    if cnt == 0:
        return cs
    else:
        # Pad the integration constants out to one per integration.
        k = list(k) + [0]*(cnt - len(k))
        fac = np.arange(1, len(cs) + cnt)/scl
        ret = np.zeros(len(cs) + cnt, dtype=cs.dtype)
        ret[cnt:] = cs
        for i in range(cnt) :
            # One antiderivative step: divide by the new exponents, then
            # choose the constant term so the integral equals k[i] at lbnd.
            ret[cnt - i:] /= fac[:len(cs) + i]
            ret[cnt - i - 1] += k[i] - polyval(lbnd, ret[cnt - i - 1:])
        return ret
def polyval(x, cs):
    """
    Evaluate a polynomial at points `x`.

    If `cs` is of length `n`, this function returns
    ``p(x) = cs[0] + cs[1]*x + ... + cs[n-1]*x**(n-1)``.  If `x` is a
    sequence or array then ``p(x)`` has the same shape as `x`.  If `x` is a
    ring_like object that supports multiplication and addition by the
    values in `cs`, then an object of the same type is returned.

    Parameters
    ----------
    x : array_like, ring_like
        Lists and tuples are converted to ndarray; otherwise `x` only needs
        to support addition and multiplication with itself and with the
        elements of `cs`.
    cs : array_like
        1-d array of polynomial coefficients ordered from low to high.

    Returns
    -------
    values : ndarray
        The return array has the same shape as `x`.

    See Also
    --------
    polyfit

    Notes
    -----
    The evaluation uses Horner's method.

    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    if isinstance(x, (tuple, list)) :
        x = np.asarray(x)
    # Horner: seed with the leading coefficient (broadcast to x's shape),
    # then fold in the remaining coefficients from high to low.
    c0 = cs[-1] + x*0
    for c in cs[-2::-1] :
        c0 = c + c0*x
    return c0
def polyvander(x, deg) :
    """Vandermonde matrix of given degree.

    Returns the Vandermonde matrix of degree `deg` and sample points `x`.
    This isn't a true Vandermonde matrix because `x` can be an arbitrary
    ndarray.  If ``V`` is the returned matrix and `x` is a 2d array, then
    the elements of ``V`` are ``V[i,j,k] = x[i,j]**k``.

    Parameters
    ----------
    x : array_like
        Array of points. The values are converted to double or complex
        doubles.
    deg : integer
        Degree of the resulting matrix.

    Returns
    -------
    vander : Vandermonde matrix.
        The shape of the returned matrix is ``x.shape + (deg+1,)``.  The
        last index is the degree.

    """
    # Promote to at least double precision (or complex double).
    x = np.asarray(x) + 0.0
    order = int(deg) + 1
    v = np.ones(x.shape + (order,), dtype=x.dtype)
    # Build successive powers along the last axis: v[..., k] = x**k.
    for k in range(1, order) :
        v[..., k] = v[..., k-1]*x
    return v
def polyfit(x, y, deg, rcond=None, full=False):
    """
    Least-squares fit of a polynomial to data.

    Fit a polynomial ``c0 + c1*x + c2*x**2 + ... + c[deg]*x**deg`` to
    points (`x`, `y`).  Returns a 1-d (if `y` is 1-d) or 2-d (if `y` is
    2-d) array of coefficients representing, from lowest order term to
    highest, the polynomial(s) which minimize the total square error.

    Parameters
    ----------
    x : array_like, shape (`M`,)
        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
    y : array_like, shape (`M`,) or (`M`, `K`)
        y-coordinates of the sample points.  Several sets of sample points
        sharing the same x-coordinates can be (independently) fit with one
        call to `polyfit` by passing in for `y` a 2-d array that contains
        one data set per column.
    deg : int
        Degree of the polynomial(s) to be fit.
    rcond : float, optional
        Relative condition number of the fit.  Singular values smaller
        than `rcond`, relative to the largest singular value, will be
        ignored.  The default value is ``len(x)*eps``, where `eps` is the
        relative precision of the platform's float type, about 2e-16 in
        most cases.
    full : bool, optional
        Switch determining the nature of the return value.  When ``False``
        (the default) just the coefficients are returned; when ``True``,
        diagnostic information from the singular value decomposition (used
        to solve the fit's matrix equation) is also returned.

    Returns
    -------
    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
        Polynomial coefficients ordered from low to high.  If `y` was 2-d,
        the coefficients in column `k` of `coef` represent the polynomial
        fit to the data in `y`'s `k`-th column.
    [residuals, rank, singular_values, rcond] : present when `full` == True
        Sum of the squared residuals (SSR) of the least-squares fit; the
        effective rank of the scaled Vandermonde matrix; its singular
        values; and the specified value of `rcond`.  For more information,
        see `linalg.lstsq`.

    Raises
    ------
    RankWarning
        Raised if the matrix in the least-squares fit is rank deficient.
        The warning is only raised if `full` == False.  The warnings can
        be turned off by:

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    polyval : Evaluates a polynomial.
    polyvander : Vandermonde matrix for powers.
    chebfit : least squares fit using Chebyshev series.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solutions are the coefficients ``c[i]`` of the polynomial ``P(x)``
    that minimizes the total squared error:

    .. math :: E = \\sum_j (y_j - P(x_j))^2

    This problem is solved by setting up the (typically) over-determined
    matrix equation ``V(x)*c = y``, where `V` is the Vandermonde matrix of
    `x`, and solving it using the singular value decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected (and `full` == ``False``), a `RankWarning` will be raised.
    This means that the coefficient values may be poorly determined.
    Fitting to a lower order polynomial will usually get rid of the
    warning; the `rcond` parameter can also be set to a value smaller than
    its default, but the resulting fit may be spurious.  Polynomial fits
    using double precision tend to "fail" at about (polynomial) degree 20;
    fits using Chebyshev series are generally better conditioned.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
    >>> y = x**3 - x
    >>> c, stats = P.polyfit(x,y,3,full=True)
    >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
    array([ -1.73362882e-17,  -1.00000000e+00,  -2.67471909e-16,
             1.00000000e+00])

    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # check arguments.  NOTE: raise with call syntax is valid under both
    # Python 2 and 3; the previous "raise E, msg" form is Python-2-only.
    if deg < 0 :
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2 :
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0] :
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None :
        rcond = len(x)*np.finfo(x.dtype).eps

    # set up the design matrix and solve the least squares equation
    A = polyvander(x, deg)
    # Scale the columns to unit norm to improve conditioning, then undo the
    # scaling on the solution.
    scl = np.sqrt((A*A).sum(0))
    c, resids, rank, s = la.lstsq(A/scl, y, rcond)
    c = (c.T/scl).T

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)

    if full :
        return c, [resids, rank, s, rcond]
    else :
        return c
def polyroots(cs):
    """
    Compute the roots of a polynomial.

    Return the roots (a.k.a. "zeros") of the "polynomial" `cs`, whose
    coefficients are ordered from lowest order term to highest (e.g.,
    [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``).

    Parameters
    ----------
    cs : array_like of shape (M,)
        1-d array of polynomial coefficients ordered from low to high.

    Returns
    -------
    out : ndarray
        Sorted array of the roots of the polynomial. If all the roots are
        real, then so is the dtype of ``out``; otherwise, ``out``'s dtype
        is complex.

    See Also
    --------
    chebroots

    Examples
    --------
    >>> import numpy.polynomial as P
    >>> P.polyroots(P.polyfromroots((-1,0,1)))
    array([-1.,  0.,  1.])
    >>> P.polyroots(P.polyfromroots((-1,0,1))).dtype
    dtype('float64')
    >>> j = complex(0,1)
    >>> P.polyroots(P.polyfromroots((-j,0,j)))
    array([  0.00000000e+00+0.j,   0.00000000e+00+1.j,   2.77555756e-17-1.j])

    """
    # Normalize and trim the coefficient array (as_series also validates).
    [cs] = pu.as_series([cs])
    deg = len(cs) - 1
    # A constant (degree 0) polynomial has no roots.
    if deg < 1:
        return np.array([], dtype=cs.dtype)
    # Degree 1: solve the linear equation directly.
    if deg == 1:
        return np.array([-cs[0]/cs[1]])
    # General case: the roots are the eigenvalues of the companion matrix
    # of the monic form of the polynomial.
    cmat = np.zeros((deg, deg), dtype=cs.dtype)
    cmat.flat[deg::deg + 1] = 1            # ones on the subdiagonal
    cmat[:, -1] -= cs[:-1]/cs[-1]          # last column from monic coefficients
    roots = la.eigvals(cmat)
    roots.sort()
    return roots
#
# polynomial class
#
# Python 2 'exec' statement: expands the shared string template to define
# the module-level Polynomial class (nickname 'poly', default domain [-1,1]).
exec polytemplate.substitute(name='Polynomial', nick='poly', domain='[-1,1]')
|
|
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import logging
import os
import subprocess
import unittest
import environment
import utils
import tablet
# single shard / 2 tablets
shard_0_master = tablet.Tablet()
shard_0_slave = tablet.Tablet()

# Directory under the test temp root holding all generated certs/configs.
cert_dir = environment.tmproot + '/certs'
def openssl(cmd):
  """Run an openssl subcommand, failing the test on a nonzero exit code."""
  argv = ['openssl'] + cmd
  exit_code = subprocess.call(argv, stderr=utils.devnull)
  if exit_code != 0:
    raise utils.TestError('OpenSSL command failed: %s' % ' '.join(cmd))
def setUpModule():
  """Create SSL certificates and bring up the MySQL master/slave pair.

  Builds a throwaway PKI under cert_dir (a self-signed CA, plus a server
  and a client certificate signed by it), writes an extra my.cnf enabling
  SSL in mysqld, then initializes both tablets' MySQL instances with it
  and creates the test keyspace. On any failure the module is torn down
  before re-raising so no processes or temp files are leaked.
  """
  try:
    environment.topo_server().setup()

    logging.debug('Creating certificates')
    os.makedirs(cert_dir)

    # Create CA certificate
    ca_key = cert_dir + '/ca-key.pem'
    ca_cert = cert_dir + '/ca-cert.pem'
    # Consistency fix: reuse ca_key instead of rebuilding the same path.
    openssl(['genrsa', '-out', ca_key])
    ca_config = cert_dir + '/ca.config'
    # 'with' guarantees the config file is flushed and closed before
    # openssl reads it, even if write() raises.
    with open(ca_config, 'w') as fd:
      fd.write("""
[ req ]
 default_bits = 1024
 default_keyfile = keyfile.pem
 distinguished_name = req_distinguished_name
 attributes = req_attributes
 prompt = no
 output_password = mypass
[ req_distinguished_name ]
 C = US
 ST = California
 L = Mountain View
 O = Google
 OU = Vitess
 CN = Mysql CA
 emailAddress = test@email.address
[ req_attributes ]
 challengePassword = A challenge password
""")
    openssl(['req', '-new', '-x509', '-nodes', '-days', '3600', '-batch',
             '-config', ca_config,
             '-key', ca_key,
             '-out', ca_cert])

    # Create mysql server certificate, remove passphrase, and sign it
    server_key = cert_dir + '/server-key.pem'
    server_cert = cert_dir + '/server-cert.pem'
    server_req = cert_dir + '/server-req.pem'
    server_config = cert_dir + '/server.config'
    with open(server_config, 'w') as fd:
      fd.write("""
[ req ]
 default_bits = 1024
 default_keyfile = keyfile.pem
 distinguished_name = req_distinguished_name
 attributes = req_attributes
 prompt = no
 output_password = mypass
[ req_distinguished_name ]
 C = US
 ST = California
 L = Mountain View
 O = Google
 OU = Vitess
 CN = Mysql Server
 emailAddress = test@email.address
[ req_attributes ]
 challengePassword = A challenge password
""")
    openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch',
             '-config', server_config,
             '-keyout', server_key, '-out', server_req])
    # Strip the passphrase from the key so mysqld can load it unattended.
    openssl(['rsa', '-in', server_key, '-out', server_key])
    openssl(['x509', '-req',
             '-in', server_req,
             '-days', '3600',
             '-CA', ca_cert,
             '-CAkey', ca_key,
             '-set_serial', '01',
             '-out', server_cert])

    # Create mysql client certificate, remove passphrase, and sign it
    client_key = cert_dir + '/client-key.pem'
    client_cert = cert_dir + '/client-cert.pem'
    client_req = cert_dir + '/client-req.pem'
    client_config = cert_dir + '/client.config'
    with open(client_config, 'w') as fd:
      fd.write("""
[ req ]
 default_bits = 1024
 default_keyfile = keyfile.pem
 distinguished_name = req_distinguished_name
 attributes = req_attributes
 prompt = no
 output_password = mypass
[ req_distinguished_name ]
 C = US
 ST = California
 L = Mountain View
 O = Google
 OU = Vitess
 CN = Mysql Client
 emailAddress = test@email.address
[ req_attributes ]
 challengePassword = A challenge password
""")
    openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch',
             '-config', client_config,
             '-keyout', client_key, '-out', client_req])
    openssl(['rsa', '-in', client_key, '-out', client_key])
    openssl(['x509', '-req',
             '-in', client_req,
             '-days', '3600',
             '-CA', ca_cert,
             '-CAkey', ca_key,
             '-set_serial', '02',
             '-out', client_cert])

    # Point mysqld at the server-side certificate material.
    extra_my_cnf = cert_dir + '/secure.cnf'
    # Fix: the original leaked the handle if a write failed; 'with'
    # always closes it.
    with open(extra_my_cnf, 'w') as fd:
      fd.write('ssl-ca=' + ca_cert + '\n')
      fd.write('ssl-cert=' + server_cert + '\n')
      fd.write('ssl-key=' + server_key + '\n')

    setup_procs = [
        shard_0_master.init_mysql(extra_my_cnf=extra_my_cnf),
        shard_0_slave.init_mysql(extra_my_cnf=extra_my_cnf),
        ]
    utils.wait_procs(setup_procs)

    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    shard_0_master.init_tablet('master', 'test_keyspace', '0')
    shard_0_slave.init_tablet('replica', 'test_keyspace', '0')

    # create databases so vttablet can start behaving normally
    shard_0_master.create_db('vt_test_keyspace')
    shard_0_slave.create_db('vt_test_keyspace')
  except:
    # Bare except is deliberate: tear everything down, then re-raise
    # whatever happened (including KeyboardInterrupt) so the failure is
    # reported.
    tearDownModule()
    raise
def tearDownModule():
  """Tear down all module state: processes first, then on-disk trees.

  Order matters: vttablets are killed before their MySQL instances are
  torn down, and the tablet directories are removed only once every
  subprocess is gone.
  """
  utils.required_teardown()
  if utils.options.skip_teardown:
    return

  shard_0_master.kill_vttablet()
  shard_0_slave.kill_vttablet()

  procs = [
      shard_0_master.teardown_mysql(),
      shard_0_slave.teardown_mysql(),
  ]
  utils.wait_procs(procs, raise_on_error=False)

  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()

  shard_0_master.remove_tree()
  shard_0_slave.remove_tree()
class TestSecure(unittest.TestCase):
  """This test makes sure that we can use SSL replication with Vitess.
  """

  def test_secure(self):
    # start the tablets
    shard_0_master.start_vttablet()
    # The slave replicates from the master over SSL using the client
    # certificate generated in setUpModule; it stays NOT_SERVING until
    # the shard is initialized below.
    shard_0_slave.start_vttablet(wait_for_state='NOT_SERVING',
                                 repl_extra_flags={
                                   'flags': '2048',
                                   'ssl-ca': cert_dir + '/ca-cert.pem',
                                   'ssl-cert': cert_dir + '/client-cert.pem',
                                   'ssl-key': cert_dir + '/client-key.pem',
                                 })

    # Reparent using SSL (this will also check replication works)
    for t in [shard_0_master, shard_0_slave]:
      t.reset_replication()
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     shard_0_master.tablet_alias], auto_log=True)
# Standard test entry point: utils.main() parses the shared test flags and
# runs the unittest cases defined above.
if __name__ == '__main__':
  utils.main()
|
|
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import random
import socket
import string
import time
from cloudify import exceptions as cfy_exc
from cloudify import mocks as cfy_mocks
from server_plugin import server
from server_plugin import volume
from tests.integration import TestCase
from cloudify.mocks import MockCloudifyContext
from server_plugin.server import VCLOUD_VAPP_NAME
RANDOM_PREFIX_LENGTH = 5
class ServerNoNetworkTestCase(TestCase):
    """Integration tests for the vCloud server plugin without networking.

    Runs against a real vCloud endpoint (via self.vca_client from the base
    TestCase); each test builds a mocked Cloudify context with a uniquely
    named server so runs do not collide.
    """
    def setUp(self):
        super(ServerNoNetworkTestCase, self).setUp()
        # Random suffix keeps resource names unique across test runs.
        chars = string.ascii_uppercase + string.digits
        self.name_prefix = ('plugin_test_{0}_'
                            .format(''.join(
                                random.choice(chars)
                                for _ in range(RANDOM_PREFIX_LENGTH)))
                            )
        server_test_dict = self.test_config['server']
        name = self.name_prefix + 'server'
        self.ctx = cfy_mocks.MockCloudifyContext(
            node_id=name,
            node_name=name,
            properties={
                'server':
                {
                    'name': name,
                    'catalog': server_test_dict['catalog'],
                    'template': server_test_dict['template'],
                    'hardware': server_test_dict['hardware'],
                    'guest_customization':
                    server_test_dict.get('guest_customization')
                },
                'management_network': self.test_config['management_network'],
                'vcloud_config': self.vcloud_config
            }
        )
        # Inject both keypairs so guest customization can install them.
        self.ctx.node.properties['server']['guest_customization'][
            'public_keys'] = [self.test_config['manager_keypair'],
                              self.test_config['agent_keypair']]
        self.ctx.instance.relationships = []
        # The plugin modules read a global `ctx`; patch it for the test
        # duration (stopped via addCleanup even if setUp later fails).
        ctx_patch1 = mock.patch('server_plugin.server.ctx', self.ctx)
        ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.ctx)
        ctx_patch1.start()
        ctx_patch2.start()
        self.addCleanup(ctx_patch1.stop)
        self.addCleanup(ctx_patch2.stop)

    def tearDown(self):
        # Best-effort cleanup: the server may or may not exist depending on
        # how far the test got, so failures here are deliberately ignored.
        try:
            server.stop()
        except Exception:
            pass
        try:
            server.delete()
        except Exception:
            pass
        super(ServerNoNetworkTestCase, self).tearDown()

    def test_server_creation_validation(self):
        # Valid config must pass validation without NonRecoverableError.
        success = True
        msg = None
        try:
            server.creation_validation()
        except cfy_exc.NonRecoverableError as e:
            success = False
            # NOTE(review): e.message is Python 2 only.
            msg = e.message
        self.assertTrue(success, msg)

    def test_server_creation_validation_catalog_not_found(self):
        self.ctx.node.properties['server']['catalog'] = 'fake-catalog'
        self.assertRaises(cfy_exc.NonRecoverableError,
                          server.creation_validation)

    def test_server_creation_validation_template_not_found(self):
        self.ctx.node.properties['server']['template'] = 'fake-template'
        self.assertRaises(cfy_exc.NonRecoverableError,
                          server.creation_validation)

    def test_server_creation_validation_parameter_missing(self):
        del self.ctx.node.properties['server']['template']
        self.assertRaises(cfy_exc.NonRecoverableError,
                          server.creation_validation)

    def test_server_create_delete(self):
        # create() + configure() should leave the vApp present but powered off.
        server.create()
        server.configure()
        vdc = self.vca_client.get_vdc(self.vcloud_config['org'])
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertFalse(vapp is None)
        self.assertFalse(server._vapp_is_on(vapp))
        self.check_hardware(vapp)
        server.delete()
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertTrue(vapp is None)

    def test_server_stop_start(self):
        # Exercise the full power cycle: off -> on -> off -> on.
        server.create()
        vdc = self.vca_client.get_vdc(self.vcloud_config['org'])
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertFalse(vapp is None)
        self.assertFalse(server._vapp_is_on(vapp))
        self._run_with_retry(server.start, self.ctx)
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertTrue(server._vapp_is_on(vapp))
        server.stop()
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertFalse(server._vapp_is_on(vapp))
        self._run_with_retry(server.start, self.ctx)
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertTrue(server._vapp_is_on(vapp))

    def check_hardware(self, vapp):
        # Verify requested cpu/memory made it onto the first VM of the vApp.
        data = vapp.get_vms_details()[0]
        hardware = self.test_config['server']['hardware']
        if hardware:
            self.assertEqual(data['cpus'], hardware['cpu'])
            # vApp reports memory in GB-ish units; config is in MB.
            # TODO confirm the unit convention against vca_client docs.
            self.assertEqual(data['memory'] * 1024, hardware['memory'])
class ServerWithNetworkTestCase(TestCase):
    """Integration tests for server creation with port/network relationships.

    Builds mocked relationship targets (a port node and a network node) and
    verifies the created VM ends up connected to the management network.
    """
    def setUp(self):
        super(ServerWithNetworkTestCase, self).setUp()
        # Random suffix keeps resource names unique across test runs.
        chars = string.ascii_uppercase + string.digits
        self.name_prefix = ('plugin_test_{0}_'
                            .format(''.join(
                                random.choice(chars)
                                for _ in range(RANDOM_PREFIX_LENGTH)))
                            )
        server_test_dict = self.test_config['server']
        name = self.name_prefix + 'server'
        self.network_name = self.test_config['management_network']
        # Fake 'port' node: connects the server to the network via a port.
        port_node_context = cfy_mocks.MockNodeContext(
            properties={
                'port':
                {
                    'network': self.network_name,
                    'ip_allocation_mode': 'pool',
                    'primary_interface': True
                }
            }
        )
        # Fake 'network' node: connects the server to the network directly.
        network_node_context = cfy_mocks.MockNodeContext(
            properties={
                'network':
                {
                    'name': self.network_name
                }
            }
        )
        self.port_relationship = mock.Mock()
        self.port_relationship.target = mock.Mock()
        self.port_relationship.target.node = port_node_context
        self.network_relationship = mock.Mock()
        self.network_relationship.target = mock.Mock()
        self.network_relationship.target.node = network_node_context
        self.properties = {
            'server':
            {
                'name': name,
                'catalog': server_test_dict['catalog'],
                'template': server_test_dict['template']
            },
            'management_network': self.network_name,
            'vcloud_config': self.vcloud_config
        }
        self.ctx = cfy_mocks.MockCloudifyContext(
            node_id=name,
            node_name=name,
            properties=self.properties
        )
        self.ctx.instance.relationships = []
        # The plugin modules read a global `ctx`; patch it for the test.
        ctx_patch1 = mock.patch('server_plugin.server.ctx', self.ctx)
        ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.ctx)
        ctx_patch1.start()
        ctx_patch2.start()
        self.addCleanup(ctx_patch1.stop)
        self.addCleanup(ctx_patch2.stop)

    def tearDown(self):
        # Best-effort cleanup; the server may not exist if the test failed.
        try:
            server.stop()
        except Exception:
            pass
        try:
            server.delete()
        except Exception:
            pass
        super(ServerWithNetworkTestCase, self).tearDown()

    def test_create_with_port_connection(self):
        self.ctx.instance.relationships = [self.port_relationship]
        self._create_test()

    def test_create_with_network_connection(self):
        self.ctx.instance.relationships = [self.network_relationship]
        self._create_test()

    def test_create_without_connections(self):
        # With no relationships the server still joins management_network.
        self.ctx.instance.relationships = []
        self._create_test()

    def _create_test(self):
        # Shared body: create + start, then assert exactly one NIC on the
        # management network.
        server.create()
        self._run_with_retry(server.start, self.ctx)
        vdc = self.vca_client.get_vdc(self.vcloud_config['org'])
        vapp = self.vca_client.get_vapp(
            vdc,
            self.ctx.node.properties['server']['name'])
        self.assertFalse(vapp is None)
        networks = server._get_vm_network_connections(vapp)
        self.assertEqual(1, len(networks))
        self.assertEqual(self.network_name, networks[0]['network_name'])

    def test_get_state(self):
        # _get_state polls until the VM reports an IP; retry a few times.
        num_tries = 5
        verified = False
        server.create()
        self._run_with_retry(server.start, self.ctx)
        for _ in range(num_tries):
            result = server._get_state(self.vca_client)
            if result is True:
                self.assertTrue('ip' in self.ctx.instance.runtime_properties)
                self.assertTrue('networks'
                                in self.ctx.instance.runtime_properties)
                self.assertEqual(1,
                                 len(self.ctx.instance.
                                     runtime_properties['networks'].keys()))
                # NOTE(review): indexing dict.keys() is Python 2 only; under
                # Python 3 this would need list(...)[0].
                self.assertEqual(self.network_name,
                                 self.ctx.instance.
                                 runtime_properties['networks'].keys()[0])
                # inet_aton doubles as an IPv4 syntax check.
                ip_valid = True
                try:
                    socket.inet_aton(
                        self.ctx.instance.runtime_properties['ip'])
                except socket.error:
                    ip_valid = False
                self.assertTrue(ip_valid)
                verified = True
                break
            time.sleep(2)
        self.assertTrue(verified)
class VolumeTestCase(TestCase):
    """Integration tests for the vCloud volume plugin.

    Uses two mocked contexts: a node context for create/delete operations
    and a relationship context (volume source, VM target) for
    attach/detach operations.
    """
    def setUp(self):
        super(VolumeTestCase, self).setUp()
        self.volume_test_dict = self.test_config['volume']
        name = 'volume'
        self.properties = {
            'volume':
            {
                'name': self.volume_test_dict['name'],
                'size': self.volume_test_dict['size']
            },
            'use_external_resource': True,
            'resource_id': self.volume_test_dict['name_exists'],
            'vcloud_config': self.vcloud_config
        }
        # Target of the attach relationship: an existing VM from the config.
        self.target = MockCloudifyContext(
            node_id="target",
            properties={'vcloud_config': self.vcloud_config},
            runtime_properties={
                VCLOUD_VAPP_NAME: self.test_config['test_vm']
            }
        )
        self.source = MockCloudifyContext(
            node_id="source", properties=self.properties
        )
        self.nodectx = cfy_mocks.MockCloudifyContext(
            node_id=name,
            node_name=name,
            properties=self.properties
        )
        self.relationctx = cfy_mocks.MockCloudifyContext(
            node_id=name,
            node_name=name,
            target=self.target,
            source=self.source
        )
        self.ctx = self.nodectx
        # Node-level operations run against the node context by default.
        ctx_patch1 = mock.patch('server_plugin.volume.ctx', self.nodectx)
        ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.nodectx)
        ctx_patch1.start()
        ctx_patch2.start()
        self.addCleanup(ctx_patch1.stop)
        self.addCleanup(ctx_patch2.stop)

    def test_volume(self):
        # NOTE(review): PEP 8 prefers `def disks_count(): ...` over a
        # lambda assignment; kept as-is here (comment-only review).
        disks_count = lambda: len(
            self.vca_client.get_disks(self.vcloud_config['vdc']))
        volume.creation_validation()
        disks_before = disks_count()
        volume.create_volume()
        # With use_external_resource the disk already exists, so the count
        # must not change; otherwise create_volume adds exactly one disk.
        if self.relationctx.source.node.properties['use_external_resource']:
            self.assertEqual(disks_before, disks_count())
        else:
            self.assertEqual(disks_before + 1, disks_count())
        self._attach_detach()
        volume.delete_volume()
        self.assertEqual(disks_before, disks_count())

    def _attach_detach(self):
        def links_count():
            # Number of VM attachments of our disk; get_disks returns
            # (disk, attached_vms) pairs — TODO confirm against vca_client.
            node_properties = self.relationctx.source.node.properties
            if node_properties['use_external_resource']:
                return [
                    len(d[1]) for d in self.vca_client.get_disks(
                        self.vcloud_config['vdc']
                    ) if d[0].name == node_properties['resource_id']
                ][0]
            else:
                return [
                    len(d[1]) for d in self.vca_client.get_disks(
                        self.vcloud_config['vdc']
                    ) if d[0].name == node_properties['volume']['name']
                ][0]
        # Attach/detach are relationship operations: swap in the
        # relationship context for their duration.
        with mock.patch('server_plugin.volume.ctx', self.relationctx):
            links_before = links_count()
            volume.attach_volume()
            self.assertEqual(links_before + 1, links_count())
            volume.detach_volume()
            self.assertEqual(links_before, links_count())
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/flort_dj_cspp.py
@author Jeremy Amundson
@brief Parser for the flort_dj_cspp dataset driver
Release notes:
initial release
"""
__author__ = 'Jeremy Amundson'
__license__ = 'Apache 2.0'
import numpy
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
import re
from mi.core.instrument.data_particle import DataParticle
from mi.dataset.parser.common_regexes import INT_REGEX, FLOAT_REGEX, MULTIPLE_TAB_REGEX, END_OF_LINE_REGEX
from mi.dataset.parser.cspp_base import \
CsppParser, \
CsppMetadataDataParticle, \
MetadataRawDataKey, \
Y_OR_N_REGEX, encode_y_or_n
# A regex to match a date in MM/DD/YY format
FORMATTED_DATE_REGEX = r'\d{2}/\d{2}/\d{2}'

# A regex to match a time stamp in HH:MM:SS format
TIME_REGEX = r'\d{2}:\d{2}:\d{2}'

# A regular expression that should match a flort_dj data record.
# Twelve capture groups; their 1-based indices must stay in sync with
# DataMatchesGroupNumber below.
DATA_REGEX = '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Profiler Timestamp
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Depth
DATA_REGEX += '(' + Y_OR_N_REGEX + ')' + MULTIPLE_TAB_REGEX  # suspect timestamp
DATA_REGEX += '(' + FORMATTED_DATE_REGEX + ')' + MULTIPLE_TAB_REGEX  # date string
DATA_REGEX += '(' + TIME_REGEX + ')' + MULTIPLE_TAB_REGEX  # time string
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # measurement_wavelength_beta
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # raw_signal_beta
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # measurement_wavelength_chl
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # raw_signal_chl
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # measurement_wavelength_cdom
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # raw_signal_cdom
DATA_REGEX += '(' + INT_REGEX + ')' + END_OF_LINE_REGEX  # raw_internal_temp

# Lines matching this pattern are ignored by the parser (passed as
# ignore_matcher to the CsppParser constructor below): timestamp, depth and
# suspect flag followed by any non-tab text.
IGNORE_REGEX = FLOAT_REGEX + MULTIPLE_TAB_REGEX  # Profiler Timestamp
IGNORE_REGEX += FLOAT_REGEX + MULTIPLE_TAB_REGEX  # Depth
IGNORE_REGEX += Y_OR_N_REGEX + MULTIPLE_TAB_REGEX  # Suspect Timestamp
IGNORE_REGEX += r'[^\t]*' + END_OF_LINE_REGEX  # any text after the Suspect
IGNORE_MATCHER = re.compile(IGNORE_REGEX)
class DataMatchesGroupNumber(BaseEnum):
    """
    An enum for group match indices for a data record only chunk.

    Values are the 1-based capture-group indices of DATA_REGEX above.
    """
    PROFILER_TIMESTAMP = 1
    PRESSURE = 2
    SUSPECT_TIMESTAMP = 3
    DATE = 4
    TIME = 5
    BETA = 6
    RAW_BETA = 7
    CHLOROPHYLL = 8
    RAW_CHLOROPHYLL = 9
    CDOM = 10
    RAW_CDOM = 11
    TEMP = 12
class DataParticleType(BaseEnum):
    """
    The data particle types that a flort_dj_cspp parser could generate
    (stream names for the recovered and telemetered variants).
    """
    METADATA_RECOVERED = 'flort_dj_cspp_metadata_recovered'
    INSTRUMENT_RECOVERED = 'flort_dj_cspp_instrument_recovered'
    METADATA_TELEMETERED = 'flort_dj_cspp_metadata'
    INSTRUMENT_TELEMETERED = 'flort_dj_cspp_instrument'
class FlortDjCsppParserDataParticleKey(BaseEnum):
    """
    The data particle keys associated with flort_dj_cspp data particle
    parameters (output parameter names, one per DATA_REGEX capture group).
    """
    PROFILER_TIMESTAMP = 'profiler_timestamp'
    PRESSURE = 'pressure_depth'
    SUSPECT_TIMESTAMP = 'suspect_timestamp'
    DATE = 'date_string'
    TIME = 'time_string'
    BETA = 'measurement_wavelength_beta'
    RAW_BETA = 'raw_signal_beta'
    CHLOROPHYLL = 'measurement_wavelength_chl'
    RAW_CHLOROPHYLL = 'raw_signal_chl'
    CDOM = 'measurement_wavelength_cdom'
    RAW_CDOM = 'raw_signal_cdom'
    TEMP = 'raw_internal_temp'
# A group of instrument data particle encoding rules used to simplify encoding
# using a loop. Each entry is (particle key, DATA_REGEX capture-group index,
# encoding callable). numpy.float was just a deprecated alias of the builtin
# float (removed in numpy 1.24), so plain float is used instead.
INSTRUMENT_PARTICLE_ENCODING_RULES = [
    (FlortDjCsppParserDataParticleKey.PROFILER_TIMESTAMP, DataMatchesGroupNumber.PROFILER_TIMESTAMP, float),
    (FlortDjCsppParserDataParticleKey.PRESSURE, DataMatchesGroupNumber.PRESSURE, float),
    (FlortDjCsppParserDataParticleKey.SUSPECT_TIMESTAMP, DataMatchesGroupNumber.SUSPECT_TIMESTAMP, encode_y_or_n),
    (FlortDjCsppParserDataParticleKey.DATE, DataMatchesGroupNumber.DATE, str),
    (FlortDjCsppParserDataParticleKey.TIME, DataMatchesGroupNumber.TIME, str),
    (FlortDjCsppParserDataParticleKey.BETA, DataMatchesGroupNumber.BETA, int),
    (FlortDjCsppParserDataParticleKey.RAW_BETA, DataMatchesGroupNumber.RAW_BETA, int),
    (FlortDjCsppParserDataParticleKey.CHLOROPHYLL, DataMatchesGroupNumber.CHLOROPHYLL, int),
    (FlortDjCsppParserDataParticleKey.RAW_CHLOROPHYLL, DataMatchesGroupNumber.RAW_CHLOROPHYLL, int),
    (FlortDjCsppParserDataParticleKey.CDOM, DataMatchesGroupNumber.CDOM, int),
    (FlortDjCsppParserDataParticleKey.RAW_CDOM, DataMatchesGroupNumber.RAW_CDOM, int),
    # BUG FIX: TEMP previously read capture group RAW_CDOM (11), so the
    # raw_internal_temp output duplicated raw_signal_cdom; it must read its
    # own group, TEMP (12).
    (FlortDjCsppParserDataParticleKey.TEMP, DataMatchesGroupNumber.TEMP, int)
]
class FlortDjCsppMetadataDataParticle(CsppMetadataDataParticle):
    """
    Class for building a flort_dj_cspp metadata particle
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        an array of dictionaries defining the data in the particle
        with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        """
        results = []

        # Append the base metadata parsed values to the results to return
        results += self._build_metadata_parsed_values()

        data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]

        # Set the internal timestamp from the profiler timestamp (seconds
        # since the Unix epoch). Plain float replaces numpy.float, a
        # deprecated alias of the builtin removed in numpy 1.24; behavior
        # is identical.
        internal_timestamp_unix = float(data_match.group(
            DataMatchesGroupNumber.PROFILER_TIMESTAMP))
        self.set_internal_timestamp(unix_time=internal_timestamp_unix)

        return results
class FlortDjCsppMetadataRecoveredDataParticle(FlortDjCsppMetadataDataParticle):
    """
    Class for building a flort_dj_cspp recovered metadata particle
    (only the stream name differs from the base class).
    """

    _data_particle_type = DataParticleType.METADATA_RECOVERED
class FlortDjCsppMetadataTelemeteredDataParticle(FlortDjCsppMetadataDataParticle):
    """
    Class for building a flort_dj_cspp telemetered metadata particle
    (only the stream name differs from the base class).
    """

    _data_particle_type = DataParticleType.METADATA_TELEMETERED
class FlortDjCsppInstrumentDataParticle(DataParticle):
    """
    Class for building a flort_dj_cspp instrument data particle
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        an array of dictionaries defining the data in the particle
        with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        """
        results = []

        # Process each of the instrument particle parameters
        for (name, index, encoding) in INSTRUMENT_PARTICLE_ENCODING_RULES:
            results.append(self._encode_value(name, self.raw_data.group(index), encoding))

        # Set the internal timestamp from the profiler timestamp (seconds
        # since the Unix epoch). Plain float replaces numpy.float, a
        # deprecated alias of the builtin removed in numpy 1.24; behavior
        # is identical.
        internal_timestamp_unix = float(self.raw_data.group(
            DataMatchesGroupNumber.PROFILER_TIMESTAMP))
        self.set_internal_timestamp(unix_time=internal_timestamp_unix)

        return results
class FlortDjCsppInstrumentRecoveredDataParticle(FlortDjCsppInstrumentDataParticle):
    """
    Class for building a flort_dj_cspp recovered instrument data particle
    (only the stream name differs from the base class).
    """

    _data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class FlortDjCsppInstrumentTelemeteredDataParticle(FlortDjCsppInstrumentDataParticle):
    """
    Class for building a flort_dj_cspp telemetered instrument data particle
    (only the stream name differs from the base class).
    """

    _data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class FlortDjCsppParser(CsppParser):
    """Parser for flort_dj_cspp data; all chunking/matching logic lives in
    the CsppParser base class — this subclass only supplies the record and
    ignore patterns."""

    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        """
        This method is a constructor that will instantiate an FlortDjCsppParser object.
        @param config The configuration for this FlortDjCsppParser parser
        @param stream_handle The handle to the data stream containing the flort_dj_cspp data
        @param exception_callback The function to call to report exceptions
        """

        # Call the superclass constructor with this instrument's record
        # regex and the matcher for lines to skip.
        super(FlortDjCsppParser, self).__init__(config,
                                                stream_handle,
                                                exception_callback,
                                                DATA_REGEX,
                                                ignore_matcher=IGNORE_MATCHER)
|
|
"""A class for controling graph."""
import numpy as np
import matplotlib.pyplot as plt
from math import log10, ceil, sqrt
from more_itertools import chunked
import cPickle
from .type import is_number, float_list
class MGraph:
    """Control graphs and visualizaton.

    Every public drawing method follows the same pattern: build a
    matplotlib figure, apply optional labels/limits/legend, then hand off
    to show_and_save(), which also pickles the original call arguments
    next to the saved figure so a plot can be regenerated later.
    """

    def __init__(self):
        # Directory where figures (and their pickled source data) are saved.
        self.dir_to_save = "../data/"

    def comparison_bar(self, data, labels, legend="", metric_label="", comparison_label="", lim=[],
                       horizontal=False, title="", filename="", show_flag=True):
        '''Draw a bar graph for comparing items.

        ``data`` is either a flat sequence of numbers (one bar per label)
        or a sequence of sequences (one bar group per label). When
        ``horizontal`` is true the bars are drawn sideways.
        '''
        original_data = locals().copy()
        fig, ax = plt.subplots()
        # Bind orientation-dependent helpers once so the drawing code below
        # does not have to branch on `horizontal` everywhere.
        if horizontal:
            [set_lim1, set_lim2] = [ax.set_ylim, ax.set_xlim]
            set_label1 = ax.set_xlabel
            set_label2 = ax.set_ylabel
            set_ticks = plt.yticks
        else:
            [set_lim1, set_lim2] = [ax.set_xlim, ax.set_ylim]
            set_label1 = ax.set_ylabel
            set_label2 = ax.set_xlabel
            set_ticks = plt.xticks
        if isinstance(data[0], (int, float)):
            # Single series of plain numbers.
            Y = range(len(data))
            bar_height = 0.5
            if horizontal:
                ax.barh(Y, data, height=bar_height)
            else:
                ax.bar(Y, data, width=bar_height)
            if len(labels) == len(data):
                set_ticks([item + 0.25 for item in Y], labels)
            elif len(labels) == len(data) + 1:
                # One extra label: treat the labels as bin edges.
                pos = [p - 0.25 for p in range(len(labels) + 1)]
                set_ticks(pos, labels)
        else:
            # Multiple series: one colored bar per series in each group.
            Y = range(len(labels))
            bar_height = 1. / (len(data) + 1)
            cmap = plt.cm.rainbow
            # Spread series colors evenly across the colormap. '//' keeps
            # the original integer (floor) division semantics, and the
            # max() guard fixes a ZeroDivisionError when len(data) == 1.
            cmap_v = cmap.N // max(len(data) - 1, 1)
            for idx, d in enumerate(data):
                if horizontal:
                    ax.barh([y + bar_height * (len(data) - idx - 1) for y in Y], d,
                            height=bar_height, color=cmap(idx * cmap_v))
                else:
                    rects = ax.bar([y + bar_height * idx for y in Y], d,
                                   width=bar_height, color=cmap(idx * cmap_v))
                    # Only vertical bars get value labels; __autolabel reads
                    # rect heights, which barh does not populate usefully.
                    self.__autolabel(rects, ax)
            set_ticks([item + bar_height / 2 * len(data) for item in Y], labels)
        set_lim1([-bar_height, len(labels)])
        if lim:
            set_lim2(lim)
        if metric_label:
            set_label1(metric_label)
        if comparison_label:
            set_label2(comparison_label)
        if legend:
            plt.legend(legend, loc='upper right')
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def __autolabel(self, rects, ax):
        """Annotate each bar with its height value."""
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x(), 1.05 * height, '%0.3f' % height)

    def line_scatter(self, x_data, y_data, hl_span=None, legend="", x_label="", y_label="",
                     xlim=[], ylim=[], title="", filename="", show_flag=True):
        """Draw a scatter graph connected by lines.

        ``hl_span`` is an optional (start, end) x-interval highlighted in
        transparent red.
        """
        original_data = locals().copy()
        fig, ax = self.figure_with_side_space(0.7)
        for x, y in zip(x_data, y_data):
            # Wrap scalars so a single point still plots.
            if is_number(y):
                y = [y]
            ax.plot(x, float_list(y), '.-')
        if hl_span:
            ax.axvspan(hl_span[0], hl_span[1], alpha=0.2, color='red')
        if xlim:
            ax.set_xlim(xlim)
        if ylim:
            ax.set_ylim(ylim)
        self.set_legend(ax, legend)
        self.set_label(ax, x_label, y_label)
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def figure_with_side_space(self, space_width):
        """Return (fig, ax) leaving horizontal room (e.g. for a legend)."""
        aspect = 1. / (1. + space_width)
        fig = plt.figure(figsize=plt.figaspect(aspect))
        ax = fig.add_axes([.05, .1, aspect, .8])
        return fig, ax

    def set_legend(self, ax, legend):
        """Place the legend just outside the axes' upper-right corner."""
        if legend:
            ax.legend(legend, bbox_to_anchor=(1.02, 1.), loc='upper left',
                      borderaxespad=0, fontsize=8)

    def line_series(self, data, y_points, legend="", x_label="", y_label="", ylim=[],
                    markersize=10, title="", filename="", show_flag=True):
        """Draw a line graph of the series."""
        original_data = locals().copy()
        fig, ax = plt.subplots()
        for item in data:
            # Fix: compare with '==' rather than the identity test 'is 0',
            # which only worked by accident of CPython small-int caching.
            if markersize == 0:
                ax.plot(y_points, item, '-')
            else:
                ax.plot(y_points, item, 'o--', markersize=markersize)
        if legend:
            ax.legend(legend)
        ax.set_xlim(self.__calc_lim(y_points, 0.05))
        if ylim:
            ax.set_ylim(ylim)
        self.set_label(ax, x_label, y_label)
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def labeled_line_series(self, data, label, y_points,
                            x_label="", y_label="", ylim=[],
                            title="", filename="", show_flag=True):
        """Draw a line graph of the series, colored by binary label.

        Label-0 series are drawn in blue first, then label-1 series in red
        on top (two passes keep the red lines visible over the blue ones).
        """
        original_data = locals().copy()
        fig, ax = plt.subplots()
        for idx, item in enumerate(data):
            if label[idx] == 0:
                ax.plot(y_points, item, 'b-')
        for idx, item in enumerate(data):
            if label[idx] == 1:
                ax.plot(y_points, item, 'r-')
        ax.set_xlim(self.__calc_lim(y_points, 0.05))
        if ylim:
            ax.set_ylim(ylim)
        self.set_label(ax, x_label, y_label)
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def show_and_save(self, fig, filename, show_flag, data=None):
        """Save the figure (optionally pickling `data` beside it), then show.

        A non-empty ``filename`` is appended to ``self.dir_to_save``; the
        pickle goes to the same path with a '.pkl' suffix.
        """
        if len(filename) > 0:
            path = self.dir_to_save + filename
            fig.savefig(path)
            if data is not None:
                p_path = path + '.pkl'
                # Fix: pickle streams are binary, so open in 'wb', and use
                # 'with' so the handle is closed even if dump() raises.
                with open(p_path, 'wb') as f:
                    cPickle.dump(data, f)
        if show_flag:
            fig.show()

    def set_title(self, ax, title):
        """Set the axes title when one is given."""
        if title:
            ax.set_title(title)

    def set_label(self, ax, x_label, y_label):
        """Set whichever axis labels are non-empty."""
        if x_label:
            ax.set_xlabel(x_label)
        if y_label:
            ax.set_ylabel(y_label)

    def __calc_lim(self, values, margin_ratio):
        """Return [lo, hi] spanning `values` plus proportional margins."""
        margin = (max(values) - min(values)) * margin_ratio
        return [min(values) - margin, max(values) + margin]
class Graph(MGraph):
"""Control all the graphs and visualizations."""
def __init__(self):
"""Initializer of Graph class."""
MGraph.__init__(self)
self.limit_timeseries = 25
def visualize_image(self, data,
h_len=28, n_cols=0, filename="", show_flag=True):
"""Visualizer of image data."""
if data.ndim == 1:
v_len = data.shape[0] / h_len
if n_cols == 0:
n_cols = 1
n_rows = 1
elif data.ndim == 2:
v_len = data.shape[1] / h_len
if n_cols == 0:
n_cols = int(ceil(sqrt(data.shape[0])))
n_rows = int(ceil(float(data.shape[0]) / n_cols))
else:
raise ValueError
plt.gray()
fig, axes = plt.subplots(n_rows, n_cols)
X, Y = np.meshgrid(range(h_len), range(v_len))
for i_v in range(n_rows):
for i_h in range(n_cols):
index = i_h + i_v * n_cols
if index < data.shape[0]:
if n_rows > 1:
ax = axes[i_v, i_h]
Z = data[index].reshape(v_len, h_len)
elif n_cols > 1:
ax = axes[i_h]
Z = data[index].reshape(v_len, h_len)
else:
ax = axes
Z = data.reshape(v_len, h_len)
Z = Z[::-1, :]
ax.set_xlim(0, h_len - 1)
ax.set_ylim(0, v_len - 1)
ax.pcolor(X, Y, Z)
ax.tick_params(labelbottom='off')
ax.tick_params(labelleft='off')
MGraph.show_and_save(self, fig, filename, show_flag)
def draw_lab_adm(self, admission, title, filename="", show_flag=True):
"""Draw lab tests data of admissions."""
base_time = admission.admit_dt
data = admission.labs
plot_list = self.__get_plot_list(base_time, data)
icu_ios = [self.__time_diff_in_hour(
[icustay.intime, icustay.outtime], base_time)
for icustay in admission.icustays]
self.__draw_series_with_legend(plot_list, icu_ios,
title, filename, show_flag)
def draw_med_icu(self, icustay, base_time, title, filename="", show_flag=True):
data = icustay.medications
plot_list = self.__get_plot_list(base_time, data)
icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag, 'o')
def draw_chart_icu(self, icustay, base_time, title, filename="", show_flag=True):
data = icustay.charts
plot_list = self.__get_plot_list(base_time, data)
icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag)
def draw_selected_chart_icu(self, icustay, itemid_list, base_time, title,
filename="", show_flag=True):
selected_ids = itemid_list
data = [item for item in icustay.charts if item.itemid in selected_ids]
plot_list = self.__get_plot_list(base_time, data)
icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag)
def draw_io_icu(self, icustay, base_time, title, filename="", show_flag=True):
data = icustay.ios
plot_list = self.__get_plot_list(base_time, data)
icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag, 'o')
def draw_lab_adm_itemid(self, admission, itemids, title, filename="", show_flag=True):
    """Plot two lab items of one admission on twin y-axes.

    itemids: sequence of exactly two lab item ids; the first is drawn on
    the left axis (blue), the second on the right axis (red).  ICU stays
    are shaded in red.  X axis is hours since admission.
    """
    base_time = admission.admit_dt
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()  # second series gets its own y-axis on the right
    colors = ['b', 'r']
    axis = [ax1, ax2]
    for idx, id in enumerate(itemids):  # NOTE(review): `id` shadows the builtin
        data = admission.get_lab_itemid(id)
        time_diff = self.__time_diff_in_hour(data.timestamps, base_time)
        values = data.values
        # Square markers joined by a dashed line, colored per axis.
        axis[idx].plot(time_diff, values, "%ss--" % colors[idx])
        axis[idx].set_ylabel("%s [%s]" % (data.description, data.unit), color=colors[idx])
    ax1.set_title(title)
    ax1.set_xlabel("Hours since Admission")
    base_time = admission.admit_dt  # redundant re-assignment, kept as-is
    icu_ios = [self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
               for icustay in admission.icustays]
    for span in icu_ios:
        ax1.axvspan(span[0], span[1], alpha=0.2, color='red')
    MGraph.show_and_save(self, fig, filename, show_flag)
def draw_lab_distribution(self, expire_values, recover_values, title,
                          filename="", show_flag=True):
    """Scatter creatinine (x) vs urea nitrogen (y): expired red, recovered blue.

    NOTE(review): `title` is accepted but never drawn — confirm intent.
    """
    fig, ax = plt.subplots()
    for marker, points in (("ro", expire_values), ("bo", recover_values)):
        for point in points:
            ax.plot(point[0], point[1], marker)
    ax.set_xlabel("Creatinine [mg/dL]")
    ax.set_ylabel("Urea Nitrogen[mg/dL]")
    MGraph.show_and_save(self, fig, filename, show_flag)
def plot_classification(self, positive, negative, line, title,
                        filename="", show_flag=True, x_label="", y_label=""):
    """Scatter two labelled point sets and overlay a separating line.

    positive/negative: (n, 2) arrays; column 0 is x, column 1 is y.
    line: 4-element sequence (x0, x1, y0, y1) giving the line endpoints.
    Axis limits are padded by 5% of the data range on each side.
    NOTE(review): `title` is accepted but never drawn — confirm intent.
    """
    fig, ax = plt.subplots()
    ax.plot(positive[:, 0], positive[:, 1], 'ro')
    ax.plot(negative[:, 0], negative[:, 1], 'bo')
    ax.plot([line[0], line[1]], [line[2], line[3]])
    margin_rate = 0.05
    # Pad x limits by 5% of the combined data range.
    x_max = max(max(positive[:, 0]), max(negative[:, 0]))
    x_min = min(min(positive[:, 0]), min(negative[:, 0]))
    x_margin = (x_max - x_min) * margin_rate
    ax.set_xlim([x_min - x_margin, x_max + x_margin])
    # Same padding for y.
    y_max = max(max(positive[:, 1]), max(negative[:, 1]))
    y_min = min(min(positive[:, 1]), min(negative[:, 1]))
    y_margin = (y_max - y_min) * margin_rate
    ax.set_ylim([y_min - y_margin, y_max + y_margin])
    if len(x_label) > 0:
        ax.set_xlabel(x_label)
    if len(y_label) > 0:
        ax.set_ylabel(y_label)
    MGraph.show_and_save(self, fig, filename, show_flag)
def plot_classification_with_contour(self, x, y, xx, yy, z, x_label, y_label,
                                     filename="", show_flag=True):
    """Scatter samples x colored by label y over a filled contour of z
    evaluated on the (xx, yy) grid."""
    fig, ax = plt.subplots()
    ax.contourf(xx, yy, z, cmap=plt.cm.rainbow, alpha=0.2)
    ax.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.rainbow)
    ax.set(xlabel=x_label, ylabel=y_label,
           xlim=(xx.min(), xx.max()), ylim=(yy.min(), yy.max()))
    MGraph.show_and_save(self, fig, filename, show_flag)
def bar_feature_importance(self, entropy_reduction, labels, filename="", show_flag=True):
    """Horizontal bar chart of per-feature entropy reduction.

    Bars are laid out top-down in the given order (first feature on top).
    """
    fig, ax = plt.subplots()
    # Descending y positions so the first feature ends up at the top.
    # list(range(...))[::-1] works on both Python 2 and 3, unlike the
    # original in-place range(...).reverse(), which fails on Python 3.
    y_pos = list(range(len(entropy_reduction)))[::-1]
    ax.barh(y_pos, entropy_reduction, height=0.4)
    plt.yticks(y_pos, labels)
    ax.set_xlabel("Entropy Reduction")
    plt.tick_params(axis='both', which='both', labelsize=8)
    plt.tight_layout()
    MGraph.show_and_save(self, fig, filename, show_flag)
def bar_classification(self, l_classification_result, labels, comparison_label="",
                       title="", filename="", show_flag=True):
    """Grouped bar chart of recall/precision/f-measure per result."""
    metrics = [
        [item.recall for item in l_classification_result],
        [item.prec for item in l_classification_result],
        [item.f for item in l_classification_result],
    ]
    MGraph.comparison_bar(self, metrics, labels, ['recall', 'precision', 'f_measure'],
                          lim=[0, 1], comparison_label=comparison_label, title=title,
                          filename=filename, show_flag=show_flag)
def bar_histogram(self, hist, bin_edges, hist_label, bin_label, only_left_edge=False,
                  title="", filename="", show_flag=True):
    """Bar chart of a histogram labelled by its bin edges."""
    edge_labels = list(bin_edges)
    if only_left_edge:
        # Drop the final right edge so each bar is tagged by its left edge.
        edge_labels = edge_labels[:-1]
    MGraph.comparison_bar(self, hist, edge_labels, metric_label=hist_label,
                          comparison_label=bin_label,
                          title=title, filename=filename, show_flag=show_flag)
def series_classification(self, l_classification_result, timestamp, x_label,
                          title="", filename="", show_flag=True):
    """Line plot of recall/precision/f-measure over `timestamp`.

    NOTE(review): this reads `item.rec` while bar_classification reads
    `item.recall` from the same kind of result objects — confirm which
    attribute the result class actually defines.
    """
    l_rec = [item.rec for item in l_classification_result]
    l_prec = [item.prec for item in l_classification_result]
    l_f = [item.f for item in l_classification_result]
    legend = ['recall', 'precision', 'f_measure']
    MGraph.line_series(self, [l_rec, l_prec, l_f], timestamp, legend, ylim=[0, 1],
                       x_label=x_label, title=title, filename=filename, show_flag=show_flag)
def draw_series_data_class(self, series, n_draw_sample=0):
    """Visualize the data of a SeriesData class.

    If 0 < n_draw_sample < total samples, an evenly spaced subset of
    n_draw_sample samples is drawn instead of all of them.  One labeled
    line-series plot is produced per feature.
    NOTE(review): fig/ax are unused here; labeled_line_series appears to
    manage its own figure — confirm.
    """
    fig, ax = plt.subplots()
    y_points = range(series.n_step())
    n_sample = series.n_sample()
    if 0 < n_draw_sample < n_sample:
        # Evenly spaced sample indices across the whole sample range.
        idx_selected_sample = range(n_draw_sample - 1,
                                    n_sample,
                                    int(n_sample / n_draw_sample))
        series = series.slice_by_sample(idx_selected_sample)
    for idx_f in range(series.n_feature()):
        f_series = series.slice_by_feature(idx_f)
        MGraph.labeled_line_series(self, f_series.series.transpose(), f_series.label, y_points)
def waitforbuttonpress(self):
    """Block until a key or mouse button is pressed in the current figure."""
    plt.waitforbuttonpress()
def close_all(self):
    """Close every open matplotlib figure."""
    plt.close('all')
def normalize(self, value):
    """Scale `value` by its decimal order of magnitude.

    value : array-like of numbers supporting elementwise abs().
    Returns (normalized_value, order) where order = 10 ** int(log10(max|value|)),
    so the normalized values are O(1)-sized for plotting.
    """
    max_val = max(abs(value))
    if max_val == 0:
        # All-zero input: the original int(log10(0.0)) raised
        # OverflowError; treat the data as already normalized instead.
        return value, 1.0
    order = 10.0 ** int(log10(float(max_val)))
    n_value = value / order
    return n_value, order
def __figure_with_legend(self):
    """Build a half-aspect figure whose axes leave the right side of the
    canvas free for an outside legend."""
    fig = plt.figure(figsize=plt.figaspect(0.5))
    return fig, fig.add_axes([.05, .1, .5, .8])
def __show_legend(self, ax):
    # Anchor the legend just outside the axes' top-right corner so it does
    # not cover the plotted series; small font to fit many entries.
    ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0, prop={'size': 8})
def __time_diff_in_hour(self, time_seq, base_time):
    """Convert each datetime in time_seq to float hours since base_time."""
    hours = []
    for moment in time_seq:
        hours.append((moment - base_time).total_seconds() / 3600)
    return hours
def __get_plot_list(self, base_time, time_series):
    """Build [time_diffs, normalized_values, legend_tag] triples for each
    series; series with non-numeric values are skipped with a console
    message."""
    plot_list = []
    for item in time_series:
        try:
            time_diff = self.__time_diff_in_hour(item.timestamps, base_time)
            value = np.array([float(num) for num in item.values])
            # Scale values into an O(1) range; the scale factor is shown
            # in the legend tag so magnitudes stay readable.
            plot_val, order = self.normalize(value)
            tag = "%s [%0.1f %s]" % (item.description, order, item.unit)
            plot_list.append([time_diff, plot_val, tag])
        except ValueError:
            # Non-numeric value (e.g. a free-text result): skip the series.
            print "Can't plot %s" % item.description
    return plot_list
def __draw_series_with_legend(self, plot_list, icu_ios, title, filename, show_flag, style='-'):
    """Plot the series in chunks of self.limit_timeseries per figure,
    shading each (in, out) span of icu_ios and adding an outside legend.

    NOTE(review): `chunked` and `self.limit_timeseries` come from outside
    this view (module import / class config) — confirm their semantics.
    """
    plot_all = list(chunked(plot_list, self.limit_timeseries))
    for plot_list in plot_all:
        fig, ax = self.__figure_with_legend()
        for item in plot_list:
            # item = [time_diffs, values, legend_tag]
            ax.plot(item[0], item[1], style, label=item[2])
        for span in icu_ios:
            # Shade the ICU stay interval.
            ax.axvspan(span[0], span[1], alpha=0.2, color='red')
        ax.set_title(title)
        ax.set_xlabel("Hours since Admission")
        self.__show_legend(ax)
        MGraph.show_and_save(self, fig, filename, show_flag)
|
|
""" A workflow that contains cyclic graphs. Note that a special solver is
required to converge this workflow in order to execute it. """
import networkx as nx
from networkx.algorithms.components import strongly_connected_components
from numpy import ndarray, hstack, array, empty, arange, ones
from openmdao.main.array_helpers import flattened_value
from openmdao.main.interfaces import IDriver
from openmdao.main.mp_support import has_interface
from openmdao.main.pseudoassembly import from_PA_var, to_PA_var
from openmdao.main.sequentialflow import SequentialWorkflow
from openmdao.main.vartree import VariableTree
from ordereddict import OrderedDict
__all__ = ['CyclicWorkflow']
# SequentialWorkflow gives us the add and remove methods.
class CyclicWorkflow(SequentialWorkflow):
    """A CyclicWorkflow consists of a collection of Components that contains
    loops in the graph.

    Cycles are broken ("severed") one edge at a time until a topological
    sort succeeds; a solver can then use the severed edges as its
    independents/dependents to converge the loop.
    """

    def __init__(self, parent=None, members=None):
        """ Create an empty flow. """
        super(CyclicWorkflow, self).__init__(parent, members)
        self.config_changed()

    def config_changed(self):
        """Notifies the Workflow that its configuration (dependencies, etc.)
        has changed.
        """
        super(CyclicWorkflow, self).config_changed()
        # Invalidate all lazily-built caches.
        self._workflow_graph = None       # collapsed component graph
        self._topsort = None              # cached topological ordering
        self._severed_edges = []          # edges cut to break cycles
        self._mapped_severed_edges = []   # severed edges in pseudo-assembly naming

    def __iter__(self):
        """Iterate through the nodes in some proper order."""
        # resolve all of the components up front so if there's a problem it'll
        # fail early and not waste time running components
        scope = self.scope
        return [getattr(scope, n) for n in self._get_topsort()].__iter__()

    def _get_topsort(self):
        """ Return a sorted list of components in the workflow.

        Severed edges accumulated while breaking cycles are recorded in
        self._severed_edges.
        """
        if self._topsort is None:
            # NOTE(review): initialized as a list in config_changed but
            # replaced by a set here so duplicate severed edges collapse.
            self._severed_edges = set()
            graph = nx.DiGraph(self._get_collapsed_graph())
            cyclic = True
            while cyclic:
                try:
                    self._topsort = nx.topological_sort(graph)
                    cyclic = False
                except nx.NetworkXUnfeasible:
                    strong = strongly_connected_components(graph)
                    # We may have multiple loops. We only deal with one at
                    # a time because multiple loops create some non-unique
                    # paths.
                    strong = strong[0]
                    # Break one edge of the loop.
                    # For now, just break the first edge.
                    # TODO: smarter ways to choose edge to break.
                    graph.remove_edge(strong[-1], strong[0])
                    # Keep a list of the edges we break, so that a solver
                    # can use them as its independents/dependents.
                    depgraph = self.scope._depgraph
                    edge_set = set(depgraph.get_directional_interior_edges(strong[-1],
                                                                           strong[0]))
                    self._severed_edges.update(edge_set)
            if self._severed_edges:
                # Keep a variable graph with the severed connections removed.
                self._var_graph = self.scope._depgraph.copy()
                self._var_graph.remove_edges_from(self._severed_edges)
        return self._topsort

    def _get_collapsed_graph(self):
        """Get a dependency graph with only our workflow components
        in it, with additional edges added to it from sub-workflows
        of any Driver components in our workflow, and from any ExprEvaluators
        in any components in our workflow.
        """
        if self._workflow_graph:
            return self._workflow_graph
        to_add = []
        scope = self.scope
        graph = scope._depgraph
        # find all of the incoming and outgoing edges to/from all of the
        # components in each driver's iteration set so we can add edges to/from
        # the driver in our collapsed graph
        comps = self.get_components(full=True)
        cnames = set([c.name for c in comps])
        removes = set()
        itersets = {}
        graph_with_subs = graph.component_graph()
        collapsed_graph = graph_with_subs.subgraph(cnames)
        for comp in comps:
            cname = comp.name
            if has_interface(comp, IDriver):
                iterset = [c.name for c in comp.iteration_set()]
                itersets[cname] = iterset
                removes.update(iterset)
                for u, v in graph_with_subs.edges_iter(nbunch=iterset):  # outgoing edges
                    if v != cname and v not in iterset and not v.startswith('_pseudo_'):
                        collapsed_graph.add_edge(cname, v)
                for u, v in graph_with_subs.in_edges_iter(nbunch=iterset):  # incoming edges
                    if u != cname and u not in iterset and not u.startswith('_pseudo_'):
                        collapsed_graph.add_edge(u, cname)
        # connect all of the edges from each driver's iterset members to itself
        # For this, we need the graph with the subdriver itersets all still in it.
        to_add = []
        for drv, iterset in itersets.items():
            for cname in iterset:
                for u, v in graph_with_subs.edges_iter(cname):
                    if v != drv:
                        to_add.append((drv, v))
                for u, v in graph_with_subs.in_edges_iter(cname):
                    if u != drv:
                        to_add.append((u, drv))
        collapsed_graph.add_edges_from(to_add)
        collapsed_graph = collapsed_graph.subgraph(cnames - removes)
        # now add some fake dependencies for degree 0 nodes in an attempt to
        # mimic a SequentialWorkflow in cases where nodes aren't connected.
        # Edges are added from each degree 0 node to all nodes after it in
        # sequence order.
        self._duplicates = set()
        last = len(self._names) - 1
        if last > 0:
            to_add = []
            for i, cname in enumerate(self._names):
                if collapsed_graph.degree(cname) == 0:
                    if self._names.count(cname) > 1:
                        # Don't introduce circular dependencies.
                        self._duplicates.add(cname)
                    else:
                        if i < last:
                            for n in self._names[i + 1:]:
                                to_add.append((cname, n))
                        else:
                            for n in self._names[0:i]:
                                to_add.append((n, cname))
            collapsed_graph.add_edges_from([(u, v) for u, v in to_add
                                            if u in collapsed_graph and v in collapsed_graph])
        self._workflow_graph = collapsed_graph
        return self._workflow_graph

    def initialize_residual(self):
        """Creates the array that stores the residual. Also returns the
        number of edges.

        Before delegating to the base class, severed edges are renamed
        into pseudo-assembly ('~') naming when pseudo-assemblies exist.
        """
        dgraph = self.derivative_graph()
        # We need to map any of our edges if they are in a
        # pseudo-assy
        pa_keys = set([s.split('.', 1)[0] for s in self.edge_list() if '~' in s])
        if len(pa_keys) == 0:
            self._mapped_severed_edges = self._severed_edges
        else:
            palist = [dgraph.node[pa_key]['pa_object'] for pa_key in pa_keys]
            self._mapped_severed_edges = []
            for src, target in self._severed_edges:
                compname, _, varname = src.partition('.')
                for pseudo in palist:
                    if src in pseudo.outputs:
                        # Source lives inside a pseudo-assembly: use its alias.
                        src = to_PA_var(src, pseudo.name)
                        break
                compname, _, varname = target.partition('.')
                for pseudo in palist:
                    flat_inputs = set()
                    for item in pseudo.inputs:
                        flat_inputs.update(item)
                    if target in flat_inputs:
                        target = to_PA_var(target, pseudo.name)
                        break
                self._mapped_severed_edges.append((src, target))
        return super(CyclicWorkflow, self).initialize_residual()

    def derivative_graph(self, inputs=None, outputs=None, fd=False,
                         group_nondif=True, add_implicit=True):
        """Returns the local graph that we use for derivatives. For cyclic
        flows, we need to sever edges and use them as inputs/outputs.
        """
        if self._derivative_graph is None or group_nondif is False:
            if inputs is None:
                inputs = []
            if outputs is None:
                outputs = []
            if add_implicit is True:
                # Solver can specify parameters
                if hasattr(self.parent, 'list_param_group_targets'):
                    inputs.extend(self.parent.list_param_group_targets())
                # Solver can specify equality constraints
                # NOTE(review): the guard checks get_eq_constraints but the
                # call below uses get_constraints — confirm this is intended.
                if hasattr(self.parent, 'get_eq_constraints'):
                    outputs.extend(["%s.out0" % item.pcomp_name for item in
                                    self.parent.get_constraints().values()])
            # Cyclic flows need to be severed before derivatives are calculated.
            self._get_topsort()
            # Each severed edge contributes its target as an input and its
            # source as an output of the derivative graph.
            for src, target in self._severed_edges:
                inputs.append(target)
                outputs.append(src)
            dgraph = super(CyclicWorkflow, self).derivative_graph(inputs=inputs,
                outputs=outputs, fd=fd, severed=self._severed_edges,
                group_nondif=group_nondif, add_implicit=add_implicit)
            if group_nondif is False:
                return dgraph
        return self._derivative_graph

    def edge_list(self):
        """ Return the list of edges for the derivatives of this workflow. """
        self._edges = super(CyclicWorkflow, self).edge_list()
        # TODO: Shouldn't have to do this everytime.
        if len(self._mapped_severed_edges) > 0:
            cyclic_edges = OrderedDict()
            for edge in self._mapped_severed_edges:
                cyclic_edges[edge[0]] = edge[1]
            # Finally, modify our edge list to include the severed edges, and
            # exclude the boundary edges.
            for src, targets in self._edges.iteritems():
                if '@in' not in src or \
                   not any(edge in cyclic_edges.values() for edge in targets):
                    if isinstance(targets, str):
                        targets = [targets]
                    newtargets = []
                    for target in targets:
                        if '@out' not in target or \
                           src not in cyclic_edges:
                            newtargets.append(target)
                    if len(newtargets) > 0:
                        cyclic_edges[src] = newtargets
            self._edges = cyclic_edges
        return self._edges

    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """
        parent = self.parent
        deps = array(parent.eval_eq_constraints(self.scope))
        # Reorder for fixed point
        if fixed_point is True:
            eqcons = parent.get_eq_constraints()
            rhs = {}
            lhs = {}
            i = 0
            for value in eqcons.itervalues():
                # make a mapping of position of each constraint
                rhs[value.rhs.text] = (i, value.size)
                lhs[value.lhs.text] = (i, value.size)
                i += value.size
            new_dep_index = empty(len(deps), dtype="int")
            new_dep_sign = empty(len(deps), dtype="int")
            k = 0
            for params in parent.list_param_group_targets():
                # for each param, grab the right map value and set the sign convention
                try:
                    j, size = rhs[params[0]]
                    new_dep_index[k:k + size] = j + arange(0, size, dtype="int")
                    new_dep_sign[k:k + size] = ones((size,))
                    k += size
                except KeyError:  # wasn't in the rhs dict, try the lhs
                    try:
                        j, size = lhs[params[0]]
                        new_dep_index[k:k + size] = j + arange(0, size, dtype="int")
                        # Parameter matched the left-hand side: flip the sign.
                        new_dep_sign[k:k + size] = -1 * ones(size)
                        k += size
                    except KeyError:
                        pass  # TODO: need to throw an error here. Why was there a param that didn't show up in the constraint
            # reset the deps array to the new order and sign
            deps = deps[new_dep_index] * new_dep_sign
        sev_deps = []
        for src, target in self._severed_edges:
            if not isinstance(target, str):
                target = target[0]
            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            # Residual of a severed edge: flattened source minus target.
            res = flattened_value(src, src_val) - flattened_value(target, targ_val)
            sev_deps.extend(res)
        return hstack((deps, sev_deps))

    def get_independents(self):
        """Returns a list of current values of the independents. This includes
        both parameters and severed targets.
        """
        indeps = self.parent.eval_parameters(self.scope)
        sev_indeps = []
        for _, target in self._severed_edges:
            if not isinstance(target, str):
                target = target[0]
            target = from_PA_var(target)
            old_val = self.scope.get(target)
            sev_indeps.extend(flattened_value(target, old_val))
        return hstack((indeps, sev_indeps))

    def set_independents(self, val):
        """Sets all independent variables to the values in the input array
        `val`. This includes both parameters and severed targets.
        """
        # NOTE(review): _bounds_cache is maintained outside this view
        # (base workflow); assumed to map source name -> flat index bounds.
        bounds = self._bounds_cache
        nparam = self.parent.total_parameters()
        if nparam > 0:
            # The first nparam entries of val are the solver parameters.
            self.parent.set_parameters(val[:nparam].flatten())
        if len(self._severed_edges) > 0:
            i = nparam
            for src, targets in self._mapped_severed_edges:
                if isinstance(targets, str):
                    targets = [targets]
                i1, i2 = bounds[src]
                if isinstance(i1, list):
                    width = len(i1)
                else:
                    width = i2 - i1
                # Re-base the slice onto this edge's position in val.
                i1 = i
                i2 = i + width
                for target in targets:
                    target = from_PA_var(target)
                    old_val = self.scope.get(target)
                    if isinstance(old_val, float):
                        new_val = float(val[i1:i2])
                    elif isinstance(old_val, ndarray):
                        shape = old_val.shape
                        if len(shape) > 1:
                            new_val = val[i1:i2].copy()
                            new_val = new_val.reshape(shape)
                        else:
                            new_val = val[i1:i2].copy()
                    elif isinstance(old_val, VariableTree):
                        new_val = old_val.copy()
                        self._vtree_set(target, new_val, val[i1:i2], i1)
                    else:
                        msg = "Variable %s is of type %s." % (target, type(old_val)) + \
                              " This type is not supported by the MDA Solver."
                        self.scope.raise_exception(msg, RuntimeError)
                i += width
                # Poke new value into the input end of the edge.
                self.scope.set(target, new_val, force=True)
                # # Prevent OpenMDAO from stomping on our poked input.
                # self.scope.set_valid([target.split('[', 1)[0]], True)

    def _vtree_set(self, name, vtree, dv, i1=0):
        """ Update VariableTree `name` value `vtree` from `dv`.

        Returns the flat index reached, so nested calls can continue from it.
        """
        for key in sorted(vtree.list_vars()):  # Force repeatable order.
            value = getattr(vtree, key)
            if isinstance(value, float):
                setattr(vtree, key, float(dv[i1]))
                i1 += 1
            elif isinstance(value, ndarray):
                shape = value.shape
                size = value.size
                i2 = i1 + size
                if len(shape) > 1:
                    value = dv[i1:i2]
                    value = value.reshape(shape)
                else:
                    value = dv[i1:i2]
                setattr(vtree, key, value)
                i1 += size
            elif isinstance(value, VariableTree):
                # Recurse into nested trees, threading the flat index through.
                i1 = self._vtree_set('.'.join((name, key)), value, dv, i1)
            else:
                msg = "Variable %s is of type %s." % (name, type(value)) + \
                      " This type is not supported by the MDA Solver."
                self.scope.raise_exception(msg, RuntimeError)
        return i1
|
|
import io
import os
import re
import string
class Character(object):
    """A single positioned glyph extracted from a page.

    The four coordinates are the glyph's bounding box; `size` is the
    font size and `font` the font name.
    """

    def __init__(self, left, right, bottom, top, text='', size=5.0, font='Courier'):
        self.left = left
        self.right = right
        self.bottom = bottom
        self.top = top
        self.text = text
        self.size = size
        self.font = font

    @property
    def height(self):
        """Vertical extent of the bounding box."""
        return self.top - self.bottom
class Word(object):
MAX_HORIZONTAL_OVERLAP = 0.01
MAX_HORIZONTAL_SPACING = 0.01
MIN_VERTICAL_OVERLAP_FRACTION = 0.95
def __init__(self):
self._characters = []
def add_character(self, character):
if self.can_add(character):
self._characters.append(character)
return True
else:
return False
def vertical_overlap_fraction(self, chararacter):
intersection_length = min(self.top, chararacter.top) - max(self.bottom, chararacter.bottom)
return max(intersection_length / self.height, intersection_length / chararacter.height)
def can_add(self, character):
return self._characters == [] or \
(-self.MAX_HORIZONTAL_OVERLAP <= (character.left - self.right) <= self.MAX_HORIZONTAL_SPACING and
self.vertical_overlap_fraction(character) >= self.MIN_VERTICAL_OVERLAP_FRACTION)
@property
def text(self):
return ''.join([char.text for char in self._characters])
@property
def num_chars(self):
return len(self._characters)
@property
def left(self):
return min([char.left for char in self._characters])
@property
def right(self):
return max([char.right for char in self._characters])
@property
def top(self):
return max([char.top for char in self._characters])
@property
def bottom(self):
return min([char.bottom for char in self._characters])
@property
def height(self):
return self.top - self.bottom
@property
def mean_size(self):
total_size = sum([char.size for char in self._characters])
return total_size / len(self._characters)
@property
def fraction_capitalized(self):
caps = [char for char in self._characters if char.text in string.ascii_uppercase]
non_caps = [char for char in self._characters if char.text in string.ascii_lowercase]
total = len(caps) + len(non_caps)
if total == 0:
return 1.0
else:
return float(len(caps)) / total
@property
def mode_font(self):
fonts = {}
for char in self._characters:
fonts[char.font] = fonts.get(char.font, 0) + 1
font, _ = sorted(fonts.items(), key=lambda f: (-f[1], f[0]))[0]
return font
def __repr__(self):
return self.text
class Page(object):
    """One two-column page of words, with the geometry needed to find the
    column margins, the central gap between the columns, and to render
    each column's lines as markdown.

    Coordinate convention (as used by the word objects stored here):
    larger `top`/`bottom` values are higher on the page.
    """

    MIN_MARGIN = 10.0
    HEADER_CAP_THRESHOLD = 0.9      # min capitalized fraction for a header line
    HEADER_1_THRESHOLD_SIZE = 7.5   # mean font size of a level-1 header
    HEADER_2_THRESHOLD_SIZE = 4.9   # mean font size of a level-2 header
    GAP_RANGE_FRACTION = 0.2        # fraction of text width searched for the gap

    def __init__(self, page_no, left, right, bottom, top):
        self.words = []
        self.page_no = page_no
        self.left = left
        self.right = right
        self.bottom = bottom
        self.top = top
        # Count, per 0.1-wide vertical sweep line, of words crossing it.
        self.sweep_lines = {x: 0 for x in self._increment_by_point_one(self.left, self.right)}
        self.left_edge, self.right_edge = None, None
        self.left_gap_edge, self.right_gap_edge = None, None

    def add_words(self, words):
        self.words.extend(words)

    def add_word(self, word):
        self.words.append(word)

    @property
    def text_bottom(self):
        return min([w.bottom for w in self.words])

    @property
    def text_top(self):
        return max([w.top for w in self.words])

    @property
    def text_left(self):
        return min([w.left for w in self.words])

    @property
    def text_right(self):
        return max([w.right for w in self.words])

    def _increment_by_point_one(self, left, right):
        """Inclusive [left, right] positions in steps of 0.1."""
        return [x / 10.0 for x in range(int(round(left, 1) * 10), int(round(right, 1) * 10 + 1))]

    def _vertical_range_adjustment(self):
        """Return (bottom, top) word-baseline positions after trimming
        roughly 10% of the distinct text lines from the page ends.

        Bug fix: with fewer than ~5 distinct lines the trim count rounds
        to 0 and the original ``distinct_lines[-0]`` returned the *first*
        (lowest) line as the top; the full range is now returned instead.
        """
        distinct_lines = sorted(set([word.bottom for word in self.words]))
        range_adjustment = int(round(len(distinct_lines) * 0.10, 0))
        if range_adjustment == 0:
            return distinct_lines[0], distinct_lines[-1]
        # NOTE(review): [-range_adjustment] trims one line fewer from the
        # top than from the bottom; preserved as-is for compatibility.
        return distinct_lines[range_adjustment], distinct_lines[-range_adjustment]

    def _compute_vertical_lines(self):
        """Tally, per sweep line, the words in the vertical middle of the
        page that cross it (header/footer rows are excluded)."""
        bottom, top = self._vertical_range_adjustment()
        middle_words = [w for w in self.words if w.bottom >= bottom and w.bottom <= top]
        for word in middle_words:
            for entry in self._increment_by_point_one(word.left, word.right):
                self.sweep_lines[entry] += 1

    def _middle_gap(self):
        """Locate the word-free vertical band near the horizontal midpoint
        of the text; return its (left_edge, right_edge)."""
        left = self.text_left
        right = self.text_right
        midpoint = (right - left) / 2 + left
        sweep_range = (right - left) * self.GAP_RANGE_FRACTION / 2
        sweep_left, sweep_right = midpoint - sweep_range, midpoint + sweep_range
        left_edge, right_edge = None, None
        for pt in self._increment_by_point_one(sweep_left, sweep_right):
            if self.sweep_lines[pt] == 0:
                if left_edge is None:
                    left_edge = pt
                right_edge = pt
            elif self.sweep_lines[pt] != 0 and left_edge is not None:
                # First occupied line after the gap started: gap is complete.
                break
        assert left_edge is not None and right_edge is not None, "Couldn't find middle gap on page {}".format(self.page_no)
        return left_edge, right_edge

    def _left_margin(self, gap_edge):
        """Rightmost empty sweep line left of the gap (the left margin)."""
        margin = self.left
        for pt in self._increment_by_point_one(self.left, gap_edge):
            if self.sweep_lines[pt] == 0 and pt != gap_edge:
                margin = pt
        return margin

    def _right_margin(self, gap_edge):
        """First empty sweep line right of the gap (the right margin)."""
        margin = gap_edge
        for pt in self._increment_by_point_one(gap_edge, self.right):
            if self.sweep_lines[pt] == 0 and pt != gap_edge:
                margin = pt
                break
        return margin

    def compute_column_margins(self):
        """Compute and cache the four column boundaries; returns
        (left_edge, left_gap_edge, right_gap_edge, right_edge)."""
        self._compute_vertical_lines()
        self.left_gap_edge, self.right_gap_edge = self._middle_gap()
        self.left_edge = self._left_margin(self.left_gap_edge)
        self.right_edge = self._right_margin(self.right_gap_edge)
        return self.left_edge, self.left_gap_edge, self.right_gap_edge, self.right_edge

    def remove_troublesome_lines(self):
        """Drop header/footer rows: words crossing the column gap mark the
        extreme top/bottom rows, and every word above/below them is removed.

        Bug fix: a word matching both the top and bottom criteria was
        appended twice, so the second ``list.remove`` either raised
        ValueError or deleted an unrelated word; each word is now removed
        at most once.
        """
        troublesome_words = []
        bottom, top = self._vertical_range_adjustment()
        for w in self.words:
            if w.left <= self.right_gap_edge and w.right >= self.left_gap_edge:
                troublesome_words.append(w)
        if len(troublesome_words) > 0:
            highest_words = [w.bottom for w in troublesome_words if w.bottom >= top]
            lowest_words = [w.top for w in troublesome_words if w.bottom <= bottom]
            words_to_remove = []
            for word in self.words:
                if (len(highest_words) > 0 and word.top > min(highest_words)) or \
                        (len(lowest_words) > 0 and word.bottom < max(lowest_words)):
                    words_to_remove.append(word)
            for word in words_to_remove:
                self.words.remove(word)

    def _extract_language(self, left, right):
        """Group the words strictly inside (left, right) into Lines,
        ordered top-to-bottom then left-to-right."""
        language_words = [word for word in self.words if word.left > left and word.right < right]
        language_words.sort(key=lambda word: (-word.bottom, word.left))
        line = Line()
        lines = []
        for word in language_words:
            if not line.add_word(word):
                # Word starts a new visual row; flush the finished line.
                line.sort_words()
                lines.append(line)
                line = Line()
                line.add_word(word)
        if len(line.words) != 0:
            line.sort_words()
            lines.append(line)
        return lines

    @property
    def english(self):
        """Lines of the left column."""
        return self._extract_language(self.left_edge, self.left_gap_edge)

    @property
    def french(self):
        """Lines of the right column."""
        return self._extract_language(self.right_gap_edge, self.right_edge)

    def _check_header(self, line_text):
        """Turn a numbered line ("1. ...") into a bold markdown heading."""
        header_match = re.search(r'^(\d+\.)(.*)', line_text)
        if header_match:
            line_text = os.linesep + '**{}**{}'.format(*header_match.group(1, 2))
        return line_text

    def _check_letter_paragraph(self, line_text):
        """Turn a lettered paragraph ("(a) ...") into a markdown list item."""
        # Single '^' anchor (the original's doubled '^^' was redundant).
        letter_match = re.search(r'^\(([a-z]+)\)( .*)', line_text)
        if letter_match:
            line_text = ' * (_{}_){}'.format(*letter_match.group(1, 2))
        return line_text

    def _convert_line_to_markdown(self, line):
        """Render one Line as markdown, promoting all-caps lines with a
        large mean font size to headings."""
        prefix = ''
        markdown_text = line.text
        markdown_text = markdown_text.replace('_', ' ')
        # NOTE(review): this replaces '"' with itself; it probably meant to
        # normalize a typographic quote — confirm against the source data.
        markdown_text = markdown_text.replace('"', '"')
        if all([word.fraction_capitalized >= self.HEADER_CAP_THRESHOLD for word in line.words]):
            if line.mean_size >= self.HEADER_1_THRESHOLD_SIZE:
                prefix = '# '
            elif line.mean_size >= self.HEADER_2_THRESHOLD_SIZE:
                prefix = '## '
        if prefix == '':
            markdown_text = self._check_header(markdown_text)
            markdown_text = self._check_letter_paragraph(markdown_text)
        else:
            prefix = os.linesep + prefix
        return prefix + markdown_text

    def convert_to_markdown(self, lines):
        """Join rendered lines into a single markdown string."""
        markdown_buffer = io.StringIO()
        for line in lines:
            markdown_line = self._convert_line_to_markdown(line)
            markdown_buffer.write(markdown_line + os.linesep)
        output = markdown_buffer.getvalue()
        markdown_buffer.close()
        return output
class Line(object):
MIN_VERTICAL_OVERLAP_FRACTION = 0.5
def __init__(self):
self.words = []
def add_word(self, word):
if self.can_add(word):
self.words.append(word)
return True
else:
return False
def vertical_overlap(self, word):
intersection_length = min(self.top, word.top) - max(self.bottom, word.bottom)
return max(intersection_length / self.height, intersection_length / word.height)
def can_add(self, word):
return self.words == [] or \
self.vertical_overlap(word) >= self.MIN_VERTICAL_OVERLAP_FRACTION
@property
def left(self):
return min([word.left for word in self.words])
@property
def right(self):
return max([word.right for word in self.words])
@property
def bottom(self):
return min([word.bottom for word in self.words])
@property
def top(self):
return max([word.top for word in self.words])
@property
def height(self):
return self.top - self.bottom
@property
def mean_size(self):
weighted_size = sum([word.num_chars * word.mean_size for word in self.words])
total_size = sum([word.num_chars for word in self.words])
return weighted_size / total_size
@property
def mode_font(self):
fonts = {}
for word in self.words:
fonts[word.mode_font] = fonts.get(word.mode_font, 0) + word.num_chars
font, _ = sorted(fonts.items(), key=lambda f: (-f[1], f[0]))[0]
return font
@property
def text(self):
return ' '.join([word.text for word in self.words])
def sort_words(self):
self.words.sort(key=lambda word: word.left)
def __repr__(self):
data = ' '.join([word.text for word in self.words])
return "<Line text='{}'>".format(data)
|
|
"""
This module contains a class representing a Type 1 font.
This version reads pfa and pfb files and splits them for embedding in
pdf files. It also supports SlantFont and ExtendFont transformations,
similarly to pdfTeX and friends. There is no support yet for
subsetting.
Usage::
>>> font = Type1Font(filename)
>>> clear_part, encrypted_part, finale = font.parts
>>> slanted_font = font.transform({'slant': 0.167})
>>> extended_font = font.transform({'extend': 1.2})
Sources:
* Adobe Technical Note #5040, Supporting Downloadable PostScript
Language Fonts.
* Adobe Type 1 Font Format, Adobe Systems Incorporated, third printing,
v1.1, 1993. ISBN 0-201-57044-0.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import filter
from six import unichr
import io
import itertools
import numpy as np
import re
import struct
import sys
if six.PY3:
    # On Python 3, indexing a bytes object already yields an int, so shadow
    # the builtin ord() with a no-op; the byte-level parsing code below can
    # then treat Python 2 and 3 uniformly.
    def ord(x):
        return x
class Type1Font(object):
"""
A class representing a Type-1 font, for use by backends.
.. attribute:: parts
A 3-tuple of the cleartext part, the encrypted part, and the
finale of zeros.
.. attribute:: prop
A dictionary of font properties.
"""
__slots__ = ('parts', 'prop')
def __init__(self, input):
    """
    Initialize a Type-1 font. *input* can be either the file name of
    a pfb file or a 3-tuple of already-decoded Type-1 font parts.
    """
    if isinstance(input, tuple) and len(input) == 3:
        # Already-decoded (cleartext, encrypted, finale) parts.
        self.parts = input
    else:
        with open(input, 'rb') as file:
            data = self._read(file)
        self.parts = self._split(data)
    # Populate self.prop with font properties parsed from the cleartext part.
    self._parse()
def _read(self, file):
    """
    Read the font from a file, decoding into usable parts.

    Plain pfa data (no leading 0x80 marker byte) is returned verbatim.
    A pfb file is a sequence of segments, each introduced by a 0x80 byte
    plus a type byte: 1 = ASCII (copied verbatim), 2 = binary
    (re-encoded here as hexadecimal text), 3 = end of file.
    """
    rawdata = file.read()
    if not rawdata.startswith(b'\x80'):
        return rawdata
    data = b''
    while len(rawdata) > 0:
        if not rawdata.startswith(b'\x80'):
            raise RuntimeError('Broken pfb file (expected byte 128, '
                               'got %d)' % ord(rawdata[0]))
        # NOTE: `type` shadows the builtin within this loop.
        type = ord(rawdata[1])
        if type in (1, 2):
            # Payload length: little-endian 32-bit int after the 2 marker bytes.
            length, = struct.unpack(str('<i'), rawdata[2:6])
            segment = rawdata[6:6 + length]
            rawdata = rawdata[6 + length:]
            if type == 1:       # ASCII text: include verbatim
                data += segment
            elif type == 2:     # binary data: encode in hexadecimal
                data += b''.join([('%02x' % ord(char)).encode('ascii')
                                  for char in segment])
        elif type == 3:         # end of file
            break
        else:
            raise RuntimeError('Unknown segment type %d in pfb file' %
                               type)
    return data
def _split(self, data):
    """
    Split the Type 1 font into its three main parts.

    The three parts are: (1) the cleartext part, which ends in a
    eexec operator; (2) the encrypted part; (3) the fixed part,
    which contains 512 ASCII zeros possibly divided on various
    lines, a cleartomark operator, and possibly something else.

    Returns a 3-tuple (cleartext bytes, encrypted part decoded from hex
    to binary, trailing zeros section).
    """
    # Cleartext part: just find the eexec and skip whitespace
    idx = data.index(b'eexec')
    idx += len(b'eexec')
    while data[idx] in b' \t\r\n':
        idx += 1
    len1 = idx
    # Encrypted part: find the cleartomark operator and count
    # zeros backward
    idx = data.rindex(b'cleartomark') - 1
    zeros = 512
    while zeros and ord(data[idx]) in (
            ord(b'0'[0]), ord(b'\n'[0]), ord(b'\r'[0])):
        if ord(data[idx]) == ord(b'0'[0]):
            zeros -= 1
        idx -= 1
    if zeros:
        raise RuntimeError('Insufficiently many zeros in Type 1 font')
    # Convert encrypted part to binary (if we read a pfb file, we
    # may end up converting binary to hexadecimal to binary again;
    # but if we read a pfa file, this part is already in hex, and
    # I am not quite sure if even the pfb format guarantees that
    # it will be in binary).
    binary = b''.join([unichr(int(data[i:i + 2], 16)).encode('latin-1')
                       for i in range(len1, idx, 2)])
    return data[:len1], binary, data[idx:]
# Regular expressions used by the PostScript tokenizer in _tokens().
_whitespace_re = re.compile(br'[\0\t\r\014\n ]+')
# A name token: up to two leading slashes, then any run of characters
# that are not PostScript delimiters or whitespace.
_token_re = re.compile(br'/{0,2}[^]\0\t\r\v\n ()<>{}/%[]+')
_comment_re = re.compile(br'%[^\r\n\v]*')
_instring_re = re.compile(br'[()\\]')
# token types: unique sentinel objects yielded by _tokens() to tag tokens
_whitespace = object()
_name = object()
_string = object()
_delimiter = object()
_number = object()
@classmethod
def _tokens(cls, text):
    """
    A PostScript tokenizer. Yield (token, value) pairs such as
    (cls._whitespace, ' ') or (cls._name, '/Foobar').

    NOTE(review): the literal comparisons such as ``text[pos] == '('``
    assume a str-like `text` (Python 2 bytes); under Python 3, indexing
    bytes yields ints and these never match — confirm the call sites.
    """
    pos = 0
    while pos < len(text):
        # Comments and whitespace are both reported as whitespace tokens.
        match = (cls._comment_re.match(text[pos:]) or
                 cls._whitespace_re.match(text[pos:]))
        if match:
            yield (cls._whitespace, match.group())
            pos += match.end()
        elif text[pos] == '(':
            # Parenthesized string: scan to the balancing ')', honoring
            # nested parens and backslash escapes.
            start = pos
            pos += 1
            depth = 1
            while depth:
                match = cls._instring_re.search(text[pos:])
                if match is None:
                    return
                pos += match.end()
                if match.group() == '(':
                    depth += 1
                elif match.group() == ')':
                    depth -= 1
                else:  # a backslash - skip the next character
                    pos += 1
            yield (cls._string, text[start:pos])
        elif text[pos:pos + 2] in ('<<', '>>'):
            # Dictionary delimiters.
            yield (cls._delimiter, text[pos:pos + 2])
            pos += 2
        elif text[pos] == '<':
            # Hex string: runs to the closing '>'.
            start = pos
            pos += text[pos:].index('>')
            yield (cls._string, text[start:pos])
        else:
            match = cls._token_re.match(text[pos:])
            if match:
                # Tokens that parse as floats are numbers; others are names.
                try:
                    float(match.group())
                    yield (cls._number, match.group())
                except ValueError:
                    yield (cls._name, match.group())
                pos += match.end()
            else:
                yield (cls._delimiter, text[pos:pos + 1])
                pos += 1
    def _parse(self):
        """
        Find the values of various font properties. This limited kind
        of parsing is described in Chapter 10 "Adobe Type Manager
        Compatibility" of the Type-1 spec.

        Populates ``self.prop`` with a dict of decoded properties parsed
        from the cleartext part of the font (``self.parts[0]``).
        """
        # Start with reasonable defaults
        prop = {'weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False,
                'UnderlinePosition': -100, 'UnderlineThickness': 50}
        tokenizer = self._tokens(self.parts[0])
        # Drop whitespace tokens; only meaningful tokens drive the parse.
        filtered = filter(lambda x: x[0] != self._whitespace, tokenizer)
        # The spec calls this an ASCII format; in Python 2.x we could
        # just treat the strings and names as opaque bytes but let's
        # turn them into proper Unicode, and be lenient in case of high bytes.
        convert = lambda x: x.decode('ascii', errors='replace')
        for token, value in filtered:
            # A key is a /Name token; the next token is its value.
            if token is self._name and value.startswith(b'/'):
                key = convert(value[1:])
                token, value = next(filtered)
                if token is self._name:
                    # Bare names: booleans or /Name references.
                    if value in (b'true', b'false'):
                        value = value == b'true'
                    else:
                        value = convert(value.lstrip(b'/'))
                elif token is self._string:
                    value = convert(value.lstrip(b'(').rstrip(b')'))
                elif token is self._number:
                    if b'.' in value:
                        value = float(value)
                    else:
                        value = int(value)
                else:  # more complicated value such as an array
                    value = None
                # FontInfo is a dict header, not a scalar property; skip it.
                if key != 'FontInfo' and value is not None:
                    prop[key] = value
        # Fill in the various *Name properties
        if 'FontName' not in prop:
            prop['FontName'] = (prop.get('FullName') or
                                prop.get('FamilyName') or
                                'Unknown')
        if 'FullName' not in prop:
            prop['FullName'] = prop['FontName']
        if 'FamilyName' not in prop:
            # Strip style suffixes like "-Bold Italic" from the full name.
            extras = r'(?i)([ -](regular|plain|italic|oblique|(semi)?bold|(ultra)?light|extra|condensed))+$'
            prop['FamilyName'] = re.sub(extras, '', prop['FullName'])
        self.prop = prop
    @classmethod
    def _transformer(cls, tokens, slant, extend):
        """
        Filter a token stream, rewriting the tokens that encode the font
        name, italic angle and font matrix so that the font is slanted by
        *slant* and horizontally extended by *extend*. Yields the
        (possibly rewritten) token values in order.
        """
        def fontname(name):
            # Tag the name with the applied effects so the result is unique.
            result = name
            if slant:
                result += '_Slant_' + str(int(1000 * slant))
            if extend != 1.0:
                result += '_Extend_' + str(int(1000 * extend))
            return result

        def italicangle(angle):
            # Slanting changes the effective italic angle (degrees).
            return str(float(angle) - np.arctan(slant) / np.pi * 180)

        def fontmatrix(array):
            # Parse "[a b c d e f]", apply the shear/extend as a 3x3
            # matrix product, and re-serialize in the same bracket form.
            array = array.lstrip('[').rstrip(']').strip().split()
            array = [float(x) for x in array]
            oldmatrix = np.eye(3, 3)
            oldmatrix[0:3, 0] = array[::2]
            oldmatrix[0:3, 1] = array[1::2]
            modifier = np.array([[extend, 0, 0],
                                 [slant, 1, 0],
                                 [0, 0, 1]])
            newmatrix = np.dot(modifier, oldmatrix)
            array[::2] = newmatrix[0:3, 0]
            array[1::2] = newmatrix[0:3, 1]
            return '[' + ' '.join(str(x) for x in array) + ']'

        def replace(fun):
            # Build a filter that passes the /Name through, preserves the
            # following whitespace, and maps *fun* over the value (which
            # may be a single token or a [ ... ] array).
            def replacer(tokens):
                token, value = next(tokens)      # name, e.g., /FontMatrix
                yield value
                token, value = next(tokens)      # possible whitespace
                while token == 'whitespace':
                    yield value
                    token, value = next(tokens)
                if value != '[':                 # name/number/etc.
                    yield fun(value)
                else:                            # array, e.g., [1 2 3]
                    array = []
                    while value != ']':
                        array += value
                        token, value = next(tokens)
                    array += value
                    yield fun(''.join(array))
            return replacer

        def suppress(tokens):
            # Swallow everything up to the terminating 'def' and emit
            # nothing (used to drop /UniqueID entries).
            for x in itertools.takewhile(lambda x: x[1] != 'def', tokens):
                pass
            yield ''

        table = {'/FontName': replace(fontname),
                 '/ItalicAngle': replace(italicangle),
                 '/FontMatrix': replace(fontmatrix),
                 '/UniqueID': suppress}

        # NOTE(review): the comparisons against 'name'/'whitespace' strings
        # here assume the incoming stream uses string token tags, unlike
        # the sentinel objects produced by cls._tokens — confirm intent.
        while True:
            token, value = next(tokens)
            if token == 'name' and value in table:
                for value in table[value](itertools.chain([(token, value)],
                                                          tokens)):
                    yield value
            else:
                yield value
    def transform(self, effects):
        """
        Transform the font by slanting or extending. *effects* should
        be a dict where ``effects['slant']`` is the tangent of the
        angle that the font is to be slanted to the right (so negative
        values slant to the left) and ``effects['extend']`` is the
        multiplier by which the font is to be extended (so values less
        than 1.0 condense). Returns a new :class:`Type1Font` object.
        """
        buffer = io.BytesIO()
        try:
            # Rewrite only the cleartext part; the encrypted and fixed
            # parts are reused unchanged.
            tokenizer = self._tokens(self.parts[0])
            for value in self._transformer(tokenizer,
                                           slant=effects.get('slant', 0.0),
                                           extend=effects.get('extend', 1.0)):
                # On Python 3, iterating bytes yields ints; convert back to
                # a one-byte string before encoding.
                if six.PY3 and isinstance(value, int):
                    value = chr(value)
                value = value.encode('latin-1')
                buffer.write(value)
            result = buffer.getvalue()
        finally:
            buffer.close()
        return Type1Font((result, self.parts[1], self.parts[2]))
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import testtools
import openstack
import openstack.cloud
from openstack.tests.unit import base
class TestNetwork(base.TestCase):
    """Unit tests for the network CRUD methods of the cloud layer.

    Each test registers the exact HTTP requests it expects against the
    mocked neutron endpoint (``register_uris``) and ends with
    ``assert_calls`` to verify that exactly those calls were made.
    """

    # A realistic neutron network representation returned by the mocks.
    mock_new_network_rep = {
        'provider:physical_network': None,
        'ipv6_address_scope': None,
        'revision_number': 3,
        'port_security_enabled': True,
        'provider:network_type': 'local',
        'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486',
        'router:external': False,
        'availability_zone_hints': [],
        'availability_zones': [],
        'provider:segmentation_id': None,
        'ipv4_address_scope': None,
        'shared': False,
        'project_id': '861808a93da0484ea1767967c4df8a23',
        'status': 'ACTIVE',
        'subnets': [],
        'description': '',
        'tags': [],
        'updated_at': '2017-04-22T19:22:53Z',
        'is_default': False,
        'qos_policy_id': None,
        'name': 'netname',
        'admin_state_up': True,
        'tenant_id': '861808a93da0484ea1767967c4df8a23',
        'created_at': '2017-04-22T19:22:53Z',
        'mtu': 0,
        'dns_domain': 'sample.openstack.org.'
    }

    # Fake extension advertisement so availability-zone hints are accepted.
    network_availability_zone_extension = {
        "alias": "network_availability_zone",
        "updated": "2015-01-01T10:00:00-00:00",
        "description": "Availability zone support for router.",
        "links": [],
        "name": "Network Availability Zone"
    }
    enabled_neutron_extensions = [network_availability_zone_extension]

    def test_list_networks(self):
        """list_networks returns the networks from a single GET."""
        net1 = {'id': '1', 'name': 'net1'}
        net2 = {'id': '2', 'name': 'net2'}
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': [net1, net2]})
        ])
        nets = self.cloud.list_networks()
        self.assertEqual([net1, net2], nets)
        self.assert_calls()

    def test_list_networks_filtered(self):
        """Filters are forwarded as query-string elements."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json'],
                     qs_elements=["name=test"]),
                 json={'networks': []})
        ])
        self.cloud.list_networks(filters={'name': 'test'})
        self.assert_calls()

    def test_create_network(self):
        """Basic create sends only name and admin_state_up."""
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': self.mock_new_network_rep},
                 validate=dict(
                     json={'network': {
                         'admin_state_up': True,
                         'name': 'netname'}}))
        ])
        network = self.cloud.create_network("netname")
        self.assertEqual(self.mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_specific_tenant(self):
        """project_id is sent to neutron as tenant_id."""
        project_id = "project_id_value"
        mock_new_network_rep = copy.copy(self.mock_new_network_rep)
        mock_new_network_rep['project_id'] = project_id
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': mock_new_network_rep},
                 validate=dict(
                     json={'network': {
                         'admin_state_up': True,
                         'name': 'netname',
                         'tenant_id': project_id}}))
        ])
        network = self.cloud.create_network("netname", project_id=project_id)
        self.assertEqual(mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_external(self):
        """external=True maps to the router:external attribute."""
        mock_new_network_rep = copy.copy(self.mock_new_network_rep)
        mock_new_network_rep['router:external'] = True
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': mock_new_network_rep},
                 validate=dict(
                     json={'network': {
                         'admin_state_up': True,
                         'name': 'netname',
                         'router:external': True}}))
        ])
        network = self.cloud.create_network("netname", external=True)
        self.assertEqual(mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_provider(self):
        """provider options are prefixed with 'provider:' on the wire."""
        provider_opts = {'physical_network': 'mynet',
                         'network_type': 'vlan',
                         'segmentation_id': 'vlan1'}
        new_network_provider_opts = {
            'provider:physical_network': 'mynet',
            'provider:network_type': 'vlan',
            'provider:segmentation_id': 'vlan1'
        }
        mock_new_network_rep = copy.copy(self.mock_new_network_rep)
        mock_new_network_rep.update(new_network_provider_opts)
        expected_send_params = {
            'admin_state_up': True,
            'name': 'netname'
        }
        expected_send_params.update(new_network_provider_opts)
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': mock_new_network_rep},
                 validate=dict(
                     json={'network': expected_send_params}))
        ])
        network = self.cloud.create_network("netname", provider=provider_opts)
        self.assertEqual(mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_with_availability_zone_hints(self):
        """AZ hints require the extension lookup before the POST."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': self.enabled_neutron_extensions}),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': self.mock_new_network_rep},
                 validate=dict(
                     json={'network': {
                         'admin_state_up': True,
                         'name': 'netname',
                         'availability_zone_hints': ['nova']}}))
        ])
        network = self.cloud.create_network("netname",
                                            availability_zone_hints=['nova'])
        self.assertEqual(self.mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_provider_ignored_value(self):
        """Unknown provider keys are silently dropped from the request."""
        provider_opts = {'physical_network': 'mynet',
                         'network_type': 'vlan',
                         'segmentation_id': 'vlan1',
                         'should_not_be_passed': 1}
        new_network_provider_opts = {
            'provider:physical_network': 'mynet',
            'provider:network_type': 'vlan',
            'provider:segmentation_id': 'vlan1'
        }
        mock_new_network_rep = copy.copy(self.mock_new_network_rep)
        mock_new_network_rep.update(new_network_provider_opts)
        expected_send_params = {
            'admin_state_up': True,
            'name': 'netname'
        }
        expected_send_params.update(new_network_provider_opts)
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': mock_new_network_rep},
                 validate=dict(
                     json={'network': expected_send_params}))
        ])
        network = self.cloud.create_network("netname", provider=provider_opts)
        self.assertEqual(mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_wrong_availability_zone_hints_type(self):
        """Non-list AZ hints raise before any HTTP call is made."""
        azh_opts = "invalid"
        with testtools.ExpectedException(
                openstack.cloud.OpenStackCloudException,
                "Parameter 'availability_zone_hints' must be a list"
        ):
            self.cloud.create_network("netname",
                                      availability_zone_hints=azh_opts)

    def test_create_network_provider_wrong_type(self):
        """Non-dict provider raises before any HTTP call is made."""
        provider_opts = "invalid"
        with testtools.ExpectedException(
                openstack.cloud.OpenStackCloudException,
                "Parameter 'provider' must be a dict"
        ):
            self.cloud.create_network("netname", provider=provider_opts)

    def test_create_network_port_security_disabled(self):
        """port_security_enabled=False is passed through verbatim."""
        port_security_state = False
        mock_new_network_rep = copy.copy(self.mock_new_network_rep)
        mock_new_network_rep['port_security_enabled'] = port_security_state
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': mock_new_network_rep},
                 validate=dict(
                     json={'network': {
                         'admin_state_up': True,
                         'name': 'netname',
                         'port_security_enabled': port_security_state}}))
        ])
        network = self.cloud.create_network(
            "netname",
            port_security_enabled=port_security_state
        )
        self.assertEqual(mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_with_mtu(self):
        """An explicit mtu_size is sent as the network's mtu."""
        mtu_size = 1500
        mock_new_network_rep = copy.copy(self.mock_new_network_rep)
        mock_new_network_rep['mtu'] = mtu_size
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'network': mock_new_network_rep},
                 validate=dict(
                     json={'network': {
                         'admin_state_up': True,
                         'name': 'netname',
                         'mtu': mtu_size}}))
        ])
        network = self.cloud.create_network("netname",
                                            mtu_size=mtu_size
                                            )
        self.assertEqual(mock_new_network_rep, network)
        self.assert_calls()

    def test_create_network_with_wrong_mtu_size(self):
        """mtu_size below the IPv4 minimum (68) is rejected client-side."""
        with testtools.ExpectedException(
                openstack.cloud.OpenStackCloudException,
                "Parameter 'mtu_size' must be greater than 67."
        ):
            self.cloud.create_network("netname", mtu_size=42)

    def test_create_network_with_wrong_mtu_type(self):
        """Non-integer mtu_size is rejected client-side."""
        with testtools.ExpectedException(
                openstack.cloud.OpenStackCloudException,
                "Parameter 'mtu_size' must be an integer."
        ):
            self.cloud.create_network("netname", mtu_size="fourty_two")

    def test_delete_network(self):
        """Delete by name: lookup GET, then DELETE by id; returns True."""
        network_id = "test-net-id"
        network_name = "network"
        network = {'id': network_id, 'name': network_name}
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': [network]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'networks', "%s.json" % network_id]),
                 json={})
        ])
        self.assertTrue(self.cloud.delete_network(network_name))
        self.assert_calls()

    def test_delete_network_not_found(self):
        """Deleting a missing network returns False without a DELETE."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': []}),
        ])
        self.assertFalse(self.cloud.delete_network('test-net'))
        self.assert_calls()

    def test_delete_network_exception(self):
        """A 503 on DELETE surfaces as OpenStackCloudException."""
        network_id = "test-net-id"
        network_name = "network"
        network = {'id': network_id, 'name': network_name}
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': [network]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'networks', "%s.json" % network_id]),
                 status_code=503)
        ])
        self.assertRaises(openstack.cloud.OpenStackCloudException,
                          self.cloud.delete_network, network_name)
        self.assert_calls()

    def test_get_network_by_id(self):
        """get_network_by_id fetches the single-network resource directly."""
        network_id = "test-net-id"
        network_name = "network"
        network = {'id': network_id, 'name': network_name}
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'networks', "%s" % network_id]),
                 json={'network': network})
        ])
        self.assertTrue(self.cloud.get_network_by_id(network_id))
        self.assert_calls()
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import dumps, loads
from sys import exc_info
from time import sleep
from os import remove
from os.path import join
import traceback
import warnings
import qiita_db as qdb
from qiita_core.qiita_settings import r_client, qiita_config
from qiita_ware.commands import (download_remote, list_remote,
submit_VAMPS, submit_EBI)
from qiita_ware.metadata_pipeline import (
create_templates_from_qiime_mapping_file)
from qiita_ware.exceptions import EBISubmissionError
def build_analysis_files(job):
    """Builds the files for an analysis

    Creates the BIOM files for the analysis, spawns one BIOM 'validate'
    job per generated file, waits for them, and finally releases the
    validators so the parent job can complete.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job with the information for building the files
    """
    with qdb.sql_connection.TRN:
        params = job.parameters.values
        analysis_id = params['analysis']
        merge_duplicated_sample_ids = params['merge_dup_sample_ids']
        analysis = qdb.analysis.Analysis(analysis_id)
        biom_files = analysis.build_files(merge_duplicated_sample_ids)
        cmd = qdb.software.Command.get_validator('BIOM')
        val_jobs = []
        for dtype, biom_fp, archive_artifact_fp in biom_files:
            # Archive artifacts carry an extra plain-text file alongside
            # the BIOM table.
            if archive_artifact_fp is not None:
                files = dumps({'biom': [biom_fp],
                               'plain_text': [archive_artifact_fp]})
            else:
                files = dumps({'biom': [biom_fp]})
            validate_params = qdb.software.Parameters.load(
                cmd, values_dict={'files': files,
                                  'artifact_type': 'BIOM',
                                  'provenance': dumps({'job': job.id,
                                                       'data_type': dtype}),
                                  'analysis': analysis_id,
                                  'template': None})
            val_jobs.append(qdb.processing_job.ProcessingJob.create(
                analysis.owner, validate_params, True))

    # Register the validators on the parent before submitting them, so
    # their completion is tracked against this job.
    job._set_validator_jobs(val_jobs)

    for j in val_jobs:
        j.submit()
        # Small delay between submissions to avoid hammering the scheduler.
        sleep(1)

    # The validator jobs no longer finish the job automatically so we need
    # to release the validators here
    job.release_validators()
def release_validators(job):
    """Waits until all the validators of a job are completed

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job with the information of the parent job
    """
    # Look up the parent job named in this task's parameters and release
    # its validator jobs, then mark this helper task as done.
    parent_id = job.parameters.values['job']
    parent = qdb.processing_job.ProcessingJob(parent_id)
    parent.release_validators()
    job._set_status('success')
def submit_to_VAMPS(job):
    """Submits an artifact to VAMPS

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    # Keep the submission and the status flip in one transaction so the
    # success status only persists if the submission completed.
    with qdb.sql_connection.TRN:
        artifact_id = job.parameters.values['artifact']
        submit_VAMPS(artifact_id)
        job._set_status('success')
def submit_to_EBI(job):
    """Submit a study to EBI

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task

    Raises
    ------
    EBISubmissionError
        If another EBI submission job for the same study is already
        running or queued.
    """
    with qdb.sql_connection.TRN:
        param_vals = job.parameters.values
        artifact_id = int(param_vals['artifact'])
        submission_type = param_vals['submission_type']
        artifact = qdb.artifact.Artifact(artifact_id)

        # Refuse to run two EBI submissions for the same study in parallel.
        for info in artifact.study._ebi_submission_jobs():
            jid, aid, js, cbste, era = info
            if js in ('running', 'queued') and jid != job.id:
                # BUG FIX: report the conflicting job *id* (jid) rather
                # than its status string (js); the old message rendered as
                # "Current job running: running", which is not actionable.
                error_msg = ("Cannot perform parallel EBI submission for "
                             "the same study. Current job running: %s" % jid)
                raise EBISubmissionError(error_msg)
        submit_EBI(artifact_id, submission_type, True)
        job._set_status('success')
def copy_artifact(job):
    """Creates a copy of an artifact

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        params = job.parameters.values
        # Source artifact and destination prep template for the copy.
        source = qdb.artifact.Artifact(params['artifact'])
        destination_prep = qdb.metadata_template.prep_template.PrepTemplate(
            params['prep_template'])
        qdb.artifact.Artifact.copy(source, destination_prep)
        job._set_status('success')
def delete_artifact(job):
    """Deletes an artifact from the system

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        target_id = job.parameters.values['artifact']
        qdb.artifact.Artifact.delete(target_id)
        job._set_status('success')
def create_sample_template(job):
    """Creates a sample template

    Builds the sample template for a study either from a QIIME mapping
    file or from a plain sample-information file, removing the uploaded
    file afterwards. Any warnings raised during creation are collected
    and published to redis for the UI to display.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        params = job.parameters.values
        fp = params['fp']
        study = qdb.study.Study(int(params['study_id']))
        is_mapping_file = params['is_mapping_file']
        data_type = params['data_type']

        # Capture (instead of printing) any warnings so they can be
        # surfaced to the user through redis below.
        with warnings.catch_warnings(record=True) as warns:
            if is_mapping_file:
                # QIIME mapping files also produce a prep template.
                create_templates_from_qiime_mapping_file(fp, study, data_type)
            else:
                qdb.metadata_template.sample_template.SampleTemplate.create(
                    qdb.metadata_template.util.load_template_to_dataframe(fp),
                    study)
            # The uploaded file is consumed; clean it up.
            remove(fp)

            if warns:
                # De-duplicate warning messages into a single alert.
                msg = '\n'.join(set(str(w.message) for w in warns))
                r_client.set("sample_template_%s" % study.id,
                             dumps({'job_id': job.id, 'alert_type': 'warning',
                                    'alert_msg': msg}))

        job._set_status('success')
def update_sample_template(job):
    """Updates a sample template

    Extends/updates the study's sample template from an uploaded file,
    removing the file afterwards; warnings are published to redis.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        param_vals = job.parameters.values
        study_id = param_vals['study']
        fp = param_vals['template_fp']

        with warnings.catch_warnings(record=True) as warns:
            st = qdb.metadata_template.sample_template.SampleTemplate(study_id)
            df = qdb.metadata_template.util.load_template_to_dataframe(fp)
            st.extend_and_update(df)
            remove(fp)

            # Join all the warning messages into one. Note that this info
            # will be ignored if an exception is raised
            if warns:
                msg = '\n'.join(set(str(w.message) for w in warns))
                r_client.set("sample_template_%s" % study_id,
                             dumps({'job_id': job.id, 'alert_type': 'warning',
                                    'alert_msg': msg}))

        job._set_status('success')
def delete_sample_template(job):
    """Deletes a sample template

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        # The study id identifies the sample template to remove.
        study_id = job.parameters.values['study']
        qdb.metadata_template.sample_template.SampleTemplate.delete(study_id)
        job._set_status('success')
def update_prep_template(job):
    """Updates a prep template

    Extends/updates the prep template from an uploaded file, removing
    the file afterwards; warnings are published to redis.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        param_vals = job.parameters.values
        prep_id = param_vals['prep_template']
        fp = param_vals['template_fp']

        prep = qdb.metadata_template.prep_template.PrepTemplate(prep_id)
        with warnings.catch_warnings(record=True) as warns:
            df = qdb.metadata_template.util.load_template_to_dataframe(fp)
            prep.extend_and_update(df)
            remove(fp)

            # Join all the warning messages into one. Note that this info
            # will be ignored if an exception is raised
            if warns:
                msg = '\n'.join(set(str(w.message) for w in warns))
                r_client.set("prep_template_%s" % prep_id,
                             dumps({'job_id': job.id, 'alert_type': 'warning',
                                    'alert_msg': msg}))

        job._set_status('success')
def delete_sample_or_column(job):
    """Deletes a sample or a column from the metadata

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task

    Raises
    ------
    ValueError
        If ``obj_class`` or ``sample_or_col`` has an unrecognized value.
    """
    with qdb.sql_connection.TRN:
        params = job.parameters.values
        obj_class = params['obj_class']
        obj_id = params['obj_id']
        sample_or_col = params['sample_or_col']
        names = params['name'].split(',')

        # Dispatch table: which template class holds the metadata.
        constructors = {
            'SampleTemplate':
                qdb.metadata_template.sample_template.SampleTemplate,
            'PrepTemplate':
                qdb.metadata_template.prep_template.PrepTemplate,
        }
        if obj_class not in constructors:
            raise ValueError('Unknown value "%s". Choose between '
                             '"SampleTemplate" and "PrepTemplate"' % obj_class)
        constructor = constructors[obj_class]

        if sample_or_col == 'columns':
            # Column deletion takes a single name, not a list.
            del_func = constructor(obj_id).delete_column
            names = names[0]
        elif sample_or_col == 'samples':
            del_func = constructor(obj_id).delete_samples
        else:
            raise ValueError('Unknown value "%s". Choose between "samples" '
                             'and "columns"' % sample_or_col)

        del_func(names)
        job._set_status('success')
def _delete_analysis_artifacts(analysis):
    """Delete an analysis's root artifacts and then the analysis itself.

    Only parent-less (root) artifacts are deleted directly; they are
    removed in descending id order.
    """
    root_ids = sorted(
        (a.id for a in analysis.artifacts if not a.parents), reverse=True)
    for artifact_id in root_ids:
        qdb.artifact.Artifact.delete(artifact_id)
    qdb.analysis.Analysis.delete(analysis.id)
def delete_study(job):
    """Deletes a full study

    Removes, in dependency order: the study's analyses, each prep
    template (with its artifact tree), the sample template, and finally
    the study itself.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    MT = qdb.metadata_template
    with qdb.sql_connection.TRN:
        study_id = job.parameters.values['study']
        study = qdb.study.Study(study_id)

        # deleting analyses
        for analysis in study.analyses():
            _delete_analysis_artifacts(analysis)

        for pt in study.prep_templates():
            if pt.artifact is not None:
                # Artifact.delete will delete descendants so just delete
                # the root
                qdb.artifact.Artifact.delete(pt.artifact.id)
            MT.prep_template.PrepTemplate.delete(pt.id)

        if MT.sample_template.SampleTemplate.exists(study_id):
            MT.sample_template.SampleTemplate.delete(study_id)

        qdb.study.Study.delete(study_id)

        job._set_status('success')
def complete_job(job):
    """Completes a job

    Reads the payload produced by the worker and marks the target job as
    complete (with its artifacts) or as errored. Failures while
    completing are recorded on the target job rather than raised.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        param_vals = job.parameters.values
        payload = loads(param_vals['payload'])
        if payload['success']:
            artifacts = payload['artifacts']
            error = None
        else:
            artifacts = None
            error = payload['error']
        c_job = qdb.processing_job.ProcessingJob(param_vals['job_id'])
        # Record which completion job handled this, for traceability.
        c_job.step = 'Completing via %s [%s]' % (job.id, job.external_id)
        try:
            c_job.complete(payload['success'], artifacts, error)
        except Exception:
            # The target job gets the traceback; this wrapper job still
            # finishes successfully below.
            c_job._set_error(traceback.format_exception(*exc_info()))

        job._set_status('success')

        if 'archive' in payload:
            pass
            # ToDo: Archive
            # features = payload['archive']
            # here we should call the method from the command to archive
def delete_analysis(job):
    """Deletes a full analysis

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        analysis_id = job.parameters.values['analysis_id']
        _delete_analysis_artifacts(qdb.analysis.Analysis(analysis_id))

        # Drop the redis key the UI uses to track this deletion.
        r_client.delete('analysis_delete_%d' % analysis_id)

        job._set_status('success')
def list_remote_files(job):
    """Lists valid study files on a remote server

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        params = job.parameters.values
        url = params['url']
        private_key = params['private_key']
        study_id = params['study_id']
        try:
            files = list_remote(url, private_key)
            # Publish the listing for the study-upload UI to pick up.
            payload = dumps({'job_id': job.id, 'url': url, 'files': files})
            r_client.set("upload_study_%s" % study_id, payload)
        except Exception:
            job._set_error(traceback.format_exception(*exc_info()))
        else:
            job._set_status('success')
def download_remote_files(job):
    """Downloads valid study files from a remote server

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        params = job.parameters.values
        try:
            download_remote(params['url'], params['private_key'],
                            params['destination'])
        except Exception:
            job._set_error(traceback.format_exception(*exc_info()))
        else:
            job._set_status('success')
def INSDC_download(job):
    """Download an accession from INSDC

    Only administrators may run this task; other users get the job
    flagged as errored.

    Parameters
    ----------
    job : qiita_db.processing_job.ProcessingJob
        The processing job performing the task
    """
    with qdb.sql_connection.TRN:
        param_vals = job.parameters.values
        download_source = param_vals['download_source']
        accession = param_vals['accession']

        if job.user.level != 'admin':
            job._set_error('INSDC_download is only for administrators')
            # BUG FIX: previously execution fell through after flagging
            # the error, still creating the work dir and marking the job
            # as success.
            return

        job_dir = join(qiita_config.working_dir, job.id)
        qdb.util.create_nested_path(job_dir)

        # code doing something
        print(download_source, accession)

        job._set_status('success')
# Maps each private-task command name (as stored in the Qiita DB) to the
# function in this module that executes it; used by private_task below.
TASK_DICT = {'build_analysis_files': build_analysis_files,
             'release_validators': release_validators,
             'submit_to_VAMPS': submit_to_VAMPS,
             'submit_to_EBI': submit_to_EBI,
             'copy_artifact': copy_artifact,
             'delete_artifact': delete_artifact,
             'create_sample_template': create_sample_template,
             'update_sample_template': update_sample_template,
             'delete_sample_template': delete_sample_template,
             'update_prep_template': update_prep_template,
             'delete_sample_or_column': delete_sample_or_column,
             'delete_study': delete_study,
             'complete_job': complete_job,
             'delete_analysis': delete_analysis,
             'list_remote_files': list_remote_files,
             'download_remote_files': download_remote_files,
             'INSDC_download': INSDC_download}
def private_task(job_id):
    """Completes a Qiita private task

    Looks up the job, dispatches to the matching handler in TASK_DICT,
    and on any failure logs the traceback and marks the job as errored.

    Parameters
    ----------
    job_id : str
        The job id
    """
    if job_id == 'register':
        # We don't need to do anything here if Qiita is registering plugins
        return

    job = qdb.processing_job.ProcessingJob(job_id)
    # Tell the system this worker is alive and owns the job.
    job.update_heartbeat_state()
    task_name = job.command.name

    try:
        TASK_DICT[task_name](job)
    except Exception as e:
        # Persist the full traceback to the log and attach a pointer to
        # the log entry on the failed job.
        log_msg = "Error on job %s: %s" % (
            job.id, ''.join(traceback.format_exception(*exc_info())))
        le = qdb.logger.LogEntry.create('Runtime', log_msg)
        job.complete(False, error="Error (log id: %d): %s" % (le.id, e))
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
import os, os.path, shutil
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. The original code had the following copyright and
# license.
#
# /* jsmin.c
# 2007-05-22
#
# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# The Software shall be used for Good, not Evil.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# */
from StringIO import StringIO
def jsmin(js):
    """Return *js* minified via :class:`JavascriptMinify`.

    A leading newline emitted by the minifier is stripped from the result.
    """
    ins = StringIO(js)
    outs = StringIO()
    JavascriptMinify().minify(ins, outs)
    # FIX: use a dedicated name instead of shadowing the builtin ``str``.
    result = outs.getvalue()
    if len(result) > 0 and result[0] == '\n':
        result = result[1:]
    return result
def isAlphanum(c):
    """return true if the character is a letter, digit, underscore,
    dollar sign, or non-ASCII character.
    """
    # Chained comparisons for the three character ranges, then the
    # individual identifier characters, then the non-ASCII fallback.
    return ('a' <= c <= 'z'
            or '0' <= c <= '9'
            or 'A' <= c <= 'Z'
            or c == '_'
            or c == '$'
            or c == '\\'
            or (c is not None and ord(c) > 126))
class UnterminatedComment(Exception):
    """Raised when a ``/* ... */`` comment reaches EOF without closing."""
    pass
class UnterminatedStringLiteral(Exception):
    """Raised when a quoted string reaches end-of-line/EOF without closing."""
    pass
class UnterminatedRegularExpression(Exception):
    """Raised when a regex literal reaches end-of-line/EOF without closing."""
    pass
class JavascriptMinify(object):
    def _outA(self):
        # Write the current character (theA) to the output stream.
        self.outstream.write(self.theA)
    def _outB(self):
        # Write the lookahead character (theB) to the output stream.
        self.outstream.write(self.theB)
    def _get(self):
        """return the next character from stdin. Watch out for lookahead. If
        the character is a control character, translate it to a space or
        linefeed.
        """
        # Consume the pending lookahead first, if _peek stored one.
        c = self.theLookahead
        self.theLookahead = None
        if c == None:
            c = self.instream.read(1)
        if c >= ' ' or c == '\n':
            return c
        if c == '':  # EOF
            # NUL is used as the internal end-of-input sentinel.
            return '\000'
        if c == '\r':
            return '\n'
        # Any other control character becomes a plain space.
        return ' '
    def _peek(self):
        # Read one character ahead and stash it so the next _get returns it.
        self.theLookahead = self._get()
        return self.theLookahead
    def _next(self):
        """get the next character, excluding comments. peek() is used to see
        if an unescaped '/' is followed by a '/' or '*'.
        """
        c = self._get()
        if c == '/' and self.theA != '\\':
            p = self._peek()
            if p == '/':
                # Line comment: skip to end of line, return the newline.
                c = self._get()
                while c > '\n':
                    c = self._get()
                return c
            if p == '*':
                # Block comment: skip to '*/', collapse it to one space.
                c = self._get()
                while 1:
                    c = self._get()
                    if c == '*':
                        if self._peek() == '/':
                            self._get()
                            return ' '
                    if c == '\000':
                        raise UnterminatedComment()
        return c
def _action(self, action):
"""do something! What you do is determined by the argument:
1 Output A. Copy B to A. Get the next B.
2 Copy B to A. Get the next B. (Delete A).
3 Get the next B. (Delete B).
action treats a string as a single character. Wow!
action recognizes a regular expression if it is preceded by ( or , or =.
"""
if action <= 1:
self._outA()
if action <= 2:
self.theA = self.theB
if self.theA == "'" or self.theA == '"':
while 1:
self._outA()
self.theA = self._get()
if self.theA == self.theB:
break
if self.theA <= '\n':
raise UnterminatedStringLiteral()
if self.theA == '\\':
self._outA()
self.theA = self._get()
if action <= 3:
self.theB = self._next()
if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
self.theA == '=' or self.theA == ':' or
self.theA == '[' or self.theA == '?' or
self.theA == '!' or self.theA == '&' or
self.theA == '|' or self.theA == ';' or
self.theA == '{' or self.theA == '}' or
self.theA == '\n'):
self._outA()
self._outB()
while 1:
self.theA = self._get()
if self.theA == '/':
break
elif self.theA == '\\':
self._outA()
self.theA = self._get()
elif self.theA <= '\n':
raise UnterminatedRegularExpression()
self._outA()
self.theB = self._next()
def _jsmin(self):
"""Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
"""
self.theA = '\n'
self._action(3)
while self.theA != '\000':
if self.theA == ' ':
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
elif self.theA == '\n':
if self.theB in ['{', '[', '(', '+', '-']:
self._action(1)
elif self.theB == ' ':
self._action(3)
else:
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
else:
if self.theB == ' ':
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
elif self.theB == '\n':
if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
self._action(1)
else:
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
else:
self._action(1)
def minify(self, instream, outstream):
self.instream = instream
self.outstream = outstream
self.theA = '\n'
self.theB = None
self.theLookahead = None
self._jsmin()
self.instream.close()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import urlparse
from eventlet.green import httplib
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova.common import memorycache
from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
# Configuration options for the EC2 API layer (auth lockout behaviour,
# keystone endpoint, and request validation knobs).
ec2_opts = [
    cfg.IntOpt('lockout_attempts',
               default=5,
               help='Number of failed auths before lockout.'),
    cfg.IntOpt('lockout_minutes',
               default=15,
               help='Number of minutes to lockout if triggered.'),
    cfg.IntOpt('lockout_window',
               default=15,
               help='Number of minutes for lockout window.'),
    cfg.StrOpt('keystone_ec2_url',
               default='http://localhost:5000/v2.0/ec2tokens',
               help='URL to get token from ec2 request.'),
    cfg.BoolOpt('ec2_private_dns_show_ip',
                default=False,
                help='Return the IP address as private dns hostname in '
                     'describe instances'),
    cfg.BoolOpt('ec2_strict_validation',
                default=True,
                help='Validate security group names'
                     ' according to EC2 specification'),
    cfg.IntOpt('ec2_timestamp_expiry',
               default=300,
               help='Time in seconds before ec2 timestamp expires'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
# 'use_forwarded_for' is declared by nova.api.auth; imported so the
# middleware below can honor X-Forwarded-For headers.
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
def ec2_error(req, request_id, code, message):
    """Build a 400 webob.Response carrying an EC2-compatible XML error."""
    LOG.error(_('%(code)s: %(message)s') % {'code': code, 'message': message})
    template = ('<?xml version="1.0"?>\n'
                '<Response><Errors><Error><Code>%s</Code>'
                '<Message>%s</Message></Error></Errors>'
                '<RequestID>%s</RequestID></Response>')
    # Escape each field for safe embedding in the XML body.
    escaped = tuple(utils.xhtml_escape(utils.utf8(field))
                    for field in (code, message, request_id))
    resp = webob.Response()
    resp.status = 400
    resp.headers['Content-Type'] = 'text/xml'
    resp.body = str(template % escaped)
    return resp
## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
    """Calls the middleware stack, captures any exceptions into faults."""
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Outermost safety net: any unhandled exception from the stack
        # below becomes a logged 500 fault instead of propagating.
        try:
            return req.get_response(self.application)
        except Exception as ex:
            LOG.exception(_("FaultWrapper: %s"), unicode(ex))
            return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
    """Access-Log akin logging for all EC2 API requests."""
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Time the downstream handling and log one access line per request.
        start = timeutils.utcnow()
        rv = req.get_response(self.application)
        self.log_request_completion(rv, req, start)
        return rv
    def log_request_completion(self, response, request, start):
        """Log elapsed time, addressing, controller/action and status for
        a completed request. 'ec2.request' is only present once Requestify
        has run, so controller/action may be None here.
        """
        apireq = request.environ.get('ec2.request', None)
        if apireq:
            controller = apireq.controller
            action = apireq.action
        else:
            controller = None
            action = None
        ctxt = request.environ.get('nova.context', None)
        delta = timeutils.utcnow() - start
        seconds = delta.seconds
        microseconds = delta.microseconds
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            seconds,
            microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)
class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.
    x = lockout_timeout flag
    y = lockout_window flag
    z = lockout_attempts flag
    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache.  Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are x failed logins within
    that period.
    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts."""
    def __init__(self, application):
        """middleware can use fake for testing."""
        # Failure counters live in memcache (or its in-process stand-in).
        self.mc = memorycache.get_client()
        super(Lockout, self).__init__(application)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= CONF.lockout_attempts:
            # Already locked out: reject before hitting the auth backend.
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(detail=detail)
        res = req.get_response(self.application)
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
            elif failures >= CONF.lockout_attempts:
                # Threshold reached: extend the entry's TTL to the full
                # lockout duration.
                lock_mins = CONF.lockout_minutes
                msg = _('Access key %(access_key)s has had %(failures)d'
                        ' failed authentications and will be locked out'
                        ' for %(lock_mins)d minutes.') % locals()
                LOG.warn(msg)
                self.mc.set(failures_key, str(failures),
                            time=CONF.lockout_minutes * 60)
        return res
class EC2KeystoneAuth(wsgi.Middleware):
    """Authenticate an EC2 request with keystone and convert to context.

    Posts the request's signature material to the configured keystone
    ec2tokens endpoint; on success a nova RequestContext is stored in
    the WSGI environ, otherwise an EC2-style "Unauthorized" error
    response is returned.
    """
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        request_id = context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return ec2_error(req, request_id, "Unauthorized", msg)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return ec2_error(req, request_id, "Unauthorized", msg)
        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')
        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}
        o = urlparse.urlparse(CONF.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        # Close the connection as soon as the body is read; previously it
        # leaked when keystone returned a non-200 status.
        conn.close()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            return ec2_error(req, request_id, "Unauthorized", msg)
        result = jsonutils.loads(data)
        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [role['name'] for role
                     in result['access']['user']['roles']]
        except (AttributeError, KeyError) as e:
            # Malformed/unexpected keystone response body.
            LOG.exception(_("Keystone failure: %s") % e)
            msg = _("Failure communicating with keystone")
            return ec2_error(req, request_id, "Unauthorized", msg)
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)
        req.environ['nova.context'] = ctxt
        return self.application
class NoAuth(wsgi.Middleware):
    """Add user:project as 'nova.context' to WSGI environ."""
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # The fake access key encodes "user[:project]".
        params = req.params
        if 'AWSAccessKeyId' not in params:
            raise webob.exc.HTTPBadRequest()
        user_id, _sep, project_id = params['AWSAccessKeyId'].partition(':')
        if not project_id:
            # No ":project" suffix -- reuse the user id as the project id.
            project_id = user_id
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        req.environ['nova.context'] = context.RequestContext(
            user_id,
            project_id,
            is_admin=True,
            remote_address=remote_address)
        return self.application
class Requestify(wsgi.Middleware):
    """Parse EC2 query parameters into an APIRequest and store it in the
    WSGI environ as 'ec2.request' for the middleware further down."""
    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        # 'controller' is an import-path string; instantiate it once.
        self.controller = importutils.import_object(controller)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Protocol-level parameters that are not arguments of the action.
        non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                    'SignatureVersion', 'Version', 'Timestamp']
        args = dict(req.params)
        try:
            # Reject requests whose timestamp is outside the allowed window.
            expired = ec2utils.is_ec2_timestamp_expired(req.params,
                            expires=CONF.ec2_timestamp_expiry)
            if expired:
                msg = _("Timestamp failed validation.")
                LOG.exception(msg)
                raise webob.exc.HTTPForbidden(detail=msg)
            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            version = req.params['SignatureVersion']
            if int(version) == 1:
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            for non_arg in non_args:
                # Remove, but raise KeyError if omitted
                args.pop(non_arg)
        except KeyError:
            # Any missing mandatory parameter surfaces here as a 400.
            raise webob.exc.HTTPBadRequest()
        except exception.InvalidRequest as err:
            raise webob.exc.HTTPBadRequest(explanation=unicode(err))
        LOG.debug(_('action: %s'), action)
        for key, value in args.items():
            LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())
        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        return self.application
class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.
    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in nova.context.
    """
    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Static map of controller -> action -> roles allowed to run it.
        # 'all' means any authenticated user; missing actions default to
        # ['none'], i.e. admin-only (see _matches_any_role).
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'ImportKeyPair': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'StartInstances': ['projectmanager', 'sysadmin'],
                'StopInstances': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
                'CreateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Requestify and the auth middleware must already have populated
        # 'ec2.request' and 'nova.context'.
        context = req.environ['nova.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.audit(_('Unauthorized request for controller=%(controller)s '
                        'and action=%(action)s') % locals(), context=context)
            raise webob.exc.HTTPUnauthorized()
    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        # Admins bypass role checks entirely; 'none' always denies.
        if context.is_admin:
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
    """Validate the parsed EC2 request arguments before execution."""
    # NOTE(review): this function and the two assignments below run in the
    # class body at import time -- they monkey-patch the 'validator' module
    # rather than defining instance methods (hence no 'self' parameter).
    def validate_ec2_id(val):
        # Valid ids are non-empty strings that decode as EC2 identifiers.
        if not validator.validate_str()(val):
            return False
        try:
            ec2utils.ec2_id_to_id(val)
        except exception.InvalidEc2Id:
            return False
        return True
    validator.validate_ec2_id = validate_ec2_id
    validator.DEFAULT_VALIDATOR = {
        'instance_id': validator.validate_ec2_id,
        'volume_id': validator.validate_ec2_id,
        'image_id': validator.validate_ec2_id,
        'attribute': validator.validate_str(),
        'image_location': validator.validate_image_path,
        'public_ip': validator.validate_ipv4,
        'region_name': validator.validate_str(),
        'group_name': validator.validate_str(max_length=255),
        'group_description': validator.validate_str(max_length=255),
        'size': validator.validate_int(),
        'user_data': validator.validate_user_data
    }
    def __init__(self, application):
        super(Validator, self).__init__(application)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Reject with 400 if any known argument fails its validator.
        if validator.validate(req.environ['ec2.request'].args,
                              validator.DEFAULT_VALIDATOR):
            return self.application
        else:
            raise webob.exc.HTTPBadRequest()
class Executor(wsgi.Application):
    """Execute an EC2 API request.
    Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
    'ec2.action_args' (all variables in WSGI environ.)  Returns an XML
    response, or a 400 upon failure.
    """
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        request_id = context.request_id
        api_request = req.environ['ec2.request']
        result = None
        # Invoke the action, translating every known nova exception into
        # an EC2-style XML error response.
        try:
            result = api_request.invoke(context)
        except exception.InstanceNotFound as ex:
            LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
            message = ex.message % {'instance_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.VolumeNotFound as ex:
            LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.message % {'volume_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.SnapshotNotFound as ex:
            LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.message % {'snapshot_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.NotFound as ex:
            LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.EC2APIError as ex:
            LOG.exception(_('EC2APIError raised: %s'), unicode(ex),
                          context=context)
            if ex.code:
                return ec2_error(req, request_id, ex.code, unicode(ex))
            else:
                return ec2_error(req, request_id, type(ex).__name__,
                                 unicode(ex))
        except exception.KeyPairExists as ex:
            LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
                      context=context)
            code = 'InvalidKeyPair.Duplicate'
            return ec2_error(req, request_id, code, unicode(ex))
        except exception.InvalidKeypair as ex:
            # Fixed: 'context' was passed positionally (becoming a stray
            # format argument) instead of as the keyword, unlike every
            # other handler in this chain.
            LOG.debug(_('InvalidKeypair raised: %s'), unicode(ex),
                      context=context)
            code = 'InvalidKeyPair.Format'
            return ec2_error(req, request_id, code, unicode(ex))
        except exception.InvalidParameterValue as ex:
            LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidPortRange as ex:
            LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.NotAuthorized as ex:
            LOG.info(_('NotAuthorized raised: %s'), unicode(ex),
                     context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidRequest as ex:
            LOG.debug(_('InvalidRequest raised: %s'), unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.QuotaError as ex:
            LOG.debug(_('QuotaError raised: %s'), unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidInstanceIDMalformed as ex:
            LOG.debug(_('Invalid id: bogus (expecting "i-..."): %s'),
                      unicode(ex), context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except Exception as ex:
            # Unknown failure: log a sanitized copy of the environ (string
            # values only) and return a generic error to the caller.
            env = req.environ.copy()
            for k in env.keys():
                if not isinstance(env[k], basestring):
                    env.pop(k)
            LOG.exception(_('Unexpected error raised: %s'), unicode(ex))
            LOG.error(_('Environment: %s') % jsonutils.dumps(env))
            return ec2_error(req, request_id, 'UnknownError',
                             _('An unknown error has occurred. '
                               'Please try your request again.'))
        else:
            # Success: serialize the action result as XML.
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
|
|
from teafacto.core.base import Block, Var, Val, param, tensorops as T
from IPython import embed
# TODO: INPUT MASK !!!!!!!! and attention etc
# TODO: what about memory mask?
# TODO: MEMORY POSITION EMBEDDINGS/ENCODINGS
# SYMBOLIC OUTPUT MEMORY ENABLED SEQ2SEQ
# - can place attention over all of temporary created output sequence
# - can write to any time step of output (write/erase interface)
# - can do multiple attention steps without actual output (change scalars)
# -> loss is placed over the symbolic output memory
class BulkNN(Block):
    """Seq2seq block with a symbolic output memory.

    Each of nsteps "thinking" steps attends over the input encoding and a
    summary of the current output memory, updates the core RNN state, and
    applies a soft write/erase/interpolate update to the memory. The loss
    is placed over the final memory (see module-level notes above).
    """
    def __init__(self, inpencoder=None, memsampler=None,
                 memembmat=None, memencoder=None, memlen=None,
                 mem_pos_repr=None, inp_pos_repr=None,
                 inp_attention=None, mem_attention=None,
                 inp_addr_extractor=None, mem_addr_extractor=None,
                 write_addr_extractor=None, write_addr_generator=None,
                 write_value_generator=None, write_value_extractor=None,
                 mem_erase_generator=None, mem_change_generator=None,
                 nsteps=100, core=None, **kw):
        super(BulkNN, self).__init__(**kw)
        # Optional learned position representations for the memory slots.
        if mem_pos_repr is not None:
            self._memposvecs = mem_pos_repr(memlen)
        else:
            self._memposvecs = None
        self._inp_pos_repr = inp_pos_repr
        self._nsteps = nsteps
        self._memlen = memlen
        self._inpencoder = inpencoder
        self._inp_att = inp_attention
        self._memencoder = memencoder
        self._mem_att = mem_attention
        self._memembmat = memembmat
        self._memsampler = memsampler
        self._core = core
        # extractors from top core state:
        self._inp_addr_extractor = inp_addr_extractor
        self._mem_addr_extractor = mem_addr_extractor
        self._write_addr_extractor = write_addr_extractor
        self._write_addr_generator = write_addr_generator
        self._write_value_extractor = write_value_extractor
        self._write_value_generator = write_value_generator
        self._mem_change_generator = mem_change_generator
        self._mem_erase_generator = mem_erase_generator
    def apply(self, inpseq):    # int-(batsize, seqlen)
        """Run nsteps of memory updates and return (final_mem, all_mems)."""
        inpenco = self._inpencoder(inpseq)  # may carry mask, based on encoder's embedder
        batsize = inpenco.shape[0]
        outvocsize = self._memembmat.shape[0]
        # Initial memory: probability mass concentrated on symbol 0.
        mem_0 = T.concatenate([
            T.ones((batsize, self._memlen, 1), dtype="float32") * 0.95,
            T.ones((batsize, self._memlen, outvocsize-1), dtype="float32") * 0.05,
        ], axis=2)  # (batsize, outseqlen, outvocsize)
        mem_0 = T.softmax(mem_0)
        core_init_states = self._core.get_init_info(batsize)
        core_state_spec = self._core.get_statespec(flat=False)
        assert(len(core_state_spec) == len(core_init_states))
        h_0 = None  # take last output of core states as initial state
        c = 0
        # NOTE(review): both branches below assign h_0 = core_init_states[c],
        # so h_0 ends up as the last core init state regardless of whether an
        # "output" spec was seen -- presumably intended; confirm.
        for ss in core_state_spec:
            h_0_isout = False
            for sss in ss:
                if sss[0] == "output":
                    h_0_isout = True
                    h_0 = core_init_states[c]
                if not h_0_isout:
                    h_0 = core_init_states[c]
                c += 1
        if self._inp_pos_repr is not None:
            # Append (broadcast) learned position vectors to the encoding.
            inpposvecs = self._inp_pos_repr(inpseq.shape[1])
            inpposvecs = T.repeat(inpposvecs.dimadd(0), batsize, axis=0)
            inpenc = T.concatenate([inpenco, inpposvecs], axis=2)
            inpenc.mask = inpenco.mask
        else:
            inpenc = inpenco
        outputs = T.scan(fn=self.rec,
                         outputs_info=[None, mem_0, h_0] + core_init_states,
                         n_steps=self._nsteps,
                         non_sequences=inpenc)
        ret = outputs[0]
        ret.push_extra_outs({"mem_0": mem_0, "h_0": h_0})   # DEBUGGING
        return ret[-1], ret
    def rec(self, mem_tm1, h_tm1, *args):
        """One thinking step: read from input and memory, update the core
        state, then softly rewrite the memory.
        """
        inpenc = args[-1]
        states_tm1 = args[:-1]
        batsize = inpenc.shape[0]
        # mem_tm1: f(batsize, outseqlen, outvocsize)
        # h_tm1:   f(batsize, thinkerdim)
        # inpenc:  f(batsize, inplen, inpencdim)
        # summarize memory
        mem_tm1_sam = self._memsample(mem_tm1)  # sample from mem
        mem_tm1_embsum = T.dot(mem_tm1_sam, self._memembmat)  # f(batsize, outseqlen, memembdim)
        mem_tm1_sum = self._memencode(mem_tm1_embsum)  # f(batsize, outseqlen, memsumdim)
        if self._memposvecs is not None:
            memposvecs = T.repeat(self._memposvecs.dimadd(0), batsize, axis=0)
            mem_tm1_sum = T.concatenate([mem_tm1_sum, memposvecs], axis=2)
        # input and memory read attentions
        inp_ctx_t = self._get_inp_ctx(h_tm1, inpenc)  # (batsize, inpencdim)
        mem_ctx_t = self._get_mem_ctx(h_tm1, mem_tm1_sum)  # (batsize, memsumdim)
        # update thinker state
        i_t = T.concatenate([inp_ctx_t, mem_ctx_t], axis=1)
        rnuret = self._core.rec(i_t, *states_tm1)
        h_t = rnuret[0]
        states_t = rnuret[1:]
        # memory change interface
        mem_t_addr = self._get_addr_weights(h_t, mem_tm1_sum)  # float-(batsize, outseqlen)
        mem_t_write = self._get_write_weights(h_t)  # (batsize, memvocsize)
        e_t = self._get_erase(h_t)  # (0..1)-(batsize,)
        c_t = self._get_change(h_t)  # (0..1)-(batsize,)
        # memory change: erase at the addressed slots, add the new write
        # distribution, then interpolate with the old memory by c_t.
        can_mem_t = mem_tm1 - T.batched_dot(e_t, mem_tm1 * mem_t_addr.dimshuffle(0, 1, 'x'))  # erase where we addressed
        can_mem_t = can_mem_t + T.batched_tensordot(mem_t_addr, mem_t_write, axes=0)  # write new value
        mem_t = T.batched_dot(1 - c_t, mem_tm1) + T.batched_dot(c_t, can_mem_t)  # interpolate between old and new value
        mem_t = T.softmax(mem_t)  # normalize to probabilities
        return (mem_t, mem_t, h_t) + tuple(states_t)
    def _memsample(self, mem):
        # Identity unless a sampler (e.g. Gumbel softmax) was configured.
        if self._memsampler is None:
            return mem
        else:
            return self._memsampler(mem)
    def _memencode(self, mem):
        # Identity unless a memory summarizer was configured.
        if self._memencoder is None:
            return mem
        else:
            return self._memencoder(mem)
    def _get_inp_ctx(self, h, inpenc):
        # Attention over the input encoding, criterion extracted from h.
        crit = self._inp_addr_extractor(h)
        return self._inp_att(crit, inpenc)
    def _get_mem_ctx(self, h, mem):
        # Attention over the memory summary, criterion extracted from h.
        crit = self._mem_addr_extractor(h)
        return self._mem_att(crit, mem)
    def _get_addr_weights(self, h, mem):
        # Soft write-address distribution over memory slots.
        crit = self._write_addr_extractor(h)
        return self._write_addr_generator(crit, mem)
    def _get_write_weights(self, h):
        crit = self._write_value_extractor(h)
        return self._write_value_generator(crit)  # generate categorical write distr
    def _get_erase(self, h):
        return self._mem_erase_generator(h)
    def _get_change(self, h):
        return self._mem_change_generator(h)
from teafacto.blocks.seq.rnn import SeqEncoder, MakeRNU, RecStack, RNNWithoutInput
from teafacto.blocks.seq.rnu import GRU
from teafacto.blocks.match import CosineDistance
from teafacto.blocks.seq.attention import Attention, AttGen
from teafacto.blocks.basic import MatDot, Linear, Forward, SMO
from teafacto.blocks.activations import GumbelSoftmax
from teafacto.core.base import asblock
from teafacto.util import issequence
class SimpleBulkNN(BulkNN):
    """ Parameterized simple interface for BulkNN that builds defaults for subcomponents """
    def __init__(self, inpvocsize=None, inpembdim=None, inpemb=None,
                 inpencinnerdim=None, bidir=False, maskid=None,
                 dropout=False, rnu=GRU,
                 inpencoder=None,
                 memvocsize=None, memembdim=None, memembmat=None,
                 memencinnerdim=None,
                 memencoder=None,
                 inp_att_dist=CosineDistance(), mem_att_dist=CosineDistance(),
                 inp_attention=None, mem_attention=None,
                 coredims=None, corernu=GRU,
                 core=None, explicit_interface=False, scalaraggdim=None,
                 write_value_dim=None, nsteps=100,
                 posvecdim=None, mem_pos_repr=None, inp_pos_repr=None,
                 inp_addr_extractor=None, mem_addr_extractor=None,
                 write_addr_extractor=None, write_addr_generator=None,
                 write_addr_dist=CosineDistance(),
                 write_value_generator=None, write_value_extractor=None,
                 mem_erase_generator=None, mem_change_generator=None,
                 memsampler=None, memsamplemethod=None, memsampletemp=0.3,
                 **kw):
        # INPUT ENCODING
        if inpencoder is None:
            inpencoder = SeqEncoder.RNN(indim=inpvocsize, inpembdim=inpembdim,
                            inpemb=inpemb, innerdim=inpencinnerdim, bidir=bidir,
                            maskid=maskid, dropout_in=dropout, dropout_h=dropout,
                            rnu=rnu).all_outputs()
            lastinpdim = inpencinnerdim if not issequence(inpencinnerdim) else inpencinnerdim[-1]
        else:
            lastinpdim = inpencoder.block.layers[-1].innerdim
        # MEMORY ENCODING
        if memembmat is None:
            memembmat = param((memvocsize, memembdim), name="memembmat").glorotuniform()
        if memencoder is None:
            memencoder = SeqEncoder.RNN(inpemb=False, innerdim=memencinnerdim,
                            bidir=bidir, dropout_in=dropout, dropout_h=dropout,
                            rnu=rnu, inpembdim=memembdim).all_outputs()
            lastmemdim = memencinnerdim if not issequence(memencinnerdim) else memencinnerdim[-1]
        else:
            lastmemdim = memencoder.block.layers[-1].innerdim
        # POSITION VECTORS
        if posvecdim is not None and inp_pos_repr is None:
            inp_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
        if posvecdim is not None and mem_pos_repr is None:
            mem_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
        xtra_dim = posvecdim if posvecdim is not None else 0
        # CORE RNN - THE THINKER
        if core is None:
            corelayers, _ = MakeRNU.fromdims([lastinpdim+lastmemdim+xtra_dim*2] + coredims,
                                rnu=corernu, dropout_in=dropout, dropout_h=dropout,
                                param_init_states=True)
            core = RecStack(*corelayers)
        lastcoredim = core.get_statespec()[-1][0][1][0]
        # ATTENTIONS
        if mem_attention is None:
            mem_attention = Attention(mem_att_dist)
        if inp_attention is None:
            inp_attention = Attention(inp_att_dist)
        if write_addr_generator is None:
            write_addr_generator = AttGen(write_addr_dist)
        # WRITE VALUE
        if write_value_generator is None:
            write_value_generator = WriteValGenerator(write_value_dim, memvocsize, dropout=dropout)
        # MEMORY SAMPLER
        if memsampler is not None:
            assert(memsamplemethod is None)
        if memsamplemethod is not None:
            assert(memsampler is None)
            memsampler = GumbelSoftmax(temperature=memsampletemp)
        ################ STATE INTERFACES #################
        if not explicit_interface:
            if inp_addr_extractor is None:
                inp_addr_extractor = Forward(lastcoredim, lastinpdim + xtra_dim, dropout=dropout)
            if mem_addr_extractor is None:
                # BUGFIX: this previously assigned inp_addr_extractor,
                # leaving mem_addr_extractor as None and clobbering the
                # input extractor built above.
                mem_addr_extractor = Forward(lastcoredim, lastmemdim + xtra_dim, dropout=dropout)
            # WRITE INTERFACE
            if write_addr_extractor is None:
                write_addr_extractor = Forward(lastcoredim, lastmemdim + xtra_dim, dropout=dropout)
            if write_value_extractor is None:
                write_value_extractor = Forward(lastcoredim, write_value_dim, dropout=dropout)
            # MEM UPDATE INTERFACE
            if mem_erase_generator is None:
                mem_erase_generator = StateToScalar(lastcoredim, scalaraggdim)
            if mem_change_generator is None:
                mem_change_generator = StateToScalar(lastcoredim, scalaraggdim)
        else:
            # Explicit interface: slice the raw core state into the six
            # interface vectors/scalars instead of learning extractors.
            inp_addr_extractor, mem_addr_extractor, write_addr_extractor, \
            write_value_extractor, mem_erase_generator, mem_change_generator = \
                make_vector_slicers(0, lastinpdim + xtra_dim, lastmemdim + xtra_dim,
                                    lastmemdim + xtra_dim, write_value_dim, 1, 1)
        super(SimpleBulkNN, self).__init__(inpencoder=inpencoder,
            memembmat=memembmat, memencoder=memencoder,
            inp_attention=inp_attention, mem_attention=mem_attention,
            core=core, memsampler=memsampler, nsteps=nsteps,
            inp_addr_extractor=inp_addr_extractor, mem_addr_extractor=mem_addr_extractor,
            write_addr_extractor=write_addr_extractor, write_addr_generator=write_addr_generator,
            mem_erase_generator=mem_erase_generator, mem_change_generator=mem_change_generator,
            write_value_generator=write_value_generator, write_value_extractor=write_value_extractor,
            inp_pos_repr=inp_pos_repr, mem_pos_repr=mem_pos_repr,
            **kw)
class WriteValGenerator(Block):
    """Maps a state vector to a categorical distribution over the memory
    vocabulary via an optional stack of Forward layers and a softmax
    output (SMO)."""
    def __init__(self, dim, vocsize, interdims=tuple(), dropout=False, **kw):
        super(WriteValGenerator, self).__init__(**kw)
        self.dims = (dim,) + interdims
        self.vocsize = vocsize
        # One Forward transform between each pair of consecutive dims.
        self.layers = [Forward(indim, outdim, dropout=dropout)
                       for indim, outdim in zip(self.dims[:-1], self.dims[1:])]
        self.smo = SMO(self.dims[-1], outdim=self.vocsize)
    def apply(self, x):
        # Push the input through the Forward stack, then the softmax output.
        out = x
        for transform in self.layers:
            out = transform(out)
        return self.smo(out)
class StateToScalar(Block):
    """Squashes a state vector to a single scalar in (0, 1) per batch row,
    via a Forward transform, a learned aggregation vector and a sigmoid."""
    def __init__(self, dim, outdim, **kw):
        super(StateToScalar, self).__init__(**kw)
        self.block = Forward(dim, outdim)
        # Learnable vector that reduces the outdim axis to a scalar.
        self.agg = param((outdim,), name="scalartostate_agg").uniform()
    def apply(self, x):
        # NOTE(review): T.dot is applied to the Forward block object itself
        # rather than calling self.block(x) -- presumably teafacto's T.dot
        # accepts blocks/params directly; confirm against the framework.
        y = T.dot(x, self.block)
        z = T.dot(y, self.agg)      # (batsize,)
        ret = T.nnet.sigmoid(z)
        return ret
def make_vector_slicers(*sizes):
    """Yield one Slicer per consecutive segment of a concatenated vector.

    The first argument is the starting offset; each subsequent argument
    is a segment width. For widths (o, w1, w2, ...) this yields
    Slicer(o, o+w1), Slicer(o+w1, o+w1+w2), ...

    Removed the dead 'rets' list and intermediate boundary list from the
    original; a running offset is equivalent.
    """
    offset = sizes[0]
    for width in sizes[1:]:
        yield Slicer(offset, offset + width)
        offset += width
class Slicer(Block):
    """Selects [a, b) along the last axis of its input. A width-one slice
    is indexed with a plain integer, dropping that axis."""
    def __init__(self, a, b, **kw):
        super(Slicer, self).__init__(**kw)
        self.a = a
        self.b = b
    def apply(self, x):
        # Leave every leading axis untouched.
        index = [slice(None, None, None) for _ in range(x.ndim)]
        if self.b - self.a == 1:
            # Single element: integer index removes the last axis.
            index[-1] = self.a
        else:
            index[-1] = slice(self.a, self.b, None)
        return x[index]
if __name__ == "__main__":
    # Smoke test (Python 2): generate a 5-step sequence from an
    # input-less RNN and print its evaluated shape and values.
    from teafacto.blocks.seq.rnn import RNNWithoutInput
    m = RNNWithoutInput(3, 2)
    out = m(5)
    print out.eval().shape
    print out.eval()
|
|
#!/usr/bin/env python
import os
import sys
# For coverage.
if __package__ is None:
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from unittest import main, TestCase
import requests
import requests_mock
from iris_sdk.client import Client
from iris_sdk.models.account import Account
from iris_sdk.utils.rest import RestError
XML_RESPONSE_ACCOUNT_GET = (
b"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
b" <AccountResponse>"
b" <Account>"
b" <AccountId>123456</AccountId>"
b" <CompanyName>Spam</CompanyName>"
b" <AccountType>Ham</AccountType>"
b" <Tiers>"
b" <Tier>0</Tier>"
b" </Tiers>"
b" <Address>"
b" <HouseNumber>900</HouseNumber>"
b" </Address>"
b" <Contact>"
b" <FirstName>Eggs</FirstName>"
b" </Contact>"
b" </Account>"
b"</AccountResponse>"
)
XML_RESPONSE_AVAILABLE_NPA_NXX_GET = (
b"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
b"<SearchResultForAvailableNpaNxx>"
b" <AvailableNpaNxxList>"
b" <AvailableNpaNxx>"
b" <City>COMPTON:COMPTON DA</City>"
b" <Npa>424</Npa>"
b" <Nxx>242</Nxx>"
b" <Quantity>7</Quantity>"
b" <State>CA</State>"
b" </AvailableNpaNxx>"
b" <AvailableNpaNxx>"
b" <City>COMPTON:GARDENA DA</City>"
b" <Npa>424</Npa>"
b" <Nxx>246</Nxx>"
b" <Quantity>5</Quantity>"
b" <State>CA</State>"
b" </AvailableNpaNxx>"
b" </AvailableNpaNxxList>"
b"</SearchResultForAvailableNpaNxx>"
)
XML_RESPONSE_AVAILABLE_NUMBERS_GET = (
b"<SearchResult>"
b" <ResultCount>3</ResultCount>"
b" <TelephoneNumberList>"
b" <TelephoneNumber>6093252507</TelephoneNumber>"
b" <TelephoneNumber>6093570994</TelephoneNumber>"
b" <TelephoneNumber>6093574598</TelephoneNumber>"
b" </TelephoneNumberList>"
b"</SearchResult>"
)
XML_RESPONSE_AVAILABLE_NUMBERS_DETAIL_GET = (
b"<SearchResult>"
b" <ResultCount>3</ResultCount>"
b" <TelephoneNumberDetailList>"
b" <TelephoneNumberDetail>"
b" <City>ALLENTOWN</City>"
b" <LATA>222</LATA>"
b" <RateCenter>ALLENTOWN </RateCenter>"
b" <State>NJ</State>"
b" <FullNumber>6093252507</FullNumber>"
b" <Tier>0</Tier>"
b" <VendorId>49</VendorId>"
b" <VendorName>Bandwidth CLEC</VendorName>"
b" </TelephoneNumberDetail>"
b" <TelephoneNumberDetail>"
b" <City>ALLENTOWN</City>"
b" <LATA>222</LATA>"
b" <RateCenter>ALLENTOWN </RateCenter>"
b" <State>NJ</State>"
b" <FullNumber>6093570994</FullNumber>"
b" <Tier>0</Tier>"
b" <VendorId>49</VendorId>"
b" <VendorName>Bandwidth CLEC</VendorName>"
b" </TelephoneNumberDetail>"
b" <TelephoneNumberDetail>"
b" <City>ALLENTOWN</City>"
b" <LATA>222</LATA>"
b" <RateCenter>ALLENTOWN </RateCenter>"
b" <State>NJ</State>"
b" <FullNumber>6093574598</FullNumber>"
b" <Tier>0</Tier>"
b" <VendorId>49</VendorId>"
b" <VendorName>Bandwidth CLEC</VendorName>"
b" </TelephoneNumberDetail>"
b" </TelephoneNumberDetailList>"
b"</SearchResult>"
)
XML_RESPONSE_AVAILABLE_NUMBERS_ERROR = (
b"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
b"<SearchResult>"
b" <Error>"
b" <Code>4010</Code>"
b" <Description>Unable to perform search.</Description>"
b" </Error>"
b"</SearchResult>"
)
XML_RESPONSE_DISCONNECTED_NUMBERS_GET = (
b"<?xml version=\"1.0\"?>"
b"<TNs>"
b" <TotalCount>4</TotalCount>"
b" <Links>"
b" <first></first>"
b" </Links>"
b" <TelephoneNumbers>"
b" <Count>2</Count>"
b" <TelephoneNumber>4158714245</TelephoneNumber>"
b" <TelephoneNumber>4352154439</TelephoneNumber>"
b" </TelephoneNumbers>"
b"</TNs>"
)
XML_RESPONSE_LINE_OPTION_ORDER = (
b"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
b"<LineOptionOrderResponse>"
b" <LineOptions>"
b" <CompletedNumbers>"
b" <TelephoneNumber>2013223685</TelephoneNumber>"
b" </CompletedNumbers>"
b" <Errors>"
b" <Error>"
b" <TelephoneNumber>5209072452</TelephoneNumber>"
b" <ErrorCode>5071</ErrorCode>"
b" <Description>"
b" Telephone number not available."
b" </Description>"
b" </Error>"
b" <Error>"
b" <TelephoneNumber>5209072451</TelephoneNumber>"
b" <ErrorCode>13518</ErrorCode>"
b" <Description>"
b" CNAM for telephone number is applied at the "
b" Location level and it is notapplicable at the TN "
b" level."
b" </Description>"
b" </Error>"
b" </Errors>"
b" </LineOptions>"
b"</LineOptionOrderResponse>"
)
XML_RESPONSE_LNP_CHECKER = (
b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
b"<NumberPortabilityResponse>"
b" <SupportedRateCenters />"
b" <UnsupportedRateCenters>"
b" <RateCenterGroup>"
b" <RateCenter>BALTIMORE</RateCenter>"
b" <City>BALTIMORE</City>"
b" <State>MD</State>"
b" <LATA>238</LATA>"
b" <TnList>"
b" <Tn>4109255199</Tn>"
b" <Tn>4104685864</Tn>"
b" </TnList>"
b" </RateCenterGroup>"
b" <RateCenterGroup>"
b" <RateCenter>SPARKSGLNC</RateCenter>"
b" <City>SPARKS GLENCOE</City>"
b" <State>MD</State>"
b" <LATA>238</LATA>"
b" <TnList>"
b" <Tn>4103431313</Tn>"
b" <Tn>4103431561</Tn>"
b" </TnList>"
b" </RateCenterGroup>"
b" </UnsupportedRateCenters>"
b" <PartnerSupportedRateCenters>"
b" <RateCenterGroup>"
b" <RateCenter>FT COLLINS</RateCenter>"
b" <City>FORT COLLINS</City>"
b" <State>CO</State>"
b" <LATA>656</LATA>"
b" <Tiers>"
b" <Tier>1</Tier>"
b" </Tiers>"
b" <TnList>"
b" <Tn>4109235436</Tn>"
b" </TnList>"
b" </RateCenterGroup>"
b" </PartnerSupportedRateCenters>"
b" <SupportedLosingCarriers>"
b" <LosingCarrierTnList>"
b" <LosingCarrierSPID>9998</LosingCarrierSPID>"
b" <LosingCarrierName>Carrier L3</LosingCarrierName>"
b" <LosingCarrierIsWireless>false</LosingCarrierIsWireless>"
b" <LosingCarrierAccountNumberRequired>false</LosingCarrierAccountNumberRequired>"
b" <LosingCarrierMinimumPortingInterval>5</LosingCarrierMinimumPortingInterval>"
b" <TnList>"
b" <Tn>4109255199</Tn>"
b" <Tn>4104685864</Tn>"
b" <Tn>4103431313</Tn>"
b" <Tn>4103431561</Tn>"
b" </TnList>"
b" </LosingCarrierTnList>"
b" </SupportedLosingCarriers>"
b" <UnsupportedLosingCarriers />"
b"</NumberPortabilityResponse>"
)
XML_RESPONSE_TN_RESERVATION_GET = (
b"<?xml version=\"1.0\"?>"
b"<ReservationResponse>"
b" <Reservation>"
b" <ReservationId>0099ff73-da96-4303</ReservationId>"
b" <AccountId>14</AccountId>"
b" <ReservationExpires>0</ReservationExpires>"
b" <ReservedTn>2512027430</ReservedTn>"
b" </Reservation>"
b"</ReservationResponse>"
)
XML_RESPONSE_TOTALS = (
b"<Quantity>"
b" <Count>4</Count>"
b"</Quantity>"
)
class ClassAccountTest(TestCase):
    """Test account mapping and resources.

    Every test mocks HTTP with requests_mock, feeding one of the canned
    XML_RESPONSE_* payloads above, and asserts that the Account resource tree
    maps elements to attributes (all values arrive as strings).
    """

    @classmethod
    def setUpClass(cls):
        # One shared client/account pair for the class; tests only stub HTTP,
        # so no per-test state needs resetting.
        cls._client = Client("http://foo", "bar", "bar", "qux")
        cls._account = Account(client=cls._client)

    @classmethod
    def tearDownClass(cls):
        del cls._client
        del cls._account

    def test_account_get(self):
        """GET account: scalar fields and nested structures are mapped."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url + self._account.get_xpath()
            m.get(url, content=XML_RESPONSE_ACCOUNT_GET)
            self._account.get()
            self.assertEqual(self._account.id, "123456")
            self.assertEqual(self._account.company_name, "Spam")
            self.assertEqual(self._account.account_type, "Ham")
            self.assertEqual(self._account.tiers.tier.items, ["0"])
            self.assertEqual(self._account.address.house_number, "900")
            self.assertEqual(self._account.contact.first_name, "Eggs")

    def test_available_numbers(self):
        """Plain number search returns the bare number list and a count."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.available_numbers.get_xpath()
            m.get(url, content=XML_RESPONSE_AVAILABLE_NUMBERS_GET)
            avail_numbers = self._account.available_numbers.list(
                {"state": "NJ"})
            self.assertEqual(avail_numbers.items,
                ["6093252507", "6093570994", "6093574598"])
            self.assertEqual(self._account.available_numbers.result_count,
                "3")

    def test_available_numbers_detail(self):
        """Detail search maps per-number records (city, LATA, vendor, ...)."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.available_numbers.get_xpath()
            m.get(url, content=XML_RESPONSE_AVAILABLE_NUMBERS_DETAIL_GET)
            avail_numbers = self._account.available_numbers.list(
                {"enableTNDetail": "true", "state": "NJ"})
            self.assertEqual(avail_numbers.items[0].city, "ALLENTOWN")
            self.assertEqual(avail_numbers.items[0].lata, "222")
            self.assertEqual(avail_numbers.items[1].full_number,"6093570994")
            self.assertEqual(avail_numbers.items[1].tier, "0")
            self.assertEqual(avail_numbers.items[2].vendor_id, "49")
            self.assertEqual(avail_numbers.items[2].vendor_name,
                "Bandwidth CLEC")
            self.assertEqual(self._account.available_numbers.result_count,"3")

    def test_available_numbers_error(self):
        """An HTTP 400 error payload surfaces as a RestError."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.available_numbers.get_xpath()
            m.get(url, content=XML_RESPONSE_AVAILABLE_NUMBERS_ERROR,
                status_code=400)
            with self.assertRaises(RestError):
                self._account.available_numbers.list(None)

    def test_disc_numbers(self):
        """Disconnected-number listing maps the TelephoneNumbers block."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.disconnected_numbers.get_xpath()
            m.get(url, content=XML_RESPONSE_DISCONNECTED_NUMBERS_GET)
            disc_numbers = self._account.disconnected_numbers.list(
                {"page": 1, "type": "x"})
            self.assertEqual(disc_numbers.items, ["4158714245","4352154439"])

    def test_disc_numbers_totals(self):
        """Totals sub-resource exposes the Count element."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.disconnected_numbers.totals.get_xpath()
            m.get(url, content=XML_RESPONSE_TOTALS)
            count = self._account.disconnected_numbers.totals.get().count
            self.assertEqual(count, "4")

    def test_in_service_numbers(self):
        """In-service listing shares the disconnected-numbers payload shape."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.in_service_numbers.get_xpath()
            m.get(url, content=XML_RESPONSE_DISCONNECTED_NUMBERS_GET)
            numbers = self._account.in_service_numbers.list({"state": "NJ"})
            self.assertEqual(numbers.items,["4158714245","4352154439"])
            self.assertEqual(self._account.in_service_numbers.total_count,"4")

    def test_in_service_numbers_totals(self):
        """Totals sub-resource of in-service numbers exposes Count."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.in_service_numbers.totals.get_xpath()
            m.get(url, content=XML_RESPONSE_TOTALS)
            count = self._account.in_service_numbers.totals.get().count
            self.assertEqual(count, "4")

    def test_line_option_orders(self):
        """POSTing a line-option order maps per-number errors in the reply."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.line_option_orders.get_xpath()
            m.post(url, content = XML_RESPONSE_LINE_OPTION_ORDER)
            self._account.line_option_orders.tn_line_options.add(
                {"telephone_number":"5209072453","calling_name_display":"off"}
            )
            response = self._account.line_option_orders.save()
            self.assertEqual(response.line_options.items[0].errors.error.\
                items[0].telephone_number, "5209072452")

    def test_lnpchecker(self):
        """LNP check maps rate-center groups and losing-carrier lists."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.lnpchecker.get_xpath(True)
            m.post(url, content = XML_RESPONSE_LNP_CHECKER)
            response = self._account.lnpchecker(["123456"])
            grp = response.unsupported_rate_centers.rate_center_group.items[0]
            self.assertEqual(grp.rate_center, "BALTIMORE")
            self.assertEqual(grp.city, "BALTIMORE")
            self.assertEqual(grp.state, "MD")
            self.assertEqual(grp.lata, "238")
            self.assertEqual(grp.tn_list.tn.items,["4109255199","4104685864"])
            grp = response.unsupported_rate_centers.rate_center_group.items[1]
            self.assertEqual(grp.rate_center, "SPARKSGLNC")
            self.assertEqual(grp.city, "SPARKS GLENCOE")
            self.assertEqual(grp.state, "MD")
            self.assertEqual(grp.lata, "238")
            self.assertEqual(grp.tn_list.tn.items,["4103431313","4103431561"])
            grp = response.partner_supported_rate_centers.rate_center_group.\
                items[0]
            self.assertEqual(grp.rate_center, "FT COLLINS")
            self.assertEqual(grp.city, "FORT COLLINS")
            self.assertEqual(grp.state, "CO")
            self.assertEqual(grp.lata, "656")
            self.assertEqual(grp.tn_list.tn.items, ["4109235436"])
            self.assertEqual(grp.tiers.tier.items, ["1"])
            grp = response.supported_losing_carriers.losing_carrier_tn_list
            self.assertEqual(grp.losing_carrier_spid, "9998")
            self.assertEqual(grp.losing_carrier_name, "Carrier L3")
            self.assertEqual(grp.losing_carrier_is_wireless, "false")
            self.assertEqual(grp.losing_carrier_account_number_required,
                "false")
            self.assertEqual(grp.losing_carrier_minimum_porting_interval,"5")
            self.assertEqual(grp.tn_list.tn.items,
                ["4109255199","4104685864","4103431313","4103431561"])

    def test_npa_nxx(self):
        """NPA-NXX search maps both result entries."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.available_npa_nxx.get_xpath()
            m.get(url, content=XML_RESPONSE_AVAILABLE_NPA_NXX_GET)
            npa = self._account.available_npa_nxx.list({"state": "CA"})
            self.assertEqual(len(npa.items), 2)
            self.assertEqual(npa.items[0].city, "COMPTON:COMPTON DA")
            self.assertEqual(npa.items[0].npa, "424")
            self.assertEqual(npa.items[0].nxx, "242")
            self.assertEqual(npa.items[1].quantity, "5")
            self.assertEqual(npa.items[1].state, "CA")

    def test_tn_reservation_delete(self):
        """DELETE on a reservation id issues the request without error."""
        res = self._account.tnreservation
        res.id = "123"
        url = self._account.client.config.url +\
            self._account.tnreservation.get_xpath()
        with requests_mock.Mocker() as m:
            m.delete(url, status_code = 200)
            res.delete()

    def test_tn_reservation_get(self):
        """GET reservation maps id, reserved TN, account id and expiry."""
        res = self._account.tnreservation
        res.id = "123"
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.tnreservation.get_xpath()
            m.get(url, content = XML_RESPONSE_TN_RESERVATION_GET)
            res.get("123")
            self.assertEqual(res.id, "0099ff73-da96-4303")
            self.assertEqual(res.reserved_tn, "2512027430")
            self.assertEqual(res.account_id, "14")
            self.assertEqual(res.reservation_expires, "0")

    def test_tn_reservation_save(self):
        """POSTing a new reservation picks the id up from the Location header."""
        with requests_mock.Mocker() as m:
            url = self._account.client.config.url +\
                self._account.tnreservation.get_xpath(True)
            m.post(url, headers={"location": url + "/1337"})
            res = self._account.tnreservation
            res.reserved_tn = "123456789"
            res.save()
            self.assertEqual(m.request_history[0].method, "POST")
            self.assertEqual(res.id, "1337")
            self.assertEqual(res.reserved_tn, "123456789")
# Run the test suite when executed directly (unittest.main imported above).
if __name__ == "__main__":
    main()
|
|
# -*- coding: utf-8 -*-
"""
OSM Deviation Finder - Web Interface
~~~~~~~~~~~~~~~~~~~~
Implementation of a web interface for the OSM Deviation Finder library.
It uses the flask microframework by Armin Ronacher
For more information see https://github.com/mitsuhiko/flask/
To interact with the GeoServer REST API, the GeoServer configuration client library by boundlessgeo is used, see:
https://github.com/boundlessgeo/gsconfig
On the client side it uses jquery.js, leaflet.js, nprogress.js, DataTables and the UIKit framework,
for further information see the README.md file.
:copyright: (c) 2015 by Martin Hochenwarter
:license: MIT
"""
__author__ = 'Martin Hochenwarter'
__version__ = '0.1'
import os
import shutil
import zipfile
import uuid
import socket
from osmdeviationfinder import OSMDeviationfinder, HarmonizeOptions, LinematchOptions, ResultOptions
from osgeo import ogr
from web import app, db
from models import User, DevMap
from flask import json, request, Blueprint, jsonify, redirect, url_for, render_template, Response, abort, make_response
from flask.ext.login import (current_user, login_required)
from werkzeug.utils import secure_filename
from geoserver.catalog import Catalog
#: Blueprint for deviation finder specific functions
devmap = Blueprint('devmap', __name__, template_folder='templates')

DEBUG = True
UPLOAD_FOLDER = 'web/uploads/'
# NOTE(review): 'rar' is accepted here but the upload handler only extracts
# zip archives -- confirm whether rar support was ever implemented.
ALLOWED_EXTENSIONS = set(['zip', 'rar', 'json', 'osm'])
# 3 MB request-size limit.  NOTE(review): the original comment claimed 32MB,
# which contradicts the value -- confirm the intended limit.
app.config['MAX_CONTENT_LENGTH'] = 3 * 1024 * 1024
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

#: Database connection info
# NOTE(review): credentials are hard-coded in source; consider moving to
# configuration/environment before deployment.
serverName = 'localhost'
database = 'odf'
port = '5432'
usr = 'martin'
pw = 'odf'
connectioninfo = "dbname='%s' host='%s' port='%s' user='%s' password='%s'" % (database, serverName, port, usr, pw)

#: GeoServer REST info
gs_url = 'http://localhost:8080/geoserver/'
gs_user = 'admin'
gs_password = 'geoserver'
gs_workspace = 'OSMDeviationMaps'
gs_store = 'osmdeviationmaps'
class Shapefile(object):
    """Lightweight record describing a shapefile found in an upload directory.

    Attributes mirror the constructor arguments: the file name, an optional
    reference object, and the directory the file lives in.
    """

    def __init__(self, name, ref, directory):
        self.name, self.ref, self.directory = name, ref, directory
class ShapefileColumns(object):
    """Wrapper holding a single column name of a shapefile attribute table."""
    def __init__(self, name):
        self.name = name
@devmap.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """This function handles the uid generation and zipfile (containing the shapefile) upload.

    GET request: a redirect to the index site is made, where the user can upload a file.
    POST request: first the file extension is validated, then a unique identifier is generated for the current
    upload.  This uid is stored in the database and a directory is created using the uid,
    in which the zip file gets extracted.  After that the import-to-database URL is sent to the user.

    NOTE(review): a POST whose file fails allowed_file() falls out of both
    branches and returns None (Flask raises a 500) -- confirm intended.
    """
    if request.method == 'POST':
        reffile = request.files['files[]']
        if reffile and allowed_file(reffile.filename):
            # Short 8-char uid; collision risk is accepted here.
            uid = str(uuid.uuid4())[:8]  # str(uuid.uuid4()) #.hex
            user = None
            if current_user.is_authenticated():
                user = current_user
                #user = User.query.filter_by(username='Guest').first()
            else:
                # Anonymous uploads are attributed to the shared Guest user.
                user = User.query.filter_by(username='Guest').first()
            dm = DevMap(uid, user)
            db.session.add(dm)
            db.session.commit()
            filename = secure_filename(reffile.filename)
            mapdir = os.path.join(app.config['UPLOAD_FOLDER'], uid)
            os.makedirs(mapdir)
            reffile.save(os.path.join(mapdir, filename))
            archive = os.path.join(mapdir, filename)
            # Extract the archive into the per-upload directory, then drop it.
            # NOTE(review): members are extracted as named by the client;
            # verify ZipFile.extract's path sanitization is sufficient here.
            zfile = zipfile.ZipFile(archive)
            for name in zfile.namelist():
                zfile.extract(name, mapdir)
            os.remove(archive)
            return url_for('devmap.import_to_db', uid=uid)
    else:
        return render_template('upload.html')
@devmap.route('/<uid>/import', methods=['GET', 'POST'])
def import_to_db(uid):
    """Function to import features from a layer of a shapefile into the database and calculation of the concavehull of
    the features in the table to use as a bounding polygon for the OverpassAPI request.

    GET request: The import site is returned, containing a set of the uploaded shapefiles.
    The user can then choose the shapefile to import.
    POST request: The chosen layer will be imported into a new table using the function layer_to_db
    from the OSMDeviationfinder class. This function will import the features and convert multigeometry features to
    single geometry features. After a successful import, the concavehull of the imported data is generated using the
    function get_concavehull of the OSMDeviationfinder class. The concavehull is saved for the current devmap in the
    xy (for the OverpassAPI) and yx (for leaflet.js) representation. After that, the osm data download site is returned.
    """
    error = None
    fdir = os.path.join(app.config['UPLOAD_FOLDER'], uid)
    fdata = dict()
    if request.method == 'POST':
        uid = uid.encode('ISO-8859-1')
        fdata['datasource'] = request.form['source']
        fdata['title'] = request.form['title']
        fdata['datalicense'] = request.form['license']
        fdata['shapefile'] = request.form['shapefile']
        fdata['wmsformat'] = request.form['wmsformat']
        fdata['wmsurl'] = request.form['wmsurl']
        fdata['wmslayer'] = request.form['wmslayer']
        # Validation: later checks overwrite earlier error messages.
        if len(fdata['datasource']) < 4:
            error = 'Please define a data source with at least 4 characters.'
        if len(fdata['datalicense']) < 3:
            # BUGFIX: message now matches the check (was the garbled
            # "Please a license with at least 2 characters.").
            error = 'Please define a license with at least 3 characters.'
        if len(fdata['title']) < 4:
            error = 'Please define a title with at least 4 characters.'
        if len(fdata['wmsurl']) > 1 or len(fdata['wmslayer']) > 1 or len(fdata['wmsformat']) > 1:
            # BUGFIX: the original read "not (fdata['wmsurl']) > 12", comparing
            # a str to an int (always True on Python 2), so this check could
            # never fire.  All three fields must now be plausibly filled.
            # NOTE(review): the length thresholds (12/3/12) are kept from the
            # original condition -- confirm them, esp. wmsformat ("image/png"
            # is shorter than 12).
            if not (len(fdata['wmsurl']) > 12 and len(fdata['wmslayer']) > 3 and len(fdata['wmsformat']) > 12):
                error = 'All fields for a custom WMS Basemap have to be filled.'
            if not 'image' in fdata['wmsformat']:
                error = 'Please define a correct image format eg. image/jpeg'
        else:
            # NOTE(review): the duplicate-title check only runs when no custom
            # WMS fields are supplied -- confirm this is intended.
            dm = DevMap.query.filter_by(title=fdata['title']).first()
            if dm and dm.uid != uid:
                error = 'The title "' + fdata['title'] + '" is already chosen. Please try another title.'
        if fdata['shapefile'] == 'No Shapefile found!':
            error = 'No shapefile was found.'
        if error is None:
            f = os.path.join(fdir, fdata['shapefile'])
            tablename = 'odf_'+uid+'_ref'
            shapefile = ogr.Open(f)
            devfinder = OSMDeviationfinder(connectioninfo)
            s = shapefile.GetLayerByIndex(0)
            # Import features (multi -> single geometry) into the ref table.
            devfinder.layer_to_db(s, tablename, True)
            concavehull = devfinder.get_concavehull(tablename)
            dm = DevMap.query.filter_by(uid=uid).first()
            # NOTE(review): evaluates as (authenticated and owner) or
            # guest-owned due to and/or precedence -- probably intended as
            # authenticated and (owner or guest); kept as-is.
            if current_user.is_authenticated() and dm.owner == current_user or dm.owner == User.query.filter_by(
                    username='Guest').first():
                # yx (lat,lon) ordering for leaflet.js, xy for the OverpassAPI.
                boundsyx = {'type': "Feature", 'properties':
                            {'uid': uid, 'title': fdata['title'], 'author': dm.owner.username, 'source': fdata['datasource']},
                            'geometry': {'type': "Polygon", 'coordinates': [concavehull[1]['coordinates'][0]]}}
                boundsxy = {'type': "Feature", 'properties':
                            {'uid': uid, 'title': fdata['title'], 'author': dm.owner.username, 'source': fdata['datasource']},
                            'geometry': {'type': "Polygon", 'coordinates': [concavehull[0]['coordinates'][0]]}}
                dm.boundsxy = boundsxy
                dm.boundsyx = boundsyx
                dm.datasource = fdata['datasource']
                dm.title = fdata['title']
                dm.datalicense = fdata['datalicense']
                dm.basemapwmsurl = fdata['wmsurl']
                dm.basemapwmslayer = fdata['wmslayer']
                dm.basemapwmsformat = fdata['wmsformat']
                db.session.add(dm)
                db.session.commit()
                return redirect(url_for('devmap.osm_download', uid=uid))
    # GET, or POST with a validation error: list the uploaded shapefiles.
    shapefiles = []
    for f in os.listdir(fdir):
        if f.endswith(".shp") and not f.startswith('.'):
            s = Shapefile(f, None, fdir)
            shapefiles.append(s)
    return render_template('import.html', shapefiles=shapefiles, uid=uid, error=error, fdata=fdata)
@devmap.route('/<uid>/osmdownload/', methods=['GET', 'POST'])
def osm_download(uid):
    """Function to download osm data.

    GET request: The osmdownload site is returned, which shows the bounding polygon for the selected layer and a form to
    choose the osm highway-types which should not be downloaded.
    POST request: The selected options in the request form and the bounding polygon coordinates are transformed to
    overpass query language. This data is used to call the osm_from_overpass function from the OSMDeviationfinder class,
    which will make an OverpassAPI query and dowload the returned osm data and yield the progress of the download back,
    which will be streamed to the client.
    """
    uid = uid.encode('ISO-8859-1')
    if request.method == 'POST':
        fdir = os.path.join(app.config['UPLOAD_FOLDER'], uid)
        f = os.path.join(fdir, str(uid)+'.osm')
        # Each checked form field becomes a highway-type exclusion filter.
        typesquery = ''
        for i in request.form:
            typesquery = typesquery + '["highway"!="' + i + '"]'
        dm = DevMap.query.filter_by(uid=uid).first()
        # Flatten the stored GeoJSON polygon into the space-separated
        # "lat lon lat lon ..." coordinate list Overpass QL expects.
        bbox = json.dumps(dm.boundsxy)
        bbox = bbox[bbox.find("[["):bbox.find("]]")+2].replace('[', '').replace(']', '').replace(',', '')
        devfinder = OSMDeviationfinder(connectioninfo)
        # Stream download progress chunks straight to the client.
        return Response(devfinder.osm_from_overpass(bbox, typesquery, f, uid),
                        mimetype='text/html')
    return render_template('osmdownload.html', uid=uid)
@devmap.route('/<uid>/harmonize/', methods=['GET', 'POST'])
def harmonize(uid):
    """This function is used to show and handle the harmonization options and process.

    GET request: Renders and returns a site showing harmonization options.
    POST request: Gets the harmonization options from the user and creates an object of the HarmonizeOptions class
    which holds the user's chosen and default options. The harmonize_datasets function from the OSMDeviationfinder class
    is called with the HarmonizeOptions object as parameter. The harmonize_datasets function uses 'yield' to return the
    progress, this is used to stream the progress to the client.
    """
    uid = uid.encode('ISO-8859-1')
    devfinder = OSMDeviationfinder(connectioninfo)
    dm = DevMap.query.filter_by(uid=uid).first()
    if request.method == 'POST':
        devfinder.db_source = ogr.Open(devfinder.dbconnectioninfo_ogr, 1)
        harmonization_options = HarmonizeOptions(uid)
        #: Keep column osm_id while processing
        harmonization_options.keepcolumns_t2 = {'osm_id': 'varchar'}
        # Absent form fields leave the HarmonizeOptions defaults untouched.
        if 'azimuthdifftolerance' in request.form:
            harmonization_options.azimuthdifftolerance = request.form['azimuthdifftolerance']
        if 'maxcheckpointanglediff' in request.form:
            harmonization_options.maxcheckpointanglediff = request.form['maxcheckpointanglediff']
        if 'searchradius' in request.form:
            harmonization_options.searchradius = request.form['searchradius']
        if 'presplitref' in request.form:
            harmonization_options.presplitref = True
        if 'presplitosm' in request.form:
            harmonization_options.presplitosm = True
        if 'harmonize' in request.form:
            harmonization_options.harmonize = True
        if 'cleanref' in request.form:
            harmonization_options.cleanref = True
        if 'cleanosm' in request.form:
            harmonization_options.cleanosm = True
        if 'cleandistance' in request.form:
            # One form value feeds both the osm and ref cleaning radii.
            harmonization_options.cleanosmradius = request.form['cleandistance']
            harmonization_options.cleanrefradius = request.form['cleandistance']
        if 'streetnamecol' in request.form:
            harmonization_options.streetnamecol = request.form['streetnamecol']
        if harmonization_options.streetnamecol == 'NoNameCol':
            # Reference data has no street-name column: create a dummy one.
            devfinder.create_nonamecolumn('odf_'+uid+'_ref')
        # Persist the effective options on the DevMap record for later display.
        dm.basetable = harmonization_options.basetable
        dm.harmonize = harmonization_options.harmonize
        dm.reftable = harmonization_options.reftable
        dm.osmtable = harmonization_options.osmtable
        dm.streetnamecol = harmonization_options.streetnamecol
        dm.outsuffix = harmonization_options.outsuffix
        dm.keepcolumns_t1 = harmonization_options.keepcolumns_t1
        dm.keepcolumns_t2 = harmonization_options.keepcolumns_t2
        dm.cleanref = harmonization_options.cleanref
        dm.cleanosm = harmonization_options.cleanosm
        dm.cleanrefradius = harmonization_options.cleanrefradius
        dm.cleanosmradius = harmonization_options.cleanosmradius
        dm.presplitref = harmonization_options.presplitref
        dm.presplitosm = harmonization_options.presplitosm
        dm.searchradius = harmonization_options.searchradius
        dm.azimuthdifftolerance = harmonization_options.azimuthdifftolerance
        dm.maxcheckpointanglediff = harmonization_options.maxcheckpointanglediff
        dm.max_roads_countdiff = harmonization_options.max_roads_countdiff
        dm.max_azdiff = harmonization_options.max_azdiff
        dm.max_distancediff = harmonization_options.max_distancediff
        db.session.add(dm)
        db.session.commit()
        # Stream harmonization progress (generator) to the client.
        return Response(devfinder.harmonize_datasets(harmonization_options), mimetype='text/html')
    # GET: offer the text columns of the ref table as street-name candidates.
    namecolumns = devfinder.get_textcolumns('odf_'+uid+'_ref')
    return render_template('harmonize.html', uid=uid, namecolumns=namecolumns, dm=dm)
@devmap.route('/<uid>/linematch/', methods=['GET', 'POST'])
def linematch(uid):
    """This function is used to show and handle the linematching options and process.

    GET request: Renders and returns a site showing linematching options.
    POST request: Gets the linematching options from the user and creates an object of the LinematchOptions class
    which holds the user's chosen and default options. The linematch_datasets function from the OSMDeviationfinder class
    is called with the LinematchOptions object as parameter. The linematch_datasets function uses 'yield' to return the
    progress, this is used to stream the progress to the client.
    """
    uid = uid.encode('ISO-8859-1')
    dm = DevMap.query.filter_by(uid=uid).first()
    if request.method == 'POST':
        devfinder = OSMDeviationfinder(connectioninfo)
        devfinder.db_source = ogr.Open(devfinder.dbconnectioninfo_ogr, 1)
        linematch_options = LinematchOptions(uid)
        # Keep column osm_id while processing.
        linematch_options.keepcolumns_t2 = {'osm_id': 'varchar'}
        # Absent form fields leave the LinematchOptions defaults untouched.
        if 'searchradius' in request.form:
            linematch_options.searchradius = request.form['searchradius']
        if 'maxpotentialmatches' in request.form:
            linematch_options.maxpotentialmatches = request.form['maxpotentialmatches']
        if 'minmatchingfeatlen' in request.form:
            linematch_options.minmatchingfeatlen = request.form['minmatchingfeatlen']
        if 'maxlengthdiffratio' in request.form:
            linematch_options.maxlengthdiffratio = request.form['maxlengthdiffratio']
        if 'maxanglediff' in request.form:
            linematch_options.maxanglediff = request.form['maxanglediff']
        if 'posdiffsegmentlength' in request.form:
            linematch_options.posdiffsegmentlength = request.form['posdiffsegmentlength']
        if 'hausdorffsegmentlength' in request.form:
            linematch_options.hausdorffsegmentlength = request.form['hausdorffsegmentlength']
        if 'maxazimuthdiff' in request.form:
            linematch_options.maxazimuthdiff = request.form['maxazimuthdiff']
        # NOTE(review): the form values below are stored on attributes named
        # *posdifftolength*, but the persistence block further down reads
        # *posdevtolength* attributes -- the user-supplied values may therefore
        # never reach the DevMap record.  Confirm against LinematchOptions.
        if 'maxmeanposdifftolengthratio' in request.form:
            linematch_options.maxmeanposdifftolength = request.form['maxmeanposdifftolengthratio']
        if 'minmeanposdifftolengthratio' in request.form:
            linematch_options.minmeanposdifftolength = request.form['minmeanposdifftolengthratio']
        if 'exportdevvec' in request.form:
            linematch_options.deviationvectorlayer = True
        else:
            linematch_options.deviationvectorlayer = False
        # Persist the effective options on the DevMap record for later display.
        dm.searchradius2 = linematch_options.searchradius
        dm.minmatchingfeatlen = linematch_options.minmatchingfeatlen
        dm.maxlengthdiffratio = linematch_options.maxlengthdiffratio
        dm.maxanglediff = linematch_options.maxanglediff
        dm.maxpotentialmatches = linematch_options.maxpotentialmatches
        dm.posdiffsegmentlength = linematch_options.posdiffsegmentlength
        dm.hausdorffsegmentlength = linematch_options.hausdorffsegmentlength
        dm.maxazimuthdiff = linematch_options.maxazimuthdiff
        dm.maxmeanposdevtolength = linematch_options.maxmeanposdevtolength
        dm.minmeanposdevtolength = linematch_options.minmeanposdevtolength
        dm.maxabsolutmeanposdev = linematch_options.maxabsolutmeanposdev
        dm.maxdeviation = linematch_options.maxdeviation
        db.session.add(dm)
        db.session.commit()
        # Stream linematching progress (generator) to the client.
        return Response(devfinder.linematch_datasets(linematch_options), mimetype='text/html')
    return render_template('linematch.html', uid=uid, dm=dm)
@devmap.route('/<uid>/finished/', methods=['GET', 'POST'])
def finished(uid):
    """Final step of the workflow: lets the owner (or Guest) set the map title
    and whether the map is publicly listed.

    GET request: renders the 'finished' page for the owner, otherwise
    redirects to the index.
    POST request: persists title/listed for the owner; other users get the
    page with an error marker.
    """
    uid = uid.encode('ISO-8859-1')
    if request.method == 'POST':
        dm = DevMap.query.filter_by(uid=uid).first()
        if dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
            title = request.form['title']
            listedmap = False
            if 'listed' in request.form:
                listedmap = True
            dm.title = title
            dm.listed = listedmap
            db.session.add(dm)
            db.session.commit()
            return render_template('finished.html', uid=uid)
        else:
            return render_template('finished.html', uid=uid, error="No User", dm=dm)
    else:
        dm = DevMap.query.filter_by(uid=uid).first()
        if dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
            return render_template('finished.html', uid=uid, dm=dm)
        else:
            return redirect(url_for('devmap.index'))
@devmap.route('/<uid>/results/', methods=['GET', 'POST'])
def results(uid):
    """This function is used to show and handle the result generation options and process.

    GET request: Renders and returns a site showing result generation options.
    POST request: Gets the result generation options from the user and creates an object of the ResultOptions class
    which holds the user's chosen and default options. The create_results function from the OSMDeviationfinder class
    is called with the ResultOptions object as parameter. The create_results function uses 'yield' to return the
    progress, this is used to stream the progress to the client.
    """
    uid = uid.encode('ISO-8859-1')
    if request.method == 'POST':
        devfinder = OSMDeviationfinder(connectioninfo)
        devfinder.db_source = ogr.Open(devfinder.dbconnectioninfo_ogr, 1)
        result_options = ResultOptions(uid)
        dm = DevMap.query.filter_by(uid=uid).first()
        # NOTE(review): as written, a POST by a non-owner falls through both
        # branches and the view returns None (Flask 500) -- confirm intended.
        if current_user.is_authenticated() and dm.owner == current_user \
                or dm.owner == User.query.filter_by(username='Guest').first():
            # Absent form fields leave the ResultOptions defaults untouched.
            if 'maxdevgrid' in request.form:
                result_options.maxdevgrid = True
            if 'posdevlines' in request.form:
                result_options.posdevlines = True
            if 'posdevlinedist' in request.form:
                result_options.posdevlinedist = request.form['posdevlinedist']
            if 'absdevgrid' in request.form:
                result_options.absdevgrid = True
            if 'matchingrategrid' in request.form:
                result_options.matchingrategrid = True
            if 'gridcellsize' in request.form:
                result_options.gridcellsize = request.form['gridcellsize']
            if 'unmatchedref' in request.form:
                result_options.unmatchedref = True
            if 'unmatchedrefminlen' in request.form:
                result_options.unmatchedrefminlen = request.form['unmatchedrefminlen']
            if 'unmatchedosm' in request.form:
                result_options.unmatchedosm = True
            if 'unmatchedosmminlen' in request.form:
                result_options.unmatchedosmminlen = request.form['unmatchedosmminlen']
            if 'matchedref' in request.form:
                result_options.matchedref = True
            if 'matchedrefminlen' in request.form:
                result_options.matchedrefminlen = request.form['matchedrefminlen']
            if 'matchedosm' in request.form:
                result_options.matchedosm = True
            if 'matchedosmminlen' in request.form:
                result_options.matchedosmminlen = request.form['matchedosmminlen']
            if 'minlevenshtein' in request.form:
                result_options.minlevenshtein = True
            if 'minlev' in request.form:
                result_options.minlev = request.form['minlev']
            if 'maxlevenshtein' in request.form:
                result_options.maxlevenshtein = True
            if 'maxlev' in request.form:
                result_options.maxlev = request.form['maxlev']
            # Keep track of created results
            dm.posdevlines = result_options.posdevlines
            dm.maxdevgrid = result_options.maxdevgrid
            dm.absdevgrid = result_options.absdevgrid
            dm.matchingrategrid = result_options.matchingrategrid
            dm.gridcellsize = result_options.gridcellsize
            dm.unmatchedref = result_options.unmatchedref
            dm.unmatchedosm = result_options.unmatchedosm
            dm.matchedref = result_options.matchedref
            dm.matchedosm = result_options.matchedosm
            dm.minlevenshtein = result_options.minlevenshtein
            dm.maxlevenshtein = result_options.maxlevenshtein
            db.session.add(dm)
            db.session.commit()
            # Stream result-generation progress (generator) to the client.
            return Response(devfinder.create_results(result_options), mimetype='text/html')
    else:
        return render_template('results.html', uid=uid)
@devmap.route('/<uid>/export/', methods=['GET', 'POST'])
def export(uid):
    """Show/handle export options and publish results via the GeoServer REST API.

    Uses the GeoServer configuration client library by boundlessgeo, see:
    https://github.com/boundlessgeo/gsconfig
    GeoServer has to be running, must have access to the database, and the
    styles should already be imported (see geoserver styles folder).

    GET request: renders and returns a site showing export options.
    POST request: (re)publishes the selected result layers as WM(T)S layers
    and records on the DevMap which layers are available.

    Remarks: This function will be redesigned in future versions.
    """
    uid = uid.encode('ISO-8859-1')
    if request.method != 'POST':
        dm = DevMap.query.filter_by(uid=uid).first()
        if dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
            return render_template('export.html', uid=uid, dm=dm)
        else:
            return redirect(url_for('devmap.index'))
    dm = DevMap.query.filter_by(uid=uid).first()
    if not (dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first()):
        return render_template('export.html', uid=uid, error="No User", dm=dm)
    dm.title = request.form['title']
    dm.listed = 'listed' in request.form
    cat = Catalog(gs_url + 'rest')
    cat.username = gs_user
    cat.password = gs_password
    try:
        ws = cat.get_workspace(gs_workspace)
    except socket.error as e:
        # GeoServer unreachable: keep the title/listed changes and report the error.
        db.session.add(dm)
        db.session.commit()
        return render_template('export.html', uid=uid, error=e, dm=dm)
    st = cat.get_store(gs_store, ws)

    def publish_layer(feattype, style):
        # Drop any stale layer/feature type, publish a fresh feature type and
        # attach the workspace style to the resulting layer.
        l = cat.get_layer(feattype)
        if l is not None:
            cat.delete(l)
        ft = cat.publish_featuretype(feattype, st, "EPSG:4326")
        if ft is not None:
            cat.delete(ft)
        ft = cat.publish_featuretype(feattype, st, "EPSG:4326")
        cat.save(ft)
        l = cat.get_layer(feattype)
        l._set_default_style(gs_workspace + ":" + style)
        cat.save(l)

    # (form field / table suffix, style name, DevMap flag attribute, flag value
    # used when the layer was not previously published).
    # NOTE(review): 'maxdevgrid' sets its flag True even without publishing --
    # kept from the original code, but it looks suspicious; every other layer
    # sets False in that case.
    layers = [
        ('maxdevgrid', 'maxdevgrid', 'wmsmaxdevgrid', True),
        ('posdevlines', 'posdevlines', 'wmsposdevlines', False),
        ('absdevgrid', 'absdevgrid', 'wmsabsdevgrid', False),
        ('matchingrategrid', 'matchingrategrid', 'wmsmatchingrategrid', False),
        ('unmatchedref', 'ReferenceLines', 'wmsunmatchedref', False),
        ('unmatchedosm', 'OSMLines', 'wmsunmatchedosm', False),
        ('matchedref', 'ReferenceLines', 'wmsmatchedref', False),
        ('matchedosm', 'OSMLines', 'wmsmatchedosm', False),
        ('minlevenshtein', 'ReferenceLines', 'wmsminlevenshtein', False),
        ('maxlevenshtein', 'ReferenceLines', 'wmsmaxlevenshtein', False),
    ]
    # BUGFIX: the unmatchedref/unmatchedosm branches used to read and write
    # dm.unmatchedref / dm.unmatchedosm (the result-option columns) instead of
    # the wms* flags used by every other branch and checked by delete().
    for form_key, style, flag, unpublished_value in layers:
        if form_key in request.form:
            if getattr(dm, flag):
                publish_layer('odf_' + uid + '_' + form_key, style)
                setattr(dm, flag, True)
            else:
                setattr(dm, flag, unpublished_value)
    db.session.add(dm)
    db.session.commit()
    return render_template('finished.html', uid=uid)
@devmap.route('/<uid>/wmsmap/', methods=['POST', 'GET'])
def wmsmap(uid):
    """Render a page that displays the map's WM(T)S layers served by GeoServer."""
    dev_map = DevMap.query.filter_by(uid=uid).first()
    wms_base = gs_url + gs_workspace + '/wms'
    return render_template('wmsmap.html', dm=dev_map, uid=uid, baseurl=wms_base)
@devmap.route('/<uid>/delete/', methods=['GET', 'POST'])
def delete(uid):
    """Delete a deviation map: its GeoServer layers, database tables and files.

    GET request: renders and returns a site showing delete options.
    POST request: removes the parts of the deviation map selected in the form.
    'deleteall' additionally drops every database table of the map, removes
    its upload folder and deletes the DevMap record itself.
    GeoServer layers are removed through the GeoServer configuration client
    library.
    """
    uid = uid.encode('ISO-8859-1')
    if request.method == 'POST':
        dm = DevMap.query.filter_by(uid=uid).first()
        if current_user.is_authenticated() and dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
            if (dm.wmsposdevlines or dm.wmsmaxdevgrid or dm.wmsabsdevgrid or dm.wmsmatchingrategrid or
                dm.wmsunmatchedref or dm.wmsunmatchedosm or dm.wmsmatchedref or dm.wmsmatchedosm or
                dm.wmsminlevenshtein or dm.wmsmaxlevenshtein):
                cat = Catalog(gs_url + 'rest')
                cat.username = gs_user
                cat.password = gs_password
                try:
                    ws = cat.get_workspace(gs_workspace)
                except socket.error as e:
                    detail = 'GeoServer is not available. Make sure that it is running and the connection is ok.'
                    return render_template('error.html', err=e, detail=detail)
                st = cat.get_store(gs_store, ws)
                delete_all = 'deleteall' in request.form
                # (form field, table suffix, DevMap flag attribute)
                wms_layers = [
                    ('deletemaxdevgrid', 'maxdevgrid', 'wmsmaxdevgrid'),
                    ('deleteposdevlines', 'posdevlines', 'wmsposdevlines'),
                    ('deleteabsdevgrid', 'absdevgrid', 'wmsabsdevgrid'),
                    ('deletematchingrategrid', 'matchingrategrid', 'wmsmatchingrategrid'),
                    ('deleteunmatchedref', 'unmatchedref', 'wmsunmatchedref'),
                    ('deleteunmatchedosm', 'unmatchedosm', 'wmsunmatchedosm'),
                    ('deletematchedref', 'matchedref', 'wmsmatchedref'),
                    ('deletematchedosm', 'matchedosm', 'wmsmatchedosm'),
                    ('deleteminlevenshtein', 'minlevenshtein', 'wmsminlevenshtein'),
                    ('deletemaxlevenshtein', 'maxlevenshtein', 'wmsmaxlevenshtein'),
                ]
                for form_key, suffix, flag in wms_layers:
                    if form_key in request.form or delete_all:
                        feattype = 'odf_' + uid + '_' + suffix
                        if getattr(dm, flag):
                            l = cat.get_layer(feattype)
                            if l is not None:
                                cat.delete(l)
                            if suffix == 'absdevgrid':
                                # Kept from the original implementation: this
                                # branch also removes the feature type itself.
                                ft = cat.publish_featuretype(feattype, st, "EPSG:4326")
                                if ft is not None:
                                    cat.delete(ft)
                            # BUGFIX: the unmatchedref branch used to assign
                            # dm.deleteunmatchedref, so wmsunmatchedref was
                            # never cleared after deletion.
                            setattr(dm, flag, False)
                if delete_all:
                    folder = secure_filename(uid)
                    folder = os.path.join(app.config['UPLOAD_FOLDER'], folder)
                    shutil.rmtree(folder, True)
                    table_suffixes = [
                        'ref', 'ref_presplitted', 'ref_splitted', 'found',
                        'ref_junctions', 'ref_points', 'ref_cutpoints',
                        'ref_cutcheckpoints', 'osm', 'osm_presplitted',
                        'osm_splitted', 'osm_junctions', 'osm_points',
                        'osm_cutpoints', 'osm_cutcheckpoints', 'unmatchedref',
                        'unmatchedosm', 'minlevenshtein', 'maxlevenshtein',
                        'grid', 'maxdevgrid', 'matchingrategrid',
                        'deviationlines', 'junction_deviationlines',
                    ]
                    if DEBUG:
                        # Intermediate tables only kept in debug mode.
                        table_suffixes += [
                            'osm_presplitted_cutcheckpoints',
                            'osm_presplitted_cutpoints',
                            'osm_presplitted_junctions',
                            'osm_presplitted_points',
                            'ref_corrected',
                            'ref_corrected_presplitted',
                            'ref_corrected_presplitted_cutcheckpoints',
                            'ref_corrected_presplitted_cutpoints',
                            'ref_corrected_presplitted_junction_devvec',
                            'ref_corrected_presplitted_junctions',
                            'ref_corrected_presplitted_points',
                            'result',
                        ]
                    for suffix in table_suffixes:
                        db.engine.execute('drop table if exists odf_' + uid + '_' + suffix)
            # NOTE(review): tables/folder are only dropped when at least one WMS
            # flag was set; a map that never exported layers keeps its tables.
            # Preserved from the original implementation -- confirm intent.
            if 'deleteall' not in request.form:
                db.session.add(dm)
                db.session.commit()
                return render_template('delete.html', uid=uid, dm=dm, error=None)
            else:
                db.session.delete(dm)
                db.session.commit()
                return redirect(url_for('basic.index'))
        else:
            return render_template('error.html', err='You are not allowed to delete this map!')
    else:
        dm = DevMap.query.filter_by(uid=uid).first()
        if current_user.is_authenticated() and dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
            return render_template('delete.html', uid=uid, dm=dm, error=None)
        else:
            return render_template('error.html', err='You are not allowed to delete this map!')
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
|
|
import datetime
import os
import re
import time
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
import Cookie
# Feature-detect the quirks of this interpreter's Cookie library, then either
# reuse Cookie.SimpleCookie directly or define a patched subclass below.
# httponly support exists in Python 2.6's Cookie library,
# but not in Python 2.4 or 2.5.
_morsel_supports_httponly = Cookie.Morsel._reserved.has_key('httponly')
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = Cookie.SimpleCookie()
_tc.load('f:oo')
_cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output()
if _morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names:
    SimpleCookie = Cookie.SimpleCookie
else:
    if not _morsel_supports_httponly:
        class Morsel(Cookie.Morsel):
            # Adds 'httponly' support to pre-2.6 Morsel implementations.
            def __setitem__(self, K, V):
                K = K.lower()
                if K == "httponly":
                    if V:
                        # The superclass rejects httponly as a key,
                        # so we jump to the grandparent.
                        super(Cookie.Morsel, self).__setitem__(K, V)
                else:
                    super(Morsel, self).__setitem__(K, V)
            def OutputString(self, attrs=None):
                # Append the valueless 'httponly' attribute by hand.
                output = super(Morsel, self).OutputString(attrs)
                if "httponly" in self:
                    output += "; httponly"
                return output
    class SimpleCookie(Cookie.SimpleCookie):
        # Patched SimpleCookie working around the quirks detected above.
        if not _morsel_supports_httponly:
            def __set(self, key, real_value, coded_value):
                # Mangled to _SimpleCookie__set; mirrors BaseCookie.__set but
                # builds our httponly-aware Morsel.
                M = self.get(key, Morsel())
                M.set(key, real_value, coded_value)
                dict.__setitem__(self, key, M)
            def __setitem__(self, key, value):
                rval, cval = self.value_encode(value)
                self.__set(key, rval, cval)
        if not _cookie_encodes_correctly:
            def value_encode(self, val):
                # Some browsers do not support quoted-string from RFC 2109,
                # including some versions of Safari and Internet Explorer.
                # These browsers split on ';', and some versions of Safari
                # are known to split on ', '. Therefore, we encode ';' and ','
                # SimpleCookie already does the hard work of encoding and decoding.
                # It uses octal sequences like '\\012' for newline etc.
                # and non-ASCII chars. We just make use of this mechanism, to
                # avoid introducing two encoding schemes which would be confusing
                # and especially awkward for javascript.
                # NB, contrary to Python docs, value_encode returns a tuple containing
                # (real val, encoded_val)
                val, encoded = super(SimpleCookie, self).value_encode(val)
                encoded = encoded.replace(";", "\\073").replace(",","\\054")
                # If encoded now contains any quoted chars, we need double quotes
                # around the whole string.
                if "\\" in encoded and not encoded.startswith('"'):
                    encoded = '"' + encoded + '"'
                return val, encoded
        if not _cookie_allows_colon_in_names:
            def load(self, rawdata, ignore_parse_errors=False):
                # With ignore_parse_errors=True, invalid cookies are collected
                # and dropped instead of raising CookieError.
                if ignore_parse_errors:
                    self.bad_cookies = []
                    self._BaseCookie__set = self._loose_set
                super(SimpleCookie, self).load(rawdata)
                if ignore_parse_errors:
                    self._BaseCookie__set = self._strict_set
                    for key in self.bad_cookies:
                        del self[key]
        _strict_set = Cookie.BaseCookie._BaseCookie__set
        def _loose_set(self, key, real_value, coded_value):
            # Best-effort setter used while loading with ignore_parse_errors.
            try:
                self._strict_set(key, real_value, coded_value)
            except Cookie.CookieError:
                self.bad_cookies.append(key)
                dict.__setitem__(self, key, None)
class CompatCookie(SimpleCookie):
    # Deprecated alias kept for backwards compatibility; warns on use.
    def __init__(self, *args, **kwargs):
        super(CompatCookie, self).__init__(*args, **kwargs)
        import warnings
        warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.",
                      PendingDeprecationWarning)
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
# Characters reserved in URIs (RFC 3986); kept for reference by URL-handling code.
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
# Matches URLs that already carry an absolute http/https scheme (case-insensitive).
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
    """Raised by view code to signal that a resource was not found (HTTP 404)."""
    pass
class HttpRequest(object):
    """A basic HTTP request.

    Subclasses (e.g. WSGI/mod_python request classes) are expected to
    populate GET/POST/COOKIES/META/FILES and to set ``self._stream`` to a
    file-like source of the request body before the read()/readline()
    interface (or raw_post_data) is used.
    """

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []
    # BUGFIX: default at class level so raw_post_data/_load_post_and_files do
    # not raise AttributeError on a request whose subclass never set it;
    # read()/readline() flip it to True on the instance.
    _read_started = False

    def __init__(self):
        self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
        self.path = ''
        self.path_info = ''
        self.method = None

    def __repr__(self):
        return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
            (pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
            pformat(self.META))

    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
            'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            if server_port != (self.is_secure() and '443' or '80'):
                host = '%s:%s' % (host, server_port)
        return host

    def get_full_path(self):
        """Returns the path plus an escaped query string, if any."""
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')

    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no location is specified, the absolute URI is built on
        ``request.get_full_path()``.
        """
        if not location:
            location = self.get_full_path()
        if not absolute_http_url_re.match(location):
            current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
                                         self.get_host(), self.path)
            location = urljoin(current_uri, location)
        return iri_to_uri(location)

    def is_secure(self):
        """True when the request came in over HTTPS (per the environment)."""
        return os.environ.get("HTTPS") == "on"

    def is_ajax(self):
        """True when the X-Requested-With header marks an XMLHttpRequest."""
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    def _set_encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post

    def _get_encoding(self):
        return self._encoding

    encoding = property(_get_encoding, _set_encoding)

    def _initialize_handlers(self):
        # Build the default upload-handler chain from settings.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]

    def _set_upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers

    def _get_upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    upload_handlers = property(_get_upload_handlers, _set_upload_handlers)

    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        # Freeze the handler list: altering it mid-parse would corrupt state.
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning = "You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()

    def _get_raw_post_data(self):
        # Reads and caches the whole body; afterwards self._stream is rewound
        # onto the cached copy so POST parsing can still consume it.
        if not hasattr(self, '_raw_post_data'):
            if self._read_started:
                raise Exception("You cannot access raw_post_data after reading from request's data stream")
            try:
                content_length = int(self.META.get('CONTENT_LENGTH', 0))
            except (ValueError, TypeError):
                # If CONTENT_LENGTH was empty string or not an integer, don't
                # error out. We've also seen None passed in here (against all
                # specs, but see ticket #8259), so we handle TypeError as well.
                content_length = 0
            if content_length:
                self._raw_post_data = self.read(content_length)
            else:
                self._raw_post_data = self.read()
            self._stream = StringIO(self._raw_post_data)
        return self._raw_post_data
    raw_post_data = property(_get_raw_post_data)

    def _mark_post_parse_error(self):
        # Leave POST/FILES empty but defined, and flag the parse failure.
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True

    def _load_post_and_files(self):
        # Populates self._post and self._files
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_raw_post_data'):
            self._mark_post_parse_error()
            return
        if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
            if hasattr(self, '_raw_post_data'):
                # Use already read data
                data = StringIO(self._raw_post_data)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except:
                # An error occured while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occured. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        else:
            self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()

    ## File-like and iterator interface.
    ##
    ## Expects self._stream to be set to an appropriate source of bytes by
    ## a corresponding request subclass (WSGIRequest or ModPythonRequest).
    ## Also when request data has already been read by request.POST or
    ## request.raw_post_data, self._stream points to a StringIO instance
    ## containing that data.

    def read(self, *args, **kwargs):
        self._read_started = True
        return self._stream.read(*args, **kwargs)

    def readline(self, *args, **kwargs):
        self._read_started = True
        return self._stream.readline(*args, **kwargs)

    def xreadlines(self):
        # Lazy line iterator over the body stream.
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines

    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict that takes a query string when initialized.
    This is immutable unless you create a copy of it.
    Values retrieved from this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string, mutable=False, encoding=None):
        MultiValueDict.__init__(self)
        if not encoding:
            # *Important*: do not import settings any earlier because of note
            # in core.handlers.modpython.
            from django.conf import settings
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
            self.appendlist(force_unicode(key, encoding, errors='replace'),
                            force_unicode(value, encoding, errors='replace'))
        self._mutable = mutable
    def _get_encoding(self):
        # Lazily resolve the charset from settings on first access.
        if self._encoding is None:
            # *Important*: do not import settings at the module level because
            # of the note in core.handlers.modpython.
            from django.conf import settings
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    def _set_encoding(self, value):
        self._encoding = value
    encoding = property(_get_encoding, _set_encoding)
    def _assert_mutable(self):
        # Every mutating method funnels through this guard.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        value = str_to_unicode(value, self.encoding)
        MultiValueDict.__setitem__(self, key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        # Copies are created mutable, regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in dict.items(self):
            dict.__setitem__(result, key, value)
        return result
    def __deepcopy__(self, memo):
        import django.utils.copycompat as copy
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
        MultiValueDict.setlist(self, key, list_)
    def setlistdefault(self, key, default_list=()):
        self._assert_mutable()
        if key not in self:
            self.setlist(key, default_list)
        return MultiValueDict.getlist(self, key)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        value = str_to_unicode(value, self.encoding)
        MultiValueDict.appendlist(self, key, value)
    def update(self, other_dict):
        self._assert_mutable()
        f = lambda s: str_to_unicode(s, self.encoding)
        if hasattr(other_dict, 'lists'):
            # Another MultiValueDict-like object: merge value lists key by key.
            for key, valuelist in other_dict.lists():
                for value in valuelist:
                    MultiValueDict.update(self, {f(key): f(value)})
        else:
            d = dict([(f(k), f(v)) for k, v in other_dict.items()])
            MultiValueDict.update(self, d)
    def pop(self, key, *args):
        self._assert_mutable()
        return MultiValueDict.pop(self, key, *args)
    def popitem(self):
        self._assert_mutable()
        return MultiValueDict.popitem(self)
    def clear(self):
        self._assert_mutable()
        MultiValueDict.clear(self)
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        default = str_to_unicode(default, self.encoding)
        return MultiValueDict.setdefault(self, key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
           example::
               >>> q = QueryDict('', mutable=True)
               >>> q['next'] = '/a&b/'
               >>> q.urlencode()
               'next=%2Fa%26b%2F'
               >>> q.urlencode(safe='/')
               'next=/a%26b/'
        """
        output = []
        if safe:
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = smart_str(k, self.encoding)
            output.extend([encode(k, smart_str(v, self.encoding))
                           for v in list_])
        return '&'.join(output)
def parse_cookie(cookie):
    """Convert a raw cookie string (or a BaseCookie) into a plain dict
    mapping cookie names to their values.  Unparseable input yields {}."""
    if cookie == '':
        return {}
    if isinstance(cookie, Cookie.BaseCookie):
        c = cookie
    else:
        try:
            c = SimpleCookie()
            c.load(cookie, ignore_parse_errors=True)
        except Cookie.CookieError:
            # Invalid cookie
            return {}
    return dict((key, c.get(key).value) for key in c.keys())
class BadHeaderError(ValueError):
    """Raised when a header value contains characters (e.g. newlines) that
    would allow HTTP header injection."""
    pass
class HttpResponse(object):
"""A basic HTTP response, with content and dictionary-accessed headers."""
status_code = 200
def __init__(self, content='', mimetype=None, status=None,
content_type=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
if mimetype:
content_type = mimetype # For backwards compatibility
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
if not isinstance(content, basestring) and hasattr(content, '__iter__'):
self._container = content
self._is_string = False
else:
self._container = [content]
self._is_string = True
self.cookies = SimpleCookie()
if status:
self.status_code = status
self['Content-Type'] = content_type
def __str__(self):
"""Full HTTP message, including headers."""
return '\n'.join(['%s: %s' % (key, value)
for key, value in self._headers.values()]) \
+ '\n\n' + self.content
def _convert_to_ascii(self, *values):
"""Converts all values to ascii strings."""
for value in values:
if isinstance(value, unicode):
try:
value = value.encode('us-ascii')
except UnicodeError, e:
e.reason += ', HTTP response headers must be in US-ASCII format'
raise
else:
value = str(value)
if '\n' in value or '\r' in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
yield value
def __setitem__(self, header, value):
header, value = self._convert_to_ascii(header, value)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return self._headers.has_key(header.lower())
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
def _get_content(self):
if self.has_header('Content-Encoding'):
return ''.join(self._container)
return smart_str(''.join(self._container), self._charset)
def _set_content(self, value):
self._container = [value]
self._is_string = True
content = property(_get_content, _set_content)
    def __iter__(self):
        # Expose the response as an iterable of body chunks.  Iteration
        # state lives on the instance, so the response is not re-entrant.
        self._iterator = iter(self._container)
        return self
def next(self):
chunk = self._iterator.next()
if isinstance(chunk, unicode):
chunk = chunk.encode(self._charset)
return str(chunk)
def close(self):
if hasattr(self._container, 'close'):
self._container.close()
# The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
if not self._is_string:
raise Exception("This %s instance is not writable" % self.__class__)
self._container.append(content)
def flush(self):
pass
def tell(self):
if not self._is_string:
raise Exception("This %s instance cannot tell its position" % self.__class__)
return sum([len(chunk) for chunk in self._container])
class HttpResponseRedirect(HttpResponse):
    # 302 Found: temporary redirect to the URL supplied at construction.
    status_code = 302
    def __init__(self, redirect_to):
        super(HttpResponseRedirect, self).__init__()
        # iri_to_uri makes the target safe to emit in a Location header.
        self['Location'] = iri_to_uri(redirect_to)
class HttpResponsePermanentRedirect(HttpResponse):
    # 301 Moved Permanently: like HttpResponseRedirect but cacheable.
    status_code = 301
    def __init__(self, redirect_to):
        super(HttpResponsePermanentRedirect, self).__init__()
        # iri_to_uri makes the target safe to emit in a Location header.
        self['Location'] = iri_to_uri(redirect_to)
class HttpResponseNotModified(HttpResponse):
    # 304: resource unchanged since the client's conditional request.
    status_code = 304
class HttpResponseBadRequest(HttpResponse):
    # 400: the request was malformed.
    status_code = 400
class HttpResponseNotFound(HttpResponse):
    # 404: no resource at the requested URL.
    status_code = 404
class HttpResponseForbidden(HttpResponse):
    # 403: the caller is not allowed to access the resource.
    status_code = 403
class HttpResponseNotAllowed(HttpResponse):
    # 405: the HTTP method is not supported for this URL.  RFC 2616
    # requires an Allow header listing the methods that are.
    status_code = 405
    def __init__(self, permitted_methods):
        super(HttpResponseNotAllowed, self).__init__()
        self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
    # 410: the resource existed but was permanently removed.
    status_code = 410
class HttpResponseServerError(HttpResponse):
    # 500: unhandled server-side failure.
    status_code = 500
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
    "Return the request's host; kept only for backwards compatibility."
    host = request.get_host()
    return host
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
    """
    Converts basestring objects to unicode, using the given encoding.
    Illegally encoded input characters are replaced with the Unicode
    "unknown" codepoint (U+FFFD).

    Any non-string object is returned unchanged.
    """
    if not isinstance(s, str):
        return s
    # 'replace' substitutes U+FFFD for undecodable bytes instead of raising.
    return unicode(s, encoding, 'replace')
|
|
#!/usr/bin/env python
#
# Copyright 2017 Brocade Communications Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Matthew Geldert (mgeldert@brocade.com), Brocade Communications Systems,Inc.
#
from driver_common import vTMDeviceDriverCommon, logging_wrapper
from neutron_lbaas.common.exceptions import LbaasException
from oslo_config import cfg
from oslo_log import log as logging
from services_director import ServicesDirector
from threading import Thread
from vtm import vTM
from time import sleep, time
from traceback import format_exc
LOG = logging.getLogger(__name__)
class BrocadeDeviceDriverV2(vTMDeviceDriverCommon):
    """
    Services Director Unmanaged Version.

    Maps Neutron LBaaS v2 objects onto Brocade vTM instances spawned as
    Nova VMs and licensed/proxied through a Brocade Services Director.
    Two deployment models are supported:

    * PER_TENANT       - one vTM (cluster) shared by a tenant; each VIP
                         becomes a TrafficIP group on that vTM.
    * PER_LOADBALANCER - a dedicated vTM instance per loadbalancer.
    """

    def __init__(self, plugin):
        # REST client for the Services Director cluster; endpoint and
        # credentials come from the oslo.config settings groups.
        self.services_director = ServicesDirector(
            "https://{}:{}/api/tmcm/{}".format(
                cfg.CONF.lbaas_settings.service_endpoint_address,
                cfg.CONF.services_director_settings.rest_port,
                cfg.CONF.services_director_settings.api_version
            ),
            cfg.CONF.services_director_settings.username,
            cfg.CONF.services_director_settings.password,
            connectivity_test_url="https://{}:{}/api/tmcm/1.5".format(
                cfg.CONF.lbaas_settings.service_endpoint_address,
                cfg.CONF.services_director_settings.rest_port
            )
        )
        super(BrocadeDeviceDriverV2, self).__init__()
        LOG.info(_("\nBrocade vTM LBaaS module initialized."))

    #################
    # LOADBALANCERS #
    #################

    @logging_wrapper
    def create_loadbalancer(self, lb):
        """
        Ensures a vTM instance is instantiated for the service.
        If the deployment model is PER_LOADBALANCER, a new vTM instance
        will always be spawned by this call. If the deployment model is
        PER_TENANT, a new instance will only be spawned if one does not
        already exist for the tenant.
        """
        self._assert_not_mgmt_network(lb.vip_subnet_id)
        deployment_model = self._get_setting(
            lb.tenant_id, "lbaas_settings", "deployment_model"
        )
        hostname = self._get_hostname(lb)
        if deployment_model == "PER_TENANT":
            if not self.openstack_connector.vtm_exists(
                    lb.tenant_id, hostname):
                self._spawn_vtm(hostname, lb)
                sleep(5)
            elif not self.openstack_connector.vtm_has_subnet_port(hostname, lb):
                # Existing tenant vTM, but it has no port on this VIP's
                # subnet yet - plumb one in before configuring the VIP.
                vtm = self._get_vtm(hostname)
                self._attach_subnet_port(vtm, hostname, lb)
            self.update_loadbalancer(lb, None)
        elif deployment_model == "PER_LOADBALANCER":
            self._spawn_vtm(hostname, lb)

    @logging_wrapper
    def update_loadbalancer(self, lb, old):
        """
        Creates or updates a TrafficIP group for the loadbalancer VIP address.
        The VIP is added to the allowed_address_pairs of the vTM's
        Neutron port to enable it to receive traffic to this address.
        NB. This function only has a purpose in PER_TENANT deployments!
        """
        deployment_model = self._get_setting(
            lb.tenant_id, "lbaas_settings", "deployment_model"
        )
        if deployment_model == "PER_TENANT":
            hostname = self._get_hostname(lb)
            vtm = self._get_vtm(hostname)
            tip_config = {"properties": {
                "basic": {
                    "enabled": lb.admin_state_up,
                    "ipaddresses": [lb.vip_address],
                    "machines": vtm.get_nodes_in_cluster(),
                    "note": lb.name
                }
            }}
            vtm.tip_group.create(lb.id, config=tip_config)
            self._touch_last_modified_timestamp(vtm)
            # Only touch Neutron ports when the VIP is new or has changed.
            if not old or lb.vip_address != old.vip_address:
                port_ids = self.openstack_connector.get_server_port_ids(
                    lb.tenant_id, hostname
                )
                self.openstack_connector.add_ip_to_ports(
                    lb.vip_address, port_ids
                )

    @logging_wrapper
    def delete_loadbalancer(self, lb):
        """
        Deletes the listen IP from a vTM.
        In the case of PER_LOADBALANCER deployments, this involves destroying
        the whole vTM instance. In the case of a PER_TENANT deployment, it
        involves deleting the TrafficIP Group associated with the VIP address.
        When the last TrafficIP Group has been deleted, the instance is
        destroyed.
        """
        deployment_model = self._get_setting(
            lb.tenant_id, "lbaas_settings", "deployment_model"
        )
        hostname = self._get_hostname(lb)
        if deployment_model == "PER_TENANT":
            vtm = self._get_vtm(hostname)
            vtm.tip_group.delete(lb.id)
            self._touch_last_modified_timestamp(vtm)
            if not vtm.tip_group.list():
                LOG.debug(_(
                    "\ndelete_loadbalancer({}): "
                    "last loadbalancer deleted; destroying vTM".format(lb.id)
                ))
                self._destroy_vtm(hostname, lb)
            else:
                # Delete subnet port if subnet no longer required
                if self.openstack_connector.subnet_in_use(lb) is False:
                    self._detach_subnet_port(vtm, hostname, lb)
                # Remove allowed_address_pairs entry from remaining ports
                port_ids = self.openstack_connector.get_server_port_ids(
                    lb.tenant_id, hostname
                )
                self.openstack_connector.delete_ip_from_ports(
                    lb.vip_address, port_ids
                )
        elif deployment_model == "PER_LOADBALANCER":
            self._destroy_vtm(hostname, lb)

    #############
    # LISTENERS #
    #############

    @logging_wrapper
    def update_listener(self, listener, old):
        """Create/update a virtual server, binding it to the right IPs."""
        listen_on_settings = {}
        deployment_model = self._get_setting(
            listener.tenant_id, "lbaas_settings", "deployment_model"
        )
        hostname = self._get_hostname(listener.loadbalancer)
        if deployment_model == "PER_TENANT":
            # Shared vTM: listen only on this loadbalancer's TrafficIP group.
            listen_on_settings['listen_on_traffic_ips'] = [
                listener.loadbalancer.id
            ]
            listen_on_settings['listen_on_any'] = False
        elif deployment_model == "PER_LOADBALANCER":
            # Dedicated vTM: the instance owns the VIP, listen on all IPs.
            listen_on_settings['listen_on_traffic_ips'] = []
            listen_on_settings['listen_on_any'] = True
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).update_listener(
            listener, old, vtm, listen_on_settings
        )
        self._touch_last_modified_timestamp(vtm)

    @logging_wrapper
    def delete_listener(self, listener):
        """Delete the virtual server backing *listener*."""
        deployment_model = self._get_setting(
            listener.tenant_id, "lbaas_settings", "deployment_model"
        )
        hostname = self._get_hostname(listener.loadbalancer)
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).delete_listener(listener, vtm)
        self._touch_last_modified_timestamp(vtm)

    #########
    # POOLS #
    #########

    @logging_wrapper
    def update_pool(self, pool, old):
        """Create/update the vTM pool backing the LBaaS pool."""
        deployment_model = self._get_setting(
            pool.tenant_id, "lbaas_settings", "deployment_model"
        )
        if deployment_model == "PER_TENANT":
            hostname = self._get_hostname(pool.root_loadbalancer)
        elif deployment_model == "PER_LOADBALANCER":
            # A pool may hang off a loadbalancer directly or via a listener.
            if pool.loadbalancer is not None:
                hostname = self._get_hostname(pool.loadbalancer)
            else:
                hostname = self._get_hostname(
                    pool.listener.loadbalancer
                )
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).update_pool(pool, old, vtm)
        self._touch_last_modified_timestamp(vtm)

    @logging_wrapper
    def delete_pool(self, pool):
        """Delete the vTM pool backing the LBaaS pool."""
        hostname = self._get_hostname(pool.loadbalancer)
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).delete_pool(
            pool, vtm
        )
        self._touch_last_modified_timestamp(vtm)

    ############
    # MONITORS #
    ############

    @logging_wrapper
    def update_healthmonitor(self, monitor, old):
        """Create/update the vTM health monitor."""
        hostname = self._get_hostname(
            monitor.root_loadbalancer
        )
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).update_healthmonitor(
            monitor, old, vtm
        )
        self._touch_last_modified_timestamp(vtm)

    @logging_wrapper
    def delete_healthmonitor(self, monitor):
        """Delete the vTM health monitor."""
        hostname = self._get_hostname(monitor.root_loadbalancer)
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).delete_healthmonitor(
            monitor, vtm
        )
        self._touch_last_modified_timestamp(vtm)

    ###############
    # L7 POLICIES #
    ###############

    @logging_wrapper
    def update_l7_policy(self, policy, old):
        """Create/update the vTM rule set implementing the L7 policy."""
        hostname = self._get_hostname(policy.root_loadbalancer)
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).update_l7_policy(policy, old, vtm)
        self._touch_last_modified_timestamp(vtm)

    @logging_wrapper
    def delete_l7_policy(self, policy):
        """Delete the vTM rule set implementing the L7 policy."""
        hostname = self._get_hostname(policy.root_loadbalancer)
        vtm = self._get_vtm(hostname)
        super(BrocadeDeviceDriverV2, self).delete_l7_policy(policy, vtm)
        self._touch_last_modified_timestamp(vtm)

    #########
    # STATS #
    #########

    @logging_wrapper
    def stats(self, loadbalancer):
        """Return traffic statistics for *loadbalancer*."""
        deployment_model = self._get_setting(
            loadbalancer.tenant_id, "lbaas_settings", "deployment_model"
        )
        hostname = self._get_hostname(loadbalancer)
        vtm = self._get_vtm(hostname)
        if deployment_model == "PER_TENANT":
            # Shared instance: filter stats down to this VIP only.
            return super(BrocadeDeviceDriverV2, self).stats(
                vtm, loadbalancer.vip_address
            )
        elif deployment_model == "PER_LOADBALANCER":
            # Fix: this branch previously tested the undefined attribute
            # self.lb_deployment_model and raised AttributeError.
            return super(BrocadeDeviceDriverV2, self).stats(vtm)

    ########
    # MISC #
    ########

    def _touch_last_modified_timestamp(self, vtm):
        """Record the time of the last config change on the vTM itself."""
        timestamp = str(int(time() * 1000))
        vtm.extra_file.create("last_update", file_text=timestamp)

    def _get_hostname(self, lb):
        """Derive the Nova VM hostname for the vTM serving *lb*."""
        identifier = self.openstack_connector.get_identifier(lb)
        return "vtm-{}".format(identifier)

    def _get_services_director(self):
        """
        Gets available instance of Brocade Services Director from the cluster.
        """
        # Three back-to-back connectivity attempts (no back-off).
        for _ in range(3):
            if self.services_director.test_connectivity():
                return self.services_director
        raise Exception("Could not contact Services Director")

    def _get_vtm(self, hostname):
        """
        Gets available instance of Brocade vTM from a Services Director.
        """
        # A list/tuple of hostnames means "first cluster member to answer".
        if isinstance(hostname, list) or isinstance(hostname, tuple):
            for host in hostname:
                try:
                    return self._get_vtm(host)
                except Exception:
                    pass
            raise Exception("Could not contact vTM instance")
        services_director = self._get_services_director()
        url = "{}/instance/{}/tm/{}".format(
            services_director.instance_url,
            hostname,
            cfg.CONF.vtm_settings.api_version
        )
        # Retry with a linearly growing delay (0, 1, 2, 3, 4 seconds).
        for i in xrange(5):
            vtm = vTM(
                url,
                cfg.CONF.services_director_settings.username,
                cfg.CONF.services_director_settings.password,
                connectivity_test_url="{}/instance/{}/tm/{}".format(
                    services_director.connectivity_test_url,
                    hostname,
                    cfg.CONF.vtm_settings.api_version
                )
            )
            try:
                if not vtm.test_connectivity():
                    raise Exception("")
                return vtm
            except Exception:
                pass
            sleep(i)
        raise Exception("Could not contact vTM instance")

    def _assert_not_mgmt_network(self, subnet_id):
        """Refuse to create a VIP on the management network."""
        network_id = self.openstack_connector.get_network_for_subnet(subnet_id)
        if network_id == cfg.CONF.lbaas_settings.management_network:
            raise Exception("Specified subnet is part of management network")

    def _attach_subnet_port(self, vtm, hostname, lb):
        """Attach a Neutron port for *lb*'s subnet and configure the NIC."""
        # Create and attach a new Neutron port to the instance
        port = self.openstack_connector.attach_port(hostname, lb)
        # Configure the interface on the vTM
        mgmt_ip = self.openstack_connector.get_mgmt_ip(lb.tenant_id, hostname)
        tm_settings = vtm.traffic_manager.get(mgmt_ip)
        iface_list = tm_settings.appliance__if
        # Calculate the interface name that will be used: fill the first
        # gap in the existing ethN numbering, else append a new one.
        used_iface_numbers = sorted([
            int(iface['name'][3:]) for iface in iface_list
        ])
        next_if = None
        for i, iface in enumerate(used_iface_numbers):
            if iface > i:
                next_if = "eth{}".format(i)
                break
        if next_if is None:
            next_if = "eth{}".format(len(iface_list))
        # Configure the interface on the vTM
        tm_settings.appliance__if.append({
            "name": next_if,
            "mtu": cfg.CONF.vtm_settings.mtu
        })
        tm_settings.appliance__ip.append({
            "name": next_if,
            "addr": port['fixed_ips'][0]['ip_address'],
            "mask": self.openstack_connector.get_subnet_netmask(
                lb.vip_subnet_id),
            "isexternal": False
        })
        tm_settings.update()
        # Configure return-path routing for the new port
        ip, mac = self.openstack_connector.get_subnet_gateway(
            lb.vip_subnet_id
        )
        if ip is not None and mac is not None:
            return_paths = vtm.global_settings.ip__appliance_returnpath
            if {"mac": mac, "ipv4": ip} not in return_paths:
                return_paths.append({"mac": mac, "ipv4": ip})
                vtm.global_settings.ip__appliance_returnpath = return_paths
                vtm.global_settings.update()

    def _detach_subnet_port(self, vtm, hostname, lb):
        """Remove the Neutron port and NIC config for *lb*'s subnet."""
        # Detach and delete Neutron port from the instance
        port_ip_address = self.openstack_connector.detach_port(hostname, lb)
        mgmt_ip = self.openstack_connector.get_mgmt_ip(lb.tenant_id, hostname)
        tm_settings = vtm.traffic_manager.get(mgmt_ip)
        # Get the name of the interface to delete
        iface_list = tm_settings.appliance__ip
        iface_to_delete = None
        for iface in iface_list:
            if iface['addr'] == port_ip_address:
                iface_to_delete = iface['name']
                break
        if iface_to_delete is None:
            raise Exception(_("No interface configuration found"))
        # Delete the "ip" entry for the interface
        new_iface_list = [
            iface for iface in iface_list
            if iface['name'] != iface_to_delete
        ]
        tm_settings.appliance__ip = new_iface_list
        # Delete the "if" entry for the interface
        iface_list = tm_settings.appliance__if
        new_iface_list = [
            iface for iface in iface_list if iface['name'] != iface_to_delete
        ]
        tm_settings.appliance__if = new_iface_list
        tm_settings.update()
        # Remove return-path routing for the old port
        ip, mac = self.openstack_connector.get_subnet_gateway(
            lb.vip_subnet_id
        )
        return_paths = vtm.global_settings.ip__appliance_returnpath
        new_return_paths = [
            return_path for return_path in return_paths
            if return_path['mac'] != mac and return_path['ipv4'] != ip
        ]
        vtm.global_settings.ip__appliance_returnpath = new_return_paths
        vtm.global_settings.update()

    def _spawn_vtm(self, hostname, lb):
        """
        Creates a vTM instance as a Nova VM.
        The VM is registered with Services Director to provide licensing and
        configuration proxying.
        """
        # Track created resources so they can be rolled back on error.
        port_ids = []
        security_groups = []
        vms = []
        # Bound partway through the try block; guarded during rollback.
        services_director = None
        try:  # For rolling back objects if an error occurs
            password = self._generate_password()
            if cfg.CONF.lbaas_settings.management_mode == "FLOATING_IP":
                port, sec_grp, mgmt_ip = self.openstack_connector.create_port(
                    lb, hostname, create_floating_ip=True
                )
                ports = {"data": port, "mgmt": None}
                port_ids.append(port['id'])
                security_groups = [sec_grp]
            elif cfg.CONF.lbaas_settings.management_mode == "MGMT_NET":
                data_port, sec_grp, junk = self.openstack_connector.create_port(
                    lb, hostname
                )
                (mgmt_port, mgmt_sec_grp, mgmt_ip) = self.openstack_connector.create_port(
                    lb, hostname, mgmt_port=True
                )
                ports = {"data": data_port, "mgmt": mgmt_port}
                security_groups = [sec_grp, mgmt_sec_grp]
                port_ids.append(data_port['id'])
                port_ids.append(mgmt_port['id'])
            # Register instance record...
            bandwidth = self._get_setting(
                lb.tenant_id, "services_director_settings", "bandwidth"
            )
            feature_pack = self._get_setting(
                lb.tenant_id, "services_director_settings", "feature_pack"
            )
            services_director = self._get_services_director()
            instance = services_director.unmanaged_instance.create(
                lb.id,
                tag=hostname,
                admin_username=cfg.CONF.vtm_settings.username,
                admin_password=password,
                management_address=mgmt_ip,
                rest_address="{}:{}".format(
                    mgmt_ip, cfg.CONF.vtm_settings.rest_port
                ),
                rest_enabled=False,
                owner=lb.tenant_id,
                bandwidth=int(bandwidth),
                stm_feature_pack=feature_pack
            )
            instance.start()
            LOG.debug(_(
                "\nvTM {} registered with Services Director".format(hostname)
            ))
            # Start instance...
            vm = self.openstack_connector.create_vtm(hostname, lb, password, ports)
            vms.append(vm['id'])
            LOG.info(
                _("\nvTM {} created for tenant {}".format(
                    hostname, lb.tenant_id
                ))
            )
            # Block until the instance has booted (or the poller times out).
            poll_thread = PollInstance(instance, hostname, services_director)
            poll_thread.start()
            if poll_thread.join() is False:
                raise Exception(
                    "vTM instance {} failed to boot... Timed out".format(
                        hostname
                    )
                )
        except Exception:
            # Roll back whatever was created.  Guard the Services Director
            # handle: it is unbound when the failure happened during port
            # creation (the original code raised NameError here, masking
            # the real error).
            if services_director is not None:
                try:
                    services_director.unmanaged_instance.delete(hostname)
                except Exception:
                    pass
            self.openstack_connector.clean_up(
                lb.tenant_id,
                instances=vms,
                security_groups=security_groups,
                ports=port_ids
            )
            # Bare re-raise preserves the original traceback (was `raise e`).
            raise

    def _destroy_vtm(self, hostname, lb):
        """
        Destroys the vTM Nova VM.
        The vTM is "deleted" in Services Director (this flags the instance
        rather than actually deleting it from the database).
        """
        self.openstack_connector.destroy_vtm(hostname, lb)
        LOG.debug(_("\nvTM {} destroyed".format(hostname)))
        services_director = self._get_services_director()
        services_director.unmanaged_instance.delete(hostname)
        LOG.debug(_("\nInstance {} deactivated".format(hostname)))
class PollInstance(Thread):
    """Background thread that waits for a freshly spawned vTM to come up.

    Once the instance answers, its Services Director record is switched to
    REST-enabled and given an FLA license.  ``join()`` returns True on
    success, False on timeout.
    """

    class ConnectivityTestFailedError(Exception):
        # Internal marker: the instance is not answering yet.
        pass

    def __init__(self, instance, hostname, services_director, *args, **kwargs):
        self.instance = instance
        self.hostname = hostname
        self.services_director = services_director
        # Result reported by join(): True once the instance is reachable.
        self._return = False
        super(PollInstance, self).__init__(*args, **kwargs)

    def run(self):
        # Poll for completion of initial configuration...
        url = "{}/instance/{}/tm/{}".format(
            self.services_director.connectivity_test_url,
            self.hostname,
            cfg.CONF.vtm_settings.api_version
        )
        vtm = vTM(
            url,
            cfg.CONF.services_director_settings.username,
            cfg.CONF.services_director_settings.password
        )
        # Up to 100 attempts, 5 seconds apart (~8 minutes total).
        for counter in xrange(100):
            try:
                if not vtm.test_uuid_set():
                    raise self.ConnectivityTestFailedError()
                self.instance.rest_enabled = True
                self.instance.license_name = \
                    cfg.CONF.services_director_settings.fla_license
                self.instance.update()
                sleep(5)  # Needed to ensure TIP groups are always created
                self._return = True
                break
            except self.ConnectivityTestFailedError:
                pass
            sleep(5)

    def join(self, timeout=None):
        # Accept and forward the optional timeout so Thread.join()'s
        # contract is preserved (the original override dropped it).
        super(PollInstance, self).join(timeout)
        return self._return
|
|
'Creates two csv data files parsed from existing JSON data'
from bs4 import BeautifulSoup
import urllib2
import json
import csv
import time
import requests
import os
import unicodedata
import logging
from urlparse import urlparse
def load_data(file_name):
    ' Returns a list from csv data '
    # Binary mode is the Python 2 csv idiom this script targets.
    with open(file_name, 'rb') as f:
        return [row for row in csv.reader(f)]
#---------------------------------------------------------------------------------------------------------------------------------------------#
# Module-level accumulators: filled by get_data_from_post() and consumed by
# write_into_csv(); indexed in post order.
# categories array to be fetched from the ScoopWhoop url source page of
# each post
categories = []
# 2 D array containing array of keywords of each post
keywords = []
#---------------------------------------------------------------------------------------------------------------------------------------------#
def get_page_links(data):
    ' Returns a list of ScoopWhoop page urls to be used to get categories and keywords '
    # Pull the "link" field out of every post record, preserving order.
    return [post['link'] for post in data]
#---------------------------------------------------------------------------------------------------------------------------------------------#
def get_data_from_post(pages):
    ' Fills global arrays categories[] and keywords[] from page URLs '
    global categories, keywords, no_of_images, images
    passes = 0
    for page in pages:
        # Comma-separated keyword string scraped from the page's meta tags.
        keys = ""
        source = urllib2.urlopen(page).read()
        soup = BeautifulSoup(source, 'html.parser')
        # One pass over the meta tags collects both category and keywords.
        for meta in soup.find_all('meta'):
            if meta.get('property') == 'category':
                categories.append(str(meta.get('content')))
            if meta.get('name') == 'keywords':
                keys = str(meta.get('content'))
        # Fall back to 'others' when the page declared no category.
        if len(categories) != passes + 1:
            categories.append('others'.encode("utf-8"))
        # Split the keyword string at commas and trim whitespace.
        keywords.append([token.strip() for token in keys.split(',')])
        passes += 1
#---------------------------------------------------------------------------------------------------------------------------------------------#
def get_index_from_json(data, key, value):
    ' Returns an index reading JSON data to find the value at the particular key '
    # enumerate() replaces the Python-2-only xrange(len(...)) index scan.
    # Falls off the loop and returns None when no entry matches, exactly
    # like the original.
    for idx, entry in enumerate(data['insights']['data']):
        if entry[key] == value:
            return idx
def get_int_month(month_str):
    ' Returns from 1-12 the month number from a three letter coded string '
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    # enumerate(..., 1) replaces the range(len(...)) index scan; returns
    # None on no match, matching the original fall-through behaviour.
    for number, name in enumerate(months, 1):
        if name == month_str:
            return number
def process_pub_date(pub_date):
    ' Returns a list containing year month day hour mins extracted from a string of published date'
    # NOTE(review): pub_date[5:7] is a two-character slice, but
    # get_int_month() compares against three-letter names ("Jan", ...).
    # For an ISO-style "YYYY-MM-DDTHH:MM" timestamp this yields None for
    # the month - confirm the actual created_time format from the API.
    ymdhm = []
    ymdhm.append(int(pub_date[0:4]))            # year,   chars 0-3
    ymdhm.append(get_int_month(pub_date[5:7]))  # month,  chars 5-6
    ymdhm.append(int(pub_date[8:10]))           # day,    chars 8-9
    ymdhm.append(int(pub_date[11:13]))          # hour,   chars 11-12
    ymdhm.append(int(pub_date[14:16]))          # minute, chars 14-15
    return ymdhm
def get_author(pdata, i):
    ' Returns author of a post '
    post = pdata[i]
    return post['userData'][0]['display_name']
def calc_ctr(d):
    ' Returns Click-Through Rate i.e. clicks per post reach rate '
    # Reach: number of unique users the post was shown to.
    idx = get_index_from_json(d, 'name', 'post_impressions_unique')
    if d['insights']['data'][idx]['values'][0]['value']:
        reach = d['insights']['data'][idx]['values'][0]['value']
    else:
        reach = 0
    sum_clicks = 0
    # Sum every click type reported under post_consumptions_by_type.
    idx = get_index_from_json(d, 'name', 'post_consumptions_by_type')
    for clicks in d['insights']['data'][idx]['values'][0]['value']:
        sum_clicks = float(d['insights']['data'][idx]['values'][
            0]['value'][clicks]) + sum_clicks
    # Guard on reach alone: the original only special-cased "both zero",
    # so a post with clicks but zero reach raised ZeroDivisionError.
    if reach == 0:
        return 0
    return format((sum_clicks / reach) * 100, '.2f')
def get_no_of_abusive_words(soup):
    ' Returns number of abusive words in article content '
    # `with` guarantees the handle is closed (the original leaked it if an
    # exception fired); a set gives O(1) membership tests.
    with open('abusive.txt') as blacklist_file:
        abusive_words = set(blacklist_file.read().split('\n'))
    # Count every occurrence (not just distinct words), as before.
    return sum(1 for word in soup.get_text().split() if word in abusive_words)
def get_article_content(pdata, i):
    ' Returns the raw HTML article content of post i. '
    post = pdata[i]
    return post["data"]['article_content']
def get_no_of_images(soup):
    ' Returns number of images in a post '
    return len(soup.find_all('img'))
def get_no_of_videos(soup):
    ' Returns number of videos in a post '
    # Embedded videos appear as iframe tags in the article HTML.
    return len(soup.find_all('iframe'))
def get_heading_length(pdata, i):
    ' Returns number of characters in the title of a post '
    title = pdata[i]['data']['title']
    return len(title)
def get_ga_data(ga_data, data, i):
    ''' Aggregates Google Analytics metrics for post i: returns
    [pageviews, uniquePageviews, avgTimeOnPage, newUsers, bounceRate]. '''
    ga_data_ = []
    link = data[i]['link']
    link = urlparse(link).path.strip('/')
    pageviews = 0
    uniquePageviews = 0
    avgTimeOnPage = 0
    newUsers = 0
    bounceRate = 0
    # print() function-call form and range() work identically on
    # Python 2 for these uses and keep the module importable on Python 3
    # (the original `print i` / xrange were Python-2-only).
    print(i)
    print(link)
    c = 0
    # Skip the header row; sum metrics from every GA row whose URL path
    # is contained in this post's link.
    for j in range(1, len(ga_data)):
        slug = urlparse(ga_data[j][0])
        path = slug.path.strip('/')
        if path in link and path != '':
            c = c + 1
            if c == 1:
                print(path)
            pageviews = pageviews + int(ga_data[j][1])
            uniquePageviews = uniquePageviews + int(ga_data[j][2])
            avgTimeOnPage = avgTimeOnPage + float(ga_data[j][3])
            newUsers = newUsers + int(ga_data[j][4])
            bounceRate = bounceRate + float(ga_data[j][5])
    ga_data_.append(pageviews)
    ga_data_.append(uniquePageviews)
    ga_data_.append(avgTimeOnPage)
    ga_data_.append(newUsers)
    ga_data_.append(bounceRate)
    return ga_data_
def write_into_csv(data, pdata, owriter, owriter1, ga_data_csv):
    ' Writes into two csv files fb_posts_data.csv and keywords_data.csv parsing the JSON data file'
    # Header fixed to match the 22 values written per data row below: the
    # original header omitted 'hour', 'mins' and 'no_of_videos', so every
    # column from 'hour' onwards was misaligned in the output CSV.
    owriter.writerow(['id', 'name', 'category', 'author', 'likes', 'shares',
                      'comments', 'ctr', 'year', 'month', 'day', 'hour', 'mins',
                      'no_of_images', 'no_of_videos', 'head_len',
                      'no_of_abusive_words', 'pageviews', 'uniquePageviews',
                      'avgTimeOnPage', 'newUsers', 'bounceRate'])
    owriter1.writerow(['id', 'keywords', 'likes', 'shares', 'comments', 'ctr'])
    # Key and Search Pattern to search for the index
    key = 'name'
    search_pattern = "post_stories_by_action_type"
    category_idx = 0
    row = 0
    ga_data = load_data(ga_data_csv)
    # Fill data.csv rows for each post
    for i in range(len(data)):
        ctr = calc_ctr(data[i])
        # Use this index value to get likes, comments and shares of each post
        idx = get_index_from_json(data[i], key, search_pattern)
        if 'name' in data[i]:
            name = data[i]['name']
            # Strip accents/non-ASCII so the CSV stays plain ASCII.
            name = unicodedata.normalize(
                'NFKD', name).encode('ascii', 'ignore')
        else:
            name = ''
        # dict.get() replaces the `in ....keys()` checks; same defaults.
        action_counts = data[i]['insights']['data'][idx]['values'][0]['value']
        like = action_counts.get('like', 0)
        share = action_counts.get('share', 0)
        comment = action_counts.get('comment', 0)
        ga_data_ = get_ga_data(ga_data, data, i)
        pageviews = ga_data_[0]
        uniquePageviews = ga_data_[1]
        avgTimeOnPage = ga_data_[2]
        newUsers = ga_data_[3]
        bounceRate = ga_data_[4]
        ymdhm = process_pub_date(data[i]['created_time'])
        year = ymdhm[0]
        month = ymdhm[1]
        day = ymdhm[2]
        hour = ymdhm[3]
        mins = ymdhm[4]
        if pdata[i]['status'] == "1":
            # Post content was fetched successfully: derive content metrics.
            author = get_author(pdata, i)
            source = get_article_content(pdata, i)
            soup = BeautifulSoup(source, 'html.parser')
            content = soup
            # Tables skew word/image counts, so drop them first.
            if soup.table:
                soup.table.decompose()
            no_of_images = get_no_of_images(soup)
            no_of_videos = get_no_of_videos(soup)
            head_len = get_heading_length(pdata, i)
            no_of_abusive_words = get_no_of_abusive_words(soup)
        else:
            # Content unavailable: emit neutral defaults.
            author = 'Unknown'
            head_len = 0
            no_of_images = 0
            no_of_videos = 0
            no_of_abusive_words = 0
        owriter.writerow([
            data[i]['id'],
            name,
            categories[category_idx],
            author,
            like,
            share,
            comment,
            ctr,
            year,
            month,
            day,
            hour,
            mins,
            no_of_images,
            no_of_videos,
            head_len,
            no_of_abusive_words,
            pageviews,
            uniquePageviews,
            avgTimeOnPage,
            newUsers,
            bounceRate])
        # One keywords_data.csv row per keyword of this post.
        for column in range(len(keywords[row])):
            owriter1.writerow([
                data[i]['id'],
                keywords[row][column],
                like,
                share,
                comment,
                ctr])
        row = row + 1
        category_idx = category_idx + 1
#---------------------------------------------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
    # Refresh loop: rebuild both CSVs from the JSON dump every two hours.
    while True:
        logging.basicConfig(filename='api.log', level=logging.DEBUG)
        logging.debug('\nloading JSON data. . .\n')
        try:
            start_time = time.time()
            '''r= requests.get('http://10.2.1.35:8087/')
            data = r.json()'''
            '''with open('old_data.json') as f:
                old_dt = json.load(f)'''
            with open('data1.json') as f:
                data = json.load(f)
            '''for d in old_dt:
                data.append(d)'''
            # Fetch the ScoopWhoop post record for each non-Facebook link.
            post_data = []
            for d in data:
                link = d['link']
                parsed_uri = urlparse( link )
                domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
                # Facebook-hosted posts have no article API record.
                if 'facebook' in domain:
                    post_data.append({"status": "0"})
                    continue
                # Strip any trailing slash before hitting the API.
                link = parsed_uri.path
                if link[len(link)-1] == '/':
                    link = link[:len(link)-1]
                #print link
                post_r = requests.get(
                    'http://www.scoopwhoop.com/api/v1/' + link)
                post_data.append(post_r.json())
            ga_data_csv = "csv_latest.csv"
            csv_file = open('fb_posts_data.csv', 'w')
            csv_file1 = open('keywords_data.csv', 'w')
            owriter = csv.writer(csv_file)
            owriter1 = csv.writer(csv_file1)
            logging.debug('get page links. . .\n')
            pages = get_page_links(data)
            logging.debug('loading categories and keywords. . .\n')
            get_data_from_post(pages)
            logging.debug('writing into csv files. . .\n')
            write_into_csv(data, post_data, owriter, owriter1, ga_data_csv)
            logging.debug('data loaded successfully!\n')
            csv_file.close()
            csv_file1.close()
            logging.debug('Time elapsed : ' + str(time.time() - start_time) + ' s.\n')
            # Hand the fresh CSVs off to the downstream preparation step.
            os.system('python prep_data.py add')
        except Exception as e:
            # Best-effort: log the failure and retry on the next cycle.
            logging.error(str(e))
        time.sleep(7200)
|
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Low level interface - see UnRARDLL\UNRARDLL.TXT
from __future__ import generators
import ctypes
import ctypes.wintypes
import os
import os.path
import re
import time
import sys
from .rar_exceptions import *
# On Python 3.4+, dump tracebacks if a hard crash occurs inside the DLL.
if sys.version_info > (3,3):
    import faulthandler
    faulthandler.enable()
# str/bytes shims: the DLL speaks bytes, callers speak native strings.
if sys.version_info[0] >= 3:
    def string_from_bytes(s):
        return s.decode(sys.getdefaultencoding())
    def bytes_from_string(s):
        return s.encode(sys.getdefaultencoding())
else:
    def string_from_bytes(s):
        return s
    def bytes_from_string(s):
        return s
# unrar.dll error codes (see UnRARDLL\UNRARDLL.TXT).
ERAR_END_ARCHIVE = 10
ERAR_NO_MEMORY = 11
ERAR_BAD_DATA = 12
ERAR_BAD_ARCHIVE = 13
ERAR_UNKNOWN_FORMAT = 14
ERAR_EOPEN = 15
ERAR_ECREATE = 16
ERAR_ECLOSE = 17
ERAR_EREAD = 18
ERAR_EWRITE = 19
ERAR_SMALL_BUF = 20
ERAR_UNKNOWN = 21
ERAR_MISSING_PASSWORD = 22
# Archive open modes for RAROpenArchiveDataEx.
RAR_OM_LIST = 0
RAR_OM_EXTRACT = 1
# Per-file processing operations.
RAR_SKIP = 0
RAR_TEST = 1
RAR_EXTRACT = 2
# Volume-change callback modes.
RAR_VOL_ASK = 0
RAR_VOL_NOTIFY = 1
RAR_DLL_VERSION = 3
# enum UNRARCALLBACK_MESSAGES
UCM_CHANGEVOLUME = 0
UCM_PROCESSDATA = 1
UCM_NEEDPASSWORD = 2
# Pick the DLL matching the interpreter's pointer width.
architecture_bits = ctypes.sizeof(ctypes.c_voidp) * 8
dll_name = "unrar.dll"
if architecture_bits == 64:
    dll_name = "x64\\unrar64.dll"
# Patterns recognising the three multi-volume archive naming schemes.
volume_naming1 = re.compile("[.]r([0-9]{2})$")
volume_naming2 = re.compile("[.]([0-9]{3})[.]rar$")
volume_naming3 = re.compile("[.]part([0-9]+)[.]rar$")
# Prefer the DLL bundled next to this module; fall back to the system path.
try:
    unrar = ctypes.WinDLL(
        os.path.join(os.path.split(__file__)[0], 'UnRARDLL', dll_name))
except WindowsError:
    unrar = ctypes.WinDLL(dll_name)
class RAROpenArchiveDataEx(ctypes.Structure):
    """ctypes mirror of unrar.dll's RAROpenArchiveDataEx struct, with a
    64 KiB buffer pre-attached to receive the archive comment."""
    def __init__(self, ArcName=None, ArcNameW=u'', OpenMode=RAR_OM_LIST):
        # create_string_buffer replaces the deprecated c_buffer alias.
        # The reference is kept on self so the buffer outlives DLL calls
        # that write through the raw _CmtBuf address.
        self.CmtBuf = ctypes.create_string_buffer(64 * 1024)
        ctypes.Structure.__init__(self, ArcName=ArcName, ArcNameW=ArcNameW,
                                  OpenMode=OpenMode,
                                  _CmtBuf=ctypes.addressof(self.CmtBuf),
                                  CmtBufSize=ctypes.sizeof(self.CmtBuf))
    _fields_ = [
        ('ArcName', ctypes.c_char_p),
        ('ArcNameW', ctypes.c_wchar_p),
        ('OpenMode', ctypes.c_uint),
        ('OpenResult', ctypes.c_uint),
        ('_CmtBuf', ctypes.c_voidp),
        ('CmtBufSize', ctypes.c_uint),
        ('CmtSize', ctypes.c_uint),
        ('CmtState', ctypes.c_uint),
        ('Flags', ctypes.c_uint),
        ('Reserved', ctypes.c_uint * 32),
    ]
class RARHeaderDataEx(ctypes.Structure):
    # Mirrors the RARHeaderDataEx C struct (UNRARDLL.TXT); filled in by
    # RARReadHeaderEx for each archive member.
    def __init__(self):
        # Keep a Python-side reference to the 64 KB comment buffer so it
        # stays alive while the DLL writes the file comment into it.
        self.CmtBuf = ctypes.c_buffer(64 * 1024)
        ctypes.Structure.__init__(self, _CmtBuf=ctypes.addressof(self.CmtBuf),
                                  CmtBufSize=ctypes.sizeof(self.CmtBuf))
    _fields_ = [
        ('ArcName', ctypes.c_char * 1024),
        ('ArcNameW', ctypes.c_wchar * 1024),
        ('FileName', ctypes.c_char * 1024),
        ('FileNameW', ctypes.c_wchar * 1024),
        ('Flags', ctypes.c_uint),
        ('PackSize', ctypes.c_uint),
        ('PackSizeHigh', ctypes.c_uint),
        ('UnpSize', ctypes.c_uint),
        ('UnpSizeHigh', ctypes.c_uint),
        ('HostOS', ctypes.c_uint),
        ('FileCRC', ctypes.c_uint),
        ('FileTime', ctypes.c_uint),
        ('UnpVer', ctypes.c_uint),
        ('Method', ctypes.c_uint),
        ('FileAttr', ctypes.c_uint),
        ('_CmtBuf', ctypes.c_voidp),
        ('CmtBufSize', ctypes.c_uint),
        ('CmtSize', ctypes.c_uint),
        ('CmtState', ctypes.c_uint),
        ('Reserved', ctypes.c_uint * 1024),
    ]
def DosDateTimeToTimeTuple(dosDateTime):
    """Convert an MS-DOS packed date/time value to a local time tuple.

    The high 16 bits encode the date (7-bit year offset from 1980, 4-bit
    month, 5-bit day), the low 16 bits the time (5-bit hour, 6-bit
    minute, 5-bit two-second count).
    """
    date_word = dosDateTime >> 16
    time_word = dosDateTime & 0xffff
    broken_down = (
        1980 + (date_word >> 9),   # year
        (date_word >> 5) & 0xf,    # month
        date_word & 0x1f,          # day
        time_word >> 11,           # hour
        (time_word >> 5) & 0x3f,   # minute
        2 * (time_word & 0x1f),    # second (stored at 2 s resolution)
        0, 1, -1)                  # weekday/yearday ignored; DST unknown
    return time.localtime(time.mktime(broken_down))
def _wrap(restype, func, argtypes):
result = func
result.argtypes = argtypes
result.restype = restype
return result
# ctypes signatures for the DLL entry points used by this wrapper.
RARGetDllVersion = _wrap(ctypes.c_int, unrar.RARGetDllVersion, [])
RAROpenArchiveEx = _wrap(ctypes.wintypes.HANDLE, unrar.RAROpenArchiveEx,
                         [ctypes.POINTER(RAROpenArchiveDataEx)])
RARReadHeaderEx = _wrap(ctypes.c_int, unrar.RARReadHeaderEx,
                        [ctypes.wintypes.HANDLE,
                         ctypes.POINTER(RARHeaderDataEx)])
_RARSetPassword = _wrap(ctypes.c_int, unrar.RARSetPassword,
                        [ctypes.wintypes.HANDLE, ctypes.c_char_p])
def RARSetPassword(handle, password):
    """Set the extraction password for *handle*; the DLL result is discarded."""
    _RARSetPassword(handle, password)
RARProcessFile = _wrap(ctypes.c_int, unrar.RARProcessFile,
                       [ctypes.wintypes.HANDLE, ctypes.c_int, ctypes.c_char_p,
                        ctypes.c_char_p])
RARCloseArchive = _wrap(ctypes.c_int, unrar.RARCloseArchive,
                        [ctypes.wintypes.HANDLE])
# The author of the UnRAR library uses "long" as the types of all the parameters,
# even if some of them are pointers *facepalm*
UNRARCALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_voidp, ctypes.c_voidp,
                                   ctypes.c_voidp, ctypes.c_voidp)
RARSetCallback = _wrap(ctypes.c_int, unrar.RARSetCallback,
                       [ctypes.wintypes.HANDLE, UNRARCALLBACK, ctypes.c_long])
# Map DLL error codes to the exceptions this package raises.
RARExceptions = {
    ERAR_NO_MEMORY: MemoryError,
    ERAR_BAD_DATA: ArchiveHeaderBroken,
    ERAR_BAD_ARCHIVE: InvalidRARArchive,
    ERAR_EOPEN: FileOpenError,
}
class PassiveReader:
    """Accumulates data chunks delivered by the unrar data callback.

    If a user callback is supplied, each chunk is forwarded to it
    instead of being buffered.
    """
    def __init__(self, usercallback=None):
        self.buf = []               # buffered chunks (bytes)
        self.ucb = usercallback     # optional consumer for each chunk
    def _callback(self, msg, UserData, P1, P2):
        # Only UCM_PROCESSDATA messages carry payload: P1 is the address
        # of a P2-byte buffer owned by the DLL.
        if msg == UCM_PROCESSDATA:
            chunk = (ctypes.c_char * P2).from_address(P1).raw
            if self.ucb is None:
                self.buf.append(chunk)
            else:
                self.ucb(chunk)
        return 1
    def get_result(self):
        """Return all buffered data as a single bytes object."""
        return b''.join(self.buf)
class RarInfoIterator(object):
    """Iterates over the member headers of an open RAR archive.

    Marks the owning RarFileImplementation as "locked" while iterating;
    the lock is released in __del__ so the archive can be re-opened.
    """
    def __init__(self, arc):
        self.arc = arc
        self.index = 0
        self.headerData = RARHeaderDataEx()
        # Read the first header eagerly so a wrong password surfaces
        # immediately rather than on the first next() call.
        self.res = RARReadHeaderEx(self.arc._handle,
                                   ctypes.byref(self.headerData))
        if self.res in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:
            raise IncorrectRARPassword
        self.arc.lockStatus = "locked"
        self.arc.needskip = False
    def __iter__(self):
        return self
    def __next__(self):
        if self.index > 0:
            # Advance past the previous entry's data unless the caller
            # already consumed it (extract/test clears needskip).
            if self.arc.needskip:
                RARProcessFile(self.arc._handle, RAR_SKIP, None, None)
            self.res = RARReadHeaderEx(self.arc._handle,
                                       ctypes.byref(self.headerData))
        if self.res:
            raise StopIteration
        self.arc.needskip = True
        data = {
            'index': self.index, 'filename': self.headerData.FileNameW,
            'datetime': DosDateTimeToTimeTuple(self.headerData.FileTime),
            'isdir': ((self.headerData.Flags & 0xE0) == 0xE0),
            'size': self.headerData.UnpSize + (
                self.headerData.UnpSizeHigh << 32)
        }
        if self.headerData.CmtState == 1:
            # BUG FIX: previously this passed the bound method
            # `CmtBuf.value.decode` (not its result) to string_from_bytes,
            # which raised at runtime for any entry carrying a comment.
            data['comment'] = string_from_bytes(self.headerData.CmtBuf.value)
        else:
            data['comment'] = None
        self.index += 1
        return data
    next = __next__  # Python 2
    def __del__(self):
        self.arc.lockStatus = "finished"
def generate_password_provider(password):
    """Return an UNRARCALLBACK-compatible function that answers the
    DLL's password prompts with *password* (when one was supplied)."""
    def password_provider_callback(msg, UserData, P1, P2):
        # P1 is the address of a P2-byte buffer the DLL expects us to fill.
        if msg == UCM_NEEDPASSWORD and password is not None:
            (ctypes.c_char * P2).from_address(P1).value = password
        return 1
    return password_provider_callback
class RarFileImplementation(object):
    """Windows backend for RarFile, implemented on top of unrar.dll."""

    def init(self, password=None):
        """Open self.archiveName for extraction, optionally with *password*."""
        self.password = password
        archive_data = RAROpenArchiveDataEx(ArcNameW=self.archiveName,
                                            OpenMode=RAR_OM_EXTRACT)
        self._handle = RAROpenArchiveEx(ctypes.byref(archive_data))
        # Install a callback so the DLL can ask for the password itself.
        self.c_callback = UNRARCALLBACK(
            generate_password_provider(self.password))
        RARSetCallback(self._handle, self.c_callback, 1)
        if archive_data.OpenResult != 0:
            raise RARExceptions[archive_data.OpenResult]
        if archive_data.CmtState == 1:
            self.comment = string_from_bytes(archive_data.CmtBuf.value)
        else:
            self.comment = None
        if password:
            RARSetPassword(self._handle, bytes_from_string(password))
        self.lockStatus = "ready"
        self.isVolume = archive_data.Flags & 1

    def destruct(self):
        if self._handle and RARCloseArchive:
            RARCloseArchive(self._handle)

    def make_sure_ready(self):
        # Only one header iterator may be active at a time; after a
        # finished iteration the archive must be re-opened to rewind.
        if self.lockStatus == "locked":
            raise InvalidRARArchiveUsage(
                "cannot execute infoiter() without finishing previous one")
        if self.lockStatus == "finished":
            self.destruct()
            self.init(self.password)

    def infoiter(self):
        """Return an iterator over the archive's member headers."""
        self.make_sure_ready()
        return RarInfoIterator(self)

    def read_files(self, checker):
        """Return [(info, data_bytes)] for members accepted by *checker*."""
        res = []
        for info in self.infoiter():
            if checker(info) and not info.isdir:
                reader = PassiveReader()
                c_callback = UNRARCALLBACK(reader._callback)
                RARSetCallback(self._handle, c_callback, 1)
                # RAR_TEST decompresses through the callback without
                # writing to disk; the reader captures the bytes.
                tmpres = RARProcessFile(self._handle, RAR_TEST, None, None)
                if tmpres in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:
                    raise IncorrectRARPassword
                self.needskip = False
                res.append((info, reader.get_result()))
        return res

    def extract(self, checker, path, withSubpath, overwrite):
        """Extract members accepted by *checker* under *path*; return
        the list of extracted member infos."""
        res = []
        for info in self.infoiter():
            checkres = checker(info)
            if checkres is not False and not info.isdir:
                if checkres:
                    fn = info.filename
                    if not withSubpath:
                        fn = os.path.split(fn)[-1]
                    target = os.path.join(path, fn)
                else:
                    raise DeprecationWarning("Condition callbacks returning strings are deprecated"
                                             " and only supported in Windows")
                if overwrite or (not os.path.exists(target)):
                    tmpres = RARProcessFile(self._handle, RAR_EXTRACT, None,
                                            target.encode(sys.getdefaultencoding()))
                    if tmpres in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:
                        raise IncorrectRARPassword
                self.needskip = False
                res.append(info)
        return res

    def get_volume(self):
        """Return this file's 0-based volume index, or None when the
        archive is not part of a multi-volume set."""
        if not self.isVolume:
            return None
        header_data = RARHeaderDataEx()
        res = RARReadHeaderEx(self._handle, ctypes.byref(header_data))
        arc_name = header_data.ArcNameW
        # '.partNN.rar' names are 1-based.
        match3 = volume_naming3.search(arc_name)
        if match3 is not None:
            return int(match3.group(1)) - 1
        # BUG FIX: this branch previously searched with volume_naming3
        # again, so the '.NNN.rar' scheme (volume_naming2) never matched.
        match2 = volume_naming2.search(arc_name)
        if match2 is not None:
            return int(match2.group(1))
        # '.rNN' names start at the second volume ('.rar' is volume 0).
        match1 = volume_naming1.search(arc_name)
        if match1 is not None:
            return int(match1.group(1)) + 1
        return 0
|
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for handling mailchimp API calls."""
from __future__ import annotations
import ast
import hashlib
import logging
from core import feconf
import mailchimp3
from mailchimp3 import mailchimpclient
def _get_subscriber_hash(email: str) -> str:
"""Returns Mailchimp subscriber hash from email.
Args:
email: str. The email of the user.
Returns:
str. The subscriber hash corresponding to the input email.
Raises:
Exception. Invalid type for email, expected string.
"""
if not isinstance(email, str):
raise Exception(
'Invalid type for email. Expected string, received %s' % email)
md5_hash = hashlib.md5()
# The md5 accepts only bytes, so we first need to encode the email to bytes.
md5_hash.update(email.encode('utf-8'))
return md5_hash.hexdigest()
def _get_mailchimp_class() -> mailchimp3.MailChimp:
    """Returns the mailchimp api class. This is separated into a separate
    function to facilitate testing.

    NOTE: No other functionalities should be added to this function.

    Returns:
        Mailchimp. A mailchimp class instance with the API key and username
        initialized, or None when either credential is missing.
    """
    # The return value ignore pragma is required for this. This is
    # because adding a Union[] type annotation to handle both None and
    # mailchimp3.MailChimp causes errors where the return value is called
    # (for eg: client.lists), since NoneType does not have an attribute lists.
    if not feconf.MAILCHIMP_API_KEY:
        logging.exception('Mailchimp API key is not available.')
        return None # type: ignore[return-value]
    if not feconf.MAILCHIMP_USERNAME:
        logging.exception('Mailchimp username is not set.')
        return None # type: ignore[return-value]
    # The following is a class initialized in the library with the API key and
    # username and hence cannot be tested directly. The mailchimp functions are
    # tested with a mock class.
    return mailchimp3.MailChimp( # pragma: no cover
        mc_api=feconf.MAILCHIMP_API_KEY, mc_user=feconf.MAILCHIMP_USERNAME)
def _create_user_in_mailchimp_db(user_email: str) -> bool:
    """Creates a new user in the mailchimp database and handles the case where
    the user was permanently deleted from the database.

    Args:
        user_email: str. Email ID of the user. Email is used to uniquely
            identify the user in the mailchimp DB.

    Returns:
        bool. Whether the user was successfully added to the db. (This will be
        False if the user was permanently deleted earlier and therefore cannot
        be added back.)

    Raises:
        Exception. Any error (other than the one mentioned below) raised by the
            mailchimp API.
    """
    post_data = {
        'email_address': user_email,
        'status': 'subscribed'
    }
    # NOTE(review): unlike the other public functions in this module, the
    # client is not checked for None here; presumably callers have already
    # verified that Mailchimp credentials are configured — confirm.
    client = _get_mailchimp_class()
    try:
        client.lists.members.create(feconf.MAILCHIMP_AUDIENCE_ID, post_data)
    except mailchimpclient.MailChimpError as error:
        # The library only exposes the payload via str(error); parse it
        # back into a dict.
        error_message = ast.literal_eval(str(error))
        # This is the specific error message returned for the case where the
        # user was permanently deleted from the Mailchimp database earlier.
        # This was found by experimenting with the MailChimp API. Note that the
        # error reference
        # (https://mailchimp.com/developer/marketing/docs/errors/) is not
        # comprehensive, since, under status 400, they only list a subset of the
        # common error titles.
        if error_message['title'] == 'Forgotten Email Not Subscribed':
            return False
        raise Exception(error_message['detail']) from error
    return True
def permanently_delete_user_from_list(user_email: str) -> None:
    """Permanently deletes the user with the given email from the Mailchimp
    list.

    NOTE TO DEVELOPERS: This should only be called from the wipeout service
    since once a user is permanently deleted from mailchimp, they cannot be
    programmatically added back via their API (the user would have to manually
    resubscribe back).

    Args:
        user_email: str. Email ID of the user. Email is used to uniquely
            identify the user in the mailchimp DB.

    Raises:
        Exception. Any error raised by the mailchimp API.
    """
    client = _get_mailchimp_class()
    if not client:
        return None
    subscriber_hash = _get_subscriber_hash(user_email)
    audience_id = feconf.MAILCHIMP_AUDIENCE_ID
    try:
        # The get() call verifies the member exists before attempting the
        # permanent deletion.
        client.lists.members.get(audience_id, subscriber_hash)
        client.lists.members.delete_permanent(audience_id, subscriber_hash)
    except mailchimpclient.MailChimpError as err:
        # MailChimpError only exposes its payload via str() (the 'message'
        # attribute is a Python 2 idiom), so convert it back to a dict with
        # ast.literal_eval. This works on both Python 2 and 3.
        error_details = ast.literal_eval(str(err))
        # A 404 status means the user does not exist in the list; that is
        # not an error for deletion purposes.
        if error_details['status'] != 404:
            raise Exception(error_details['detail']) from err
def add_or_update_user_status(
    user_email: str, can_receive_email_updates: bool
) -> bool:
    """Subscribes/unsubscribes an existing user or creates a new user with
    correct status in the mailchimp DB.

    NOTE: Callers should ensure that the user's corresponding
    UserEmailPreferencesModel.site_updates field is kept in sync.

    Args:
        user_email: str. Email ID of the user. Email is used to uniquely
            identify the user in the mailchimp DB.
        can_receive_email_updates: bool. Whether they want to be subscribed to
            the bulk email list or not.

    Returns:
        bool. Whether the user was successfully added to the db. (This will be
        False if the user was permanently deleted earlier and therefore cannot
        be added back.)

    Raises:
        Exception. Any error (other than the case where the user was permanently
            deleted earlier) raised by the mailchimp API.
    """
    client = _get_mailchimp_class()
    if not client:
        return False
    subscriber_hash = _get_subscriber_hash(user_email)
    subscribed_mailchimp_data = {
        'email_address': user_email,
        'status': 'subscribed'
    }
    unsubscribed_mailchimp_data = {
        'email_address': user_email,
        'status': 'unsubscribed'
    }
    try:
        # Raises a 404 MailChimpError when the user is not in the list yet;
        # that case is handled in the except block below.
        member_details = client.lists.members.get(
            feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash)
        # If member is already added to mailchimp list, we cannot permanently
        # delete a list member, since they cannot be programmatically added
        # back, so we change their status based on preference.
        if (
                can_receive_email_updates and
                member_details['status'] != 'subscribed'):
            client.lists.members.update(
                feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash,
                subscribed_mailchimp_data)
        elif (
                not can_receive_email_updates and
                member_details['status'] == 'subscribed'):
            client.lists.members.update(
                feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash,
                unsubscribed_mailchimp_data)
    except mailchimpclient.MailChimpError as error:
        # This has to be done since the message can only be accessed from
        # MailChimpError by error.message in Python2, but this is deprecated in
        # Python3.
        # In Python3, the message can be accessed directly by KeyError
        # (https://github.com/VingtCinq/python-mailchimp/pull/65), so as a
        # workaround for Python2, the 'message' attribute is obtained by
        # str() and then it is converted to dict. This works in Python3 as well.
        error_message = ast.literal_eval(str(error))
        # Error 404 corresponds to "User does not exist".
        if error_message['status'] == 404:
            if can_receive_email_updates:
                user_creation_successful = _create_user_in_mailchimp_db(
                    user_email)
                if not user_creation_successful:
                    return False
        else:
            raise Exception(error_message['detail']) from error
    return True
|
|
import __builtin__
import contextlib
import os
import unittest
import shutil
import tempfile
import StringIO
from pywatchman import bser
from .buck import BuildFileProcessor, Diagnostic, add_rule, process_with_diagnostics
def foo_rule(name, srcs=[], visibility=[], build_env=None):
    """Minimal fake build-rule function used as an extra_func in tests."""
    rule = {
        'buck.type': 'foo',
        'name': name,
        'srcs': srcs,
        'visibility': visibility,
    }
    add_rule(rule, build_env)
def extract_from_results(name, results):
    """Return the value of the single-key result dict whose only key is
    *name*.

    Raises:
        ValueError: if no matching result entry is found.
    """
    for result in results:
        # list(...) makes this work on both Python 2 (keys() returns a
        # list) and Python 3 (keys() returns a view, which never compares
        # equal to a list).
        if list(result.keys()) == [name]:
            return result[name]
    raise ValueError(str(results))
def get_includes_from_results(results):
    """Return the '__includes' entry from a processor result list."""
    return extract_from_results('__includes', results)
def get_config_from_results(results):
    """Return the '__configs' entry from a processor result list."""
    return extract_from_results('__configs', results)
def get_env_from_results(results):
    """Return the '__env' entry from a processor result list."""
    return extract_from_results('__env', results)
def setenv(varname, value=None):
    """Assign or delete an environment variable.

    A value of None removes *varname* from the environment (a no-op when
    it is already absent); any other value is stored verbatim.
    """
    if value is not None:
        os.environ[varname] = value
    else:
        os.environ.pop(varname, None)
@contextlib.contextmanager
def with_env(varname, value=None):
    """Temporarily set (or, for value=None, unset) one environment
    variable, restoring its previous state on exit."""
    previous = os.environ.get(varname)
    setenv(varname, value)
    try:
        yield
    finally:
        # Restore even if the body raised.
        setenv(varname, previous)
@contextlib.contextmanager
def with_envs(envs):
    # Apply all overrides from the dict at once. NOTE: contextlib.nested
    # and dict.iteritems exist only on Python 2, which this module targets.
    with contextlib.nested(*[with_env(n, v) for n, v in envs.iteritems()]):
        yield
class ProjectFile(object):
    """In-memory description of a file to be written into a test project.

    Attributes:
        path: project-relative path of the file.
        name: buck-style target name, '//<path>'.
        root: the project root directory.
        prefix: always None at construction time.
        contents: file body; a tuple/list of lines is joined with
            os.linesep (with a trailing separator).
    """
    def __init__(self, root, path, contents):
        self.root = root
        self.path = path
        self.name = '//{0}'.format(path)
        self.prefix = None
        if isinstance(contents, (tuple, list)):
            contents = os.linesep.join(contents) + os.linesep
        self.contents = contents
class BuckTest(unittest.TestCase):
    def setUp(self):
        # Fresh scratch project tree per test; removed again in tearDown().
        self.project_root = tempfile.mkdtemp()
        # Defaults for create_build_file_processor(); individual tests
        # override these attributes before building the processor.
        self.allow_empty_globs = False
        self.build_file_name = 'BUCK'
        self.watchman_client = None
        self.watchman_error = None
        self.enable_build_file_sandboxing = False
        self.project_import_whitelist = None
    def tearDown(self):
        # Second arg True == ignore_errors: best-effort temp-tree cleanup.
        shutil.rmtree(self.project_root, True)
def write_file(self, pfile):
with open(os.path.join(self.project_root, pfile.path), 'w') as f:
f.write(pfile.contents)
def write_files(self, *pfiles):
for pfile in pfiles:
self.write_file(pfile)
    def create_build_file_processor(self, cell_roots=None, includes=None, **kwargs):
        """Build a BuildFileProcessor wired to this test's attributes.

        NOTE(review): the arguments below are positional and must stay in
        the exact order of the BuildFileProcessor constructor.
        """
        return BuildFileProcessor(
            self.project_root,
            cell_roots or {},
            self.build_file_name,
            self.allow_empty_globs,
            False,  # ignore_buck_autodeps_files
            False,  # no_autodeps_signatures
            self.watchman_client,
            self.watchman_error,
            False,  # watchman_glob_stat_results
            False,  # watchman_use_glob_generator
            False,  # use_mercurial_glob
            self.enable_build_file_sandboxing,
            self.project_import_whitelist,
            includes or [],
            **kwargs)
    def test_sibling_includes_use_separate_globals(self):
        """
        Test that consecutive includes can't see each others globals.

        If a build file includes two include defs, one after another, verify
        that the first's globals don't pollute the second's (e.g. the second
        cannot implicitly reference globals from the first without including
        it itself).
        """
        # Setup the includes defs.  The first one defines a variable that the
        # second one (incorrectly) implicitly references.
        include_def1 = ProjectFile(self.project_root, path='inc_def1', contents=('FOO = 1',))
        include_def2 = ProjectFile(self.project_root, path='inc_def2', contents=('BAR = FOO',))
        self.write_files(include_def1, include_def2)
        # Construct a processor using the above as default includes, and verify
        # that the second one can't use the first's globals.
        build_file = ProjectFile(self.project_root, path='BUCK', contents='')
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            includes=[include_def1.name, include_def2.name])
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
        # Construct a processor with no default includes, have a generated
        # build file include the include defs one after another, and verify
        # that the second one can't use the first's globals.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def1.name),
                'include_defs({0!r})'.format(include_def2.name),
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
    def test_lazy_include_defs(self):
        """
        Tests bug reported in https://github.com/facebook/buck/issues/182.

        If a include def references another include def via a lazy include_defs
        call is some defined function, verify that it can correctly access the
        latter's globals after the import.
        """
        # Setup the includes defs.  The first one defines a variable that the
        # second one references after a local 'include_defs' call.
        include_def1 = ProjectFile(self.project_root, path='inc_def1', contents=('FOO = 1',))
        include_def2 = ProjectFile(
            self.project_root,
            path='inc_def2',
            contents=(
                'def test():',
                '    include_defs({0!r})'.format(include_def1.name),
                '    FOO',
            ))
        self.write_files(include_def1, include_def2)
        # Construct a processor using the above as default includes, and verify
        # that the function 'test' can use 'FOO' after including the first
        # include def.
        build_file = ProjectFile(self.project_root, path='BUCK', contents=('test()',))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            includes=[include_def1.name, include_def2.name])
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path, set())
        # Construct a processor with no default includes, have a generated
        # build file include the include defs one after another, and verify
        # that the function 'test' can use 'FOO' after including the first
        # include def.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def1.name),
                'include_defs({0!r})'.format(include_def2.name),
                'test()',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path, set())
    def test_private_globals_are_ignored(self):
        """
        Verify globals prefixed with '_' don't get imported via 'include_defs'.
        """
        include_def = ProjectFile(self.project_root, path='inc_def1', contents=('_FOO = 1',))
        self.write_file(include_def)
        # Test we don't get private module attributes from default includes.
        build_file = ProjectFile(self.project_root, path='BUCK', contents=('_FOO',))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            includes=[include_def.name])
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
        # Test we don't get private module attributes from explicit includes.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                '_FOO',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
    def test_implicit_includes_apply_to_explicit_includes(self):
        """
        Verify that implicit includes are applied to explicit includes.
        """
        # Setup an implicit include that defines a variable, another include
        # that uses it, and a build file that uses the explicit include.
        implicit_inc = ProjectFile(self.project_root, path='implicit', contents=('FOO = 1',))
        explicit_inc = ProjectFile(self.project_root, path='explicit', contents=('FOO',))
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(explicit_inc.name),
            ))
        self.write_files(implicit_inc, explicit_inc, build_file)
        # Run the processor to verify that the explicit include can use the
        # variable in the implicit include.
        build_file_processor = self.create_build_file_processor(
            includes=[implicit_inc.name])
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path, set())
    def test_all_list_is_respected(self):
        """
        Verify that the `__all__` list in included files can be used to narrow
        what gets pulled in.
        """
        # __all__ is empty, so FOO must not leak into including files.
        include_def = ProjectFile(
            self.project_root,
            path='inc_def1',
            contents=('__all__ = []', 'FOO = 1'))
        self.write_file(include_def)
        # Test we don't get non-whitelisted attributes from default includes.
        build_file = ProjectFile(self.project_root, path='BUCK', contents=('FOO',))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            includes=[include_def.name])
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
        # Test we don't get non-whitelisted attributes from explicit includes.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                'FOO',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
    def test_do_not_override_overridden_builtins(self):
        """
        We want to ensure that if you override something like java_binary, and then use
        include_defs to get another file, you don't end up clobbering your override.
        """
        # Override java_library and have it automatically add a dep
        build_defs = ProjectFile(
            self.project_root,
            path='BUILD_DEFS',
            contents=(
                # While not strictly needed for this test, we want to make sure we are overriding
                # a provided method and not just defining it ourselves.
                'old_get_base_path = get_base_path',
                'def get_base_path(*args, **kwargs):',
                ' raise ValueError()',
                'include_defs("//OTHER_DEFS")',
            ))
        other_defs = ProjectFile(self.project_root, path='OTHER_DEFS', contents=())
        # The build file calls the overridden function; the ValueError below
        # proves the override survived the nested include_defs.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'get_base_path()',
            ))
        self.write_files(build_defs, other_defs, build_file)
        build_file_processor = self.create_build_file_processor(
            includes=[build_defs.name])
        with build_file_processor.with_builtins(__builtin__.__dict__):
            self.assertRaises(
                ValueError,
                build_file_processor.process,
                build_file.root, build_file.prefix, build_file.path, set())
    def test_watchman_glob_failure_falls_back_to_regular_glob_and_adds_diagnostic(self):
        """A watchman query error should fall back to a local glob() and be
        surfaced as an 'error'-level diagnostic."""
        class FakeWatchmanError(Exception):
            pass
        class FakeWatchmanClient:
            def __init__(self):
                self.query_invoked = False
            def query(self, *args):
                self.query_invoked = True
                raise FakeWatchmanError("Nobody watches the watchmen")
            def close(self):
                pass
        self.watchman_client = FakeWatchmanClient()
        self.watchman_error = FakeWatchmanError
        # NOTE(review): the missing comma after 'name="foo",' is deliberate
        # implicit string concatenation; the generated BUCK text is still
        # valid.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'foo_rule(',
                ' name="foo",'
                ' srcs=glob(["*.java"]),',
                ')'
            ))
        java_file = ProjectFile(self.project_root, path='Foo.java', contents=())
        self.write_files(build_file, java_file)
        build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
        diagnostics = set()
        with build_file_processor.with_builtins(__builtin__.__dict__):
            rules = build_file_processor.process(
                build_file.root, build_file.prefix, build_file.path, diagnostics)
            self.assertTrue(self.watchman_client.query_invoked)
            self.assertEqual(['Foo.java'], rules[0]['srcs'])
            self.assertEqual(
                set([Diagnostic(
                    message='Nobody watches the watchmen',
                    level='error',
                    source='watchman')]),
                diagnostics)
    def test_watchman_glob_warning_adds_diagnostic(self):
        """A watchman 'warning' field should be forwarded as a
        'warning'-level diagnostic while the query result is still used."""
        class FakeWatchmanClient:
            def query(self, *args):
                return {'warning': 'This is a warning', 'files': ['Foo.java']}
            def close(self):
                pass
        self.watchman_client = FakeWatchmanClient()
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'foo_rule(',
                ' name="foo",'
                ' srcs=glob(["*.java"]),',
                ')'
            ))
        java_file = ProjectFile(self.project_root, path='Foo.java', contents=())
        self.write_files(build_file, java_file)
        build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
        diagnostics = set()
        with build_file_processor.with_builtins(__builtin__.__dict__):
            rules = build_file_processor.process(
                build_file.root, build_file.prefix, build_file.path, diagnostics)
        self.assertEqual(['Foo.java'], rules[0]['srcs'])
        self.assertEqual(
            set([Diagnostic(
                message='This is a warning',
                level='warning',
                source='watchman')]),
            diagnostics)
    def test_read_config(self):
        """
        Verify that the builtin `read_config()` function works.
        """
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'assert read_config("hello", "world") == "foo"',
                'assert read_config("hello", "bar") is None',
                'assert read_config("hello", "goo", "default") == "default"',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            configs={('hello', 'world'): 'foo'})
        result = build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
                                              set())
        # Every queried key is recorded in '__configs', including misses
        # (recorded as None) so the daemon can invalidate on config change.
        self.assertEquals(
            get_config_from_results(result),
            {'hello': {'world': 'foo', 'bar': None, 'goo': None}})
    def test_add_build_file_dep(self):
        """
        Test simple use of `add_build_file_dep`.
        """
        # Setup the build file and dependency.
        dep = ProjectFile(self.project_root, path='dep', contents=('',))
        build_file = (
            ProjectFile(
                self.project_root,
                path='BUCK',
                contents=(
                    'add_build_file_dep("//dep")',
                ),
            ))
        self.write_files(dep, build_file)
        # Create a process and run it.
        build_file_processor = self.create_build_file_processor()
        results = build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
                                               set())
        # Verify that the dep was recorded.
        self.assertTrue(
            os.path.join(self.project_root, dep.path) in
            get_includes_from_results(results))
    def test_import_works_without_sandboxing(self):
        """With sandboxing disabled, arbitrary imports in a build file work."""
        self.enable_build_file_sandboxing = False
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'import ssl',
            ))
        self.write_files(build_file)
        build_file_processor = self.create_build_file_processor()
        with build_file_processor.with_builtins(__builtin__.__dict__):
            build_file_processor.process(
                build_file.root,
                build_file.prefix,
                build_file.path,
                set())
    def test_enabled_sandboxing_blocks_import(self):
        """With sandboxing enabled, a non-whitelisted import must fail."""
        self.enable_build_file_sandboxing = True
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'import ssl',
            ))
        self.write_files(build_file)
        build_file_processor = self.create_build_file_processor()
        with build_file_processor.with_builtins(__builtin__.__dict__):
            self.assertRaises(
                ImportError,
                build_file_processor.process,
                build_file.root, build_file.prefix, build_file.path, set())
    def test_import_whitelist(self):
        """
        Verify that modules whitelisted globally or in configs can be imported
        with sandboxing enabled.
        """
        self.enable_build_file_sandboxing = True
        # json/functools/re are presumably whitelisted globally; sys and
        # subprocess come from the project whitelist set here.
        self.project_import_whitelist = ['sys', 'subprocess']
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'import json',
                'import functools',
                'import re',
                'import sys',
                'import subprocess',
            ))
        self.write_files(build_file)
        build_file_processor = self.create_build_file_processor()
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path, set())
    def test_allow_unsafe_import_allows_to_import(self):
        """
        Verify that `allow_unsafe_import()` allows to import specified modules
        """
        self.enable_build_file_sandboxing = True
        # Importing httplib results in `__import__()` calls for other modules, e.g. socket, sys
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'with allow_unsafe_import():',
                ' import math, httplib',
            ))
        self.write_files(build_file)
        build_file_processor = self.create_build_file_processor()
        with build_file_processor.with_builtins(__builtin__.__dict__):
            build_file_processor.process(
                build_file.root,
                build_file.prefix,
                build_file.path,
                set())
    def test_modules_are_not_copied_unless_specified(self):
        """
        Test that modules are not copied by 'include_defs' unless specified in '__all__'.
        """
        include_def = ProjectFile(
            self.project_root,
            path='inc_def',
            contents=(
                'import math',
                'def math_pi():',
                ' return math.pi',
            ))
        self.write_files(include_def)
        # Module math should not be accessible
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                'assert(round(math.pi, 2) == 3.14)',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
        # Confirm that math_pi() works
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                'assert(round(math_pi(), 2) == 3.14)',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path, set())
        # If specified in '__all__', math should be accessible
        include_def = ProjectFile(
            self.project_root,
            path='inc_def',
            contents=(
                '__all__ = ["math"]',
                'import math',
            ))
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                'assert(round(math.pi, 2) == 3.14)',
            ))
        self.write_files(include_def, build_file)
        build_file_processor = self.create_build_file_processor()
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path, set())
    def test_os_getenv(self):
        """
        Verify that calling `os.getenv()` records the environment variable.
        """
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'import os',
                'assert os.getenv("TEST1") == "foo"',
                'assert os.getenv("TEST2") is None',
                'assert os.getenv("TEST3", "default") == "default"',
            ))
        self.write_file(build_file)
        # Process with env interceptors installed so each getenv() is recorded.
        with with_envs({'TEST1': 'foo', 'TEST2': None, 'TEST3': None}):
            build_file_processor = self.create_build_file_processor()
            with build_file_processor.with_env_interceptors():
                result = build_file_processor.process(build_file.root, build_file.prefix,
                                                      build_file.path, set())
        # Every queried variable (including missing ones) must be recorded.
        self.assertEquals(
            get_env_from_results(result),
            {'TEST1': "foo", 'TEST2': None, 'TEST3': None})
def test_os_environ(self):
"""
Verify that accessing environemtn variables via `os.environ` records
the environment variables.
"""
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import os',
'assert os.environ["TEST1"] == "foo"',
'assert os.environ.get("TEST2") is None',
'assert os.environ.get("TEST3", "default") == "default"',
'assert "TEST4" in os.environ',
'assert "TEST5" not in os.environ',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
with with_envs({'TEST1': 'foo', 'TEST2': None, 'TEST3': None, 'TEST4': '', 'TEST5': None}):
build_file_processor = self.create_build_file_processor()
with build_file_processor.with_env_interceptors():
result = build_file_processor.process(build_file.root, build_file.prefix,
build_file.path, set())
self.assertEquals(
get_env_from_results(result),
{'TEST1': "foo", 'TEST2': None, 'TEST3': None, 'TEST4': '', 'TEST5': None})
    def test_safe_modules_allow_safe_functions(self):
        """
        Test that 'import os.path' allows access to safe 'os' functions,
        'import pipes' allows 'quote' and also that 'from os.path import *' works.
        """
        # Enable sandboxing so module access restrictions apply to the build file.
        self.enable_build_file_sandboxing = True
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'import os.path',
                'from os.path import *',
                'import pipes',
                'assert(os.path.split("a/b/c") == ("a/b", "c"))',
                'assert(split("a/b/c") == ("a/b", "c"))',
                'assert os.environ["TEST1"] == "foo"',
                'assert pipes.quote("foo; bar") == "\'foo; bar\'"'
            ))
        self.write_files(build_file)
        # Processing must succeed: everything used above is considered safe.
        with with_envs({'TEST1': 'foo'}):
            build_file_processor = self.create_build_file_processor()
            build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
                                         set())
    def test_safe_modules_block_unsafe_functions(self):
        """
        Test that after 'import os.path' unsafe functions raise errors
        """
        # Enable sandboxing so module access restrictions apply to the build file.
        self.enable_build_file_sandboxing = True
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'import os.path',
                'os.path.exists("a/b")',
            ))
        self.write_files(build_file)
        build_file_processor = self.create_build_file_processor()
        # 'os.path.exists()' should raise AttributeError
        self.assertRaises(
            AttributeError,
            build_file_processor.process,
            build_file.root, build_file.prefix, build_file.path, set())
def test_is_in_dir(self):
build_file_processor = self.create_build_file_processor()
assert build_file_processor._is_in_dir('foo/bar.py', 'foo')
assert build_file_processor._is_in_dir('foo/bar.py', 'foo/')
assert build_file_processor._is_in_dir('/foo/bar.py', '/')
assert not build_file_processor._is_in_dir('foo.py', 'foo')
assert not build_file_processor._is_in_dir('foo/bar.py', 'foo/bar')
assert not build_file_processor._is_in_dir('foo/bars', 'foo/bar')
    def test_wrap_access_prints_warnings(self):
        """Opening an undeclared file yields a sandbox warning, not an error."""
        self.enable_build_file_sandboxing = True
        path = os.path.normpath(os.path.join(self.project_root, 'foo.py'))
        # The BUCK file opens foo.py without declaring it via add_build_file_dep.
        # Backslashes are doubled so the generated source stays valid on Windows.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=("open('{0}', 'r')".format(path.replace('\\', '\\\\')),))
        py_file = ProjectFile(self.project_root, path='foo.py', contents=('foo',))
        self.write_files(build_file, py_file)
        build_file_processor = self.create_build_file_processor()
        diagnostics = set()
        build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
                                     diagnostics)
        expected_message = (
            "Access to a non-tracked file detected! {0} is not a ".format(path) +
            "known dependency and it should be added using 'add_build_file_dep' " +
            "function before trying to access the file, e.g.\n" +
            "'add_build_file_dep({0!r})'\n".format(py_file.name) +
            "The 'add_build_file_dep' function is documented at " +
            "https://buckbuild.com/function/add_build_file_dep.html\n"
        )
        # Exactly one warning-level diagnostic from the sandbox is expected.
        self.assertEqual(
            set([Diagnostic(
                message=expected_message,
                level='warning',
                source='sandboxing')]),
            diagnostics)
def test_can_resolve_cell_paths(self):
build_file_processor = self.create_build_file_processor(
cell_roots={
'foo': os.path.abspath(os.path.join(self.project_root, '../cell'))
})
self.assertEqual(
os.path.abspath(os.path.join(self.project_root, '../cell/bar/baz')),
build_file_processor._get_include_path('foo//bar/baz'))
self.assertEqual(
os.path.abspath(os.path.join(self.project_root, 'bar/baz')),
build_file_processor._get_include_path('//bar/baz'))
    def test_bser_encoding_failure(self):
        """An unencodable build file value surfaces as a 'fatal' parse diagnostic."""
        build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
        fake_stdout = StringIO.StringIO()
        # srcs holds a raw object(), which the bser serializer cannot encode.
        # NOTE(review): the missing commas below merge the last three strings
        # via implicit concatenation; the generated file text is unaffected.
        build_file = ProjectFile(
            self.project_root,
            path='BUCK',
            contents=(
                'foo_rule(',
                ' name="foo",'
                ' srcs=[object()],'
                ')'
            ))
        self.write_file(build_file)
        with build_file_processor.with_builtins(__builtin__.__dict__):
            process_with_diagnostics(
                {
                    'buildFile': self.build_file_name,
                    'watchRoot': '',
                    'projectPrefix': self.project_root,
                },
                build_file_processor,
                fake_stdout)
        result = fake_stdout.getvalue()
        decoded_result = bser.loads(result)
        # No values are produced and the failure is reported as a fatal
        # diagnostic attributed to the 'parse' stage.
        self.assertEqual(
            [],
            decoded_result['values'])
        self.assertEqual(
            'fatal',
            decoded_result['diagnostics'][0]['level'])
        self.assertEqual(
            'parse',
            decoded_result['diagnostics'][0]['source'])
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Standard Ansible module metadata: schema version, maturity and support owner.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_kms
short_description: Perform various KMS management tasks.
description:
- Manage role/user access to a KMS key. Not designed for encrypting/decrypting.
version_added: "2.3"
options:
alias:
description: An alias for a key. For safety, even though KMS does not require keys
to have an alias, this module expects all new keys to be given an alias
to make them easier to manage. Existing keys without an alias may be
referred to by I(key_id). Use M(aws_kms_info) to find key ids. Required
if I(key_id) is not given. Note that passing a I(key_id) and I(alias)
will only cause a new alias to be added, an alias will never be renamed.
The 'alias/' prefix is optional.
required: false
aliases:
- key_alias
key_id:
description:
- Key ID or ARN of the key. One of C(alias) or C(key_id) are required.
required: false
aliases:
- key_arn
policy_mode:
description:
- (deprecated) Grant or deny access.
- Used for modifying the Key Policy rather than modifying a grant and only
works on the default policy created through the AWS Console.
- This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
default: grant
choices: [ grant, deny ]
aliases:
- mode
policy_role_name:
description:
- (deprecated) Role to allow/deny access. One of C(policy_role_name) or C(policy_role_arn) are required.
- Used for modifying the Key Policy rather than modifying a grant and only
works on the default policy created through the AWS Console.
- This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
required: false
aliases:
- role_name
policy_role_arn:
description:
- (deprecated) ARN of role to allow/deny access. One of C(policy_role_name) or C(policy_role_arn) are required.
- Used for modifying the Key Policy rather than modifying a grant and only
works on the default policy created through the AWS Console.
- This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
required: false
aliases:
- role_arn
policy_grant_types:
description:
- (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin". Required when C(policy_mode=grant).
- Used for modifying the Key Policy rather than modifying a grant and only
works on the default policy created through the AWS Console.
- This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
required: false
aliases:
- grant_types
policy_clean_invalid_entries:
description:
- (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases.
- Only cleans if changes are being made.
- Used for modifying the Key Policy rather than modifying a grant and only
works on the default policy created through the AWS Console.
- This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
type: bool
default: true
aliases:
- clean_invalid_entries
state:
description: Whether a key should be present or absent. Note that making an
existing key absent only schedules a key for deletion. Passing a key that
is scheduled for deletion with state present will cancel key deletion.
required: False
choices:
- present
- absent
default: present
version_added: 2.8
enabled:
description: Whether or not a key is enabled
default: True
version_added: 2.8
type: bool
description:
description:
A description of the CMK. Use a description that helps you decide
whether the CMK is appropriate for a task.
version_added: 2.8
tags:
description: A dictionary of tags to apply to a key.
version_added: 2.8
purge_tags:
description: Whether the I(tags) argument should cause tags not in the list to
be removed
version_added: 2.8
default: False
type: bool
purge_grants:
description: Whether the I(grants) argument should cause grants not in the list to
be removed
default: False
version_added: 2.8
type: bool
grants:
description:
- A list of grants to apply to the key. Each item must contain I(grantee_principal).
Each item can optionally contain I(retiring_principal), I(operations), I(constraints),
I(name).
- Valid operations are C(Decrypt), C(Encrypt), C(GenerateDataKey), C(GenerateDataKeyWithoutPlaintext),
C(ReEncryptFrom), C(ReEncryptTo), C(CreateGrant), C(RetireGrant), C(DescribeKey), C(Verify) and
C(Sign)
- Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals),
either or both being a dict specifying an encryption context match.
See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html)
- I(grantee_principal) and I(retiring_principal) must be ARNs
version_added: 2.8
policy:
description:
- policy to apply to the KMS key
- See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
version_added: 2.8
author:
- Ted Timmons (@tedder)
- Will Thames (@willthames)
- Mark Chappell (@tremble)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile
# and has been deprecated in favour of the policy option.
- name: grant user-style access to production secrets
aws_kms:
args:
alias: "alias/my_production_secrets"
policy_mode: grant
policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L"
policy_grant_types: "role,role grant"
- name: remove access to production secrets from role
aws_kms:
args:
alias: "alias/my_production_secrets"
policy_mode: deny
policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L"
# Create a new KMS key
- aws_kms:
alias: mykey
tags:
Name: myKey
Purpose: protect_stuff
# Update previous key with more tags
- aws_kms:
alias: mykey
tags:
Name: myKey
Purpose: protect_stuff
Owner: security_team
# Update a known key with grants allowing an instance with the billing-prod IAM profile
# to decrypt data encrypted with the environment: production, application: billing
# encryption context
- aws_kms:
key_id: abcd1234-abcd-1234-5678-ef1234567890
grants:
- name: billing_prod
grantee_principal: arn:aws:iam::1234567890123:role/billing_prod
constraints:
encryption_context_equals:
environment: production
application: billing
operations:
- Decrypt
- RetireGrant
'''
RETURN = '''
key_id:
description: ID of key
type: str
returned: always
sample: abcd1234-abcd-1234-5678-ef1234567890
key_arn:
description: ARN of key
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
key_state:
description: The state of the key
type: str
returned: always
sample: PendingDeletion
key_usage:
description: The cryptographic operations for which you can use the key.
type: str
returned: always
sample: ENCRYPT_DECRYPT
origin:
description: The source of the key's key material. When this value is C(AWS_KMS),
AWS KMS created the key material. When this value is C(EXTERNAL), the
key material was imported or the CMK lacks key material.
type: str
returned: always
sample: AWS_KMS
aws_account_id:
description: The AWS Account ID that the key belongs to
type: str
returned: always
sample: 1234567890123
creation_date:
description: Date of creation of the key
type: str
returned: always
sample: "2017-04-18T15:12:08.551000+10:00"
description:
description: Description of the key
type: str
returned: always
sample: "My Key for Protecting important stuff"
enabled:
  description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
type: str
returned: always
sample: false
aliases:
description: list of aliases associated with the key
type: list
returned: always
sample:
- aws/acm
- aws/ebs
policies:
description: list of policy documents for the keys. Empty when access is denied even if there are policies.
type: list
returned: always
sample:
Version: "2012-10-17"
Id: "auto-ebs-2"
Statement:
- Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
Effect: "Allow"
Principal:
AWS: "*"
Action:
- "kms:Encrypt"
- "kms:Decrypt"
- "kms:ReEncrypt*"
- "kms:GenerateDataKey*"
- "kms:CreateGrant"
- "kms:DescribeKey"
Resource: "*"
Condition:
StringEquals:
kms:CallerAccount: "111111111111"
kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- Sid: "Allow direct access to key metadata to the account"
Effect: "Allow"
Principal:
AWS: "arn:aws:iam::111111111111:root"
Action:
- "kms:Describe*"
- "kms:Get*"
- "kms:List*"
- "kms:RevokeGrant"
Resource: "*"
tags:
description: dictionary of tags applied to the key
type: dict
returned: always
sample:
Name: myKey
Purpose: protecting_stuff
grants:
description: list of grants associated with a key
type: complex
returned: always
contains:
constraints:
description: Constraints on the encryption context that the grant allows.
See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
type: dict
returned: always
sample:
encryption_context_equals:
"aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
creation_date:
description: Date of creation of the grant
type: str
returned: always
sample: 2017-04-18T15:12:08+10:00
grant_id:
description: The unique ID for the grant
type: str
returned: always
sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
grantee_principal:
description: The principal that receives the grant's permissions
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
issuing_account:
description: The AWS account under which the grant was issued
type: str
returned: always
sample: arn:aws:iam::01234567890:root
key_id:
description: The key ARN to which the grant applies.
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
name:
description: The friendly name that identifies the grant
type: str
returned: always
sample: xyz
operations:
description: The list of operations permitted by the grant
type: list
returned: always
sample:
- Decrypt
- RetireGrant
retiring_principal:
description: The principal that can retire the grant
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
changes_needed:
description: grant types that would be changed/were changed.
type: dict
returned: always
sample: { "role": "add", "role grant": "add" }
had_invalid_entries:
  description: Whether there are invalid (non-ARN) entries in the KMS key policy. These don't count as a change, but will be removed if any changes are being made.
type: bool
returned: always
'''
# these mappings are used to go from simple labels to the actual 'Sid' values returned
# by get_policy. They seem to be magic values.
# NOTE(review): these Sids appear to match the default key policy statements —
# the deprecated policy_* options only work on such default policies (see docs).
statement_label = {
    'role': 'Allow use of the key',
    'role grant': 'Allow attachment of persistent resources',
    'admin': 'Allow access for Key Administrators'
}
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import compare_aws_tags, compare_policies
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
import traceback
import json
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_iam_roles_with_backoff(connection):
    """Return all IAM roles, retrying transient API failures."""
    return connection.get_paginator('list_roles').paginate().build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
    """Return the full list of KMS keys, retrying transient API failures."""
    return connection.get_paginator('list_keys').paginate().build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_aliases_with_backoff(connection):
    """Return the full list of KMS aliases, retrying transient API failures."""
    return connection.get_paginator('list_aliases').paginate().build_full_result()
def get_kms_aliases_lookup(connection):
    """Build a mapping of key id -> list of alias names (minus 'alias/')."""
    lookup = dict()
    for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
        # Not all aliases are actually associated with a key
        if 'TargetKeyId' in alias:
            # strip off leading 'alias/' and add it to key's aliases
            lookup.setdefault(alias['TargetKeyId'], []).append(alias['AliasName'][6:])
    return lookup
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_tags_with_backoff(connection, key_id, **kwargs):
    # list_resource_tags has no boto3 paginator; kwargs carries the 'Marker'
    # for the manual pagination loop in get_kms_tags.
    return connection.list_resource_tags(KeyId=key_id, **kwargs)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_grants_with_backoff(connection, key_id):
    """Return every grant attached to the given key, with retries."""
    paginator = connection.get_paginator('list_grants')
    return paginator.paginate(KeyId=key_id).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_metadata_with_backoff(connection, key_id):
    # Returns the describe_key response (metadata under 'KeyMetadata').
    return connection.describe_key(KeyId=key_id)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_key_policies_with_backoff(connection, key_id):
    """Return the names of all policies attached to the key, with retries."""
    return connection.get_paginator('list_key_policies').paginate(KeyId=key_id).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_key_policy_with_backoff(connection, key_id, policy_name):
    # Fetch one named policy document for the key (callers pass 'default').
    return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
def get_kms_tags(connection, module, key_id):
    """Return the complete tag list for a key.

    list_resource_tags has no paginator, so pagination is driven by hand
    through the NextMarker/Marker pair. AccessDenied yields an empty result.
    """
    kwargs = {}
    tags = []
    while True:
        try:
            tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
            tags.extend(tag_response['Tags'])
        except is_boto3_error_code('AccessDeniedException'):
            # No permission to read tags: treat this page as empty.
            tag_response = {}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="Failed to obtain key tags")
        if not tag_response.get('NextMarker'):
            break
        kwargs['Marker'] = tag_response['NextMarker']
    return tags
def get_kms_policies(connection, module, key_id):
    """Return the policy documents attached to a key, or [] on AccessDenied."""
    try:
        policy_names = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
        documents = []
        for policy_name in policy_names:
            documents.append(get_key_policy_with_backoff(connection, key_id, policy_name)['Policy'])
        return documents
    except is_boto3_error_code('AccessDeniedException'):
        return []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to obtain key policies")
def key_matches_filter(key, filtr):
    """Return whether a single (name, value) filter matches the key.

    `key` is the snake_cased dict built by get_key_details, so tag data
    lives under key['tags'] (boto3-style 'Tags' never exists on it).
    """
    if filtr[0] == 'key-id':
        return filtr[1] == key['key_id']
    if filtr[0] == 'tag-key':
        return filtr[1] in key['tags']
    if filtr[0] == 'tag-value':
        return filtr[1] in key['tags'].values()
    if filtr[0] == 'alias':
        return filtr[1] in key['aliases']
    if filtr[0].startswith('tag:'):
        # BUG FIX: previously read key['Tags'], which never exists on the
        # snake_cased key dict and raised KeyError. Use .get() so a key
        # lacking the tag simply fails to match instead of crashing.
        return key['tags'].get(filtr[0][4:]) == filtr[1]
def key_matches_filters(key, filters):
    """Return True when the key satisfies every filter (or there are none)."""
    if not filters:
        return True
    return all(key_matches_filter(key, item) for item in filters.items())
def camel_to_snake_grant(grant):
    """Snakify a boto3 grant, keeping the encryption-context dicts verbatim.

    camel_dict_to_snake_dict would also rewrite the user-supplied encryption
    context keys inside Constraints, so those are restored from the original.
    """
    result = camel_dict_to_snake_dict(grant)
    constraints = grant.get('Constraints', {})
    for camel_key, snake_key in (('EncryptionContextEquals', 'encryption_context_equals'),
                                 ('EncryptionContextSubset', 'encryption_context_subset')):
        if camel_key in constraints:
            result['constraints'][snake_key] = constraints[camel_key]
    return result
def get_key_details(connection, module, key_id):
    """Collect metadata, aliases, grants, tags and policies for one key.

    Returns a snake_cased dict; any AWS API failure exits the module via
    module.fail_json_aws.
    """
    try:
        result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to obtain key metadata")
    # Rename before snakifying so the output ends up with 'key_arn'.
    result['KeyArn'] = result.pop('Arn')
    try:
        aliases = get_kms_aliases_lookup(connection)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to obtain aliases")
    result['aliases'] = aliases.get(result['KeyId'], [])
    result = camel_dict_to_snake_dict(result)
    # grants and tags get snakified differently
    try:
        result['grants'] = [camel_to_snake_grant(grant) for grant in
                            get_kms_grants_with_backoff(connection, key_id)['Grants']]
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to obtain key grants")
    tags = get_kms_tags(connection, module, key_id)
    result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
    result['policies'] = get_kms_policies(connection, module, key_id)
    return result
def get_kms_facts(connection, module):
    """Return detailed facts for every KMS key visible to this account."""
    try:
        listing = get_kms_keys_with_backoff(connection)['Keys']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to obtain keys")
    return [get_key_details(connection, module, entry['KeyId']) for entry in listing]
def convert_grant_params(grant, key):
    """Translate a snake_cased grant spec into create_grant keyword params."""
    params = dict(KeyId=key['key_arn'],
                  GranteePrincipal=grant['grantee_principal'])
    # Optional top-level fields map 1:1 onto their CamelCase API names.
    for source, target in (('operations', 'Operations'),
                           ('retiring_principal', 'RetiringPrincipal'),
                           ('name', 'Name')):
        if grant.get(source):
            params[target] = grant[source]
    constraints = grant.get('constraints')
    if constraints:
        params['Constraints'] = dict()
        if constraints.get('encryption_context_subset'):
            params['Constraints']['EncryptionContextSubset'] = constraints['encryption_context_subset']
        if constraints.get('encryption_context_equals'):
            params['Constraints']['EncryptionContextEquals'] = constraints['encryption_context_equals']
    return params
def different_grant(existing_grant, desired_grant):
    """Return True when the desired grant differs from the existing one.

    Both arguments are snake_cased grant dicts. Missing fields are treated
    as empty: previously a desired grant that omitted 'operations' (which
    the module docs allow) crashed here with TypeError from set(None).
    """
    if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'):
        return True
    if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'):
        return True
    # Operations are compared as sets (order-insensitive), defaulting to empty.
    if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations') or []):
        return True
    if existing_grant.get('constraints') != desired_grant.get('constraints'):
        return True
    return False
def compare_grants(existing_grants, desired_grants, purge_grants=False):
    """Compute (to_add, to_remove) grant lists to reach the desired state.

    Grants are keyed by their 'name'; a grant present on both sides whose
    definition changed is scheduled for both removal and re-creation.
    NOTE(review): a grant dict without a 'name' key would raise KeyError
    here -- confirm upstream validation guarantees the field.
    """
    existing_by_name = dict((grant['name'], grant) for grant in existing_grants)
    desired_by_name = dict((grant['name'], grant) for grant in desired_grants)
    add_names = set(desired_by_name) - set(existing_by_name)
    if purge_grants:
        remove_names = set(existing_by_name) - set(desired_by_name)
    else:
        remove_names = set()
    for name in set(existing_by_name) & set(desired_by_name):
        if different_grant(existing_by_name[name], desired_by_name[name]):
            add_names.add(name)
            remove_names.add(name)
    to_add = [desired_by_name[name] for name in add_names]
    to_remove = [existing_by_name[name] for name in remove_names]
    return to_add, to_remove
def ensure_enabled_disabled(connection, module, key):
    """Align the key's enabled/disabled state with the 'enabled' parameter.

    Returns True when an enable/disable call was made.
    """
    changed = False
    want_enabled = module.params['enabled']
    if key['key_state'] == 'Disabled' and want_enabled:
        try:
            connection.enable_key(KeyId=key['key_arn'])
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to enable key")
    if key['key_state'] == 'Enabled' and not want_enabled:
        try:
            connection.disable_key(KeyId=key['key_arn'])
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to disable key")
    return changed
def update_alias(connection, module, key_id, alias):
    """Attach `alias` to the key when absent; aliases are never renamed.

    Returns True when a new alias was created, False otherwise.
    """
    if not alias.startswith('alias/'):
        alias = 'alias/' + alias
    existing = get_kms_aliases_with_backoff(connection)['Aliases']
    if not key_id:
        return False
    # We will only add new aliases, not rename existing ones
    if alias in [entry['AliasName'] for entry in existing]:
        return False
    try:
        connection.create_alias(KeyId=key_id, AliasName=alias)
        return True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed create key alias")
def update_key(connection, module, key):
    """Bring an existing key in line with the module parameters.

    Handles alias creation, deletion cancellation, enable/disable state,
    description, tags, the key policy and grants, then exits the module
    with the refreshed key facts.
    """
    changed = False
    alias = module.params.get('alias')
    key_id = key['key_arn']
    if alias:
        changed = update_alias(connection, module, key_id, alias) or changed
    if key['key_state'] == 'PendingDeletion':
        try:
            connection.cancel_key_deletion(KeyId=key_id)
            # key is disabled after deletion cancellation
            # set this so that ensure_enabled_disabled works correctly
            key['key_state'] = 'Disabled'
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to cancel key deletion")
    changed = ensure_enabled_disabled(connection, module, key) or changed
    description = module.params.get('description')
    # don't update description if description is not set
    # (means you can't remove a description completely)
    if description and key['description'] != description:
        try:
            connection.update_key_description(KeyId=key_id, Description=description)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to update key description")
    desired_tags = module.params.get('tags')
    to_add, to_remove = compare_aws_tags(key['tags'], desired_tags,
                                         module.params.get('purge_tags'))
    if to_remove:
        try:
            connection.untag_resource(KeyId=key_id, TagKeys=to_remove)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Unable to remove or update tag")
    if to_add:
        try:
            connection.tag_resource(KeyId=key_id,
                                    Tags=[{'TagKey': tag_key, 'TagValue': desired_tags[tag_key]}
                                          for tag_key in to_add])
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Unable to add tag to key")
    # Update existing policy before trying to tweak grants
    if module.params.get('policy'):
        policy = module.params.get('policy')
        try:
            keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default')
            original_policy = json.loads(keyret['Policy'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
            # BUG FIX: json.loads(keyret['Policy']) previously ran
            # unconditionally after this handler, raising NameError whenever
            # the fetch failed. If we can't fetch the current policy assume
            # we're making a change (could occur if we have PutKeyPolicy
            # without GetKeyPolicy).
            original_policy = {}
        try:
            new_policy = json.loads(policy)
        except ValueError as e:
            module.fail_json_aws(e, msg="Unable to parse new policy as JSON")
        if compare_policies(original_policy, new_policy):
            changed = True
            if not module.check_mode:
                try:
                    connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, msg="Unable to update key policy")
    desired_grants = module.params.get('grants')
    existing_grants = key['grants']
    to_add, to_remove = compare_grants(existing_grants, desired_grants,
                                       module.params.get('purge_grants'))
    for grant in to_remove:
        try:
            connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id'])
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Unable to retire grant")
    for grant in to_add:
        grant_params = convert_grant_params(grant, key)
        try:
            connection.create_grant(**grant_params)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Unable to create grant")
    # make results consistent with kms_facts before returning
    result = get_key_details(connection, module, key_id)
    module.exit_json(changed=changed, **result)
def create_key(connection, module):
    """Create a new CMK with alias, tags, policy and grants, then exit."""
    params = dict(BypassPolicyLockoutSafetyCheck=False,
                  Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'),
                  KeyUsage='ENCRYPT_DECRYPT',
                  Origin='AWS_KMS')
    # Description and policy are only sent when supplied.
    if module.params.get('description'):
        params['Description'] = module.params['description']
    if module.params.get('policy'):
        params['Policy'] = module.params['policy']
    try:
        result = connection.create_key(**params)['KeyMetadata']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to create initial key")
    key = get_key_details(connection, module, result['KeyId'])
    alias = module.params['alias']
    if not alias.startswith('alias/'):
        alias = 'alias/' + alias
    try:
        connection.create_alias(AliasName=alias, TargetKeyId=key['key_id'])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to create alias")
    ensure_enabled_disabled(connection, module, key)
    for grant in module.params.get('grants'):
        try:
            connection.create_grant(**convert_grant_params(grant, key))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to add grant to key")
    # make results consistent with kms_facts
    result = get_key_details(connection, module, key['key_id'])
    module.exit_json(changed=True, **result)
def delete_key(connection, module, key_metadata, key_id):
    """Schedule the key for deletion (skipped if already pending) and exit."""
    changed = False
    if key_metadata['KeyState'] != 'PendingDeletion':
        try:
            connection.schedule_key_deletion(KeyId=key_id)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to schedule key for deletion")
    # Report the key's post-change state in the kms_facts shape.
    module.exit_json(changed=changed, **get_key_details(connection, module, key_id))
def get_arn_from_kms_alias(kms, aliasname):
    """Resolve an alias name to the ARN of the key it targets.

    The alias listing only carries the target key id; a second pass over
    list_keys is needed because we want the key's own ARN, not the alias's.
    """
    matches = (a['TargetKeyId'] for a in kms.list_aliases()['Aliases']
               if a['AliasName'] == aliasname)
    key_id = next(matches, None)
    if not key_id:
        raise Exception('could not find alias {0}'.format(aliasname))
    for entry in kms.list_keys()['Keys']:
        if entry['KeyId'] == key_id:
            return entry['KeyArn']
    raise Exception('could not find key from id: {0}'.format(key_id))
def get_arn_from_role_name(iam, rolename):
    """Look up an IAM role by name and return its ARN."""
    role = iam.get_role(RoleName=rolename).get('Role') or {}
    arn = role.get('Arn')
    if arn:
        return arn
    raise Exception('could not find arn for name {0}.'.format(rolename))
def do_policy_grant(module, kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
    """Add or remove *role_arn* in the key policy statements named by *granttypes*.

    Fetches the key's 'default' policy, mutates its statements in place, and
    (unless dry_run) writes the policy back.  Returns a dict with keys
    'changes_needed', 'had_invalid_entries', 'new_policy' and (when changes
    were made or dry_run) 'changed'.
    """
    ret = {}
    keyret = get_key_policy_with_backoff(kms, keyarn, 'default')
    policy = json.loads(keyret['Policy'])

    changes_needed = {}
    # Bail out early if the policy does not have the Sid layout we expect.
    assert_policy_shape(policy)
    had_invalid_entries = False
    for statement in policy['Statement']:
        for granttype in ['role', 'role grant', 'admin']:
            # do we want this grant type? Are we on its statement?
            # and does the role have this grant type?

            # create Principal and 'AWS' so we can safely use them later.
            if not isinstance(statement.get('Principal'), dict):
                statement['Principal'] = dict()
            if 'AWS' in statement['Principal'] and isinstance(statement['Principal']['AWS'], string_types):
                # convert to list
                statement['Principal']['AWS'] = [statement['Principal']['AWS']]
            if not isinstance(statement['Principal'].get('AWS'), list):
                statement['Principal']['AWS'] = list()

            if mode == 'grant' and statement['Sid'] == statement_label[granttype]:
                # we're granting and we recognize this statement ID.
                if granttype in granttypes:
                    invalid_entries = [item for item in statement['Principal']['AWS'] if not item.startswith('arn:aws:iam::')]
                    if clean_invalid_entries and invalid_entries:
                        # we have bad/invalid entries. These are roles that were deleted.
                        # prune the list.
                        valid_entries = [item for item in statement['Principal']['AWS'] if item.startswith('arn:aws:iam::')]
                        statement['Principal']['AWS'] = valid_entries
                        had_invalid_entries = True

                    if role_arn not in statement['Principal']['AWS']:  # needs to be added.
                        changes_needed[granttype] = 'add'
                        if not dry_run:
                            statement['Principal']['AWS'].append(role_arn)
                elif role_arn in statement['Principal']['AWS']:  # not one the places the role should be
                    changes_needed[granttype] = 'remove'
                    if not dry_run:
                        statement['Principal']['AWS'].remove(role_arn)
            elif mode == 'deny' and statement['Sid'] == statement_label[granttype] and role_arn in statement['Principal']['AWS']:
                # we don't selectively deny. that's a grant with a
                # smaller list. so deny=remove all of this arn.
                changes_needed[granttype] = 'remove'
                if not dry_run:
                    statement['Principal']['AWS'].remove(role_arn)

    try:
        # NOTE(review): if json.dumps itself raised, `policy_json_string` would be
        # unbound in the except handler below — consider initialising it first.
        if len(changes_needed) and not dry_run:
            policy_json_string = json.dumps(policy)
            kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string)

            # returns nothing, so we have to just assume it didn't throw
            ret['changed'] = True
    except Exception as e:
        # fail_json exits, so the trailing `raise` is defensive only.
        module.fail_json(msg='Could not update key_policy', new_policy=policy_json_string, details=to_native(e), exception=traceback.format_exc())
        raise

    ret['changes_needed'] = changes_needed
    ret['had_invalid_entries'] = had_invalid_entries
    ret['new_policy'] = policy
    if dry_run:
        # true if changes > 0
        ret['changed'] = len(changes_needed) > 0

    return ret
def assert_policy_shape(policy):
    '''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.

    Raises Exception (listing every problem found) when the policy version is
    unexpected or any of the Sids in `statement_label` is missing.
    '''
    errors = []
    if policy['Version'] != "2012-10-17":
        errors.append('Unknown version/date ({0}) of policy. Things are probably different than we assumed they were.'.format(policy['Version']))

    # Record which of the expected statement Sids are present.
    found_statement_type = {}
    for statement in policy['Statement']:
        for label, sidlabel in statement_label.items():
            if statement['Sid'] == sidlabel:
                found_statement_type[label] = True

    for statementtype in statement_label.keys():
        if not found_statement_type.get(statementtype):
            errors.append('Policy is missing {0}.'.format(statementtype))

    # Idiomatic truthiness check (was `if len(errors):`).
    if errors:
        raise Exception('Problems asserting policy shape. Cowardly refusing to modify it: {0}'.format(' '.join(errors)) + "\n" + str(policy))
    return None
def main():
    """Entry point: create/update/delete a KMS key, or manage its IAM key policy."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            alias=dict(aliases=['key_alias']),
            policy_mode=dict(aliases=['mode'], choices=['grant', 'deny'], default='grant'),
            policy_role_name=dict(aliases=['role_name']),
            policy_role_arn=dict(aliases=['role_arn']),
            policy_grant_types=dict(aliases=['grant_types'], type='list'),
            policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True),
            key_id=dict(aliases=['key_arn']),
            description=dict(),
            enabled=dict(type='bool', default=True),
            tags=dict(type='dict', default={}),
            purge_tags=dict(type='bool', default=False),
            grants=dict(type='list', default=[]),
            policy=dict(),
            purge_grants=dict(type='bool', default=False),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleAWSModule(
        supports_check_mode=True,
        argument_spec=argument_spec,
        required_one_of=[['alias', 'key_id']],
    )

    result = {}
    mode = module.params['policy_mode']

    kms = module.client('kms')
    iam = module.client('iam')

    key_id = module.params.get('key_id')
    alias = module.params.get('alias')
    # Work with the bare alias name internally; the 'alias/' prefix is re-added
    # where the API requires it.
    if alias and alias.startswith('alias/'):
        alias = alias[6:]

    # Fetch/Canonicalize key_id where possible
    if key_id:
        try:
            # Don't use get_key_details it triggers module.fail when the key
            # doesn't exist
            key_metadata = get_kms_metadata_with_backoff(kms, key_id)['KeyMetadata']
            key_id = key_metadata['Arn']
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
            # We can't create keys with a specific ID, if we can't access the
            # key we'll have to fail
            if module.params.get('state') == 'present':
                # BUGFIX: the '%s' placeholder previously had no argument, so the
                # user saw a literal '%s' in the error message.
                module.fail_json(msg="Could not find key with id %s to update" % key_id)
            key_metadata = None
    elif alias:
        try:
            key_metadata = get_kms_metadata_with_backoff(kms, 'alias/%s' % alias)['KeyMetadata']
            key_id = key_metadata['Arn']
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
            key_metadata = None

    if module.params.get('policy_grant_types') or mode == 'deny':
        module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
                         ' and has been deprecated in favour of the policy option.', version='2.13')
        if module.params.get('policy_role_name') and not module.params.get('policy_role_arn'):
            module.params['policy_role_arn'] = get_arn_from_role_name(iam, module.params['policy_role_name'])
        if not module.params.get('policy_role_arn'):
            module.fail_json(msg='policy_role_arn or policy_role_name is required to {0}'.format(module.params['policy_mode']))

        # check the grant types for 'grant' only.
        if mode == 'grant':
            for g in module.params['policy_grant_types']:
                if g not in statement_label:
                    module.fail_json(msg='{0} is an unknown grant type.'.format(g))

        ret = do_policy_grant(module, kms,
                              key_id,
                              module.params['policy_role_arn'],
                              module.params['policy_grant_types'],
                              mode=mode,
                              dry_run=module.check_mode,
                              clean_invalid_entries=module.params['policy_clean_invalid_entries'])
        result.update(ret)

        module.exit_json(**result)
    else:
        if module.params.get('state') == 'present':
            if key_metadata:
                key_details = get_key_details(kms, module, key_id)
                update_key(kms, module, key_details)
            else:
                if key_id:
                    module.fail_json(msg="Could not find key with id %s to update" % key_id)
                else:
                    create_key(kms, module)
        else:
            if key_metadata:
                delete_key(kms, module, key_metadata, key_id)
            else:
                module.exit_json(changed=False)


if __name__ == '__main__':
    main()
|
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Chris Wesseling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from libqtile import layout
import libqtile.manager
import libqtile.config
from ..conftest import no_xinerama
from .layout_utils import assertFocused, assertFocusPath
class StackConfig(object):
    """Minimal qtile configuration for the Stack-layout tests: four groups
    and the two Stack variants (two stacks / one stack) under test."""
    auto_fullscreen = True
    follow_mouse_focus = False
    main = None
    groups = [libqtile.config.Group(name) for name in "abcd"]
    layouts = [
        layout.Stack(num_stacks=2),
        layout.Stack(num_stacks=1),
    ]
    floating_layout = libqtile.layout.floating.Floating()
    keys = []
    mouse = []
    screens = []
def stack_config(func):
    """Decorator: parametrize a test with StackConfig and skip it under Xinerama.

    (Was a lambda assigned to a name — PEP 8 E731.)
    """
    return no_xinerama(pytest.mark.parametrize("qtile", [StackConfig], indirect=True)(func))
def _stacks(self):
stacks = []
for i in self.c.layout.info()["stacks"]:
windows = i["clients"]
current = i["current"]
stacks.append(windows[current:] + windows[:current])
return stacks
@stack_config
def test_stack_commands(qtile):
    # Fresh layout: focus starts on the first stack.
    assert qtile.c.layout.info()["current_stack"] == 0
    qtile.testWindow("one")
    assert _stacks(qtile) == [["one"], []]
    assert qtile.c.layout.info()["current_stack"] == 0
    # The second window lands in (and focuses) the second stack.
    qtile.testWindow("two")
    assert _stacks(qtile) == [["one"], ["two"]]
    assert qtile.c.layout.info()["current_stack"] == 1
    qtile.testWindow("three")
    assert _stacks(qtile) == [["one"], ["three", "two"]]
    assert qtile.c.layout.info()["current_stack"] == 1

    # delete() merges the current stack's clients into the remaining stack.
    qtile.c.layout.delete()
    assert _stacks(qtile) == [["one", "three", "two"]]
    info = qtile.c.groups()["a"]
    assert info["focus"] == "one"
    # Deleting again with a single stack left keeps one stack.
    qtile.c.layout.delete()
    assert len(_stacks(qtile)) == 1

    # add() appends a new empty stack; rotate() cycles the stacks around.
    qtile.c.layout.add()
    assert _stacks(qtile) == [["one", "three", "two"], []]
    qtile.c.layout.rotate()
    assert _stacks(qtile) == [[], ["one", "three", "two"]]
@stack_config
def test_stack_cmd_down(qtile):
    # down() on an empty layout must not raise.
    qtile.c.layout.down()
@stack_config
def test_stack_addremove(qtile):
    # Distribute windows over the two stacks with next().
    one = qtile.testWindow("one")
    qtile.c.layout.next()
    two = qtile.testWindow("two")
    three = qtile.testWindow("three")
    assert _stacks(qtile) == [['one'], ['three', 'two']]
    assert qtile.c.layout.info()["current_stack"] == 1
    # Killing a window keeps focus in its stack while it is non-empty ...
    qtile.kill_window(three)
    assert qtile.c.layout.info()["current_stack"] == 1
    # ... and moves focus away once the stack empties out.
    qtile.kill_window(two)
    assert qtile.c.layout.info()["current_stack"] == 0
    qtile.c.layout.next()
    two = qtile.testWindow("two")
    qtile.c.layout.next()
    assert qtile.c.layout.info()["current_stack"] == 0
    qtile.kill_window(one)
    assert qtile.c.layout.info()["current_stack"] == 1
@stack_config
def test_stack_rotation(qtile):
    # Collapse to a single stack so the rotation order is easy to check.
    qtile.c.layout.delete()
    qtile.testWindow("one")
    qtile.testWindow("two")
    qtile.testWindow("three")
    assert _stacks(qtile) == [["three", "two", "one"]]
    # down()/up() rotate focus through the stack in opposite directions
    # (_stacks reports the stack rotated to put the focused client first).
    qtile.c.layout.down()
    assert _stacks(qtile) == [["one", "three", "two"]]
    qtile.c.layout.up()
    assert _stacks(qtile) == [["three", "two", "one"]]
    qtile.c.layout.down()
    qtile.c.layout.down()
    assert _stacks(qtile) == [["two", "one", "three"]]
@stack_config
def test_stack_nextprev(qtile):
    # next()/previous() cycle focus across the clients of all stacks,
    # wrapping around at either end.
    qtile.c.layout.add()
    one = qtile.testWindow("one")
    two = qtile.testWindow("two")
    three = qtile.testWindow("three")
    assert qtile.c.groups()["a"]["focus"] == "three"
    qtile.c.layout.next()
    assert qtile.c.groups()["a"]["focus"] == "one"
    qtile.c.layout.previous()
    assert qtile.c.groups()["a"]["focus"] == "three"
    qtile.c.layout.previous()
    assert qtile.c.groups()["a"]["focus"] == "two"
    qtile.c.layout.next()
    qtile.c.layout.next()
    qtile.c.layout.next()
    assert qtile.c.groups()["a"]["focus"] == "two"

    # Cycling keeps working as windows are killed ...
    qtile.kill_window(three)
    qtile.c.layout.next()
    assert qtile.c.groups()["a"]["focus"] == "one"
    qtile.c.layout.previous()
    assert qtile.c.groups()["a"]["focus"] == "two"
    qtile.c.layout.next()
    qtile.kill_window(two)
    qtile.c.layout.next()
    assert qtile.c.groups()["a"]["focus"] == "one"

    # ... and is a harmless no-op once the group is empty.
    qtile.kill_window(one)
    qtile.c.layout.next()
    assert qtile.c.groups()["a"]["focus"] is None
    qtile.c.layout.previous()
    assert qtile.c.groups()["a"]["focus"] is None
@stack_config
def test_stack_window_removal(qtile):
    """Killing a window below the focus point must not break the layout."""
    qtile.c.layout.next()
    # The first window's handle was previously bound to an unused local.
    qtile.testWindow("one")
    two = qtile.testWindow("two")
    qtile.c.layout.down()
    qtile.kill_window(two)
@stack_config
def test_stack_split(qtile):
    """toggle_split() flips the 'split' flag of the current stack."""
    # Window handles were previously bound to unused locals.
    qtile.testWindow("one")
    qtile.testWindow("two")
    qtile.testWindow("three")
    stacks = qtile.c.layout.info()["stacks"]
    assert not stacks[1]["split"]
    qtile.c.layout.toggle_split()
    stacks = qtile.c.layout.info()["stacks"]
    assert stacks[1]["split"]
@stack_config
def test_stack_shuffle(qtile):
    """Shuffling rotates the clients but keeps focus pinned to the same window."""
    # Switch to the single-stack layout variant.
    qtile.c.next_layout()
    # Window handles were previously bound to unused locals.
    qtile.testWindow("one")
    qtile.testWindow("two")
    qtile.testWindow("three")
    stack = qtile.c.layout.info()["stacks"][0]
    assert stack["clients"][stack["current"]] == "three"
    for _ in range(5):
        qtile.c.layout.shuffle_up()
        stack = qtile.c.layout.info()["stacks"][0]
        assert stack["clients"][stack["current"]] == "three"
    for _ in range(5):
        qtile.c.layout.shuffle_down()
        stack = qtile.c.layout.info()["stacks"][0]
        assert stack["clients"][stack["current"]] == "three"
@stack_config
def test_stack_client_to(qtile):
    """client_to_previous()/client_to_next() move the focused client between
    stacks, wrapping around and merging into the target stack."""
    # Window handles were previously bound to unused locals.
    qtile.testWindow("one")
    qtile.testWindow("two")
    assert qtile.c.layout.info()["stacks"][0]["clients"] == ["one"]
    qtile.c.layout.client_to_previous()
    assert qtile.c.layout.info()["stacks"][0]["clients"] == ["two", "one"]
    qtile.c.layout.client_to_previous()
    assert qtile.c.layout.info()["stacks"][0]["clients"] == ["one"]
    assert qtile.c.layout.info()["stacks"][1]["clients"] == ["two"]
    qtile.c.layout.client_to_next()
    assert qtile.c.layout.info()["stacks"][0]["clients"] == ["two", "one"]
@stack_config
def test_stack_info(qtile):
    """layout.info() reports the stacks once a window exists."""
    # The window handle was previously bound to an unused local.
    qtile.testWindow("one")
    assert qtile.c.layout.info()["stacks"]
@stack_config
def test_stack_window_focus_cycle(qtile):
    # setup 3 tiled and two floating clients
    qtile.testWindow("one")
    qtile.testWindow("two")
    qtile.testWindow("float1")
    qtile.c.window.toggle_floating()
    qtile.testWindow("float2")
    qtile.c.window.toggle_floating()
    qtile.testWindow("three")

    # test preconditions: stack adds clients at the position of the current one
    assert qtile.c.layout.info()['clients'] == ['three', 'one', 'two']
    # last added window has focus
    assertFocused(qtile, "three")

    # assert window focus cycle follows layout order, floats last
    assertFocusPath(qtile, 'one', 'two', 'float1', 'float2', 'three')
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import zlib
from autobahn.websocket.compress_base import PerMessageCompressOffer, \
PerMessageCompressOfferAccept, \
PerMessageCompressResponse, \
PerMessageCompressResponseAccept, \
PerMessageCompress
# Public API of this module.
__all__ = (
    'PerMessageDeflateMixin',
    'PerMessageDeflateOffer',
    'PerMessageDeflateOfferAccept',
    'PerMessageDeflateResponse',
    'PerMessageDeflateResponseAccept',
    'PerMessageDeflate',
)
class PerMessageDeflateMixin(object):
    """
    Mixin class for this extension.
    """

    # Name of this WebSocket extension.
    EXTENSION_NAME = "permessage-deflate"

    # Permissible values for the window size parameter.
    # Higher values use more memory, but produce smaller output. The default is 15.
    WINDOW_SIZE_PERMISSIBLE_VALUES = list(range(8, 16))

    # Permissible values for the memory level parameter.
    # Higher values use more memory, but are faster and produce smaller output. The default is 8.
    MEM_LEVEL_PERMISSIBLE_VALUES = list(range(1, 10))
class PerMessageDeflateOffer(PerMessageCompressOffer, PerMessageDeflateMixin):
    """
    Set of extension parameters for `permessage-deflate` WebSocket extension
    offered by a client to a server.
    """

    @classmethod
    def parse(cls, params):
        """
        Parses a WebSocket extension offer for `permessage-deflate` provided by a client to a server.

        :param params: Output from :func:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`.
        :type params: list

        :returns: object -- A new instance of :class:`autobahn.compress.PerMessageDeflateOffer`.

        :raises Exception: on duplicate, unknown or ill-valued extension parameters.
        """
        # extension parameter defaults
        acceptMaxWindowBits = False
        acceptNoContextTakeover = True
        # acceptNoContextTakeover = False # FIXME: this may change in draft
        requestMaxWindowBits = 0
        requestNoContextTakeover = False

        # verify/parse client ("client-to-server direction") parameters of permessage-deflate offer
        for p in params:

            if len(params[p]) > 1:
                raise Exception("multiple occurrence of extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))

            val = params[p][0]

            if p == 'client_max_window_bits':
                ##
                # see: https://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-18
                # 8.1.2.2. client_max_window_bits
                # ".. This parameter has no value or a decimal integer value without
                # leading zeroes between 8 to 15 inclusive ..""
                # noinspection PySimplifyBooleanCheck
                if val is not True:
                    try:
                        val = int(val)
                    # was a bare `except:` — int() only raises these two
                    except (TypeError, ValueError):
                        raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                    else:
                        if val not in PerMessageDeflateMixin.WINDOW_SIZE_PERMISSIBLE_VALUES:
                            raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                        else:
                            # FIXME (maybe): possibly forward/process the client hint!
                            # acceptMaxWindowBits = val
                            acceptMaxWindowBits = True
                else:
                    acceptMaxWindowBits = True

            elif p == 'client_no_context_takeover':
                # noinspection PySimplifyBooleanCheck
                if val is not True:
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    acceptNoContextTakeover = True

            elif p == 'server_max_window_bits':
                try:
                    val = int(val)
                # was a bare `except:` — int() only raises these two
                except (TypeError, ValueError):
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    if val not in PerMessageDeflateMixin.WINDOW_SIZE_PERMISSIBLE_VALUES:
                        raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                    else:
                        requestMaxWindowBits = val

            elif p == 'server_no_context_takeover':
                # noinspection PySimplifyBooleanCheck
                if val is not True:
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    requestNoContextTakeover = True

            else:
                raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))

        offer = cls(acceptNoContextTakeover,
                    acceptMaxWindowBits,
                    requestNoContextTakeover,
                    requestMaxWindowBits)
        return offer

    def __init__(self,
                 acceptNoContextTakeover=True,
                 acceptMaxWindowBits=True,
                 requestNoContextTakeover=False,
                 requestMaxWindowBits=0):
        """
        Constructor.

        :param acceptNoContextTakeover: Iff true, client accepts "no context takeover" feature.
        :type acceptNoContextTakeover: bool
        :param acceptMaxWindowBits: Iff true, client accepts setting "max window size".
        :type acceptMaxWindowBits: bool
        :param requestNoContextTakeover: Iff true, client request "no context takeover" feature.
        :type requestNoContextTakeover: bool
        :param requestMaxWindowBits: Iff non-zero, client requests given "max window size" - must be 8-15.
        :type requestMaxWindowBits: int

        :raises Exception: on ill-typed or ill-valued arguments.
        """
        # `type(x) != bool` replaced with isinstance (bool cannot be subclassed,
        # so behavior is identical).
        if not isinstance(acceptNoContextTakeover, bool):
            raise Exception("invalid type %s for acceptNoContextTakeover" % type(acceptNoContextTakeover))
        self.acceptNoContextTakeover = acceptNoContextTakeover

        if not isinstance(acceptMaxWindowBits, bool):
            raise Exception("invalid type %s for acceptMaxWindowBits" % type(acceptMaxWindowBits))
        self.acceptMaxWindowBits = acceptMaxWindowBits

        if not isinstance(requestNoContextTakeover, bool):
            raise Exception("invalid type %s for requestNoContextTakeover" % type(requestNoContextTakeover))
        self.requestNoContextTakeover = requestNoContextTakeover

        if requestMaxWindowBits != 0 and requestMaxWindowBits not in self.WINDOW_SIZE_PERMISSIBLE_VALUES:
            raise Exception("invalid value %s for requestMaxWindowBits - permissible values %s" % (requestMaxWindowBits, self.WINDOW_SIZE_PERMISSIBLE_VALUES))
        self.requestMaxWindowBits = requestMaxWindowBits

    def getExtensionString(self):
        """
        Returns the WebSocket extension configuration string as sent to the server.

        :returns: str -- PMCE configuration string.
        """
        pmceString = self.EXTENSION_NAME
        if self.acceptNoContextTakeover:
            pmceString += "; client_no_context_takeover"
        if self.acceptMaxWindowBits:
            pmceString += "; client_max_window_bits"
        if self.requestNoContextTakeover:
            pmceString += "; server_no_context_takeover"
        if self.requestMaxWindowBits != 0:
            pmceString += "; server_max_window_bits=%d" % self.requestMaxWindowBits
        return pmceString

    def __json__(self):
        """
        Returns a JSON serializable object representation.

        :returns: object -- JSON serializable representation.
        """
        return {'extension': self.EXTENSION_NAME,
                'acceptNoContextTakeover': self.acceptNoContextTakeover,
                'acceptMaxWindowBits': self.acceptMaxWindowBits,
                'requestNoContextTakeover': self.requestNoContextTakeover,
                'requestMaxWindowBits': self.requestMaxWindowBits}

    def __repr__(self):
        """
        Returns Python object representation that can be eval'ed to reconstruct the object.

        :returns: str -- Python string representation.
        """
        return "PerMessageDeflateOffer(acceptNoContextTakeover = %s, acceptMaxWindowBits = %s, requestNoContextTakeover = %s, requestMaxWindowBits = %s)" % (self.acceptNoContextTakeover, self.acceptMaxWindowBits, self.requestNoContextTakeover, self.requestMaxWindowBits)
class PerMessageDeflateOfferAccept(PerMessageCompressOfferAccept, PerMessageDeflateMixin):
    """
    Set of parameters with which to accept an `permessage-deflate` offer
    from a client by a server.
    """

    def __init__(self,
                 offer,
                 requestNoContextTakeover=False,
                 requestMaxWindowBits=0,
                 noContextTakeover=None,
                 windowBits=None,
                 memLevel=None):
        """
        Constructor.

        :param offer: The offer being accepted.
        :type offer: Instance of :class:`autobahn.compress.PerMessageDeflateOffer`.
        :param requestNoContextTakeover: Iff true, server request "no context takeover" feature.
        :type requestNoContextTakeover: bool
        :param requestMaxWindowBits: Iff non-zero, server requests given "max window size" - must be 8-15.
           (The docstring previously documented a nonexistent `requestMaxCompressLevel` parameter.)
        :type requestMaxWindowBits: int
        :param noContextTakeover: Override server ("server-to-client direction") context takeover (this must be compatible with offer).
        :type noContextTakeover: bool
        :param windowBits: Override server ("server-to-client direction") window size (this must be compatible with offer).
        :type windowBits: int
        :param memLevel: Set server ("server-to-client direction") memory level.
        :type memLevel: int

        :raises Exception: on arguments incompatible with the offer, or ill-typed/ill-valued arguments.
        """
        if not isinstance(offer, PerMessageDeflateOffer):
            raise Exception("invalid type %s for offer" % type(offer))
        self.offer = offer

        # `type(x) != bool` replaced with isinstance (behavior identical).
        if not isinstance(requestNoContextTakeover, bool):
            raise Exception("invalid type %s for requestNoContextTakeover" % type(requestNoContextTakeover))
        if requestNoContextTakeover and not offer.acceptNoContextTakeover:
            raise Exception("invalid value %s for requestNoContextTakeover - feature unsupported by client" % requestNoContextTakeover)
        self.requestNoContextTakeover = requestNoContextTakeover

        if requestMaxWindowBits != 0 and requestMaxWindowBits not in self.WINDOW_SIZE_PERMISSIBLE_VALUES:
            raise Exception("invalid value %s for requestMaxWindowBits - permissible values %s" % (requestMaxWindowBits, self.WINDOW_SIZE_PERMISSIBLE_VALUES))
        if requestMaxWindowBits != 0 and not offer.acceptMaxWindowBits:
            raise Exception("invalid value %s for requestMaxWindowBits - feature unsupported by client" % requestMaxWindowBits)
        self.requestMaxWindowBits = requestMaxWindowBits

        # The attribute assignments below are unconditional: __json__/__repr__
        # read them even when the corresponding override is None.
        if noContextTakeover is not None:
            if not isinstance(noContextTakeover, bool):
                raise Exception("invalid type %s for noContextTakeover" % type(noContextTakeover))
            if offer.requestNoContextTakeover and not noContextTakeover:
                raise Exception("invalid value %s for noContextTakeover - client requested feature" % noContextTakeover)
        self.noContextTakeover = noContextTakeover

        if windowBits is not None:
            if windowBits not in self.WINDOW_SIZE_PERMISSIBLE_VALUES:
                raise Exception("invalid value %s for windowBits - permissible values %s" % (windowBits, self.WINDOW_SIZE_PERMISSIBLE_VALUES))
            if offer.requestMaxWindowBits != 0 and windowBits > offer.requestMaxWindowBits:
                raise Exception("invalid value %s for windowBits - client requested lower maximum value" % windowBits)
        self.windowBits = windowBits

        if memLevel is not None:
            if memLevel not in self.MEM_LEVEL_PERMISSIBLE_VALUES:
                raise Exception("invalid value %s for memLevel - permissible values %s" % (memLevel, self.MEM_LEVEL_PERMISSIBLE_VALUES))
        self.memLevel = memLevel

    def getExtensionString(self):
        """
        Returns the WebSocket extension configuration string as sent to the server.

        :returns: str -- PMCE configuration string.
        """
        pmceString = self.EXTENSION_NAME
        if self.offer.requestNoContextTakeover:
            pmceString += "; server_no_context_takeover"
        if self.offer.requestMaxWindowBits != 0:
            pmceString += "; server_max_window_bits=%d" % self.offer.requestMaxWindowBits
        if self.requestNoContextTakeover:
            pmceString += "; client_no_context_takeover"
        if self.requestMaxWindowBits != 0:
            pmceString += "; client_max_window_bits=%d" % self.requestMaxWindowBits
        return pmceString

    def __json__(self):
        """
        Returns a JSON serializable object representation.

        :returns: object -- JSON serializable representation.
        """
        return {'extension': self.EXTENSION_NAME,
                'offer': self.offer.__json__(),
                'requestNoContextTakeover': self.requestNoContextTakeover,
                'requestMaxWindowBits': self.requestMaxWindowBits,
                'noContextTakeover': self.noContextTakeover,
                'windowBits': self.windowBits,
                'memLevel': self.memLevel}

    def __repr__(self):
        """
        Returns Python object representation that can be eval'ed to reconstruct the object.

        :returns: str -- Python string representation.
        """
        return "PerMessageDeflateOfferAccept(offer = %s, requestNoContextTakeover = %s, requestMaxWindowBits = %s, noContextTakeover = %s, windowBits = %s, memLevel = %s)" % (self.offer.__repr__(), self.requestNoContextTakeover, self.requestMaxWindowBits, self.noContextTakeover, self.windowBits, self.memLevel)
class PerMessageDeflateResponse(PerMessageCompressResponse, PerMessageDeflateMixin):
    """
    Set of parameters for `permessage-deflate` responded by server.
    """

    @classmethod
    def parse(cls, params):
        """
        Parses a WebSocket extension response for `permessage-deflate` provided by a server to a client.

        :param params: Output from :func:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`.
        :type params: list

        :returns: object -- A new instance of :class:`autobahn.compress.PerMessageDeflateResponse`.

        :raises Exception: on duplicate, unknown or ill-valued extension parameters.
        """
        client_max_window_bits = 0
        client_no_context_takeover = False
        server_max_window_bits = 0
        server_no_context_takeover = False

        for p in params:

            if len(params[p]) > 1:
                raise Exception("multiple occurrence of extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))

            val = params[p][0]

            if p == 'client_max_window_bits':
                try:
                    val = int(val)
                # was a bare `except:` — int() only raises these two
                except (TypeError, ValueError):
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    if val not in PerMessageDeflateMixin.WINDOW_SIZE_PERMISSIBLE_VALUES:
                        raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                    else:
                        client_max_window_bits = val

            elif p == 'client_no_context_takeover':
                # noinspection PySimplifyBooleanCheck
                if val is not True:
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    client_no_context_takeover = True

            elif p == 'server_max_window_bits':
                try:
                    val = int(val)
                # was a bare `except:` — int() only raises these two
                except (TypeError, ValueError):
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    if val not in PerMessageDeflateMixin.WINDOW_SIZE_PERMISSIBLE_VALUES:
                        raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                    else:
                        server_max_window_bits = val

            elif p == 'server_no_context_takeover':
                # noinspection PySimplifyBooleanCheck
                if val is not True:
                    raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
                else:
                    server_no_context_takeover = True

            else:
                raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))

        response = cls(client_max_window_bits,
                       client_no_context_takeover,
                       server_max_window_bits,
                       server_no_context_takeover)
        return response

    def __init__(self,
                 client_max_window_bits,
                 client_no_context_takeover,
                 server_max_window_bits,
                 server_no_context_takeover):
        # Parameters as parsed from the server's extension response; no
        # validation here — parse() has already vetted the values.
        self.client_max_window_bits = client_max_window_bits
        self.client_no_context_takeover = client_no_context_takeover
        self.server_max_window_bits = server_max_window_bits
        self.server_no_context_takeover = server_no_context_takeover

    def __json__(self):
        """
        Returns a JSON serializable object representation.

        :returns: object -- JSON serializable representation.
        """
        return {'extension': self.EXTENSION_NAME,
                'client_max_window_bits': self.client_max_window_bits,
                'client_no_context_takeover': self.client_no_context_takeover,
                'server_max_window_bits': self.server_max_window_bits,
                'server_no_context_takeover': self.server_no_context_takeover}

    def __repr__(self):
        """
        Returns Python object representation that can be eval'ed to reconstruct the object.

        :returns: str -- Python string representation.
        """
        return "PerMessageDeflateResponse(client_max_window_bits = %s, client_no_context_takeover = %s, server_max_window_bits = %s, server_no_context_takeover = %s)" % (self.client_max_window_bits, self.client_no_context_takeover, self.server_max_window_bits, self.server_no_context_takeover)
class PerMessageDeflateResponseAccept(PerMessageCompressResponseAccept, PerMessageDeflateMixin):
    """
    Set of parameters with which to accept an `permessage-deflate` response
    from a server by a client.
    """

    def __init__(self,
                 response,
                 noContextTakeover=None,
                 windowBits=None,
                 memLevel=None):
        """
        Constructor.

        :param response: The response being accepted.
        :type response: Instance of :class:`autobahn.compress.PerMessageDeflateResponse`.
        :param noContextTakeover: Override client ("client-to-server direction") context takeover (this must be compatible with response).
        :type noContextTakeover: bool
        :param windowBits: Override client ("client-to-server direction") window size (this must be compatible with response).
        :type windowBits: int
        :param memLevel: Set client ("client-to-server direction") memory level.
        :type memLevel: int

        :raises Exception: on arguments incompatible with the response, or ill-typed/ill-valued arguments.
        """
        if not isinstance(response, PerMessageDeflateResponse):
            raise Exception("invalid type %s for response" % type(response))
        self.response = response

        # The attribute assignments below are unconditional: __json__/__repr__
        # read them even when the corresponding override is None.
        if noContextTakeover is not None:
            # `type(x) != bool` replaced with isinstance (behavior identical).
            if not isinstance(noContextTakeover, bool):
                raise Exception("invalid type %s for noContextTakeover" % type(noContextTakeover))
            if response.client_no_context_takeover and not noContextTakeover:
                raise Exception("invalid value %s for noContextTakeover - server requested feature" % noContextTakeover)
        self.noContextTakeover = noContextTakeover

        if windowBits is not None:
            if windowBits not in self.WINDOW_SIZE_PERMISSIBLE_VALUES:
                raise Exception("invalid value %s for windowBits - permissible values %s" % (windowBits, self.WINDOW_SIZE_PERMISSIBLE_VALUES))
            if response.client_max_window_bits != 0 and windowBits > response.client_max_window_bits:
                raise Exception("invalid value %s for windowBits - server requested lower maximum value" % windowBits)
        self.windowBits = windowBits

        if memLevel is not None:
            if memLevel not in self.MEM_LEVEL_PERMISSIBLE_VALUES:
                raise Exception("invalid value %s for memLevel - permissible values %s" % (memLevel, self.MEM_LEVEL_PERMISSIBLE_VALUES))
        self.memLevel = memLevel

    def __json__(self):
        """
        Returns a JSON serializable object representation.

        :returns: object -- JSON serializable representation.
        """
        return {'extension': self.EXTENSION_NAME,
                'response': self.response.__json__(),
                'noContextTakeover': self.noContextTakeover,
                'windowBits': self.windowBits,
                'memLevel': self.memLevel}

    def __repr__(self):
        """
        Returns Python object representation that can be eval'ed to reconstruct the object.

        :returns: str -- Python string representation.
        """
        return "PerMessageDeflateResponseAccept(response = %s, noContextTakeover = %s, windowBits = %s, memLevel = %s)" % (self.response.__repr__(), self.noContextTakeover, self.windowBits, self.memLevel)
# noinspection PyArgumentList
class PerMessageDeflate(PerMessageCompress, PerMessageDeflateMixin):
    """
    `permessage-deflate` WebSocket extension processor.
    Wraps zlib compress/decompress objects, recreating them per message
    when "no context takeover" was negotiated for that direction.
    """
    # Defaults substituted when the negotiated value is 0 ("no limit requested").
    DEFAULT_WINDOW_BITS = zlib.MAX_WBITS
    DEFAULT_MEM_LEVEL = 8
    @classmethod
    def createFromResponseAccept(cls, isServer, accept):
        """Build a processor on the client side from an accepted server response."""
        # accept: instance of PerMessageDeflateResponseAccept
        pmce = cls(isServer,
                   accept.response.server_no_context_takeover,
                   accept.noContextTakeover if accept.noContextTakeover is not None else accept.response.client_no_context_takeover,
                   accept.response.server_max_window_bits,
                   accept.windowBits if accept.windowBits is not None else accept.response.client_max_window_bits,
                   accept.memLevel)
        return pmce
    @classmethod
    def createFromOfferAccept(cls, isServer, accept):
        """Build a processor on the server side from an accepted client offer."""
        # accept: instance of PerMessageDeflateOfferAccept
        pmce = cls(isServer,
                   accept.noContextTakeover if accept.noContextTakeover is not None else accept.offer.requestNoContextTakeover,
                   accept.requestNoContextTakeover,
                   accept.windowBits if accept.windowBits is not None else accept.offer.requestMaxWindowBits,
                   accept.requestMaxWindowBits,
                   accept.memLevel)
        return pmce
    def __init__(self,
                 isServer,
                 server_no_context_takeover,
                 client_no_context_takeover,
                 server_max_window_bits,
                 client_max_window_bits,
                 mem_level):
        # True when this endpoint is the server (selects which negotiated
        # parameters apply to outgoing vs. incoming messages).
        self._isServer = isServer
        self.server_no_context_takeover = server_no_context_takeover
        self.client_no_context_takeover = client_no_context_takeover
        # 0 means "unconstrained" and is replaced by the zlib maximum.
        self.server_max_window_bits = server_max_window_bits if server_max_window_bits != 0 else self.DEFAULT_WINDOW_BITS
        self.client_max_window_bits = client_max_window_bits if client_max_window_bits != 0 else self.DEFAULT_WINDOW_BITS
        self.mem_level = mem_level if mem_level else self.DEFAULT_MEM_LEVEL
        # Lazily created; reset per message when no-context-takeover applies.
        self._compressor = None
        self._decompressor = None
    def __json__(self):
        """Returns a JSON serializable object representation."""
        return {'extension': self.EXTENSION_NAME,
                'isServer': self._isServer,
                'server_no_context_takeover': self.server_no_context_takeover,
                'client_no_context_takeover': self.client_no_context_takeover,
                'server_max_window_bits': self.server_max_window_bits,
                'client_max_window_bits': self.client_max_window_bits,
                'mem_level': self.mem_level}
    def __repr__(self):
        """Returns Python object representation that can be eval'ed to reconstruct the object."""
        return "PerMessageDeflate(isServer = %s, server_no_context_takeover = %s, client_no_context_takeover = %s, server_max_window_bits = %s, client_max_window_bits = %s, mem_level = %s)" % (self._isServer, self.server_no_context_takeover, self.client_no_context_takeover, self.server_max_window_bits, self.client_max_window_bits, self.mem_level)
    def startCompressMessage(self):
        """Create (or reuse) the compressor for an outgoing message."""
        # compressobj([level[, method[, wbits[, memlevel[, strategy]]]]])
        # http://bugs.python.org/issue19278
        # http://hg.python.org/cpython/rev/c54c8e71b79a
        # Negative wbits selects a raw deflate stream (no zlib header/trailer).
        if self._isServer:
            if self._compressor is None or self.server_no_context_takeover:
                self._compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -self.server_max_window_bits, self.mem_level)
        else:
            if self._compressor is None or self.client_no_context_takeover:
                self._compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -self.client_max_window_bits, self.mem_level)
    def compressMessageData(self, data):
        """Compress one chunk of outgoing message payload."""
        return self._compressor.compress(data)
    def endCompressMessage(self):
        """Finish the message; strip the 00 00 FF FF tail added by Z_SYNC_FLUSH."""
        data = self._compressor.flush(zlib.Z_SYNC_FLUSH)
        return data[:-4]
    def startDecompressMessage(self):
        """Create (or reuse) the decompressor for an incoming message."""
        if self._isServer:
            if self._decompressor is None or self.client_no_context_takeover:
                self._decompressor = zlib.decompressobj(-self.client_max_window_bits)
        else:
            if self._decompressor is None or self.server_no_context_takeover:
                self._decompressor = zlib.decompressobj(-self.server_max_window_bits)
    def decompressMessageData(self, data):
        """Decompress one chunk of incoming message payload."""
        return self._decompressor.decompress(data)
    def endDecompressMessage(self):
        # Eat stripped LEN and NLEN field of a non-compressed block added
        # for Z_SYNC_FLUSH.
        self._decompressor.decompress(b'\x00\x00\xff\xff')
|
|
from __future__ import division
import math
import json
import itertools
import cPickle as pickle
from multiprocessing import Pool, cpu_count
from collections import namedtuple
from text_normalize import normalize_text, n_grams
from similarity import jaccard_sim, cosine_sim, page_sim
from ermdb import db_open, db_close, db_get
from ermcfg import TWEET_DB_FILENAME, MATCH_URL_DB_FILENAME
from ermcfg import ER_CENTROID_EN_DB_FILENAME
# Ngrams
# Bundle of n-gram collections for one text: unigrams, bigrams, trigrams, 4-grams.
Ngrams = namedtuple('Ngrams', ['uni', 'bi', 'tri', 'quad'])
def grouper(n, iterable):
    """Yield consecutive tuples of up to *n* items from *iterable*.

    The final tuple may be shorter than *n*; an exhausted (or empty)
    iterable yields nothing.
    """
    source = iter(iterable)
    chunk = tuple(itertools.islice(source, n))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(source, n))
def get_related():
    """Load the tweet_id -> event_id training matches from the matched-URL DB."""
    match_db = db_open(MATCH_URL_DB_FILENAME, readonly=True, create=False)
    with match_db.begin(write=False) as txn:
        # The cursor iterates (key, value) pairs, which is exactly what
        # the dict constructor consumes.
        related = dict(txn.cursor())
    db_close(match_db)
    return related
def get_data_raw(related, event_mult=8):
    '''Load Tweet text, Article Title and Article Body.
    Reads every matched tweet's text and its matched article, then tops up
    with unmatched articles until event_mult times the matched-article
    count is loaded. Returns (tweets, titles, bodies) keyed by id.
    '''
    tweets = {}
    titles = {}
    bodies = {}
    missing_articles = 0
    print('\nLoading from DBs')
    tweet_db = db_open(TWEET_DB_FILENAME, create=False, readonly=True)
    article_db = db_open(ER_CENTROID_EN_DB_FILENAME, create=False,
                         readonly=True)
    for tweet_id in related:
        tweet_val = db_get(tweet_db, tweet_id)
        tweet = json.loads(tweet_val)[u'text']
        tweets[tweet_id] = tweet
        # get article if necessary
        event_id = related[tweet_id]
        if event_id in titles:
            # NOTE(review): redundant assignment — the tweet was already
            # stored above; kept for byte-identical behavior.
            tweets[tweet_id] = tweet
            continue
        article = db_get(article_db, event_id)
        if article is None:
            # Article missing: the tweet stays in `tweets`/`related` even
            # though its event has no title/body — presumably filtered
            # later; TODO confirm downstream handles this.
            missing_articles += 1
            continue
        article = json.loads(article)
        titles[event_id] = article['title']
        bodies[event_id] = article['body']
    total_articles = len(titles)
    max_articles = total_articles * event_mult
    print('Missing articles: %d' % (missing_articles,))
    print('Matched articles: %d' % (total_articles,))
    print('Matched tweeets: %d' % (len(tweets),))
    # Load ALL articles (even unmatched articles)
    with article_db.begin(write=False) as txn:
        with txn.cursor() as curs:
            for event_id, article in curs:
                if event_id in titles:
                    continue
                else:
                    article = json.loads(article)
                    titles[event_id] = article['title']
                    bodies[event_id] = article['body']
                    total_articles += 1
                    # Stop once the unmatched top-up reaches the cap.
                    if total_articles >= max_articles:
                        break
    # close the dbs
    db_close(tweet_db)
    db_close(article_db)
    print('Matched tweeets: %d' % (len(tweets),))
    print('Total articles loaded: %d' % (len(titles), ))
    return (tweets, titles, bodies)
def generate_article_ngrams(article_data):
    """Normalize one article's title and body and build 1..4-gram sets.

    *article_data* is an (event_id, title_text, body_text) tuple; returns
    (event_id, title_ngrams, body_ngrams).
    """
    event_id, raw_title, raw_body = article_data

    def to_ngrams(raw):
        # Normalize first so the n-grams are built from cleaned tokens.
        text = normalize_text(raw)
        return Ngrams(*(n_grams(text, size) for size in (1, 2, 3, 4)))

    return (event_id, to_ngrams(raw_title), to_ngrams(raw_body))
def generate_tweet_ngrams(tweet_data, min_tokens=4):
    """Normalize a tweet's text and build its 1..4-gram sets.

    Returns (tweet_id, ngrams), or None when the normalized text has
    fewer than *min_tokens* tokens so the caller can drop it.
    """
    tweet_id, raw_text = tweet_data
    text = normalize_text(raw_text)
    if len(text.split()) < min_tokens:
        return None
    grams = Ngrams(*(n_grams(text, size) for size in (1, 2, 3, 4)))
    return (tweet_id, grams)
def get_ngrams(related, tweets, titles, bodies):
    '''
    Output is the ngrams for articles and tweets
    Replaces the raw strings in titles/bodies/tweets with Ngrams tuples,
    computed in parallel chunk by chunk; tweets that are too short are
    dropped from `tweets` and pruned from `related`.
    '''
    pool = Pool(cpu_count())
    #
    # Articles
    #
    print('\nNormalizing Article text and Generating Article Ngrams')
    chunk_size = 1000
    event_ids = titles.keys()
    total_chunks = math.ceil(len(event_ids) / chunk_size)
    n_chunks = 0
    for chunk in grouper(chunk_size, event_ids):
        # Build (event_id, title, body) work items and overwrite the raw
        # strings with their Ngrams in place.
        data = [(event_id, titles[event_id], bodies[event_id])
                for event_id in chunk]
        results = pool.map(generate_article_ngrams, data)
        for x in results:
            event_id, title, body = x
            titles[event_id] = title
            bodies[event_id] = body
        n_chunks += 1
        p = (n_chunks / total_chunks) * 100
        print('\r%f%%' % (p,))
    #
    # Tweets
    #
    tweets_new = dict()
    print('\nNormalizing Tweet Text and Generating Tweet Ngrams')
    chunk_size = 1000
    tweet_ids = tweets.keys()
    total_chunks = math.ceil(len(tweet_ids) / chunk_size)
    n_chunks = 0
    for chunk in grouper(chunk_size, tweet_ids):
        data = [(tweet_id, tweets[tweet_id]) for tweet_id in chunk]
        results = pool.map(generate_tweet_ngrams, data)
        for x in results:
            # None means the tweet fell below the minimum token count.
            if x is not None:
                tweet_id, tweet_ngrams = x
                tweets_new[tweet_id] = tweet_ngrams
        n_chunks += 1
        p = (n_chunks / total_chunks) * 100
        print('\r%f%%' % (p,))
    # Update tweets and Related
    tweets = tweets_new
    for tweet_id in list(related.keys()):
        if tweet_id not in tweets:
            del related[tweet_id]
    print('New Matched Size = %d' % (len(related),))
    return (tweets, titles, bodies)
def calc_sims(tweet_grams, title_grams, body_grams, n):
    """Compute the four similarity features for a single n-gram order.

    *n* is accepted for signature compatibility with callers but does not
    influence the computation.
    """
    return [
        jaccard_sim(tweet_grams, title_grams),  # tweet vs. title
        page_sim(body_grams, tweet_grams),      # tweet vs. body
        jaccard_sim(tweet_grams, body_grams),   # tweet vs. body
        cosine_sim(tweet_grams, body_grams),    # tweet vs. body
    ]
def extract_features(tweet_ngrams, title_ngrams, body_ngrams, n=4):
    """Concatenate similarity features over every n-gram order.

    Orders are processed from the highest (index n-1) down to unigrams
    (index 0), preserving the feature layout expected by the datasets.
    """
    features = []
    for order in range(n - 1, -1, -1):
        features.extend(calc_sims(tweet_ngrams[order], title_ngrams[order],
                                  body_ngrams[order], order))
    return features
def extract_features_map(ex_data):
    '''Version for generating datasets, ignores examples with no similarity.

    Pool-friendly wrapper: *ex_data* is a (tweet_ngrams, title_ngrams,
    body_ngrams) tuple; returns the feature vector, or None when every
    feature is zero so the caller can drop the example.
    '''
    feats = extract_features(*ex_data)
    return None if sum(feats) == 0 else feats
def gen_dataset_pos(related, tweets, titles, bodies):
    '''Positive Examples: feature vectors for each (tweet, matched article)
    pair, computed in parallel; zero-similarity pairs are dropped.'''
    print('\nGenerating Positive Examples')
    x_list = []
    pool = Pool(cpu_count())
    tweet_ids = related.keys()
    chunk_size = 1000
    total_chunks = math.ceil(len(tweet_ids) / chunk_size)
    n_chunks = 0
    for chunk in grouper(chunk_size, tweet_ids):
        # Pair each tweet with its matched article's ngrams.
        data = [(tweets[tweet_id], titles[related[tweet_id]],
                 bodies[related[tweet_id]]) for tweet_id in chunk]
        results = pool.map(extract_features_map, data)
        for x in results:
            # None marks a pair with no similarity at any n-gram order.
            if x is not None:
                x_list.append(x)
        n_chunks += 1
        p = (n_chunks / total_chunks) * 100
        print('\r%f%%' % (p,))
    return x_list
def gen_dataset_neg(related, tweets, titles, bodies, npos):
    '''Negative Examples: feature vectors for non-matched (tweet, article)
    pairs, generated until there are roughly as many negatives as there
    are positives (*npos*).'''
    print('\nGenerating Negative Examples')
    x_list = []
    nprocs = cpu_count()
    pool = Pool(nprocs)
    tweet_ids = related.keys()
    event_ids = titles.keys()
    # Known-positive pairs, excluded from negative sampling.
    rset = set(related.items())
    all_combinations = itertools.product(tweet_ids, event_ids)
    chunk_size = 10000
    nneg = 0
    npos -= 1  # remove 1 for the 0 vector example
    for chunk in grouper(chunk_size, all_combinations):
        f_chunk = (p for p in chunk if p not in rset)
        # generate examples
        data = [(tweets[tweet_id], titles[event_id],
                 bodies[event_id]) for (tweet_id, event_id) in f_chunk]
        if len(data) == 0:
            continue
        # test examples
        print('testing')
        results = pool.map(extract_features_map, data)
        # add valid examples
        for x in results:
            # None marks a pair with no similarity — not useful as a negative.
            if x is not None:
                x_list.append(x)
        nneg = len(x_list)
        # print stats
        print('\r%f%% (%d / %d)' % ((nneg / npos) * 100, nneg, npos))
        if nneg > npos:
            break
    # all zero example
    # NOTE(review): raises IndexError if no negative example was generated.
    n_feats = len(x_list[0])
    print('Number of Features = %d' % (n_feats,))
    x_list.insert(0, [0] * n_feats)
    # trim
    # Keep `nneg` examples total: the inserted zero vector replaces the
    # one slot subtracted from npos above.
    x_list = x_list[0:nneg]
    return x_list
def main():
    '''Build and cache the training data end to end: load raw text,
    compute n-grams, then generate positive and negative feature sets,
    pickling each stage under pickled/ for reuse.'''
    p_proto = pickle.HIGHEST_PROTOCOL
    related = get_related()
    #
    # Raw Data
    #
    try:
        # Try to load from serialized files
        tweets = pickle.load(open('pickled/tweets.raw', 'rb'))
        titles = pickle.load(open('pickled/titles.raw', 'rb'))
        bodies = pickle.load(open('pickled/bodies.raw', 'rb'))
        print('Loaded Raw Files')
    except:
        # else load from dbs
        # NOTE(review): bare except also swallows unrelated errors
        # (KeyboardInterrupt, typos); consider narrowing to IOError.
        tweets, titles, bodies = get_data_raw(related)
        pickle.dump(tweets, open('pickled/tweets.raw', 'wb'), p_proto)
        pickle.dump(titles, open('pickled/titles.raw', 'wb'), p_proto)
        pickle.dump(bodies, open('pickled/bodies.raw', 'wb'), p_proto)
    #
    # Ngrams
    # Caching the n-grams is bypassed because pickling them needs far more
    # memory than the data itself (roughly double), which makes it
    # unusable at this scale.
    '''
    try:
        tweets = pickle.load(open('pickled/tweets.ngrams', 'rb'))
        titles = pickle.load(open('pickled/titles.ngrams', 'rb'))
        bodies = pickle.load(open('pickled/bodies.ngrams', 'rb'))
        print('Loaded N-Gram Files')
    except:
        tweets, titles, bodies = get_ngrams(related, tweets, titles, bodies)
        pickle.dump(bodies, open('pickled/bodies.ngrams', 'wb'), p_proto)
        pickle.dump(titles, open('pickled/titles.ngrams', 'wb'), p_proto)
        pickle.dump(tweets, open('pickled/tweets.ngrams', 'wb'), p_proto)
    '''
    tweets, titles, bodies = get_ngrams(related, tweets, titles, bodies)
    #
    # Positive Training Set
    #
    try:
        pos = pickle.load(open('pickled/pos.feats', 'rb'))
        print('Loaded Positve Training Set')
    except:
        pos = gen_dataset_pos(related, tweets, titles, bodies)
        pickle.dump(pos, open('pickled/pos.feats', 'wb'), p_proto)
    print('Total Pos Sim = %f' % (sum([sum(x) for x in pos]),))
    p_size = len(pos)
    print('Pos Size = %d' % (p_size,))
    # Free the positive set before building negatives to cap peak memory.
    del pos
    #
    # Negative Training Set
    #
    try:
        neg = pickle.load(open('pickled/neg.feats', 'rb'))
        print('Loaded Negative Training Set')
    except:
        neg = gen_dataset_neg(related, tweets, titles, bodies, p_size)
        pickle.dump(neg, open('pickled/neg.feats', 'wb'), p_proto)
    print('Total Neg Sim = %f' % (sum([sum(x) for x in neg]),))
if __name__ == '__main__':
    main()
|
|
# Add mouse controls
# add half size paddle after hitting back wall
import math, pygame, sys, shutil, getpass
from pygame.locals import *
pygame.init()
fpsClock = pygame.time.Clock()  # caps the frame rate in the game loop
screen = pygame.display.set_mode((640, 480)) # create screen - 640 pix by 480 pix
pygame.display.set_caption('Breakout') # set title bar
# add the font; use PressStart2P, but otherwise default if not available
try:
    fontObj = pygame.font.Font('PressStart2P.ttf', 36)
except:
    fontObj = pygame.font.Font('freesansbold.ttf', 36)
# generic colors-------------------------------
red = pygame.Color(255, 0, 0)
green = pygame.Color(0, 255, 0)
blue = pygame.Color(0, 0, 255)
white = pygame.Color(255, 255, 255)
grey = pygame.Color(142, 142, 142)
black = pygame.Color(0, 0, 0)
# row colors----------------------------------- (r1 = top row ... r6 = bottom row)
r1 = pygame.Color(200, 72, 72)
r2 = pygame.Color(198, 108, 58)
r3 = pygame.Color(180, 122, 48)
r4 = pygame.Color(162, 162, 42)
r5 = pygame.Color(72, 160, 72)
r6 = pygame.Color(67, 73, 202)
colors = [r1, r2, r3, r4, r5, r6]
# variables------------------------------------
controls = 'keys' # control method
mousex, mousey = 0, 0 # mouse position
dx, dy = 18, 6 # dimensions of board (bricks across, rows down)
bx, by = 50, 150 # board position (top-left pixel of the brick grid)
score = 0 # score
wall1 = pygame.Rect(20, 100, 30, 380) # walls of the game (left)
wall2 = pygame.Rect(590, 100, 30, 380)  # right wall
wall3 = pygame.Rect(20, 80, 600, 30)  # top wall
# Creates a board of rectangles----------------
def new_board():
    """Return a fresh dx-by-dy brick grid with every brick present (1)."""
    return [[1] * dy for _ in range(dx)]
# Classes defined------------------------------
class Paddle:  # class for paddle vars
    # Class attributes act as per-instance defaults; game code reads and
    # writes them on instances.
    x = 320  # horizontal center of the paddle (pixels)
    y = 450  # vertical position of the paddle (pixels)
    size = 2  # 2 is normal size, 1 is half-size
    direction = 'none'  # requested movement: 'left', 'right' or 'none'
class Ball:  # class for ball vars
    """Position, velocity and play state of the ball."""

    def __init__(self):
        self.x = 0  # current x position (pixels)
        self.y = 0  # current y position (pixels)
        self.remaining = 3  # lives left
        self.xPos = 1  # per-frame x step. adjusted for speed
        self.yPos = 1  # per-frame y step
        self.adjusted = False  # whether (xPos, yPos) was rescaled to speed
        self.speed = 5  # target length of the step vector
        self.collisions = 0  # paddle hits; drives speed-ups
        self.alive = False
        self.moving = False

    def rect(self):
        """Bounding box of the 6x6 ball, centered on (x, y)."""
        return pygame.Rect(self.x - 3, self.y - 3, 6, 6)

    def adjust(self):
        """Rescale (xPos, yPos) so the step vector's length equals speed."""
        magnitude = math.sqrt(self.xPos ** 2 + self.yPos ** 2)
        scale = self.speed / magnitude
        self.xPos = scale * self.xPos
        self.yPos = scale * self.yPos
        self.adjusted = True
# Functions defined----------------------------
def print_board(board, colors):  # prints the board
    # Draw every surviving brick (value 1) as a 30x12 rect, colored by row.
    for x in range(dx):
        for y in range(dy):
            if board[x][y] == 1:
                pygame.draw.rect(screen, colors[y], (((x * 30) + bx), ((y * 12) + by), 30, 12))
def print_paddle(paddle):  # prints the paddle
    """Draw the paddle centered on paddle.x.

    size == 2 is the normal 40px paddle; size == 1 is the half-size
    (20px) paddle mentioned in the TODO at the top of the file —
    previously a size-1 paddle was silently not drawn at all.
    """
    if paddle.size == 2:
        pygame.draw.rect(screen, red, ((paddle.x - 20), (paddle.y), 40, 5))
    elif paddle.size == 1:
        # Half-size paddle: same center and height, half the width.
        pygame.draw.rect(screen, red, ((paddle.x - 10), (paddle.y), 20, 5))
def collide_paddle(paddle, ball):  # recalculates the trajectory for the ball after collision with the paddle
    """Recompute the ball's direction after a paddle hit.

    The horizontal step is proportional to how far from the paddle's
    center the ball struck; the vertical step always points up (-1).
    Returns (adjusted, xPos, yPos) for the caller to reassign.

    Bug fix: a dead-center hit previously set yPos = +1 (downward),
    sending the ball through the paddle and costing a life.
    """
    ball.adjusted = False  # force re-normalization to the current speed
    offset = ball.x - paddle.x
    if offset != 0:
        ball.xPos = offset / 8
    else:
        ball.xPos = 0
    ball.yPos = -1  # always bounce upward off the paddle
    return ball.adjusted, float(ball.xPos), float(ball.yPos)
def write(x, y, color, msg):  # prints onto the screen in selected font
    # Render the text unantialiased with the module-level font, then blit
    # it with its top-left corner at (x, y).
    msgSurfaceObj = fontObj.render(msg, False, color)
    msgRectobj = msgSurfaceObj.get_rect()
    msgRectobj.topleft = (x, y)
    screen.blit(msgSurfaceObj, msgRectobj)
def game(score, paddle, ball, board, wall1):  # The game itself
    """Run one life of the game; returns the updated score.

    The loop draws everything, advances and collides the ball, lets the
    agent steer the paddle, and exits when the ball is lost.
    """
    # starting variables
    running = True
    ball.alive = True
    ball.moving = False
    ball.x = 53
    ball.y = 300
    ball.collisions, ball.speed = 0, 5
    colO = False  # check collision with the orange row, for speed purposes
    colR = False  # same but for red row
    ball.speed = 5
    ball.xPos = 1
    ball.yPos = 1
    ball.adjusted = False
    while running:
        # Draw all the things------------------------------
        screen.fill(black)
        pygame.draw.rect(screen, grey, wall1)
        pygame.draw.rect(screen, grey, wall2)
        pygame.draw.rect(screen, grey, wall3)
        pygame.draw.rect(screen, red, (ball.x - 3, ball.y - 3, 6, 6))
        print_board(board, colors)
        print_paddle(paddle)
        write(20, 20, grey, str(score))
        # Draw one marker per spare life along the right edge.
        temp = 0
        for life in range(ball.remaining):
            if life != 0:
                pygame.draw.rect(screen, red, (600, 400 - temp, 10, 10))
                temp += 15
        # check all the collisions-------------------------
        if ball.moving:
            # Re-normalize the step vector whenever speed/direction changed.
            if ball.adjusted == False:
                ball.adjust()
            ball.x += ball.xPos
            ball.y += ball.yPos
            # Paddle collision window: a band around paddle height.
            if ball.y < 455 and ball.y > 445:
                if ball.x > paddle.x - 20 and ball.x < paddle.x + 20:
                    ball.adjusted, ball.xPos, ball.yPos = collide_paddle(paddle, ball)  # paddle collide
                    ball.collisions += 1
                    # increase ball speeds at 4 hits on paddle, 12 hits, orange row, red row
                    if ball.collisions % 4 == 0:
                        ball.speed += 1
            # check wall collide----------------------------
            if wall1.colliderect(ball.rect()) or wall2.colliderect(ball.rect()):
                ball.xPos = -(ball.xPos)
            if wall3.colliderect(ball.rect()):
                ball.yPos = -(ball.yPos)
            # check collision with bricks-------------------
            Break = False
            for x in range(dx):
                for y in range(dy):
                    if board[x][y] == 1:
                        # Brick rect grown by 1px on each side for the hit test.
                        block = pygame.Rect(30 * x + bx - 1, 12 * y + by - 1, 32, 14)
                        if block.collidepoint(ball.x, ball.y):
                            board[x][y] = 0
                            ball.yPos = -ball.yPos
                            # Row scoring: bottom rows 1pt, middle 4pt, top 7pt;
                            # the first orange/red hit also speeds up the ball.
                            if y == 4 or y == 5:
                                score += 1
                            elif y == 2 or y == 3:
                                score += 4
                                if colO == False:
                                    colO = True
                                    ball.speed += 1
                            else:
                                score += 7
                                if colR == False:
                                    colR = True
                                    ball.speed += 2
                            Break = True
                            # ball.speed += 1
                    if Break:
                        break
                if Break:
                    break
            # Below the paddle: the ball is lost.
            if ball.y > 460:
                ball.alive = False
        # check if ball was lost
        if not ball.alive:
            running = False
            ball.remaining -= 1
        agent_move(ball, paddle)
        # move paddle
        if paddle.direction == 'right':
            if paddle.x <= 561:
                paddle.x += 8
        elif paddle.direction == 'left':
            if paddle.x >= 79:
                paddle.x -= 8
        # get user input
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_SPACE:
                    if not ball.moving:
                        ball.moving = True
        # update display
        pygame.display.update()
        fpsClock.tick(90)
    return score
def agent_move(ball, paddle):
    """Steer the paddle automatically toward where the ball will land.

    While the ball falls, project its straight-line landing x and mirror
    it off the side walls; while it rises, simply shadow the ball.
    Sets paddle.direction to 'left', 'right' or 'none' (8px dead zone).
    """
    if ball.yPos > 0:
        # Steps until the ball reaches paddle height, then its x there.
        steps = (paddle.y - ball.y) / ball.yPos
        target = ball.x + steps * ball.xPos
        # Reflect the projected point off the right and left walls.
        if target > 590:
            target = 590 - (target % 590)
        if target < 50:
            target = math.fabs(50 - target) + 50
    else:
        target = ball.x

    if target > paddle.x + 8:
        paddle.direction = 'right'
    elif target < paddle.x - 8:
        paddle.direction = 'left'
    else:
        paddle.direction = 'none'
# -----------------------------------------------------
if __name__ == '__main__':
    # Outer shell: waits for any input event, then (re)starts a full game.
    replay = False
    loop = 0
    while True:
        screen.fill(black)
        if replay:
            # Fresh game state for this play-through.
            board = new_board()
            score = 0
            paddle = Paddle()
            ball = Ball()
            while ball.remaining > 0:
                score = game(score, paddle, ball, board, wall1)
                if ball.remaining == 0:
                    # Game over: wipe the screen tile by tile.
                    for x in range(16):
                        for y in range(12):
                            pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                            pygame.display.update()
                            pygame.time.wait(10)
                # Count surviving bricks to detect a cleared board.
                boardcheck = 0
                for x in range(len(board)):
                    for y in range(len(board[x])):
                        boardcheck += board[x][y]
                if boardcheck == 0:
                    # Board cleared: reset and keep playing (score carries over).
                    paddle = Paddle()
                    ball = Ball()
                    board = new_board()
                    while ball.remaining > 0:
                        score = game(score, paddle, ball, board, wall1)
                        if ball.remaining == 0:
                            for x in range(16):
                                for y in range(12):
                                    pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                                    pygame.display.update()
                                    pygame.time.wait(10)
            replay = False
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            # NOTE(review): any non-QUIT event triggers a new game —
            # presumably intentional "press anything to play"; confirm.
            replay = True
        loop += 1
        pygame.display.update()
|
|
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from owslib import crs
from pycsw.core import util
LOGGER = logging.getLogger(__name__)
# GML geometry element names this parser understands.
TYPES = ['gml:Point', 'gml:LineString', 'gml:Polygon', 'gml:Envelope']
# Fallback CRS (EPSG:4326 in urn axis-order form) used when a geometry
# carries no srsName attribute.
DEFAULT_SRS = crs.Crs('urn:x-ogc:def:crs:EPSG:6.11:4326')
def _poslist2wkt(poslist, axisorder):
"""Repurpose gml:posList into WKT aware list"""
tmp = poslist.split()
poslist2 = []
xlist = tmp[1::2]
ylist = tmp[::2]
if axisorder == 'yx':
for i, j in zip(ylist, xlist):
poslist2.append('%s %s' % (i, j))
else:
for i, j in zip(xlist, ylist):
poslist2.append('%s %s' % (i, j))
return ', '.join(poslist2)
class Geometry(object):
    """Parse a GML geometry element into OGC WKT in EPSG:4326.

    Supported inputs are gml:Point, gml:LineString, gml:Polygon and
    gml:Envelope. If the element carries an srsName other than
    4326/CRS84, the resulting WKT is reprojected to 4326.
    """
    def __init__(self, element, nsmap):
        """initialize geometry parser

        :param element: lxml element containing one supported GML geometry
        :param nsmap: namespace prefix map used to resolve gml: paths
        :raises RuntimeError: on unsupported/malformed geometry or
            reprojection failure
        """
        self.nsmap = nsmap
        self.type = None   # GML local tag name, e.g. 'Point'
        self.wkt = None    # resulting WKT string
        self.crs = None    # owslib.crs.Crs of the source geometry
        self._exml = element
        # return OGC WKT for GML geometry
        operand = element.xpath(
            '|'.join(TYPES),
            namespaces={'gml': 'http://www.opengis.net/gml'})[0]
        if 'srsName' in operand.attrib:
            LOGGER.debug('geometry srsName detected')
            self.crs = crs.Crs(operand.attrib['srsName'])
        else:
            LOGGER.debug('setting default geometry srsName %s' % DEFAULT_SRS)
            self.crs = DEFAULT_SRS
        self.type = util.xmltag_split(operand.tag)
        if self.type == 'Point':
            self._get_point()
        elif self.type == 'LineString':
            self._get_linestring()
        elif self.type == 'Polygon':
            self._get_polygon()
        elif self.type == 'Envelope':
            self._get_envelope()
        else:
            raise RuntimeError('Unsupported geometry type (Must be one of %s)'
                               % ','.join(TYPES))
        # reproject data if needed
        if self.crs is not None and self.crs.code not in [4326, 'CRS84']:
            LOGGER.debug('transforming geometry to 4326')
            try:
                self.wkt = self.transform(self.crs.code, DEFAULT_SRS.code)
            except Exception as err:
                raise RuntimeError('Reprojection error: Invalid srsName '
                                   '"%s": %s' % (self.crs.id, str(err)))
    def _get_point(self):
        """Parse gml:Point"""
        tmp = self._exml.find(util.nspath_eval('gml:Point/gml:pos',
                              self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:Point geometry. Missing gml:pos')
        else:
            xypoint = tmp.text.split()
            if self.crs.axisorder == 'yx':
                self.wkt = 'POINT(%s %s)' % (xypoint[1], xypoint[0])
            else:
                self.wkt = 'POINT(%s %s)' % (xypoint[0], xypoint[1])
    def _get_linestring(self):
        """Parse gml:LineString"""
        tmp = self._exml.find(util.nspath_eval('gml:LineString/gml:posList',
                              self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:LineString geometry. '
                               'Missing gml:posList')
        else:
            self.wkt = 'LINESTRING(%s)' % _poslist2wkt(tmp.text,
                                                       self.crs.axisorder)
    def _get_polygon(self):
        """Parse gml:Polygon (exterior ring only, via its gml:posList)"""
        tmp = self._exml.find('.//%s' % util.nspath_eval('gml:posList',
                              self.nsmap))
        if tmp is None:
            # Bug fix: the message previously said "gml:LineString"
            # (copy-paste from _get_linestring).
            raise RuntimeError('Invalid gml:Polygon geometry. '
                               'Missing gml:posList')
        else:
            self.wkt = 'POLYGON((%s))' % _poslist2wkt(tmp.text,
                                                      self.crs.axisorder)
    def _get_envelope(self):
        """Parse gml:Envelope into a WKT bounding-box polygon"""
        tmp = self._exml.find(util.nspath_eval('gml:Envelope/gml:lowerCorner',
                              self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:Envelope geometry. '
                               'Missing gml:lowerCorner')
        else:
            lower_left = tmp.text
        tmp = self._exml.find(util.nspath_eval('gml:Envelope/gml:upperCorner',
                              self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:Envelope geometry. '
                               'Missing gml:upperCorner')
        else:
            upper_right = tmp.text
        llmin = lower_left.split()
        urmax = upper_right.split()
        if len(llmin) < 2 or len(urmax) < 2:
            raise RuntimeError('Invalid gml:Envelope geometry. '
                               'gml:lowerCorner and gml:upperCorner must '
                               'hold at least x and y')
        if self.crs.axisorder == 'yx':
            self.wkt = util.bbox2wktpolygon('%s,%s,%s,%s' % (llmin[1],
                                            llmin[0], urmax[1], urmax[0]))
        else:
            self.wkt = util.bbox2wktpolygon('%s,%s,%s,%s' % (llmin[0],
                                            llmin[1], urmax[0], urmax[1]))
    def transform(self, src, dest):
        """transform coordinates from one CRS to another

        :param src: source EPSG code
        :param dest: destination EPSG code
        :returns: WKT of the reprojected geometry
        """
        import pyproj
        from shapely.geometry import Point, LineString, Polygon
        from shapely.wkt import loads
        LOGGER.debug('Transforming geometry from %s to %s' % (src, dest))
        vertices = []
        try:
            proj_src = pyproj.Proj(init='epsg:%s' % src)
        except Exception:
            raise RuntimeError('Invalid source projection')
        try:
            proj_dst = pyproj.Proj(init='epsg:%s' % dest)
        except Exception:
            raise RuntimeError('Invalid destination projection')
        geom = loads(self.wkt)
        if geom.type == 'Point':
            newgeom = Point(pyproj.transform(proj_src, proj_dst,
                                             geom.x, geom.y))
            wkt2 = newgeom.wkt
        elif geom.type == 'LineString':
            for vertice in list(geom.coords):
                newgeom = pyproj.transform(proj_src, proj_dst,
                                           vertice[0], vertice[1])
                vertices.append(newgeom)
            linestring = LineString(vertices)
            wkt2 = linestring.wkt
        elif geom.type == 'Polygon':
            for vertice in list(geom.exterior.coords):
                newgeom = pyproj.transform(proj_src, proj_dst,
                                           vertice[0], vertice[1])
                vertices.append(newgeom)
            polygon = Polygon(vertices)
            wkt2 = polygon.wkt
        else:
            # Bug fix: an unsupported geometry type previously fell
            # through and raised UnboundLocalError on wkt2.
            raise RuntimeError('Unsupported geometry type: %s' % geom.type)
        return wkt2
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Automatically-generated blanket testing for the MediaFile metadata
layer.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import shutil
import tempfile
import datetime
import time
from test import _common
from test._common import unittest
from beets.mediafile import MediaFile, MediaField, Image, \
MP3DescStorageStyle, StorageStyle, MP4StorageStyle, \
ASFStorageStyle, ImageType, CoverArtField
from beets.library import Item
from beets.plugins import BeetsPlugin
class ArtTestMixin(object):
    """Test reads and writes of the ``art`` property.
    """

    @property
    def png_data(self):
        # Lazily read and cache the PNG fixture bytes.
        if not self._png_data:
            with open(os.path.join(_common.RSRC, 'image-2x3.png'), 'rb') as f:
                self._png_data = f.read()
        return self._png_data
    _png_data = None

    @property
    def jpg_data(self):
        # Lazily read and cache the JPEG fixture bytes.
        if not self._jpg_data:
            with open(os.path.join(_common.RSRC, 'image-2x3.jpg'), 'rb') as f:
                self._jpg_data = f.read()
        return self._jpg_data
    _jpg_data = None

    @property
    def tiff_data(self):
        # Lazily read and cache the TIFF fixture bytes.
        # Bug fix: this property previously checked and assigned
        # ``self._jpg_data``, so whichever of jpg_data/tiff_data was read
        # first was returned for both formats.
        if not self._tiff_data:
            with open(os.path.join(_common.RSRC, 'image-2x3.tiff'), 'rb') as f:
                self._tiff_data = f.read()
        return self._tiff_data
    _tiff_data = None

    def test_set_png_art(self):
        mediafile = self._mediafile_fixture('empty')
        mediafile.art = self.png_data
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.art, self.png_data)

    def test_set_jpg_art(self):
        mediafile = self._mediafile_fixture('empty')
        mediafile.art = self.jpg_data
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.art, self.jpg_data)

    def test_delete_art(self):
        mediafile = self._mediafile_fixture('empty')
        mediafile.art = self.jpg_data
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertIsNotNone(mediafile.art)
        # Deleting the property removes the tag from the file on save.
        del mediafile.art
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertIsNone(mediafile.art)
class ImageStructureTestMixin(ArtTestMixin):
    """Test reading and writing multiple image tags.
    The tests use the `image` media file fixture. The tags of these files
    include two images, on in the PNG format, the other in JPEG format. If
    the tag format supports it they also include additional metadata.
    """
    def test_read_image_structures(self):
        # Both fixture images must round-trip with data, mime type and
        # (where the format supports it) desc/type metadata.
        mediafile = self._mediafile_fixture('image')
        self.assertEqual(len(mediafile.images), 2)
        image = next(i for i in mediafile.images
                     if i.mime_type == 'image/png')
        self.assertEqual(image.data, self.png_data)
        self.assertExtendedImageAttributes(image, desc='album cover',
                                           type=ImageType.front)
        image = next(i for i in mediafile.images
                     if i.mime_type == 'image/jpeg')
        self.assertEqual(image.data, self.jpg_data)
        self.assertExtendedImageAttributes(image, desc='the artist',
                                           type=ImageType.artist)
    def test_set_image_structure(self):
        # Assigning a fresh images list persists the single image intact.
        mediafile = self._mediafile_fixture('empty')
        image = Image(data=self.png_data, desc='album cover',
                      type=ImageType.front)
        mediafile.images = [image]
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertEqual(len(mediafile.images), 1)
        image = mediafile.images[0]
        self.assertEqual(image.data, self.png_data)
        self.assertEqual(image.mime_type, 'image/png')
        self.assertExtendedImageAttributes(image, desc='album cover',
                                           type=ImageType.front)
    def test_add_image_structure(self):
        # Appending to the existing two images yields three after save.
        mediafile = self._mediafile_fixture('image')
        self.assertEqual(len(mediafile.images), 2)
        image = Image(data=self.png_data, desc='the composer',
                      type=ImageType.composer)
        mediafile.images += [image]
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertEqual(len(mediafile.images), 3)
        images = (i for i in mediafile.images if i.desc == 'the composer')
        image = next(images, None)
        self.assertExtendedImageAttributes(
            image, desc='the composer', type=ImageType.composer
        )
    def test_delete_image_structures(self):
        mediafile = self._mediafile_fixture('image')
        self.assertEqual(len(mediafile.images), 2)
        del mediafile.images
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertEqual(len(mediafile.images), 0)
    def test_guess_cover(self):
        # The cover-guessing helper must pick the front-cover image and
        # the `art` shortcut must expose that image's data.
        mediafile = self._mediafile_fixture('image')
        self.assertEqual(len(mediafile.images), 2)
        cover = CoverArtField.guess_cover_image(mediafile.images)
        self.assertEqual(cover.desc, 'album cover')
        self.assertEqual(mediafile.art, cover.data)
    def assertExtendedImageAttributes(self, image, **kwargs):
        """Ignore extended image attributes in the base tests.
        """
        pass
class ExtendedImageStructureTestMixin(ImageStructureTestMixin):
    """Checks for additional attributes in the image structure."""

    def assertExtendedImageAttributes(self, image, desc=None, type=None):
        """Assert that the image carries the given desc and type."""
        self.assertEqual(image.desc, desc)
        self.assertEqual(image.type, type)

    def test_add_tiff_image(self):
        mediafile = self._mediafile_fixture('image')
        self.assertEqual(len(mediafile.images), 2)

        image = Image(data=self.tiff_data, desc='the composer',
                      type=ImageType.composer)
        mediafile.images += [image]
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertEqual(len(mediafile.images), 3)

        # WMA does not preserve the order, so we have to work around this.
        # BUG FIX: filter() returns an iterator on Python 3 and does not
        # support indexing; use next() on a generator expression instead,
        # matching the idiom used elsewhere in these mixins.
        image = next(i for i in mediafile.images
                     if i.mime_type == 'image/tiff')
        self.assertExtendedImageAttributes(
            image, desc='the composer', type=ImageType.composer)
class LazySaveTestMixin(object):
    """Mediafile should only write changes when tags have changed
    """

    @unittest.skip('not yet implemented')
    def test_unmodified(self):
        mf = self._mediafile_fixture('full')
        mtime = self._set_past_mtime(mf.path)
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

        mf.save()
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

    @unittest.skip('not yet implemented')
    def test_same_tag_value(self):
        mf = self._mediafile_fixture('full')
        mtime = self._set_past_mtime(mf.path)
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

        mf.title = mf.title
        mf.save()
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

    def test_update_same_tag_value(self):
        mf = self._mediafile_fixture('full')
        mtime = self._set_past_mtime(mf.path)
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

        mf.update({'title': mf.title})
        mf.save()
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

    @unittest.skip('not yet implemented')
    def test_tag_value_change(self):
        mf = self._mediafile_fixture('full')
        mtime = self._set_past_mtime(mf.path)
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

        mf.title = mf.title
        mf.album = 'another'
        mf.save()
        self.assertNotEqual(os.stat(mf.path).st_mtime, mtime)

    def test_update_changed_tag_value(self):
        mf = self._mediafile_fixture('full')
        mtime = self._set_past_mtime(mf.path)
        self.assertEqual(os.stat(mf.path).st_mtime, mtime)

        mf.update({'title': mf.title, 'album': 'another'})
        mf.save()
        self.assertNotEqual(os.stat(mf.path).st_mtime, mtime)

    def _set_past_mtime(self, path):
        """Rewind the file's mtime by ~10000s and return the stamp set."""
        stamp = round(time.time() - 10000)
        os.utime(path, (stamp, stamp))
        return stamp
class GenreListTestMixin(object):
    """Tests access to the ``genres`` property as a list.
    """

    def test_read_genre_list(self):
        mf = self._mediafile_fixture('full')
        self.assertItemsEqual(mf.genres, ['the genre'])

    def test_write_genre_list(self):
        mf = self._mediafile_fixture('empty')
        mf.genres = [u'one', u'two']
        mf.save()

        self.assertItemsEqual(MediaFile(mf.path).genres, ['one', 'two'])

    def test_write_genre_list_get_first(self):
        mf = self._mediafile_fixture('empty')
        mf.genres = [u'one', u'two']
        mf.save()

        # The scalar ``genre`` field reflects the first list entry.
        self.assertEqual(MediaFile(mf.path).genre, 'one')

    def test_append_genre_list(self):
        mf = self._mediafile_fixture('full')
        self.assertEqual(mf.genre, 'the genre')

        mf.genres += [u'another']
        mf.save()

        self.assertItemsEqual(MediaFile(mf.path).genres,
                              [u'the genre', u'another'])
# Module-level MediaField used by ExtendedFieldTestMixin to register a
# custom "customtag" field, with one storage style per container format.
field_extension = MediaField(
    MP3DescStorageStyle(b'customtag'),
    MP4StorageStyle(b'----:com.apple.iTunes:customtag'),
    StorageStyle(b'customtag'),
    ASFStorageStyle(b'customtag'),
)
class ExtendedFieldTestMixin(object):
    """Tests for plugin-registered custom media fields."""

    def _unregister_customtag(self):
        """Undo the field registration so later tests stay isolated."""
        delattr(MediaFile, 'customtag')
        Item._media_fields.remove('customtag')

    def test_extended_field_write(self):
        plugin = BeetsPlugin()
        plugin.add_media_field('customtag', field_extension)

        mf = self._mediafile_fixture('empty')
        mf.customtag = 'F#'
        mf.save()

        self.assertEqual(MediaFile(mf.path).customtag, 'F#')
        self._unregister_customtag()

    def test_write_extended_tag_from_item(self):
        plugin = BeetsPlugin()
        plugin.add_media_field('customtag', field_extension)

        mf = self._mediafile_fixture('empty')
        self.assertIsNone(mf.customtag)

        item = Item(path=mf.path, customtag='Gb')
        item.write()

        self.assertEqual(MediaFile(mf.path).customtag, 'Gb')
        self._unregister_customtag()

    def test_read_flexible_attribute_from_file(self):
        plugin = BeetsPlugin()
        plugin.add_media_field('customtag', field_extension)

        mf = self._mediafile_fixture('empty')
        mf.update({'customtag': 'F#'})
        mf.save()

        item = Item.from_path(mf.path)
        self.assertEqual(item['customtag'], 'F#')
        self._unregister_customtag()

    def test_invalid_descriptor(self):
        with self.assertRaises(ValueError) as cm:
            MediaFile.add_field('somekey', True)
        self.assertIn('must be an instance of MediaField',
                      unicode(cm.exception))

    def test_overwrite_property(self):
        with self.assertRaises(ValueError) as cm:
            MediaFile.add_field('artist', MediaField())
        self.assertIn('property "artist" already exists',
                      unicode(cm.exception))
class ReadWriteTestBase(ArtTestMixin, GenreListTestMixin,
                        ExtendedFieldTestMixin):
    """Test writing and reading tags. Subclasses must set ``extension`` and
    ``audio_properties``.
    """
    # Tag values stored in the 'full' fixture file, keyed by field name.
    full_initial_tags = {
        'title': u'full',
        'artist': u'the artist',
        'album': u'the album',
        'genre': u'the genre',
        'composer': u'the composer',
        'grouping': u'the grouping',
        'year': 2001,
        'month': None,
        'day': None,
        'date': datetime.date(2001, 1, 1),
        'track': 2,
        'tracktotal': 3,
        'disc': 4,
        'disctotal': 5,
        'lyrics': u'the lyrics',
        'comments': u'the comments',
        'bpm': 6,
        'comp': True,
        'mb_trackid': '8b882575-08a5-4452-a7a7-cbb8a1531f9e',
        'mb_albumid': '9e873859-8aa4-4790-b985-5a953e8ef628',
        'mb_artistid': '7cf0ea9d-86b9-4dad-ba9e-2355a64899ea',
        'art': None,
        'label': u'the label',
    }

    # Every writable field exercised by the generic read/write tests below.
    tag_fields = [
        'title',
        'artist',
        'album',
        'genre',
        'composer',
        'grouping',
        'year',
        'month',
        'day',
        'date',
        'track',
        'tracktotal',
        'disc',
        'disctotal',
        'lyrics',
        'comments',
        'bpm',
        'comp',
        'mb_trackid',
        'mb_albumid',
        'mb_artistid',
        'art',
        'label',
        'rg_track_peak',
        'rg_track_gain',
        'rg_album_peak',
        'rg_album_gain',
        'albumartist',
        'mb_albumartistid',
        'artist_sort',
        'albumartist_sort',
        'acoustid_fingerprint',
        'acoustid_id',
        'mb_releasegroupid',
        'asin',
        'catalognum',
        'disctitle',
        'script',
        'language',
        'country',
        'albumstatus',
        'media',
        'albumdisambig',
        'artist_credit',
        'albumartist_credit',
        'original_year',
        'original_month',
        'original_day',
        'original_date',
        'initial_key',
    ]

    def setUp(self):
        # A fresh scratch directory per test: fixtures are copied here so
        # writes never touch the originals.
        self.temp_dir = tempfile.mkdtemp()

    def tearDown(self):
        if os.path.isdir(self.temp_dir):
            shutil.rmtree(self.temp_dir)

    def test_read_audio_properties(self):
        """The fixture's audio properties match the expected values;
        floats are compared with a tolerance.
        """
        mediafile = self._mediafile_fixture('full')
        for key, value in self.audio_properties.items():
            if isinstance(value, float):
                self.assertAlmostEqual(getattr(mediafile, key), value,
                                       delta=0.1)
            else:
                self.assertEqual(getattr(mediafile, key), value)

    def test_read_full(self):
        mediafile = self._mediafile_fixture('full')
        self.assertTags(mediafile, self.full_initial_tags)

    def test_read_empty(self):
        mediafile = self._mediafile_fixture('empty')
        for field in self.tag_fields:
            self.assertIsNone(getattr(mediafile, field))

    def test_write_empty(self):
        mediafile = self._mediafile_fixture('empty')
        tags = self._generate_tags()

        for key, value in tags.items():
            setattr(mediafile, key, value)
        mediafile.save()

        # Re-open the file to verify what was actually persisted.
        mediafile = MediaFile(mediafile.path)
        self.assertTags(mediafile, tags)

    def test_update_empty(self):
        mediafile = self._mediafile_fixture('empty')
        tags = self._generate_tags()

        mediafile.update(tags)
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertTags(mediafile, tags)

    def test_overwrite_full(self):
        mediafile = self._mediafile_fixture('full')
        tags = self._generate_tags()

        for key, value in tags.items():
            setattr(mediafile, key, value)
        mediafile.save()

        # Make sure the tags are already set when writing a second time
        for key, value in tags.items():
            setattr(mediafile, key, value)
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertTags(mediafile, tags)

    def test_update_full(self):
        mediafile = self._mediafile_fixture('full')
        tags = self._generate_tags()

        mediafile.update(tags)
        mediafile.save()

        # Make sure the tags are already set when writing a second time
        mediafile.update(tags)
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertTags(mediafile, tags)

    def test_write_date_components(self):
        """Setting year/month/day individually round-trips and is
        reflected in the composite ``date`` fields.
        """
        mediafile = self._mediafile_fixture('full')
        mediafile.year = 2001
        mediafile.month = 1
        mediafile.day = 2
        mediafile.original_year = 1999
        mediafile.original_month = 12
        mediafile.original_day = 30
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.year, 2001)
        self.assertEqual(mediafile.month, 1)
        self.assertEqual(mediafile.day, 2)
        self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
        self.assertEqual(mediafile.original_year, 1999)
        self.assertEqual(mediafile.original_month, 12)
        self.assertEqual(mediafile.original_day, 30)
        self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))

    def test_write_incomplete_date_components(self):
        # A day without a month is dropped; missing components default
        # to 1 in the composite date.
        mediafile = self._mediafile_fixture('empty')
        mediafile.year = 2001
        mediafile.month = None
        mediafile.day = 2
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.year, 2001)
        self.assertIsNone(mediafile.month)
        self.assertIsNone(mediafile.day)
        self.assertEqual(mediafile.date, datetime.date(2001, 1, 1))

    def test_write_dates(self):
        """Setting the composite ``date`` fields round-trips and is
        reflected in the individual year/month/day components.
        """
        mediafile = self._mediafile_fixture('full')
        mediafile.date = datetime.date(2001, 1, 2)
        mediafile.original_date = datetime.date(1999, 12, 30)
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.year, 2001)
        self.assertEqual(mediafile.month, 1)
        self.assertEqual(mediafile.day, 2)
        self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
        self.assertEqual(mediafile.original_year, 1999)
        self.assertEqual(mediafile.original_month, 12)
        self.assertEqual(mediafile.original_day, 30)
        self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))

    def test_write_packed(self):
        # track/tracktotal may share a single packed tag; both survive.
        mediafile = self._mediafile_fixture('empty')
        mediafile.tracktotal = 2
        mediafile.track = 1
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.track, 1)
        self.assertEqual(mediafile.tracktotal, 2)

    def test_write_counters_without_total(self):
        mediafile = self._mediafile_fixture('full')
        self.assertEqual(mediafile.track, 2)
        self.assertEqual(mediafile.tracktotal, 3)
        self.assertEqual(mediafile.disc, 4)
        self.assertEqual(mediafile.disctotal, 5)

        mediafile.track = 10
        delattr(mediafile, 'tracktotal')
        mediafile.disc = 10
        delattr(mediafile, 'disctotal')
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertEqual(mediafile.track, 10)
        self.assertEqual(mediafile.tracktotal, None)
        self.assertEqual(mediafile.disc, 10)
        self.assertEqual(mediafile.disctotal, None)

    def test_unparseable_date(self):
        # Garbage in the date tag reads back as None rather than raising.
        mediafile = self._mediafile_fixture('unparseable')

        self.assertIsNone(mediafile.date)
        self.assertIsNone(mediafile.year)
        self.assertIsNone(mediafile.month)
        self.assertIsNone(mediafile.day)

    def test_delete_tag(self):
        mediafile = self._mediafile_fixture('full')

        keys = self.full_initial_tags.keys()
        # 'art', 'month' and 'day' are unset in the 'full' fixture.
        for key in set(keys) - set(['art', 'month', 'day']):
            self.assertIsNotNone(getattr(mediafile, key))
        for key in keys:
            delattr(mediafile, key)
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        for key in keys:
            self.assertIsNone(getattr(mediafile, key))

    def test_delete_packed_total(self):
        mediafile = self._mediafile_fixture('full')

        delattr(mediafile, 'tracktotal')
        delattr(mediafile, 'disctotal')

        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        # Deleting the totals must not clobber the packed track/disc.
        self.assertEqual(mediafile.track, self.full_initial_tags['track'])
        self.assertEqual(mediafile.disc, self.full_initial_tags['disc'])

    def test_delete_partial_date(self):
        mediafile = self._mediafile_fixture('empty')

        mediafile.date = datetime.date(2001, 12, 3)
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertIsNotNone(mediafile.date)
        self.assertIsNotNone(mediafile.year)
        self.assertIsNotNone(mediafile.month)
        self.assertIsNotNone(mediafile.day)

        # Removing the month implicitly removes the day too.
        delattr(mediafile, 'month')
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertIsNotNone(mediafile.date)
        self.assertIsNotNone(mediafile.year)
        self.assertIsNone(mediafile.month)
        self.assertIsNone(mediafile.day)

    def test_delete_year(self):
        mediafile = self._mediafile_fixture('full')

        self.assertIsNotNone(mediafile.date)
        self.assertIsNotNone(mediafile.year)

        # Removing the year removes the whole date.
        delattr(mediafile, 'year')
        mediafile.save()
        mediafile = MediaFile(mediafile.path)
        self.assertIsNone(mediafile.date)
        self.assertIsNone(mediafile.year)

    def assertTags(self, mediafile, tags):
        """Assert that ``mediafile`` carries every tag in ``tags``,
        collecting all mismatches into a single failure message.
        """
        errors = []
        for key, value in tags.items():
            try:
                value2 = getattr(mediafile, key)
            except AttributeError:
                errors.append('Tag %s does not exist' % key)
            else:
                if value2 != value:
                    errors.append('Tag %s: %r != %r' % (key, value2, value))
        if any(errors):
            errors = ['Tags did not match'] + errors
            self.fail('\n  '.join(errors))

    def _mediafile_fixture(self, name):
        """Copy the named fixture for this format into the scratch dir
        and return a MediaFile opened on the copy.
        """
        name = name + '.' + self.extension
        src = os.path.join(_common.RSRC, name)
        target = os.path.join(self.temp_dir, name)
        shutil.copy(src, target)
        return MediaFile(target)

    def _generate_tags(self, base=None):
        """Return dictionary of tags, mapping tag names to values.
        """
        tags = {}

        for key in self.tag_fields:
            if key.startswith('rg_'):
                # ReplayGain is float
                tags[key] = 1.0
            else:
                # NOTE(review): with the b-prefix, '\u2010' is NOT a
                # unicode escape — the value contains a literal
                # backslash-u sequence. Presumably only meaningful on
                # Python 2; confirm before porting.
                tags[key] = b'value\u2010%s' % key

        for key in ['disc', 'disctotal', 'track', 'tracktotal', 'bpm']:
            tags[key] = 1
        tags['art'] = self.jpg_data
        tags['comp'] = True

        date = datetime.date(2001, 4, 3)
        tags['date'] = date
        tags['year'] = date.year
        tags['month'] = date.month
        tags['day'] = date.day

        original_date = datetime.date(1999, 5, 6)
        tags['original_date'] = original_date
        tags['original_year'] = original_date.year
        tags['original_month'] = original_date.month
        tags['original_day'] = original_date.day

        return tags
class PartialTestMixin(object):
    """Tests for fixtures that store a track/disc number with a zero
    total, which should read back as None.
    """
    tags_without_total = {
        'track': 2,
        'tracktotal': 0,
        'disc': 4,
        'disctotal': 0,
    }

    def test_read_track_without_total(self):
        mf = self._mediafile_fixture('partial')
        self.assertEqual(mf.track, 2)
        self.assertIsNone(mf.tracktotal)
        self.assertEqual(mf.disc, 4)
        self.assertIsNone(mf.disctotal)
class MP3Test(ReadWriteTestBase, PartialTestMixin,
              ExtendedImageStructureTestMixin,
              unittest.TestCase):
    """Round-trip tag tests for MP3 (ID3) files."""
    extension = 'mp3'
    audio_properties = {
        'length': 1.0,
        'bitrate': 80000,
        'format': 'MP3',
        'samplerate': 44100,
        'bitdepth': 0,
        'channels': 1,
    }

    def test_unknown_apic_type(self):
        # An APIC frame with an unrecognized picture type maps to "other".
        mediafile = self._mediafile_fixture('image_unknown_type')
        self.assertEqual(mediafile.images[0].type, ImageType.other)
class MP4Test(ReadWriteTestBase, PartialTestMixin,
              ImageStructureTestMixin, unittest.TestCase):
    """Round-trip tag tests for MP4/AAC (.m4a) files."""
    extension = 'm4a'
    audio_properties = {
        'length': 1.0,
        'bitrate': 64000,
        'format': 'AAC',
        'samplerate': 44100,
        'bitdepth': 16,
        'channels': 2,
    }

    def test_add_tiff_image_fails(self):
        # MP4 cover atoms only accept JPEG/PNG, so TIFF data raises.
        mediafile = self._mediafile_fixture('empty')
        with self.assertRaises(ValueError):
            mediafile.images = [Image(data=self.tiff_data)]

    def test_guess_cover(self):
        # There is no metadata associated with images, we pick one at random
        pass
class AlacTest(ReadWriteTestBase, unittest.TestCase):
    """Round-trip tag tests for ALAC files."""
    extension = 'alac.m4a'
    audio_properties = {
        'length': 1.0,
        'bitrate': 21830,
        # 'format': 'ALAC',
        'samplerate': 44100,
        'bitdepth': 16,
        'channels': 1,
    }
class MusepackTest(ReadWriteTestBase, unittest.TestCase):
    """Round-trip tag tests for Musepack files."""
    extension = 'mpc'
    audio_properties = {
        'length': 1.0,
        'bitrate': 23458,
        'format': 'Musepack',
        'samplerate': 44100,
        'bitdepth': 0,
        'channels': 2,
    }
class WMATest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
              unittest.TestCase):
    """Round-trip tag tests for Windows Media (ASF) files."""
    extension = 'wma'
    audio_properties = {
        'length': 1.0,
        'bitrate': 128000,
        'format': 'Windows Media',
        'samplerate': 44100,
        'bitdepth': 0,
        'channels': 1,
    }

    def test_write_genre_list_get_first(self):
        # WMA does not preserve list order
        mediafile = self._mediafile_fixture('empty')
        mediafile.genres = [u'one', u'two']
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertIn(mediafile.genre, [u'one', u'two'])

    def test_read_pure_tags(self):
        # Reads basic tags from the dedicated 'pure' fixture file.
        mediafile = self._mediafile_fixture('pure')
        self.assertEqual(mediafile.comments, 'the comments')
        self.assertEqual(mediafile.title, 'the title')
        self.assertEqual(mediafile.artist, 'the artist')
class OggTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
              unittest.TestCase):
    """Round-trip tag tests for Ogg Vorbis files."""
    extension = 'ogg'
    audio_properties = {
        'length': 1.0,
        'bitrate': 48000,
        'format': 'OGG',
        'samplerate': 44100,
        'bitdepth': 0,
        'channels': 1,
    }

    def test_read_date_from_year_tag(self):
        # A fixture carrying only a YEAR comment still yields a date.
        mediafile = self._mediafile_fixture('year')
        self.assertEqual(mediafile.year, 2000)
        self.assertEqual(mediafile.date, datetime.date(2000, 1, 1))

    def test_write_date_to_year_tag(self):
        mediafile = self._mediafile_fixture('empty')
        mediafile.year = 2000
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        # The year is persisted into the Vorbis 'YEAR' comment.
        self.assertEqual(mediafile.mgfile['YEAR'], [u'2000'])

    def test_legacy_coverart_tag(self):
        # Legacy 'coverart' comments are readable, and re-saving art
        # migrates away from the legacy tag.
        mediafile = self._mediafile_fixture('coverart')
        self.assertTrue('coverart' in mediafile.mgfile)
        self.assertEqual(mediafile.art, self.png_data)

        mediafile.art = self.png_data
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        self.assertFalse('coverart' in mediafile.mgfile)

    def test_date_tag_with_slashes(self):
        mediafile = self._mediafile_fixture('date_with_slashes')
        self.assertEqual(mediafile.year, 2005)
        self.assertEqual(mediafile.month, 6)
        self.assertEqual(mediafile.day, 5)
class FlacTest(ReadWriteTestBase, PartialTestMixin,
               ExtendedImageStructureTestMixin,
               unittest.TestCase):
    """Round-trip tag tests for FLAC files."""
    extension = 'flac'
    audio_properties = {
        'length': 1.0,
        'bitrate': 175120,
        'format': 'FLAC',
        'samplerate': 44100,
        'bitdepth': 16,
        'channels': 1,
    }
class ApeTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
              unittest.TestCase):
    """Round-trip tag tests for Monkey's Audio (APE) files."""
    extension = 'ape'
    audio_properties = {
        'length': 1.0,
        'bitrate': 112040,
        'format': 'APE',
        'samplerate': 44100,
        'bitdepth': 16,
        'channels': 1,
    }
class WavpackTest(ReadWriteTestBase, unittest.TestCase):
    """Round-trip tag tests for WavPack files."""
    extension = 'wv'
    audio_properties = {
        'length': 1.0,
        'bitrate': 108744,
        'format': 'WavPack',
        'samplerate': 44100,
        'bitdepth': 0,
        'channels': 1,
    }
class OpusTest(ReadWriteTestBase, unittest.TestCase):
    """Round-trip tag tests for Opus files."""
    extension = 'opus'
    audio_properties = {
        'length': 1.0,
        'bitrate': 57984,
        'format': 'Opus',
        'samplerate': 48000,
        'bitdepth': 0,
        'channels': 1,
    }
class AIFFTest(ReadWriteTestBase, unittest.TestCase):
    """Round-trip tag tests for AIFF files."""
    extension = 'aiff'
    audio_properties = {
        'length': 1.0,
        'bitrate': 705600,
        'format': 'AIFF',
        'samplerate': 44100,
        'bitdepth': 0,
        'channels': 1,
    }
class MediaFieldTest(unittest.TestCase):
    """Sanity checks on the field descriptors attached to MediaFile."""

    def test_properties_from_fields(self):
        mf = MediaFile(os.path.join(_common.RSRC, 'full.mp3'))
        for name in MediaFile.fields():
            self.assertTrue(hasattr(mf, name))

    def test_properties_from_readable_fields(self):
        mf = MediaFile(os.path.join(_common.RSRC, 'full.mp3'))
        for name in MediaFile.readable_fields():
            self.assertTrue(hasattr(mf, name))

    def test_known_fields(self):
        expected = list(ReadWriteTestBase.tag_fields)
        expected.extend(('encoder', 'images', 'genres', 'albumtype'))
        self.assertItemsEqual(MediaFile.fields(), expected)

    def test_fields_in_readable_fields(self):
        readable = MediaFile.readable_fields()
        for name in MediaFile.fields():
            self.assertIn(name, readable)
def suite():
    """Build the unittest suite for every test case in this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # BUG FIX: comparing __name__ (a str) against b'__main__' is always
    # False on Python 3, silently skipping the test run; a plain string
    # literal behaves identically on Python 2 and correctly on Python 3.
    unittest.main(defaultTest='suite')
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova import db
from nova import exception
from nova import objects
from nova.objects import compute_node
from nova.objects import hv_spec
from nova.objects import service
from nova.tests.unit import fake_pci_device_pools
from nova.tests.unit.objects import test_objects
# Shared fixtures for the ComputeNode object tests below.
NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
fake_stats_db_format = jsonutils.dumps(fake_stats)
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
fake_numa_topology = objects.NUMATopology(
    cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=512,
                            cpu_usage=0, memory_usage=0),
           objects.NUMACell(id=1, cpuset=set([3, 4]), memory=512,
                            cpu_usage=0, memory_usage=0)])
fake_numa_topology_db_format = fake_numa_topology._to_json()
fake_hv_spec = hv_spec.HVSpec(arch='foo', hv_type='bar', vm_mode='foobar')
fake_supported_hv_specs = [fake_hv_spec]
# for backward compatibility, each supported instance object
# is stored as a list in the database
fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
fake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive)
# A complete compute_nodes DB row in its database (serialized) format.
fake_compute_node = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
    'service_id': 456,
    'host': 'fake',
    'vcpus': 4,
    'memory_mb': 4096,
    'local_gb': 1024,
    'vcpus_used': 2,
    'memory_mb_used': 2048,
    'local_gb_used': 512,
    'hypervisor_type': 'Hyper-Dan-VM-ware',
    'hypervisor_version': 1001,
    'hypervisor_hostname': 'vm.danplanet.com',
    'free_ram_mb': 1024,
    'free_disk_gb': 256,
    'current_workload': 100,
    'running_vms': 2013,
    'cpu_info': 'Schmintel i786',
    'disk_available_least': 256,
    'metrics': '',
    'stats': fake_stats_db_format,
    'host_ip': fake_host_ip,
    'numa_topology': fake_numa_topology_db_format,
    'supported_instances': fake_supported_hv_specs_db_format,
    'pci_stats': fake_pci,
}
# FIXME(sbauza) : For compatibility checking, to be removed once we are sure
# that all computes are running latest DB version with host field in it.
fake_old_compute_node = fake_compute_node.copy()
del fake_old_compute_node['host']
class _TestComputeNodeObject(object):
def supported_hv_specs_comparator(self, expected, obj_val):
obj_val = [inst.to_list() for inst in obj_val]
self.json_comparator(expected, obj_val)
def pci_device_pools_comparator(self, expected, obj_val):
obj_val = obj_val.obj_to_primitive()
self.json_loads_comparator(expected, obj_val)
def json_loads_comparator(self, expected, obj_val):
# NOTE(edleafe): This is necessary because the dumps() version of the
# PciDevicePoolList doesn't maintain ordering, so the output string
# doesn't always match.
self.assertEqual(jsonutils.loads(expected), obj_val)
def comparators(self):
return {'stats': self.json_comparator,
'host_ip': self.str_comparator,
'supported_hv_specs': self.supported_hv_specs_comparator,
'pci_device_pools': self.pci_device_pools_comparator,
}
def subs(self):
return {'supported_hv_specs': 'supported_instances',
'pci_device_pools': 'pci_stats'}
def test_get_by_id(self):
self.mox.StubOutWithMock(db, 'compute_node_get')
db.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(objects.Service, 'get_by_id')
@mock.patch.object(db, 'compute_node_get')
def test_get_by_id_with_host_field_not_in_db(self, mock_cn_get,
mock_obj_svc_get):
fake_compute_node_with_no_host = fake_compute_node.copy()
host = fake_compute_node_with_no_host.pop('host')
fake_service = service.Service(id=123)
fake_service.host = host
mock_cn_get.return_value = fake_compute_node_with_no_host
mock_obj_svc_get.return_value = fake_service
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_get_by_service_id(self):
self.mox.StubOutWithMock(db, 'compute_nodes_get_by_service_id')
db.compute_nodes_get_by_service_id(self.context, 456).AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename(self, cn_get_by_h_and_n):
cn_get_by_h_and_n.return_value = fake_compute_node
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename_with_old_compute(self, cn_get_by_h_and_n,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [fake_old_compute_node]
svc_get_by_id.return_value = fake_service
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
# NOTE(sbauza): Result is still converted to new style Compute
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename_not_found(self, cn_get_by_h_and_n,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
another_node = fake_old_compute_node.copy()
another_node['hypervisor_hostname'] = 'elsewhere'
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [another_node]
svc_get_by_id.return_value = fake_service
self.assertRaises(exception.ComputeHostNotFound,
compute_node.ComputeNode.get_by_host_and_nodename,
self.context, 'fake', 'vm.danplanet.com')
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename_good_and_bad(self, cn_get_by_h_and_n,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
bad_node = fake_old_compute_node.copy()
bad_node['hypervisor_hostname'] = 'elsewhere'
good_node = fake_old_compute_node.copy()
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [bad_node, good_node]
svc_get_by_id.return_value = fake_service
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
# NOTE(sbauza): Result is still converted to new style Compute
self.compare_obj(compute, good_node,
subs=self.subs(),
comparators=self.comparators())
def test_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(
self.context,
{
'service_id': 456,
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
compute.stats = fake_stats
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
compute.create()
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, {'service_id': 456}).AndReturn(
fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
compute.create()
self.assertRaises(exception.ObjectActionError, compute.create,
self.context)
def test_save(self):
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(
self.context, 123,
{
'vcpus_used': 3,
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.vcpus_used = 3
compute.stats = fake_stats
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
compute.save()
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(db, 'compute_node_create',
return_value=fake_compute_node)
def test_set_id_failure(self, db_mock):
compute = compute_node.ComputeNode(context=self.context)
compute.create()
self.assertRaises(exception.ReadOnlyFieldError, setattr,
compute, 'id', 124)
def test_destroy(self):
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, 123)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.destroy()
def test_service(self):
self.mox.StubOutWithMock(service.Service, 'get_by_id')
service.Service.get_by_id(self.context, 456).AndReturn('my-service')
self.mox.ReplayAll()
compute = compute_node.ComputeNode()
compute._context = self.context
compute.id = 123
compute.service_id = 456
self.assertEqual('my-service', compute.service)
# Make sure it doesn't call Service.get_by_id() again
self.assertEqual('my-service', compute.service)
def test_get_all(self):
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_all(self.context)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_get_by_hypervisor(self):
self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor')
db.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
'hyper')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.db.compute_nodes_get_by_service_id')
def test_get_by_service(self, cn_get_by_svc_id):
cn_get_by_svc_id.return_value = [fake_compute_node]
fake_service = service.Service(id=123)
computes = compute_node.ComputeNodeList.get_by_service(self.context,
fake_service)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.db.compute_node_get_all_by_host')
def test_get_all_by_host(self, cn_get_all_by_host):
cn_get_all_by_host.return_value = [fake_compute_node]
computes = compute_node.ComputeNodeList.get_all_by_host(self.context,
'fake')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
    @mock.patch('nova.objects.Service.get_by_id')
    @mock.patch('nova.db.compute_nodes_get_by_service_id')
    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.db.compute_node_get_all_by_host')
    def test_get_all_by_host_with_old_compute(self, cn_get_all_by_host,
                                              svc_get_by_ch,
                                              cn_get_by_svc_id,
                                              svc_get_by_id):
        """Fallback path: when the host lookup fails, nodes are found
        through the host's Service record instead.
        """
        # Simulate an old compute record not found by the host index.
        cn_get_all_by_host.side_effect = exception.ComputeHostNotFound(
            host='fake')
        fake_service = service.Service(id=123)
        fake_service.host = 'fake'
        svc_get_by_ch.return_value = fake_service
        cn_get_by_svc_id.return_value = [fake_old_compute_node]
        svc_get_by_id.return_value = fake_service
        computes = compute_node.ComputeNodeList.get_all_by_host(self.context,
                                                                'fake')
        self.assertEqual(1, len(computes))
        # NOTE(sbauza): Result is still converted to new style Compute
        self.compare_obj(computes[0], fake_compute_node,
                         subs=self.subs(),
                         comparators=self.comparators())
    def test_compat_numa_topology(self):
        """Backlevel 1.4 primitives must not carry numa_topology."""
        compute = compute_node.ComputeNode()
        primitive = compute.obj_to_primitive(target_version='1.4')
        self.assertNotIn('numa_topology', primitive)
    def test_compat_supported_hv_specs(self):
        """Backlevel 1.5 primitives must not carry supported_hv_specs."""
        compute = compute_node.ComputeNode()
        compute.supported_hv_specs = fake_supported_hv_specs
        primitive = compute.obj_to_primitive(target_version='1.5')
        self.assertNotIn('supported_hv_specs', primitive)
    def test_compat_host(self):
        """Backlevel 1.6 primitives must not carry host."""
        compute = compute_node.ComputeNode()
        primitive = compute.obj_to_primitive(target_version='1.6')
        self.assertNotIn('host', primitive)
    def test_compat_pci_device_pools(self):
        """Backlevel 1.8 primitives must not carry pci_device_pools."""
        compute = compute_node.ComputeNode()
        compute.pci_device_pools = fake_pci_device_pools.fake_pool_list
        primitive = compute.obj_to_primitive(target_version='1.8')
        self.assertNotIn('pci_device_pools', primitive)
class TestComputeNodeObject(test_objects._LocalTest,
                            _TestComputeNodeObject):
    # Runs the shared test cases against the in-process object backend.
    pass
class TestRemoteComputeNodeObject(test_objects._RemoteTest,
                                  _TestComputeNodeObject):
    # Runs the same test cases through the remote (RPC) object backend.
    pass
|
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Cursor classes
"""
import weakref
import re
from . import errors
# Matches C-style /* ... */ comments so they can be stripped from SQL.
# Raw string: the historical non-raw spelling relied on Python keeping the
# invalid "\/" escapes literally, which now emits a SyntaxWarning; this raw
# form compiles to the byte-identical pattern without the warning.
RE_SQL_COMMENT = re.compile(r"\/\*.*\*\/")
# Captures the VALUES(...) group of an INSERT for multi-row rewriting.
RE_SQL_INSERT_VALUES = re.compile(
    r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
    re.I | re.M)
# Detects INSERT INTO statements (case-insensitive).
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
# Finds %s placeholders in an encoded (bytes) statement.
RE_PY_PARAM = re.compile(b'(%s)')
# Splits multiple statements on ';' not enclosed in quotes/backticks.
RE_SQL_SPLIT_STMTS = re.compile(
    b''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class _ParamSubstitutor(object):
def __init__(self, params):
self.params = params
self.index = 0
def __call__(self, matchobj):
index = self.index
self.index += 1
try:
return self.params[index]
except IndexError:
raise errors.ProgrammingError(
"Not enough parameters for the SQL statement")
@property
def remaining(self):
return len(self.params) - self.index
class CursorBase(object):
    """Skeleton cursor defining the PEP-249 (DB-API v2.0) surface.

    All operations are no-ops; concrete cursors should subclass
    MySQLCursor rather than this class.
    """
    def __init__(self):
        self._description = None
        self._rowcount = -1
        self._last_insert_id = None
        self.arraysize = 1
    def __del__(self):
        self.close()
    def callproc(self, procname, args=()):
        pass
    def close(self):
        pass
    def execute(self, operation, params=()):
        pass
    def executemany(self, operation, seqparams):
        pass
    def fetchone(self):
        pass
    def fetchmany(self, size=1):
        pass
    def fetchall(self):
        pass
    def nextset(self):
        pass
    def setinputsizes(self, sizes):
        pass
    def setoutputsize(self, size, column=None):
        pass
    def reset(self):
        pass
    @property
    def description(self):
        """Column descriptions of the current result, or None.

        Each entry is a tuple
        (column_name, type, None, None, None, None, null_ok, column_flags);
        the trailing flags field is an addition to the PEP-249 spec.
        """
        return self._description
    @property
    def rowcount(self):
        """Number of rows produced or affected by the last statement.

        For non-buffered cursors the count is -1 right after execution
        and is incremented while rows are fetched.
        """
        return self._rowcount
    @property
    def lastrowid(self):
        """AUTO_INCREMENT value of the last INSERT/UPDATE, or None."""
        return self._last_insert_id
class MySQLCursor(CursorBase):
    """Default cursor for interacting with MySQL
    This cursor will execute statements and handle the result. It will
    not automatically fetch all rows.
    MySQLCursor should be inherited whenever other functionality is
    required. An example would be to change the fetch* member functions
    to return dictionaries instead of lists of values.
    Implements the Python Database API Specification v2.0 (PEP-249)
    """
    def __init__(self, connection=None):
        CursorBase.__init__(self)
        # Weak proxy to the owning connection (see _set_connection);
        # stays None until a connection is attached.
        self._connection = None
        # Buffered cursors produced by callproc(), one per result set.
        self._stored_results = []
        # One-row read-ahead used for EOF detection: (next_row, eof_info).
        self._nextrow = (None, None)
        self._warnings = None
        self._warning_count = 0
        # Last executed statement (bytes) and, for multi-statement
        # execution, its individual split parts.
        self._executed = None
        self._executed_list = []
        if connection is not None:
            self._set_connection(connection)
    def __iter__(self):
        """
        Iteration over the result set which calls self.fetchone()
        and returns the next row.
        """
        # iter(callable, sentinel): iteration stops when fetchone()
        # returns None.
        return iter(self.fetchone, None)
    def _set_connection(self, connection):
        # Keep only a weak proxy so the cursor never keeps the connection
        # alive; probing _protocol validates that it is a real connection.
        try:
            self._connection = weakref.proxy(connection)
            self._connection._protocol
        except (AttributeError, TypeError):
            raise errors.InterfaceError(errno=2048)
    def _reset_result(self):
        # Clear all per-statement state before a new statement runs.
        self._rowcount = -1
        self._nextrow = (None, None)
        self._stored_results = []
        self._warnings = None
        self._warning_count = 0
        self._description = None
        self._executed = None
        self._executed_list = []
        self.reset()
    def _have_unread_result(self):
        """Check whether there is an unread result"""
        try:
            return self._connection.unread_result
        except AttributeError:
            # No connection attached: nothing pending to read.
            return False
def next(self):
return self.__next__(self)
    def __next__(self):
        """
        Used for iterating over the result set. Calls self.fetchone()
        to get the next row.
        """
        try:
            row = self.fetchone()
        except errors.InterfaceError:
            # No (more) result set available: end of iteration.
            raise StopIteration
        if not row:
            raise StopIteration
        return row
    def close(self):
        """Close the cursor
        Returns True when successful, otherwise False.
        """
        if self._connection is None:
            return False
        self._reset_result()
        # Drop the weak proxy only; the connection itself stays open.
        self._connection = None
        return True
def _process_params_dict(self, params):
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k,v in list(params.items()):
c = v
c = to_mysql(c)
c = escape(c)
c = quote(c)
res["%({})s".format(k).encode()] = c
except Exception as e:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % e)
else:
return res
return None
def _process_params(self, params):
"""
Process the parameters which were given when self.execute() was
called. It does following using the MySQLConnection converter:
* Convert Python types to MySQL types
* Escapes characters required for MySQL.
* Quote values when needed.
Returns a list.
"""
try:
res = params
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = list(map(to_mysql,res))
res = list(map(escape,res))
res = list(map(quote,res))
except Exception as e:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % e)
else:
return tuple(res)
return None
def _row_to_python(self, rowdata, desc=None):
res = ()
try:
if not desc:
desc = self.description
for idx,v in enumerate(rowdata):
flddsc = desc[idx]
res += (self._connection.converter.to_python(flddsc, v),)
except Exception as e:
raise errors.InterfaceError(
"Failed converting row to Python types; %s" % e)
else:
return res
return None
    def _handle_noresultset(self, res):
        """Handles result of execute() when there is no result set
        """
        # res is expected to be an OK-packet dict; missing keys or a
        # non-dict mean it was not one.
        try:
            self._rowcount = res['affected_rows']
            self._last_insert_id = res['insert_id']
            self._warning_count = res['warning_count']
        except (KeyError, TypeError) as err:
            raise errors.ProgrammingError(
                "Failed handling non-resultset; {}".format(err))
        if self._connection.get_warnings is True and self._warning_count:
            self._warnings = self._fetch_warnings()
    def _handle_resultset(self):
        # Hook for subclasses; buffered cursors fetch all rows here.
        pass
    def _handle_result(self, result):
        """
        Handle the result after a command was sent. The result can be either
        an OK-packet or a dictionary containing column/eof information.
        Raises InterfaceError when result is not a dict() or result is
        invalid.
        """
        if not isinstance(result, dict):
            raise errors.InterfaceError('Result was not a dict()')
        if 'columns' in result:
            # Weak test, must be column/eof information
            self._description = result['columns']
            # Rows are pending on the wire until fetched.
            self._connection.unread_result = True
            self._handle_resultset()
        elif 'affected_rows' in result:
            # Weak test, must be an OK-packet
            self._connection.unread_result = False
            self._handle_noresultset(result)
        else:
            raise errors.InterfaceError('Invalid result')
    def _execute_iter(self, query_iter):
        """Generator returns MySQLCursor objects for multiple statements
        This method is only used when multiple statements are executed
        by the execute() method. It uses zip() to make an iterator from the
        given query_iter (result of MySQLConnection.cmd_query_iter()) and
        the list of statements that were executed.
        """
        if not self._executed_list:
            # Recover the individual statements; RE_SQL_SPLIT_STMTS only
            # splits on ';' outside quoted strings.
            self._executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
        for result, stmt in zip(query_iter, iter(self._executed_list)):
            self._reset_result()
            self._handle_result(result)
            # Track which statement produced the current result set.
            self._executed = stmt
            yield self
    def execute(self, operation, params=None, multi=False):
        """Executes the given operation
        Executes the given operation substituting any markers with
        the given parameters.
        For example, getting all rows where id is 5:
          cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
        The multi argument should be set to True when executing multiple
        statements in one operation. If not set and multiple results are
        found, an InterfaceError will be raised.
        If warnings were generated, and connection.get_warnings is True, then
        self._warnings will be a list containing these warnings.
        Returns an iterator when multi is True, otherwise None.
        """
        if not operation:
            return
        if self._connection.unread_result is True:
            raise errors.InternalError("Unread result found.")
        self._reset_result()
        stmt = ''
        # Statements are processed as bytes in the connection's charset.
        try:
            if not isinstance(operation, bytes):
                stmt = operation.encode(self._connection.charset)
            else:
                stmt = operation
        except (UnicodeDecodeError, UnicodeEncodeError) as e:
            raise errors.ProgrammingError(str(e))
        if params is not None:
            if isinstance(params, dict):
                # pyformat style: replace each %(name)s exactly once.
                for k,v in self._process_params_dict(params).items():
                    stmt = stmt.replace(k, v, 1)
            elif isinstance(params, (list, tuple)):
                # format style: substitute %s markers left to right.
                psub = _ParamSubstitutor(self._process_params(params))
                stmt = RE_PY_PARAM.sub(psub, stmt)
                if psub.remaining != 0:
                    raise errors.ProgrammingError(
                        "Not all parameters were used in the SQL statement")
        if multi:
            self._executed = stmt
            self._executed_list = []
            # Caller gets a generator yielding this cursor per statement.
            return self._execute_iter(self._connection.cmd_query_iter(stmt))
        else:
            self._executed = stmt
            try:
                self._handle_result(self._connection.cmd_query(stmt))
            except errors.InterfaceError as err:
                if self._connection._have_next_result:
                    raise errors.InterfaceError(
                        "Use multi=True when executing multiple statements")
                raise
            return None
    def _batch_insert(self, operation, seq_params):
        """Rewrite an INSERT into one multi-row INSERT and execute it.

        The VALUES(...) group of *operation* is filled once per entry in
        seq_params (dict -> pyformat, sequence -> format style) and the
        groups are joined with commas into a single statement.
        """
        # Strip comments first so the regex finds the real VALUES group.
        opnocom = re.sub(RE_SQL_COMMENT,'',operation)
        m = re.search(RE_SQL_INSERT_VALUES,opnocom)
        fmt = m.group(1).encode(self._connection.charset)
        values = []
        try:
            stmt = operation.encode(self._connection.charset)
            for params in seq_params:
                tmp = fmt
                if isinstance(params,dict):
                    for k,v in self._process_params_dict(params).items():
                        tmp = tmp.replace(k,v,1)
                else:
                    psub = _ParamSubstitutor(self._process_params(params))
                    tmp = RE_PY_PARAM.sub(psub,tmp)
                    if psub.remaining != 0:
                        raise errors.ProgrammingError("Not all parameters "
                                                      "were used in the SQL statement")
                values.append(tmp)
            stmt = stmt.replace(fmt,b','.join(values),1)
            return self.execute(stmt)
        except (UnicodeDecodeError,UnicodeEncodeError) as e:
            raise errors.ProgrammingError(str(e))
        except errors.Error:
            raise
        except Exception as e:
            raise errors.InterfaceError(
                "Failed executing the operation; %s" % e)
        else:
            # NOTE(review): the try block always returns, so this else
            # clause is unreachable dead code — confirm before removing.
            self._executed = stmt
            return self._rowcount
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s)"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._connection.unread_result is True:
raise errors.InternalError("Unread result found.")
if not isinstance(seq_params, (list,tuple)):
raise errors.ProgrammingError(
"Parameters for query must be list or tuple.")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
return self._batch_insert(operation,seq_params)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError) as err:
raise errors.InterfaceError(
"Failed executing the operation; {}".format(err))
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
Returns a iterator.
"""
return iter(self._stored_results)
    def callproc(self, procname, args=()):
        """Calls a stored procedure with the given arguments
        The arguments will be set during this session, meaning
        they will be called like _<procname>__arg<nr> where
        <nr> is an enumeration (+1) of the arguments.
        Coding Example:
          1) Defining the Stored Routine in MySQL:
          CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
          BEGIN
            SET pProd := pFac1 * pFac2;
          END
          2) Executing in Python:
          args = (5,5,0) # 0 is to hold pprod
          cursor.callproc('multiply', args)
          print cursor.fetchone()
        Does not return a value, but a result set will be
        available when the CALL-statement execute successfully.
        Raises exceptions when something is wrong.
        """
        if not procname or not isinstance(procname, str):
            raise ValueError("procname must be a string")
        if not isinstance(args, (tuple, list)):
            raise ValueError("args must be a sequence")
        argfmt = "@_%s_arg%d"
        self._stored_results = []
        results = []
        try:
            argnames = []
            if args:
                # Bind every argument to a session variable so OUT
                # parameters can be read back afterwards.
                for idx,arg in enumerate(args):
                    argname = argfmt % (procname, idx+1)
                    argnames.append(argname)
                    self.execute("SET {0}=%s".format(argname), (arg,))
            call = "CALL %s(%s)" % (procname,','.join(argnames))
            # Buffer every result set the procedure produces; they are
            # later available through stored_results().
            for result in self._connection.cmd_query_iter(call):
                if 'columns' in result:
                    tmp = MySQLCursorBuffered(self._connection._get_self())
                    tmp._handle_result(result)
                    results.append(tmp)
            if argnames:
                # Read the session variables back to return the (possibly
                # updated) argument values.
                select = "SELECT %s" % ','.join(argnames)
                self.execute(select)
                self._stored_results = results
                return self.fetchone()
            else:
                self._stored_results = results
                return ()
        except errors.Error:
            raise
        except Exception as e:
            raise errors.InterfaceError(
                "Failed calling stored routine; %s" % e)
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement.
Returns a long value or None.
"""
return self._last_insert_id
def _fetch_warnings(self):
"""
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
c = self._connection.cursor()
cnt = c.execute("SHOW WARNINGS")
res = c.fetchall()
c.close()
except Exception as e:
raise errors.InterfaceError(
"Failed getting warnings; %s" % e)
if self._connection.raise_on_warnings is True:
msg = '; '.join([ "(%s) %s" % (r[1],r[2]) for r in res])
raise errors.get_mysql_exception(res[0][1],res[0][2])
else:
if len(res):
return res
return None
def _handle_eof(self, eof):
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
if self._connection.get_warnings is True and eof['warning_count']:
self._warnings = self._fetch_warnings()
def _fetch_row(self):
if self._have_unread_result() is False:
return None
row = None
try:
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row()
else:
(row, eof) = self._nextrow
if row:
(foo, eof) = self._nextrow = self._connection.get_row()
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
except:
raise
else:
return row
return None
def fetchwarnings(self):
return self._warnings
def fetchone(self):
row = self._fetch_row()
if row:
return self._row_to_python(row)
return None
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
res = []
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
for i in range(0, self._rowcount):
res.append(self._row_to_python(rows[i]))
self._handle_eof(eof)
return res
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple( [d[0] for d in self.description] )
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
try:
return self._executed.strip().decode('utf8')
except AttributeError:
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __str__(self):
fmt = "MySQLCursor: %s"
if self._executed:
executed = self._executed.decode('utf-8')
if len(executed) > 30:
res = fmt % (executed[:30] + '..')
else:
res = fmt % (executed)
else:
res = fmt % '(Nothing executed yet)'
return res
class MySQLCursorBuffered(MySQLCursor):
    """Cursor which fetches rows within execute()

    All rows are drained into self._rows immediately and served from
    memory; self._next_row indexes the next row to hand out.
    """
    def __init__(self, connection=None):
        MySQLCursor.__init__(self, connection)
        self._rows = None
        self._next_row = 0
    def _handle_resultset(self):
        # Drain the entire result set right away (buffered behaviour).
        (self._rows, eof) = self._connection.get_rows()
        self._rowcount = len(self._rows)
        self._handle_eof(eof)
        self._next_row = 0
        try:
            self._connection.unread_result = False
        except Exception:
            # Best effort; previously a bare ``except:`` which also
            # swallowed SystemExit/KeyboardInterrupt.
            pass
    def reset(self):
        self._rows = None
    def _fetch_row(self):
        """Return the next buffered raw row, or None when exhausted."""
        try:
            row = self._rows[self._next_row]
        except (TypeError, IndexError):
            # TypeError: nothing buffered (_rows is None);
            # IndexError: all buffered rows have been served.
            return None
        self._next_row += 1
        return row
    def fetchall(self):
        if self._rows is None:
            raise errors.InterfaceError("No result set to fetch from.")
        res = [self._row_to_python(row) for row in self._rows]
        self._next_row = len(self._rows)
        return res
    def fetchmany(self, size=None):
        res = []
        cnt = (size or self.arraysize)
        while cnt > 0:
            cnt -= 1
            row = self.fetchone()
            if row:
                res.append(row)
        return res
    @property
    def with_rows(self):
        return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
    """Cursor returning rows without conversion to Python types."""
    def fetchone(self):
        """Return the next raw row, or None when no row is available."""
        return self._fetch_row() or None
    def fetchall(self):
        """Return all remaining raw rows as a list.

        Raises InterfaceError when there is no result set to fetch from.
        """
        if not self._have_unread_result():
            raise errors.InterfaceError("No result set to fetch from.")
        (rows, eof) = self._connection.get_rows()
        self._rowcount = len(rows)
        self._handle_eof(eof)
        return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
    """Buffered cursor that returns rows without type conversion."""
    def fetchone(self):
        """Return the next buffered raw row, or None when exhausted."""
        return self._fetch_row() or None
    def fetchall(self):
        """Return all buffered raw rows as a new list.

        Raises InterfaceError when there is no result set.
        """
        if self._rows is None:
            raise errors.InterfaceError("No result set to fetch from.")
        return list(self._rows)
    @property
    def with_rows(self):
        """True once a result set has been buffered."""
        return self._rows is not None
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2017 Snowflake Computing Inc. All right reserved.
#
import logging
import re
import signal
import sys
import uuid
from logging import getLogger
from threading import (Timer, Lock)
from six import u
from .chunk_downloader import (DEFAULT_CLIENT_RESULT_PREFETCH_SLOTS,
DEFAULT_CLIENT_RESULT_PREFETCH_THREADS)
from .compat import (BASE_EXCEPTION_CLASS)
from .constants import (FIELD_NAME_TO_ID, FIELD_ID_TO_NAME)
from .errorcode import (ER_UNSUPPORTED_METHOD,
ER_CURSOR_IS_CLOSED,
ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT,
ER_NOT_POSITIVE_SIZE,
ER_FAILED_PROCESSING_PYFORMAT,
ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE,
ER_INVALID_VALUE)
from .errors import (Error, ProgrammingError, NotSupportedError,
DatabaseError, InterfaceError)
from .file_transfer_agent import (SnowflakeFileTransferAgent)
from .sqlstate import (SQLSTATE_FEATURE_NOT_SUPPORTED)
# Statement-type ids; DML variants are offsets from the 0x3000 base.
STATEMENT_TYPE_ID_DML = 0x3000
STATEMENT_TYPE_ID_INSERT = STATEMENT_TYPE_ID_DML + 0x100
STATEMENT_TYPE_ID_UPDATE = STATEMENT_TYPE_ID_DML + 0x200
STATEMENT_TYPE_ID_DELETE = STATEMENT_TYPE_ID_DML + 0x300
STATEMENT_TYPE_ID_MERGE = STATEMENT_TYPE_ID_DML + 0x400
STATEMENT_TYPE_ID_MULTI_TABLE_INSERT = STATEMENT_TYPE_ID_DML + 0x500
# Frozen set of all DML statement-type ids.
STATEMENT_TYPE_ID_DML_SET = frozenset(
    [STATEMENT_TYPE_ID_DML, STATEMENT_TYPE_ID_INSERT,
     STATEMENT_TYPE_ID_UPDATE,
     STATEMENT_TYPE_ID_DELETE, STATEMENT_TYPE_ID_MERGE,
     STATEMENT_TYPE_ID_MULTI_TABLE_INSERT])
# Matches "desc[ribe] <name>" queries so execute() can rewrite them to
# "describe table <name>".
DESC_TABLE_RE = re.compile(u(r'desc(?:ribe)?\s+([\w_]+)\s*;?\s*$'),
                           flags=re.IGNORECASE)
class SnowflakeCursor(object):
    u"""
    Implementation of Cursor object that is returned from Connection.cursor()
    method.
    """
    # Class-level patterns used to classify statements before execution.
    PUT_SQL_RE = re.compile(u(r'^(?:/\*.*\*/\s*)*put\s+'), flags=re.IGNORECASE)
    GET_SQL_RE = re.compile(u(r'^(?:/\*.*\*/\s*)*get\s+'), flags=re.IGNORECASE)
    INSERT_SQL_RE = re.compile(u(r'^insert\s+into'), flags=re.IGNORECASE)
    # NOTE(review): non-raw pattern relies on invalid escapes passing
    # through; consider a raw string to silence modern SyntaxWarnings.
    COMMENT_SQL_RE = re.compile(u"/\*.*\*/")
    INSERT_SQL_VALUES_RE = re.compile(u(r'.*VALUES\s*(\(.*\)).*'),
                                      re.IGNORECASE | re.MULTILINE | re.DOTALL)
    ALTER_SESSION_RE = re.compile(
        u(r'alter\s+session\s+set\s+(.*)=\'?([^\']+)\'?\s*;'),
        flags=re.IGNORECASE | re.MULTILINE | re.DOTALL)
    def __init__(self, connection):
        self._connection = connection
        self._errorhandler = Error.default_errorhandler
        self.messages = []
        self._timebomb = None  # must be here for abort_exit method
        self._description = None
        self._column_idx_to_name = None
        self._sfqid = None
        self._sqlstate = None
        self._total_rowcount = -1
        self._sequence_counter = -1
        self._request_id = None
        self._is_file_transfer = False
        # Session output formats; refreshed from the server's parameter
        # list after each query (see _execute_helper).
        self._timestamp_output_format = None
        self._timestamp_ltz_output_format = None
        self._timestamp_ntz_output_format = None
        self._timestamp_tz_output_format = None
        self._date_output_format = None
        self._time_output_format = None
        self._timezone = None
        self._binary_output_format = None
        self._client_result_prefetch_slots = \
            DEFAULT_CLIENT_RESULT_PREFETCH_SLOTS
        self._client_result_prefetch_threads = \
            DEFAULT_CLIENT_RESULT_PREFETCH_THREADS
        self._arraysize = 1  # PEP-0249: defaults to 1
        # Serializes query cancellation against close().
        self._lock_canceling = Lock()
        self.logger = getLogger(__name__)
        self.reset()
    def __del__(self):
        # Best-effort close on garbage collection; never propagates.
        try:
            self.close()
        except BASE_EXCEPTION_CLASS as e:
            logger = getLogger(__name__)
            if logger.getEffectiveLevel() <= logging.INFO:
                logger.info(e)
    @property
    def description(self):
        u"""
        Columns information in a tuple:
        - name
        - type_code
        - display_size
        - internal_size
        - precision
        - scale
        - null_ok
        """
        return self._description
    @property
    def rowcount(self):
        u"""
        The number of records updated or selected.
        If not clear, None is returned.
        """
        return self._total_rowcount if self._total_rowcount >= 0 else None
    @property
    def rownumber(self):
        u"""
        The current 0-based index of the cursor in the result set or None if
        the index cannot be determined.
        """
        return self._total_row_index if self._total_row_index >= 0 else None
    @property
    def sfqid(self):
        u"""
        Snowflake query id in UUID form. Include this in the problem report to
        the customer support
        """
        return self._sfqid
    @property
    def sqlstate(self):
        u"""
        SQL State code
        """
        return self._sqlstate
    @property
    def timestamp_output_format(self):
        u"""
        Snowflake timestamp_output_format
        """
        return self._timestamp_output_format
    @property
    def timestamp_ltz_output_format(self):
        u"""
        Snowflake timestamp_ltz_output_format; falls back to
        timestamp_output_format when not set.
        """
        return self._timestamp_ltz_output_format if \
            self._timestamp_ltz_output_format else \
            self._timestamp_output_format
    @property
    def timestamp_tz_output_format(self):
        u"""
        Snowflake timestamp_tz_output_format; falls back to
        timestamp_output_format when not set.
        """
        return self._timestamp_tz_output_format if \
            self._timestamp_tz_output_format else \
            self._timestamp_output_format
    @property
    def timestamp_ntz_output_format(self):
        u"""
        Snowflake timestamp_ntz_output_format; falls back to
        timestamp_output_format when not set.
        """
        return self._timestamp_ntz_output_format if \
            self._timestamp_ntz_output_format else \
            self._timestamp_output_format
    @property
    def date_output_format(self):
        u"""
        Snowflake date_output_format
        """
        return self._date_output_format
    @property
    def time_output_format(self):
        u"""
        Snowflake time_output_format
        """
        return self._time_output_format
    @property
    def timezone(self):
        u"""
        Snowflake timezone
        """
        return self._timezone
    @property
    def binary_output_format(self):
        u"""
        Snowflake binary_output_format
        """
        return self._binary_output_format
    @property
    def arraysize(self):
        u"""
        The default number of rows fetched in fetchmany
        """
        return self._arraysize
    @arraysize.setter
    def arraysize(self, value):
        # Coerce to int so PEP-249 fetchmany sizing stays numeric.
        self._arraysize = int(value)
    @property
    def connection(self):
        u"""
        The connection object on which the cursor was created
        """
        return self._connection
    @property
    def errorhandler(self):
        # Callable used to report errors (see Error.errorhandler_wrapper).
        return self._errorhandler
    @errorhandler.setter
    def errorhandler(self, value):
        self.logger.debug(u'setting errorhandler: %s', value)
        if value is None:
            raise ProgrammingError(u'Invalid errorhandler is specified')
        self._errorhandler = value
    @property
    def is_file_transfer(self):
        """
        Is PUT or GET command?
        """
        # hasattr guard: the flag is set lazily in _execute_helper.
        return hasattr(self, '_is_file_transfer') and self._is_file_transfer
    def callproc(self, procname, args=()):
        u"""
        Not supported; reports NotSupportedError through the connection's
        error handler.
        """
        Error.errorhandler_wrapper(
            self.connection, self,
            NotSupportedError,
            {
                u'msg': u"callproc is not supported.",
                u'errno': ER_UNSUPPORTED_METHOD,
                u'sqlstate': SQLSTATE_FEATURE_NOT_SUPPORTED})
def close(self):
u"""
Closes the cursor object
"""
try:
if self.is_closed():
return False
with self._lock_canceling:
self.reset()
self._connection = None
del self.messages[:]
return True
except:
pass
def is_closed(self):
return self._connection is None or self._connection.is_closed()
    def _execute_helper(
            self, query, timeout=0, statement_params=None,
            is_internal=False, _no_results=False, _is_put_get=None):
        """Send *query* to the backend and return the raw response dict.

        Arms a timeout "timebomb" Timer and a SIGINT handler that both
        cancel the running query, then refreshes session output formats
        from the parameters the server returns.
        """
        del self.messages[:]
        if statement_params is not None and not isinstance(
                statement_params, dict):
            Error.errorhandler_wrapper(
                self.connection, self,
                ProgrammingError,
                {
                    u'msg': u"The data type of statement params is invalid. "
                            u"It must be dict.",
                    u'errno': ER_INVALID_VALUE,
                })
        self._sequence_counter = self._connection._next_sequence_counter()
        self._request_id = uuid.uuid4()
        if self.logger.getEffectiveLevel() <= logging.DEBUG:
            self.logger.debug(
                u'running query [%s]',
                u' '.join(line.strip() for line in query.split(u'\n')),
            )
        if _is_put_get is not None:
            # if told the query is PUT or GET, use the information
            self._is_file_transfer = _is_put_get
        else:
            # or detect it.
            self._is_file_transfer = self.PUT_SQL_RE.match(
                query) or self.GET_SQL_RE.match(query)
        self.logger.debug(u'is_file_transfer: %s',
                          self._is_file_transfer is not None)
        real_timeout = timeout if timeout and timeout > 0 \
            else self._connection.request_timeout
        if real_timeout is not None:
            # Cancel the query server-side once the timeout elapses.
            self._timebomb = Timer(
                real_timeout, self.__cancel_query, [query])
            self._timebomb.start()
        else:
            self._timebomb = None
        original_sigint = signal.getsignal(signal.SIGINT)
        def abort_exit(signum, frame):
            # Ctrl-C: disarm the timebomb, cancel the query, restore the
            # previous handler, then surface KeyboardInterrupt.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            try:
                if self._timebomb is not None:
                    self._timebomb.cancel()
                    self._timebomb = None
                self.__cancel_query(query)
            finally:
                signal.signal(signal.SIGINT, original_sigint)
            raise KeyboardInterrupt
        try:
            signal.signal(signal.SIGINT, abort_exit)
        except ValueError:
            # Only the main thread may install signal handlers.
            self.logger.info(
                u'Failed to set SIGINT handler. '
                u'Not in main thread. Ignored...')
        try:
            ret = self._connection._cmd_query(
                query,
                self._sequence_counter,
                self._request_id,
                is_file_transfer=self._is_file_transfer,
                statement_params=statement_params,
                is_internal=is_internal,
                _no_results=_no_results)
        finally:
            try:
                signal.signal(signal.SIGINT, original_sigint)
            except ValueError:
                self.logger.info(
                    u'Failed to reset SIGINT handler. Not in main '
                    u'thread. Ignored...')
        if self._timebomb is not None:
            self._timebomb.cancel()
            self.logger.debug(u'cancelled timebomb in finally')
        if u'data' in ret and u'parameters' in ret[u'data']:
            # Mirror session parameters the server reports back into the
            # cursor's cached output formats.
            for kv in ret[u'data'][u'parameters']:
                if u'TIMESTAMP_OUTPUT_FORMAT' in kv[u'name']:
                    self._timestamp_output_format = kv[u'value']
                if u'TIMESTAMP_NTZ_OUTPUT_FORMAT' in kv[u'name']:
                    self._timestamp_ntz_output_format = kv[u'value']
                if u'TIMESTAMP_LTZ_OUTPUT_FORMAT' in kv[u'name']:
                    self._timestamp_ltz_output_format = kv[u'value']
                if u'TIMESTAMP_TZ_OUTPUT_FORMAT' in kv[u'name']:
                    self._timestamp_tz_output_format = kv[u'value']
                if u'DATE_OUTPUT_FORMAT' in kv[u'name']:
                    self._date_output_format = kv[u'value']
                if u'TIME_OUTPUT_FORMAT' in kv[u'name']:
                    self._time_output_format = kv[u'value']
                if u'TIMEZONE' in kv[u'name']:
                    self._timezone = kv[u'value']
                if u'BINARY_OUTPUT_FORMAT' in kv[u'name']:
                    self._binary_output_format = kv[u'value']
                if u'CLIENT_RESULT_PREFETCH_THREADS' in kv[u'name']:
                    self._client_result_prefetch_threads = kv[u'value']
                if u'CLIENT_RESULT_PREFETCH_SLOTS' in kv[u'name']:
                    self._client_result_prefetch_slots = kv[u'value']
            self._connection.converter.set_parameters(
                ret[u'data'][u'parameters'])
        self._sequence_counter = -1
        return ret
    def execute(self, command, params=None, timeout=None,
                _do_reset=True,
                _put_callback=None,
                _put_callback_output_stream=sys.stdout,
                _get_callback=None,
                _get_callback_output_stream=sys.stdout,
                _statement_params=None,
                _is_internal=False,
                _no_results=False,
                _use_ijson=False,
                _is_put_get=None):
        u"""
        Executes a command/query.

        command: SQL text; pyformat placeholders are bound client-side
        from params. timeout: seconds before server-side cancellation.
        The leading-underscore keyword arguments are internal knobs
        (reset behavior, PUT/GET progress callbacks, session-level
        statement params, async execution). Returns self, except for
        _no_results file-transfer paths which return the result data.
        """
        self.logger.debug(u'executing SQL/command')
        if self.is_closed():
            Error.errorhandler_wrapper(
                self.connection, self,
                DatabaseError,
                {u'msg': u"Cursor is closed in execute.",
                 u'errno': ER_CURSOR_IS_CLOSED})
        if _do_reset:
            self.reset()
        command = command.strip(u' \t\n\r') if command else None
        if not command:
            self.logger.warning(u'execute: no query is given to execute')
            return
        processed_params = self.__process_params(params)
        self.logger.debug(u'binding: %s with input=%s, processed=%s',
                          command,
                          params, processed_params)
        if len(processed_params) > 0:
            # client-side parameter substitution (pyformat/format style)
            query = command % processed_params
        else:
            query = command
        if self.logger.getEffectiveLevel() <= logging.DEBUG:
            self.logger.debug(
                u'query: [%s]',
                u' '.join(line.strip() for line in query.split(u'\n')))
        # "DESC TABLE x" is rewritten to "describe table x" for the server
        m = DESC_TABLE_RE.match(query)
        if m:
            query1 = u'describe table {0}'.format(m.group(1))
            if self.logger.getEffectiveLevel() <= logging.WARNING:
                self.logger.warning(
                    u'query was rewritten: org=%s, new=%s',
                    u' '.join(line.strip() for line in query.split(u'\n')),
                    query1
                )
            query = query1
        ret = self._execute_helper(query, timeout=timeout,
                                   statement_params=_statement_params,
                                   is_internal=_is_internal,
                                   _no_results=_no_results,
                                   _is_put_get=_is_put_get)
        self._sfqid = ret[u'data'][
            u'queryId'] if u'data' in ret and u'queryId' in ret[
            u'data'] else None
        self._sqlstate = ret[u'data'][
            u'sqlState'] if u'data' in ret and u'sqlState' in ret[
            u'data'] else None
        self.logger.debug(u'sfqid=%s', self._sfqid)
        if ret[u'success']:
            self.logger.debug(u'SUCCESS')
            data = ret[u'data']
            # track session context changes reported back by the server
            if u'finalDatabaseName' in data:
                self._connection._database = data[u'finalDatabaseName']
            if u'finalSchemaName' in data:
                self._connection._schema = data[u'finalSchemaName']
            if u'finalWarehouseName' in data:
                self._connection._warehouse = data[u'finalWarehouseName']
            if u'finalRoleName' in data:
                self._connection._role = data[u'finalRoleName']
            self.logger.debug(u"PUT OR GET: %s", self.is_file_transfer)
            if self.is_file_transfer:
                # PUT/GET: delegate to the file transfer agent and use its
                # result as the rowset
                sf_file_transfer_agent = SnowflakeFileTransferAgent(
                    self, query, ret,
                    put_callback=_put_callback,
                    put_callback_output_stream=_put_callback_output_stream,
                    get_callback=_get_callback,
                    get_callback_output_stream=_get_callback_output_stream)
                sf_file_transfer_agent.execute()
                data = sf_file_transfer_agent.result()
                self._total_rowcount = len(data[u'rowset']) if \
                    u'rowset' in data else -1
            m = self.ALTER_SESSION_RE.match(query)
            if m:
                # session parameters
                param = m.group(1).upper()
                value = m.group(2)
                self._connection.converter.set_parameter(param, value)
            if _no_results:
                # async execution: only the row total is known at this point
                self._total_rowcount = ret[u'data'][
                    u'total'] if u'data' in ret and u'total' in ret[
                    u'data'] else -1
                return data
            self.chunk_info(data, use_ijson=_use_ijson)
        else:
            self._total_rowcount = ret[u'data'][
                u'total'] if u'data' in ret and u'total' in ret[u'data'] else -1
            self.logger.info(u'failed')
            self.logger.debug(ret)
            err = ret[u'message']
            code = ret[u'code'] if u'code' in ret else None
            if u'data' in ret and u'errorMessage' in ret[u'data']:
                err += ret[u'data'][u'errorMessage']
            errvalue = {
                u'msg': err,
                u'errno': int(code),
                u'sqlstate': self._sqlstate,
                u'sfqid': self._sfqid
            }
            Error.errorhandler_wrapper(self.connection, self,
                                       ProgrammingError,
                                       errvalue)
        return self
def _is_dml(self, data):
return u'statementTypeId' in data \
and int(data[u'statementTypeId']) in \
STATEMENT_TYPE_ID_DML_SET
    def chunk_info(self, data, use_ijson=False):
        """
        Initialize fetch state from a query-result payload.

        Builds the DB API description, per-column converters and the first
        rowset iterator from *data*; when the server returned extra result
        chunks, also starts the chunk downloader. For DML statements, the
        total rowcount is accumulated from the affected-row columns.
        """
        is_dml = self._is_dml(data)
        if self._total_rowcount == -1 and not is_dml and data.get(u'total') \
                is not None:
            self._total_rowcount = data['total']
        self._description = []
        self._column_idx_to_name = {}
        self._column_converter = []
        for idx, column in enumerate(data[u'rowtype']):
            self._column_idx_to_name[idx] = column[u'name']
            type_value = FIELD_NAME_TO_ID[column[u'type'].upper()]
            # DB API 7-tuple: name, type_code, display_size, internal_size,
            # precision, scale, null_ok
            self._description.append((column[u'name'],
                                      type_value,
                                      None,
                                      column[u'length'],
                                      column[u'precision'],
                                      column[u'scale'],
                                      column[u'nullable']))
            self._column_converter.append(
                self._connection.converter.to_python_method(
                    column[u'type'].upper(), column))
        self._total_row_index = -1  # last fetched number of rows
        self._chunk_index = 0
        self._chunk_count = 0
        self._current_chunk_row = iter(data.get(u'rowset'))
        self._current_chunk_row_count = len(data.get(u'rowset'))
        if u'chunks' in data:
            chunks = data[u'chunks']
            self._chunk_count = len(chunks)
            self.logger.debug(u'chunk size=%s', self._chunk_count)
            # prepare the downloader for further fetch
            qrmk = data[u'qrmk'] if u'qrmk' in data else None
            chunk_headers = None
            if u'chunkHeaders' in data:
                chunk_headers = {}
                for header_key, header_value in data[
                        u'chunkHeaders'].items():
                    chunk_headers[header_key] = header_value
                    self.logger.debug(
                        u'added chunk header: key=%s, value=%s',
                        header_key,
                        header_value)
            self.logger.debug(u'qrmk=%s', qrmk)
            self._chunk_downloader = self._connection._chunk_downloader_class(
                chunks, self._connection, self, qrmk, chunk_headers,
                prefetch_slots=self._client_result_prefetch_slots,
                prefetch_threads=self._client_result_prefetch_threads,
                use_ijson=use_ijson)
        if is_dml:
            # sum the "number of rows ..." columns of the single result row
            updated_rows = 0
            for idx, desc in enumerate(self._description):
                if desc[0] in (
                        u'number of rows updated',
                        u'number of multi-joined rows updated',
                        u'number of rows deleted') or \
                        desc[0].startswith(u'number of rows inserted'):
                    updated_rows += int(data[u'rowset'][0][idx])
            if self._total_rowcount == -1:
                self._total_rowcount = updated_rows
            else:
                self._total_rowcount += updated_rows
    def query_result(self, qid, _use_ijson=False):
        """
        Fetch the result of a previously submitted query *qid*.

        On success, initializes fetch state via chunk_info and returns self;
        on failure, routes the server error through the error handler.
        """
        url = ('/queries/{qid}/result').format(qid=qid)
        ret = self._connection._con.request(url=url, method='get')
        self._sfqid = ret[u'data'][
            u'queryId'] if u'data' in ret and u'queryId' in ret[
            u'data'] else None
        self._sqlstate = ret[u'data'][
            u'sqlState'] if u'data' in ret and u'sqlState' in ret[
            u'data'] else None
        self.logger.debug(u'sfqid=%s', self._sfqid)
        if ret.get(u'success'):
            data = ret.get(u'data')
            self.chunk_info(data, use_ijson=_use_ijson)
        else:
            self.logger.info(u'failed')
            self.logger.debug(ret)
            err = ret[u'message']
            code = ret[u'code'] if u'code' in ret else None
            if u'data' in ret and u'errorMessage' in ret[u'data']:
                err += ret[u'data'][u'errorMessage']
            errvalue = {
                u'msg': err,
                u'errno': int(code),
                u'sqlstate': self._sqlstate,
                u'sfqid': self._sfqid
            }
            Error.errorhandler_wrapper(self.connection, self,
                                       ProgrammingError,
                                       errvalue)
        return self
def abort_query(self, qid):
url = '/queries/{qid}/abort-request'.format(qid=qid)
ret = self._connection._con.request(url=url, method='post')
return ret.get(u'success')
    def executemany(self, command, seqparams):
        u"""
        Executes a command/query with the given set of parameters sequentially.

        INSERT ... VALUES statements are rewritten into a single multi-row
        INSERT; any other statement is executed once per parameter set.
        """
        self.logger.info(u'executing many SQLs/commands')
        command = command.strip(u' \t\n\r') if command else None
        if self.INSERT_SQL_RE.match(command):
            self.logger.debug(u'rewriting INSERT query')
            # comments would confuse the VALUES-clause match below
            command_wo_comments = re.sub(self.COMMENT_SQL_RE, u'', command)
            m = self.INSERT_SQL_VALUES_RE.match(command_wo_comments)
            if not m:
                errorvalue = {
                    u'msg': u"Failed to rewrite multi-row insert",
                    u'errno': ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT
                }
                Error.errorhandler_wrapper(
                    self.connection, self, InterfaceError, errorvalue
                )
            fmt = m.group(1)
            values = []
            for param in seqparams:
                self.logger.debug(u'parameter: %s', param)
                values.append(fmt % self.__process_params(param))
            # replace the single VALUES tuple with the joined list, once
            command = command.replace(fmt, u','.join(values), 1)
            self.execute(command)
            return self
        self.reset()
        for param in seqparams:
            self.execute(command, param, _do_reset=False)
        return self
    def fetchone(self):
        """
        Fetch one row.

        Returns the next converted row, pulling the next result chunk from
        the chunk downloader when the current one is exhausted, or None once
        all rows (and chunks) have been consumed.
        """
        try:
            row = None
            self._total_row_index += 1
            try:
                row = next(self._current_chunk_row)
            except StopIteration:
                if self._chunk_index < self._chunk_count:
                    # more chunks available: advance to the next one
                    self.logger.debug(
                        u"chunk index: %s, chunk_count: %s",
                        self._chunk_index, self._chunk_count)
                    next_chunk = self._chunk_downloader.next_chunk()
                    self._current_chunk_row_count = next_chunk.row_count
                    self._current_chunk_row = next_chunk.result_data
                    self._chunk_index += 1
                    try:
                        row = next(self._current_chunk_row)
                    except StopIteration:
                        # empty chunk: treated as end of data (see below)
                        raise IndexError
                else:
                    # no more chunks: shut down the downloader, clear state
                    if self._chunk_count > 0 and \
                            self._chunk_downloader is not None:
                        self._chunk_downloader.terminate()
                        self._chunk_downloader = None
                        self._chunk_count = 0
                    self._current_chunk_row = iter(())
            return self._row_to_python(row) if row is not None else None
        except IndexError:
            # returns None if the iteration is completed so that iter() stops
            return None
def fetchmany(self, size=None):
u"""
Fetch the number of specified rows
"""
if size is None:
size = self.arraysize
if size < 0:
errorvalue = {
u'msg': (u"The number of rows is not zero or "
u"positive number: {0}").format(
size),
u'errno': ER_NOT_POSITIVE_SIZE}
Error.errorhandler_wrapper(
self.connection, self, ProgrammingError, errorvalue)
ret = []
while size > 0:
row = self.fetchone()
if row is None:
break
ret.append(row)
if size is not None:
size -= 1
return ret
def fetchall(self):
u"""
Fetch all data
"""
ret = []
while True:
row = self.fetchone()
if row is None:
break
ret.append(row)
return ret
def nextset(self):
u"""
Not supporeted
"""
self.logger.info(u'nop')
return None
def setinputsizes(self, sizes):
u"""
Not supported
"""
del sizes
self.logger.info(u'nop')
def setoutputsize(self, size, column=None):
u"""
Not supported
"""
del column, size
self.logger.info(u'nop')
def scroll(self, value, mode=u'relative'):
Error.errorhandler_wrapper(
self.connection, self,
NotSupportedError,
{
u'msg': u"scroll is not supported.",
u'errno': ER_UNSUPPORTED_METHOD,
u'sqlstate': SQLSTATE_FEATURE_NOT_SUPPORTED})
def reset(self):
u"""
Reset the result set
"""
self._total_rowcount = -1 # reset the rowcount
self._total_row_index = -1 # last fetched number of rows
self._current_chunk_row_count = 0
self._current_chunk_row = iter(())
self._chunk_index = 0
if hasattr(self, u'_chunk_count') and self._chunk_count > 0 and \
self._chunk_downloader is not None:
self._chunk_downloader.terminate()
self._chunk_count = 0
self._chunk_downloader = None
def __iter__(self):
u"""
Iteration over the result set
"""
return iter(self.fetchone, None)
    def __cancel_query(self, query):
        # Send a cancel request for the in-flight query. No-op when nothing
        # is running (sequence counter unset) or the cursor is closed; the
        # cancel itself is serialized under the connection's canceling lock.
        if self._sequence_counter >= 0 and not self.is_closed():
            self.logger.debug(u'canceled : %s, %s',
                              query, self._sequence_counter)
            with self._lock_canceling:
                self._connection._cancel_query(
                    query,
                    self._sequence_counter,
                    self._request_id)
def __process_params_dict(self, params):
try:
to_snowflake = self._connection.converter.to_snowflake
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k, v in params.items():
c = v
c = to_snowflake(c)
c = escape(c)
c = quote(c)
res[k] = c
self.logger.debug(u'parameters: %s', res)
return res
except Exception as e:
errorvalue = {
u'msg': u"Failed processing pyformat-parameters; {0}".format(
e),
u'errno': ER_FAILED_PROCESSING_PYFORMAT}
Error.errorhandler_wrapper(
self.connection, self, ProgrammingError, errorvalue)
def __process_params(self, params):
if params is None:
return {}
if isinstance(params, dict):
return self.__process_params_dict(params)
if not isinstance(params, (tuple, list)):
params = [params, ]
try:
res = params
res = map(self._connection.converter.to_snowflake, res)
res = map(self._connection.converter.escape, res)
res = map(self._connection.converter.quote, res)
ret = tuple(res)
self.logger.debug(u'parameters: %s', ret)
return ret
except Exception as e:
errorvalue = {
u'msg': u"Failed processing pyformat-parameters; {0}".format(
e),
u'errno': ER_FAILED_PROCESSING_PYFORMAT}
Error.errorhandler_wrapper(self.connection, self,
ProgrammingError,
errorvalue)
    def _row_to_python(self, row):
        """
        Converts data in row if required.

        Applies the per-column converter built by chunk_info to each value
        in place, then returns the row as a tuple. None values and columns
        without a converter pass through untouched; conversion failures are
        reported through the error handler.

        NOTE: surprisingly using idx+1 is faster than enumerate here. Also
        removing generator improved performance even better.
        """
        idx = 0
        for col in row:
            conv = self._column_converter[idx]
            try:
                row[idx] = col if conv is None or col is None else conv(col)
            except Exception as e:
                col_desc = self._description[idx]
                msg = u'Failed to convert: ' \
                      u'field {name}: {type}::{value}, Error: ' \
                      u'{error}'.format(
                          name=col_desc[0],
                          type=FIELD_ID_TO_NAME[col_desc[1]],
                          value=col,
                          error=e
                      )
                self.logger.exception(msg)
                Error.errorhandler_wrapper(
                    self.connection, self, InterfaceError, {
                        u'msg': msg,
                        u'errno': ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE,
                    })
            idx += 1
        return tuple(row)
    def __enter__(self):
        """
        Context manager entry: returns the cursor itself.
        """
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Context manager exit: closes the cursor regardless of exceptions.
        """
        self.close()
class DictCursor(SnowflakeCursor):
    """
    Cursor subclass that returns each row as a dict keyed by column name.
    """

    def __init__(self, connection):
        SnowflakeCursor.__init__(self, connection)

    def _row_to_python(self, row):
        # Same per-column conversion as the base class, but accumulates the
        # converted values into a {column_name: value} mapping.
        converted = {}
        for idx, raw in enumerate(row):
            name = self._column_idx_to_name[idx]
            conv = self._column_converter[idx]
            try:
                converted[name] = \
                    raw if conv is None or raw is None else conv(raw)
            except Exception as e:
                col_desc = self._description[idx]
                msg = u'Failed to convert: ' \
                      u'field {name}: {type}::{value}, Error: ' \
                      u'{error}'.format(
                          name=col_desc[0],
                          type=FIELD_ID_TO_NAME[col_desc[1]],
                          value=raw,
                          error=e
                      )
                self.logger.exception(msg)
                Error.errorhandler_wrapper(
                    self.connection, self, InterfaceError, {
                        u'msg': msg,
                        u'errno': ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE,
                    })
        return converted
|
|
# some methods should use 'put' instead of 'get'
# some seem to require 'delete' now?
# use the right (latest) version of this:
# http://s3.amazonaws.com/h2o-release/h2o-dev/master/1019/docs-website/REST/endpoints/markdown/toc.md
import os, sys, time, requests, zipfile, StringIO, re
import h2o_args
# from h2o_cmd import runInspect, infoFromSummary
import h2o_cmd, h2o_util, h2o_browse as h2b, h2o_sandbox
from h2o_objects import H2O
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors, get_sandbox_name, log
import urllib
def poll_job2(self, firstResult, algo=None, timeoutSecs=60, noPoll=False, **kwargs):
    """Poll the job described by *firstResult* until completion.

    Returns firstResult unchanged when noPoll is set or when the first
    response reported parameter validation errors; otherwise polls the
    job key and returns the final job result. Raises on FAILED status
    or when no job result comes back.
    """
    if noPoll:
        result = firstResult
    elif 'validation_error_count' in firstResult:
        # parameter errors mean there is no job to poll
        print("WARNING: parameter error in %s" % algo)
        result = firstResult
    else:
        start = time.time()  # FIX: 'start' was never initialized before use
        # FIX: the original referenced an undefined name 'result1' here
        job_result = firstResult['jobs'][0]
        job_key = job_result['key']['name']
        verboseprint("%s job_key: %s" % (algo, job_key))
        job_result = self.poll_job(job_key, timeoutSecs=timeoutSecs)
        verboseprint(job_result)
        elapsed = time.time() - start
        # FIX: 'training_frame' was undefined; report it from kwargs if given
        print("%s end on %s took %s seconds" % (
            algo, kwargs.get('training_frame'), elapsed))
        print("%d pct. of timeout" % ((elapsed / timeoutSecs) * 100))
        if job_result:
            jobs = job_result['jobs'][0]
            description = jobs['description']
            dest = jobs['dest']
            msec = jobs['msec']
            status = jobs['status']
            progress = jobs['progress']
            if status == 'FAILED':
                print(dump_json(job_result))
                raise Exception("Taking exception on %s job status: %s %s %s %s" % \
                    (algo, status, progress, msec, description))
            result = job_result
        else:
            raise Exception("build_model didn't get a job_result when it expected one")
    verboseprint("result:", result)
    h2o_sandbox.check_sandbox_for_errors()
    return result
# This is done before import h2o_ray, which imports h2o_methods!
# ignoreNone is used if new = None shouldn't overwrite. Normally it does!
def check_params_update_kwargs(params_dict, kw, function, print_params, ignoreNone=False):
    # only update params_dict..don't add
    # throw away anything else as it should come from the model (propagating what RF used)
    # ignoreNone: when True, falsy values in kw do NOT overwrite an existing
    # entry (normally any value does). Raises for keys not already present
    # in params_dict so illegal parameters are caught early.
    for k,v in kw.iteritems():
        if k in params_dict:
            if v or not ignoreNone:
                # what if a type conversion happens here?
                params_dict[k] = v
        else:
            raise Exception("illegal parameter '%s' with value '%s' in %s" % (k, v, function))
    if print_params:
        print "\n%s parameters:" % function, params_dict
        sys.stdout.flush()
def get_cloud(self, noExtraErrorCheck=False, timeoutSecs=10):
    # Fetch 1/Cloud.json, log a one-line status summary (node id, size,
    # consensus, locked, version) and return the full parsed response.
    # hardwire it to allow a 60 second timeout
    a = self.do_json_request('1/Cloud.json', noExtraErrorCheck=noExtraErrorCheck, timeout=timeoutSecs)
    # verboseprint(a)
    version = a['version']
    # local builds have (unknown) if not version.startswith('0'):
    # local builds have (unknown)     raise Exception("h2o version at node[0] doesn't look like h2o-dev version. (start with 0) %s" % version)
    consensus = a['consensus']
    locked = a['locked']
    cloud_size = a['cloud_size']
    cloud_name = a['cloud_name']
    node_id = self.node_id
    verboseprint('%s%s %s%s %s%s %s%s %s%s' % (
        "\tnode_id: ", node_id,
        "\tcloud_size: ", cloud_size,
        "\tconsensus: ", consensus,
        "\tlocked: ", locked,
        "\tversion: ", version,
    ))
    return a
def h2o_log_msg(self, message=None, timeoutSecs=15):
    """Echo *message*, wrapped in a python_test_name banner, into the h2o log."""
    banner = message if message else "\n"
    banner += "\n#***********************"
    banner += "\npython_test_name: " + h2o_args.python_test_name
    banner += "\n#***********************"
    self.do_json_request(
        '3/LogAndEcho.json', cmd='post', params={'message': banner},
        timeout=timeoutSecs)
def get_timeline(self):
    """Fetch and return the cloud timeline from 2/Timeline.json."""
    return self.do_json_request('2/Timeline.json')
# Shutdown url is like a reset button. Doesn't send a response before it kills stuff
# safer if random things are wedged, rather than requiring response
# so request library might retry and get exception. allow that.
def shutdown_all(self):
    """Ask the node to shut the cloud down; best-effort, always returns True.

    Shutdown kills the node before a response can be sent, so the request
    library may retry and raise -- that exception is expected and ignored.
    """
    try:
        self.do_json_request('2/Shutdown.json', cmd='post', noExtraErrorCheck=True)
    except Exception:
        # FIX: was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; only request-level failures should be ignored.
        print("Got exception on Shutdown.json. Ignoring")
    # don't want delays between sending these to each node
    # if you care, wait after you send them to each node
    return True
#*******************************************************************************
# examples from prithvi
# http://localhost:54321/Typeahead.json/files?src=?&limit=?
# http://localhost:54321/Typeahead.json/files?src=.%2Fsmalldata%2Fairlines%2F&limit=10
def typeahead(self, timeoutSecs=10, **kwargs):
    """Query 2/Typeahead.json/files for path completions (src, limit)."""
    params_dict = {'src': None, 'limit': None}
    check_params_update_kwargs(params_dict, kwargs, 'typeahead', print_params=True)
    # the endpoint oddly requires the trailing /files
    result = self.do_json_request(
        '2/Typeahead.json/files', params=params_dict, timeout=timeoutSecs)
    verboseprint("\ntypeahead result:", dump_json(result))
    return result
#*******************************************************************************
def unlock(self, timeoutSecs=30, **kwargs):
    """Unlock all keys via 2/UnlockKeys.json and return the response."""
    return self.do_json_request(
        '2/UnlockKeys.json', params=None, timeout=timeoutSecs)
def remove_all_keys(self, timeoutSecs=120):
    """Delete every key in the cloud via 1/RemoveAll.json."""
    return self.do_json_request(
        '1/RemoveAll.json', cmd='delete', timeout=timeoutSecs)
# ignore errors on remove..key might already be gone due to h2o removing it now after parse
def remove_key(self, key, timeoutSecs=120):
    """Delete one *key*, ignoring h2o errors (it may already be gone),
    then unlock the remaining keys."""
    result = self.do_json_request(
        '1/Remove.json',
        params={"key": key}, ignoreH2oError=True, cmd='delete',
        timeout=timeoutSecs)
    self.unlock()
    return result
def jobs_admin(self, timeoutSecs=120, **kwargs):
    """List jobs via 3/Jobs.json; extra kwargs are passed straight through."""
    params_dict = {}
    params_dict.update(kwargs)
    verboseprint("\njobs_admin:", params_dict)
    result = self.do_json_request(
        '3/Jobs.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\njobs_admin result:", dump_json(result))
    return result
#******************************************************************************************8
def put_file(self, f, key=None, timeoutSecs=60):
    """Upload local file *f* via 3/PostFile.json under *key*.

    Defaults the destination key to the file's basename; returns the key.
    """
    if key is None:
        key = os.path.basename(f)
    # FIX: use a context manager so the handle is closed even when the
    # request raises; the original leaked the file object on error.
    with open(f, 'rb') as fileObj:
        resp = self.do_json_request(
            '3/PostFile.json',
            cmd='post',
            timeout=timeoutSecs,
            params={"destination_key": key},
            files={"file": fileObj},
            extraComment=str(f))
    verboseprint("\nput_file response: ", dump_json(resp))
    return key
def csv_download(self, key, csvPathname, timeoutSecs=60, **kwargs):
    """Download frame *key* as csv into *csvPathname* (1KB chunks)."""
    params = {'key': key}
    paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
    url = self.url('3/DownloadDataset.json')
    log('Start ' + url + paramsStr, comment=csvPathname)
    r = requests.get(url, params=params, timeout=timeoutSecs)
    print("csv_download r.headers: %s" % (r.headers,))
    if r.status_code == 200:
        # FIX: context manager guarantees the file is flushed and closed;
        # the original never closed the handle.
        with open(csvPathname, 'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)
        print("%s size: %s" % (csvPathname, h2o_util.file_size_formatted(csvPathname)))
def log_view(self, timeoutSecs=10, **kwargs):
    """Fetch LogView.json and return the parsed response."""
    result = self.do_json_request('LogView.json', timeout=timeoutSecs)
    verboseprint("\nlog_view result:", dump_json(result))
    return result
def log_download(self, logDir=None, timeoutSecs=30, **kwargs):
    """Download and unpack the h2o log zip(s) into *logDir*.

    The download is a zip of zips: the outer archive is extracted into
    logDir, then each inner zip is flattened there. Returns the list of
    unzipped log files after printing a line count for each.
    """
    if logDir is None:
        logDir = get_sandbox_name()
    url = self.url('LogDownload.json')
    log('Start ' + url)
    print("\nDownloading h2o log(s) using: %s" % url)
    r = requests.get(url, timeout=timeoutSecs, **kwargs)
    if not r or not r.ok:
        # FIX: the original formatted this message with the wrong number of
        # arguments (and used the locally shadowed name 'inspect'), which
        # raised TypeError instead of the intended Exception.
        raise Exception("Maybe bad url? no r in log_download %s" % url)
    z = zipfile.ZipFile(StringIO.StringIO(r.content))
    print("z.namelist: %s" % z.namelist())
    print("z.printdir: %s" % z.printdir())
    nameList = z.namelist()
    # the first entry is the h2ologs dir name
    h2oLogDir = logDir + "/" + nameList.pop(0)
    print("h2oLogDir: %s" % h2oLogDir)
    print("logDir: %s" % logDir)
    # it's a zip of zipped files: unzip the outer archive first
    z = zipfile.ZipFile(StringIO.StringIO(r.content))
    z.extractall(logDir)
    # unzipped file should be in logDir now; unzip the inner zips there
    for zname in nameList:
        resultList = h2o_util.flat_unzip(logDir + "/" + zname, logDir)
    print("\nlogDir: %s" % logDir)
    for logfile in resultList:
        numLines = sum(1 for line in open(logfile))
        print("%s Lines: %s" % (logfile, numLines))
    print("")
    return resultList
#******************************************************************************************8
def inspect(self, key, offset=None, view=None, max_column_display=1000, ignoreH2oError=False,
        timeoutSecs=30):
    """Inspect *key* via 1/Inspect.json and return the parsed response.

    'view' and 'max_column_display' are accepted for old-test
    compatibility but are not sent to the endpoint.
    """
    params = {
        "key": key,
        "offset": offset,
    }
    return self.do_json_request(
        '1/Inspect.json',
        params=params,
        ignoreH2oError=ignoreH2oError,
        timeout=timeoutSecs)
#******************************************************************************************8
def split_frame(self, timeoutSecs=120, noPoll=False, **kwargs):
    # POST 2/SplitFrame.json (dataset, ratios, destKeys) and poll the
    # resulting job unless noPoll; returns the final job result (or the
    # first response when noPoll).
    params_dict = {
        'dataset': None,
        'ratios': None,
        'destKeys': None, # ['bigger', 'smaller']
    }
    check_params_update_kwargs(params_dict, kwargs, 'split_frame', print_params=True)
    firstResult = self.do_json_request('2/SplitFrame.json', cmd='post', timeout=timeoutSecs, params=params_dict)
    print "firstResult:", dump_json(firstResult)
    # FIX! what is ['dest']['name'] ..It's not there at the beginning?
    job_key = firstResult['key']['name']
    if noPoll:
        h2o_sandbox.check_sandbox_for_errors()
        return firstResult
    # is it polllable while it's in the CREATED state? msec looks wrong. start_time is 0
    time.sleep(2)
    result = self.poll_job(job_key)
    verboseprint("split_frame result:", dump_json(result))
    return result
#******************************************************************************************8
def create_frame(self, timeoutSecs=120, noPoll=False, **kwargs):
    """Kick off 2/CreateFrame.json and (unless noPoll) poll it to completion."""
    # FIX! have to add legal params
    params_dict = {
    }
    check_params_update_kwargs(params_dict, kwargs, 'create_frame', print_params=True)
    firstResult = self.do_json_request(
        '2/CreateFrame.json', cmd='post', timeout=timeoutSecs, params=params_dict)
    job_key = firstResult['dest']['name']
    if noPoll:
        h2o_sandbox.check_sandbox_for_errors()
        return firstResult
    result = self.poll_job(job_key)
    verboseprint("create_frame result:", dump_json(result))
    return result
#******************************************************************************************8
def rapids(self, timeoutSecs=120, ignoreH2oError=False, **kwargs):
    """POST an 'ast' or 'funs' expression to 3/Rapids.json."""
    # FIX! assume both of these are strings for now, not lists
    for arg in ('ast', 'funs'):
        if kwargs.get(arg) is not None:
            assert isinstance(kwargs[arg], basestring), \
                "only string assumed? %s" % kwargs[arg]
    # currently runExec only does one or the other
    params_dict = {'ast': None, 'funs': None}
    check_params_update_kwargs(params_dict, kwargs, 'rapids', True)
    result = self.do_json_request(
        '3/Rapids.json', cmd='post', timeout=timeoutSecs, postData=params_dict)
    verboseprint("rapids result:", dump_json(result))
    # FIX! maybe add something for ignoring conditionally?
    if 'exception' in result and result['exception'] and not ignoreH2oError:
        exception = result['exception']
        raise Exception('rapids with kwargs:\n%s\ngot exception:\n"%s"\n' % (dump_json(kwargs), exception))
    h2o_sandbox.check_sandbox_for_errors()
    return result
#******************************************************************************************8
def rapids_iseval(self, timeoutSecs=120, ignoreH2oError=False, **kwargs):
    """GET 3/Rapids.json/isEval for an ast_key and return the response.

    Raises on an h2o-reported exception unless ignoreH2oError is set.
    """
    # FIX! assume this is a string for now, not a list
    if 'ast_key' in kwargs and kwargs['ast_key'] is not None:
        assert isinstance(kwargs['ast_key'], basestring), "only string assumed? %s" % kwargs['ast_key']
    params_dict = {
        'ast_key': None,
    }
    check_params_update_kwargs(params_dict, kwargs, 'rapids_iseval', True)
    # doesn't like 'put' here?
    # doesn't like empty key
    result = self.do_json_request('3/Rapids.json/isEval', cmd='get', timeout=timeoutSecs, params=params_dict)
    verboseprint("rapids_iseval result:", dump_json(result))
    # FIX! maybe add something for ignoring conditionally?
    if 'exception' in result and result['exception'] and not ignoreH2oError:
        exception = result['exception']
        # FIX: the message used to say 'rapids', hiding which call failed
        raise Exception('rapids_iseval with kwargs:\n%s\ngot exception:\n"%s"\n' % (dump_json(kwargs), exception))
    h2o_sandbox.check_sandbox_for_errors()
    return result
#******************************************************************************************8
def quantiles(self, timeoutSecs=300, print_params=True, **kwargs):
    """Compute quantiles via 1/Quantiles.json and return the response."""
    params_dict = {
        'destination_key': None,
        'training_frame': None,
        'validation_frame': None,
        'ignored_columns': None,
        'score_each_iteration': None,
        'probs': None,
    }
    check_params_update_kwargs(params_dict, kwargs, 'quantiles', print_params)
    result = self.do_json_request(
        '1/Quantiles.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\nquantiles result:", dump_json(result))
    h2o_sandbox.check_sandbox_for_errors()
    return result
#******************************************************************************************8
# attach methods to H2O object
# this happens before any H2O instances are created
# this file is imported into h2o
# Methods defined above in this module:
# ray has jobs below..is this old?
H2O.jobs_admin = jobs_admin
H2O.get_cloud = get_cloud
H2O.shutdown_all = shutdown_all
H2O.h2o_log_msg = h2o_log_msg
H2O.inspect = inspect
H2O.quantiles = quantiles
H2O.rapids = rapids
H2O.rapids_iseval = rapids_iseval
H2O.unlock = unlock
H2O.typeahead = typeahead
H2O.get_timeline = get_timeline
H2O.split_frame = split_frame
H2O.create_frame = create_frame
H2O.log_view = log_view
H2O.log_download = log_download
H2O.csv_download = csv_download
H2O.put_file = put_file
H2O.remove_all_keys = remove_all_keys
H2O.remove_key = remove_key
# attach some methods from ray
# (imported here, after the defs above, to avoid a circular import)
import h2o_ray
H2O.jobs = h2o_ray.jobs
H2O.poll_job = h2o_ray.poll_job
H2O.import_files = h2o_ray.import_files
H2O.parse = h2o_ray.parse
H2O.frames = h2o_ray.frames
H2O.columns = h2o_ray.columns
H2O.column = h2o_ray.column
H2O.summary = h2o_ray.summary
H2O.delete_frame = h2o_ray.delete_frame
H2O.delete_frames = h2o_ray.delete_frames
H2O.model_builders = h2o_ray.model_builders
H2O.validate_model_parameters = h2o_ray.validate_model_parameters
H2O.build_model = h2o_ray.build_model
H2O.compute_model_metrics = h2o_ray.compute_model_metrics
H2O.predict = h2o_ray.predict
H2O.model_metrics = h2o_ray.model_metrics
H2O.models = h2o_ray.models
H2O.delete_model = h2o_ray.delete_model
H2O.delete_models = h2o_ray.delete_models
H2O.endpoints = h2o_ray.endpoints
H2O.endpoint_by_number = h2o_ray.endpoint_by_number
|
|
"""Attempts to figure out what version of things we're using in
playdoh-lib. This is made exceedingly difficult because of the
following things:
1. We install with pip to lib/python/, but nix the .egg-info files so
we have no clue what we installed unless the package also has the
version information tucked away somewhere.
This is dumb--we should stop doing this.
2. Some projects squirrel the version away in a place that's difficult
to pull out.
3. We have a few projects that apparently don't like doing version
releases.
Also interesting and vaguely related is that we're not including the
license that the code we're distributing is distributed under. That's
a huge license fail.
"""
import importlib
import logging
import os
import re
import site
import sys
import yaml
from victor import __version__
def fix_sys_path(cfg):
    """Adds sitedirs and moves packages to the beginning of sys.path

    Uses "sitedirs" key in config which is a list of paths. No-op when
    the key is absent.
    """
    if 'sitedirs' not in cfg:
        return
    before = list(sys.path)
    for sitedir in cfg['sitedirs']:
        site.addsitedir(sitedir)
    # anything addsitedir appended gets promoted to the front, in order
    promoted = [entry for entry in list(sys.path) if entry not in before]
    for entry in promoted:
        sys.path.remove(entry)
    sys.path[:0] = promoted
class NoVersion(Exception):
    """Raised when no version information can be divined for a module."""
    pass
def get_version_from_module(module_name):
    """Import *module_name* and return its first version-ish attribute.

    Checks __version__, VERSION, majorVersionId and ver in that order;
    raises NoVersion when none of them exist.
    """
    mod = importlib.import_module(module_name)
    _missing = object()
    for attr in ('__version__', 'VERSION', 'majorVersionId', 'ver'):
        value = getattr(mod, attr, _missing)
        if value is not _missing:
            return value
    raise NoVersion('{0}: {1}'.format(module_name, dir(mod)))
def get_version_from_requirement(line):
    """Split a requirements line into (name, version-spec) groups.

    'foo==1.2' -> ('foo', '1.2'); a bare 'foo' yields ('foo', '').
    """
    pattern = re.compile(
        r'^([^=><]+)(?:\s*[=><]*\s*([^=><]*?))?$')
    return pattern.match(line).groups(0)
def get_version(module_name, verbosity=0):
    """Best-effort version lookup for *module_name*.

    Tries version attributes on the module and on its __version__
    submodule, then falls back to grepping a setup.py that some
    packages ship inside the source tree. Raises NoVersion when every
    strategy fails.
    """
    if verbosity:
        print('>>> %s' % module_name)
    # Try importing various possible version modules
    for version_module in (module_name,
                           '.'.join([module_name, '__version__']),
                           ):
        try:
            return get_version_from_module(version_module)
        except (NoVersion, ImportError):
            logging.exception(version_module)
    # There are a couple of packages that have setup.py **in** the
    # source which is beyond bizarre, but whatevs.
    try:
        mod = importlib.import_module(module_name)
        setup_py = os.path.join(os.path.dirname(mod.__file__), 'setup.py')
        # FIX: context manager so the file handle is always closed; the
        # original never closed it.
        with open(setup_py, 'r') as fp:
            for line in fp:
                line = line.strip()
                if line.startswith('version'):
                    parts = line.split('=')
                    if len(parts) > 1:
                        return parts[1].strip().strip('"\',')
    except (ImportError, IOError):
        logging.exception(module_name)
    raise NoVersion('{0}'.format(module_name))
def get_blacklist(cfg):
    """Returns list of blacklisted items

    An item is in the blacklist because victor can't divine version
    information for it; keeping it listed documents that it's
    problematic. A missing or None "blacklist" key yields [].
    """
    configured = cfg.get('blacklist')
    if not configured:
        # handles the missing-key and explicit-None cases alike
        return []
    return configured
def load_cfg(cfg_fn):
    """Parse the YAML config file *cfg_fn* and return its data.

    FIX: uses yaml.safe_load -- the config is plain data, and yaml.load
    without an explicit Loader allows arbitrary object construction --
    and a context manager so the file handle is closed (the original
    leaked it).
    """
    with open(cfg_fn, 'rb') as fp:
        return yaml.safe_load(fp)
def cmdline_handler(scriptname, argv):
    """Entry point: report divined versions for every configured package.

    Reads victor.yaml, walks the ``packagelist`` entries (a directory, a
    "REQ <file>" requirements file, or a bare module name), and prints the
    version found for each, then the blacklist of known-unversionable items.

    All prints use the parenthesized single-argument form so output is
    identical under Python 2 and Python 3.
    """
    print('{0}: {1}'.format(scriptname, __version__))
    logging.basicConfig(level=logging.CRITICAL)
    cfg = load_cfg('victor.yaml')
    logging.debug('Fixing sys path...')
    fix_sys_path(cfg)
    logging.debug('Getting blacklist...')
    blacklist = get_blacklist(cfg)
    logging.debug('Going through packagelist...')
    package_to_version = {}

    def record(name):
        # Shared tail of the directory and REQ branches: strip a trailing
        # .py and record the divined version (or a NO VERSION marker).
        if name.endswith('.py'):
            name = name[:-3]
        try:
            package_to_version[name] = get_version(name)
        except NoVersion:
            package_to_version[name] = 'NO VERSION'

    for mem in cfg.get('packagelist', []):
        if mem.endswith(os.sep) or os.path.isdir(mem):
            # A directory: every .py file / package dir inside is a candidate.
            for mod in os.listdir(mem):
                name, extension = os.path.splitext(mod)
                if extension not in ('.py', ''):
                    logging.debug('skipping {0}: wrong file type'.format(mod))
                    continue
                # NOTE: the blacklist is checked against the raw directory
                # entry (extension included), matching historical behavior.
                if mod in blacklist:
                    logging.debug('skipping {0}: in blacklist'.format(mod))
                    continue
                record(mod)
        elif mem.startswith('REQ '):
            # A "REQ path" entry: parse a pip requirements file.
            # Fix: close the file deterministically (original leaked it).
            with open(mem[4:].strip(), 'r') as fp:
                for line in fp:
                    line = line.strip()
                    if not line or line.startswith('#'):
                        continue
                    mod, _pinned = get_version_from_requirement(line)
                    if mod in blacklist:
                        logging.debug('skipping {0}: in blacklist'.format(mod))
                        continue
                    record(mod)
        else:
            # A bare module name.
            if mem in blacklist:
                logging.debug('skipping {0}: in blacklist'.format(mem))
                continue
            try:
                package_to_version[mem] = get_version(mem)
            except NoVersion:
                package_to_version[mem] = 'NO VERSION'
    print('')
    print('Versions:')
    if package_to_version:
        for key, val in sorted(package_to_version.items()):
            print(' {0}: {1}'.format(key, val))
    else:
        print(' <None>')
    print('')
    print('These have no discernable version:')
    if blacklist:
        for item in blacklist:
            print(' {0}'.format(item))
    else:
        print(' <None>')
"""
bleach: 1.1.x (c381a)
commonware: 0.4.2 (b5544)
django-appconf: 0.5 (d7ff3)
django-compressor: 1.2a2 (90966)
django-cronjobs: (cfda8)
django-mobility: (644e0)
django-mozilla-product-details: (5a59a)
django-multidb-router: (7e608)
django-nose: 1.0 (83c78)
django-session-csrf: (f00ad)
django-sha2: (3ba2b)
funfactory: (faca9)
jingo: (1dc0e)
jingo-minify: (d2ff3)
nuggets: (ce506)
schematic: (e7499)
test-utils: (3c221)
tower: (6112e)
"""
|
|
import zlib
import logging
from .lzw import lzwdecode
from .ascii85 import ascii85decode
from .ascii85 import asciihexdecode
from .runlength import rldecode
from .ccitt import ccittfaxdecode
from .psparser import PSException
from .psparser import PSObject
from .psparser import LIT
from . import settings
from .utils import apply_png_predictor
from .utils import isnumber
import six #Python 2+3 compatibility
# Module-level logger.
log = logging.getLogger(__name__)

# The /Crypt filter is recognized only so decode() can fail loudly on it.
LITERAL_CRYPT = LIT('Crypt')

# Abbreviation of Filter names in PDF 4.8.6. "Inline Images"
# Each tuple pairs the full filter name with its inline-image short form.
LITERALS_FLATE_DECODE = (LIT('FlateDecode'), LIT('Fl'))
LITERALS_LZW_DECODE = (LIT('LZWDecode'), LIT('LZW'))
LITERALS_ASCII85_DECODE = (LIT('ASCII85Decode'), LIT('A85'))
LITERALS_ASCIIHEX_DECODE = (LIT('ASCIIHexDecode'), LIT('AHx'))
LITERALS_RUNLENGTH_DECODE = (LIT('RunLengthDecode'), LIT('RL'))
LITERALS_CCITTFAX_DECODE = (LIT('CCITTFaxDecode'), LIT('CCF'))
LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
## PDF Objects
##
class PDFObject(PSObject):
    """Base class for all PDF-level objects."""
    pass


class PDFException(PSException):
    """Base class for errors raised by the PDF layer."""
    pass


class PDFTypeError(PDFException):
    """An object was not of the expected PDF type."""
    pass


class PDFValueError(PDFException):
    """An object carried an invalid value."""
    pass


class PDFObjectNotFound(PDFException):
    """An indirect object reference could not be resolved."""
    pass


class PDFNotImplementedError(PDFException):
    """A PDF feature (filter, predictor, ...) is not supported."""
    pass
## PDFObjRef
##
class PDFObjRef(PDFObject):
    """An indirect reference to an object stored in a PDF document."""

    def __init__(self, doc, objid, _):
        """Bind the reference to *doc* under object id *objid*.

        The third argument (the generation number) is accepted for
        call-site compatibility but intentionally ignored.
        """
        if objid == 0 and settings.STRICT:
            raise PDFValueError('PDF object id cannot be 0.')
        self.doc = doc
        self.objid = objid

    def __repr__(self):
        return '<PDFObjRef:%d>' % self.objid

    def resolve(self, default=None):
        """Look the referenced object up in the document.

        Returns *default* when the object cannot be found.
        """
        try:
            return self.doc.getobj(self.objid)
        except PDFObjectNotFound:
            return default
# resolve
def resolve1(x, default=None):
    """Resolve *x* until it is no longer an indirect reference.

    A resolved array or dictionary may still contain indirect objects
    nested inside it.
    """
    result = x
    while isinstance(result, PDFObjRef):
        result = result.resolve(default=default)
    return result
def resolve_all(x, default=None):
    """Recursively resolve *x* and every object nested inside it.

    Guarantees no indirect reference remains within the returned object.
    This procedure might be slow.
    """
    while isinstance(x, PDFObjRef):
        x = x.resolve(default=default)
    if isinstance(x, list):
        x = [resolve_all(v, default=default) for v in x]
    elif isinstance(x, dict):
        # Fix: dict.iteritems() does not exist on Python 3; .items() works
        # on both versions.  Rebinding values of existing keys while
        # iterating is safe because the key set never changes.
        for (k, v) in x.items():
            x[k] = resolve_all(v, default=default)
    return x
def decipher_all(decipher, objid, genno, x):
    """Recursively decipher every byte string buried inside *x*."""
    if isinstance(x, bytes):
        return decipher(objid, genno, x)
    if isinstance(x, list):
        return [decipher_all(decipher, objid, genno, item) for item in x]
    if isinstance(x, dict):
        # Dictionaries are deciphered in place, key set unchanged.
        for key in x:
            x[key] = decipher_all(decipher, objid, genno, x[key])
    return x
# Type checking
def int_value(x):
    """Resolve *x* and coerce it to int.

    Returns 0 (or raises PDFTypeError in STRICT mode) when *x* does not
    resolve to an integer.
    """
    x = resolve1(x)
    if isinstance(x, int):
        return x
    if settings.STRICT:
        raise PDFTypeError('Integer required: %r' % x)
    return 0
def float_value(x):
    """Resolve *x* and coerce it to float.

    Returns 0.0 (or raises PDFTypeError in STRICT mode) when *x* does not
    resolve to a float.
    """
    x = resolve1(x)
    if isinstance(x, float):
        return x
    if settings.STRICT:
        raise PDFTypeError('Float required: %r' % x)
    return 0.0
def num_value(x):
    """Resolve *x* and coerce it to a number (int or float).

    Returns 0 (or raises PDFTypeError in STRICT mode) when *x* does not
    resolve to a numeric value.
    """
    x = resolve1(x)
    if isnumber(x):
        return x
    if settings.STRICT:
        raise PDFTypeError('Int or Float required: %r' % x)
    return 0
def str_value(x):
    """Resolve *x* and coerce it to a byte string.

    Fix: the non-STRICT fallback now returns ``b''`` instead of the text
    string ``''`` so the return type matches the ``six.binary_type`` check
    above.  On Python 2 ``b''`` and ``''`` are the same object type, so
    behavior there is unchanged; on Python 3 callers now consistently get
    bytes.
    """
    x = resolve1(x)
    if not isinstance(x, six.binary_type):
        if settings.STRICT:
            raise PDFTypeError('String required: %r' % x)
        return b''
    return x
def list_value(x):
    """Resolve *x* and coerce it to a list or tuple.

    Returns [] (or raises PDFTypeError in STRICT mode) when *x* does not
    resolve to a sequence.
    """
    x = resolve1(x)
    if isinstance(x, (list, tuple)):
        return x
    if settings.STRICT:
        raise PDFTypeError('List required: %r' % x)
    return []
def dict_value(x):
    """Resolve *x* and coerce it to a dict.

    Returns {} (or logs and raises PDFTypeError in STRICT mode) when *x*
    does not resolve to a dictionary.
    """
    x = resolve1(x)
    if isinstance(x, dict):
        return x
    if settings.STRICT:
        log.error('PDFTypeError : Dict required: %r', x)
        raise PDFTypeError('Dict required: %r' % x)
    return {}
def stream_value(x):
    """Resolve *x* and coerce it to a PDFStream.

    Returns an empty stream (or raises PDFTypeError in STRICT mode) when
    *x* does not resolve to a stream object.
    """
    x = resolve1(x)
    if isinstance(x, PDFStream):
        return x
    if settings.STRICT:
        raise PDFTypeError('PDFStream required: %r' % x)
    return PDFStream({}, b'')
## PDFStream type
##
class PDFStream(PDFObject):
    """A PDF stream object: an attribute dictionary plus raw byte data.

    The raw payload may be encrypted (handled via *decipher*) and/or
    encoded with a chain of PDF filters; decode() lazily produces the
    decoded bytes in self.data and drops the raw form.
    """

    def __init__(self, attrs, rawdata, decipher=None):
        # attrs: the stream's attribute dictionary (/Length, /Filter, ...).
        # rawdata: the undecoded (possibly encrypted) byte payload.
        # decipher: optional callable(objid, genno, data, attrs) -> bytes.
        assert isinstance(attrs, dict), str(type(attrs))
        self.attrs = attrs
        self.rawdata = rawdata
        self.decipher = decipher
        self.data = None   # decoded payload, filled in by decode()
        self.objid = None  # set later via set_objid()
        self.genno = None
        return

    def set_objid(self, objid, genno):
        """Record the stream's object/generation ids (needed for decryption)."""
        self.objid = objid
        self.genno = genno
        return

    def __repr__(self):
        # Show raw length before decoding, decoded length afterwards.
        if self.data is None:
            assert self.rawdata is not None
            return '<PDFStream(%r): raw=%d, %r>' % (self.objid, len(self.rawdata), self.attrs)
        else:
            assert self.data is not None
            return '<PDFStream(%r): len=%d, %r>' % (self.objid, len(self.data), self.attrs)

    def __contains__(self, name):
        return name in self.attrs

    def __getitem__(self, name):
        return self.attrs[name]

    def get(self, name, default=None):
        return self.attrs.get(name, default)

    def get_any(self, names, default=None):
        """Return the value of the first attribute in *names* that exists."""
        for name in names:
            if name in self.attrs:
                return self.attrs[name]
        return default

    def get_filters(self):
        """Return the stream's filter chain as a list of (filter, params) pairs."""
        filters = self.get_any(('F', 'Filter'))
        params = self.get_any(('DP', 'DecodeParms', 'FDecodeParms'), {})
        if not filters:
            return []
        if not isinstance(filters, list):
            filters = [filters]
        if not isinstance(params, list):
            # Make sure the parameters list is the same as filters.
            params = [params] * len(filters)
        if settings.STRICT and len(params) != len(filters):
            raise PDFException("Parameters len filter mismatch")
        return list(zip(filters, params)) #solves https://github.com/pdfminer/pdfminer.six/issues/15

    def decode(self):
        """Decrypt (if needed) and apply every filter, populating self.data."""
        assert self.data is None and self.rawdata is not None, str((self.data, self.rawdata))
        data = self.rawdata
        if self.decipher:
            # Handle encryption
            data = self.decipher(self.objid, self.genno, data, self.attrs)
        filters = self.get_filters()
        if not filters:
            self.data = data
            self.rawdata = None
            return
        # Filters are applied in declaration order; each one's optional
        # predictor is applied immediately after that filter.
        for (f,params) in filters:
            if f in LITERALS_FLATE_DECODE:
                # will get errors if the document is encrypted.
                try:
                    data = zlib.decompress(data)
                except zlib.error as e:
                    if settings.STRICT:
                        raise PDFException('Invalid zlib bytes: %r, %r' % (e, data))
                    data = b''
            elif f in LITERALS_LZW_DECODE:
                data = lzwdecode(data)
            elif f in LITERALS_ASCII85_DECODE:
                data = ascii85decode(data)
            elif f in LITERALS_ASCIIHEX_DECODE:
                data = asciihexdecode(data)
            elif f in LITERALS_RUNLENGTH_DECODE:
                data = rldecode(data)
            elif f in LITERALS_CCITTFAX_DECODE:
                data = ccittfaxdecode(data, params)
            elif f in LITERALS_DCT_DECODE:
                # This is probably a JPG stream - it does not need to be decoded twice.
                # Just return the stream to the user.
                pass
            elif f == LITERAL_CRYPT:
                # not yet..
                raise PDFNotImplementedError('/Crypt filter is unsupported')
            else:
                raise PDFNotImplementedError('Unsupported filter: %r' % f)
            # apply predictors
            if params and 'Predictor' in params:
                pred = int_value(params['Predictor'])
                if pred == 1:
                    # no predictor
                    pass
                elif 10 <= pred:
                    # PNG predictor
                    colors = int_value(params.get('Colors', 1))
                    columns = int_value(params.get('Columns', 1))
                    bitspercomponent = int_value(params.get('BitsPerComponent', 8))
                    data = apply_png_predictor(pred, colors, columns, bitspercomponent, data)
                else:
                    raise PDFNotImplementedError('Unsupported predictor: %r' % pred)
        self.data = data
        self.rawdata = None
        return

    def get_data(self):
        """Return the decoded payload, decoding lazily on first access."""
        if self.data is None:
            self.decode()
        return self.data

    def get_rawdata(self):
        """Return the raw (undecoded) payload; None once decode() has run."""
        return self.rawdata
|
|
"""
Test various information theory inequalities.
"""
from hypothesis import given, settings
import pytest
import numpy as np
from dit.utils.testing import distributions, markov_chains
from dit import ScalarDistribution as SD
from dit.divergences import (chernoff_information,
hellinger_distance,
hypercontractivity_coefficient,
maximum_correlation,
relative_entropy,
variational_distance,
)
from dit.helpers import normalize_pmfs
from dit.multivariate import (entropy as H,
total_correlation as I,
gk_common_information as K,
wyner_common_information as C,
exact_common_information as G,
)
epsilon = 1e-4  # slack for floating-point comparisons in the assertions below
@given(dist=distributions())
def test_entropy_upper_bound(dist):
    """
    H(X) <= log(|X|)
    """
    log_alphabet = np.log2(len(dist.outcomes))
    assert H(dist) <= log_alphabet + epsilon
@given(dist1=distributions(alphabets=(10,)), dist2=distributions(alphabets=(10,)))
def test_pinskers_inequality(dist1, dist2):
    """
    DKL(p||q) >= V(p||q)**2 / (2log(2))
    """
    v = variational_distance(dist1, dist2)
    lower_bound = v ** 2 / (2 * np.log(2))
    assert relative_entropy(dist1, dist2) >= lower_bound - epsilon
@given(dist=distributions(alphabets=(10, 10), nondegenerate=True))
def test_fanos_inequality(dist):
    """
    H(X|Y) <= hb(P_e) + P_e log(|X| - 1)
    """
    marginal_x = SD.from_distribution(dist.marginal([0]))
    marginal_y = SD.from_distribution(dist.marginal([1]))
    alphabet_size = len(set().union(marginal_x.outcomes, marginal_y.outcomes))
    agreement = marginal_x == marginal_y
    p_err = agreement[False] if False in agreement else 0
    binary_entropy = H(SD([p_err, 1 - p_err]))
    cond_entropy = H(dist, [0], [1])
    assert cond_entropy <= binary_entropy + p_err * np.log2(alphabet_size - 1) + epsilon
@given(dist=distributions(alphabets=((2, 4),) * 4))
def test_entropy_subadditivity(dist):
    """
    H(X1, ...) <= sum(H(X_i))
    """
    marginal_sum = sum(H(dist.marginal(rv)) for rv in dist.rvs)
    assert H(dist) <= marginal_sum + epsilon
@given(dist1=distributions(alphabets=(10,)), dist2=distributions(alphabets=(10,)))
def test_gibbs_inequality(dist1, dist2):
    """
    DKL(p||q) >= 0
    """
    assert relative_entropy(dist1, dist2) >= -epsilon
@given(dist=distributions(alphabets=((2, 4),) * 2))
def test_conditional_entropy(dist):
    """
    H(X|Y) <= H(X)
    """
    # Conditioning never increases entropy.
    assert H(dist, [0], [1]) <= H(dist, [0]) + epsilon
@given(dist=distributions(alphabets=((2, 4),) * 3))
def test_shannon_inequality(dist):
    """
    I(X:Y|Z) >= 0
    """
    assert I(dist, [[0], [1]], [2]) >= -epsilon
@given(dist=distributions(alphabets=((2, 4),) * 4))
def test_zhang_yeung_inequality(dist):
    """
    2I(C:D) <= I(A:B)+I(A:CD)+3I(C:D|A)+I(C:D|B)
    """
    lhs = 2 * I(dist, [[2], [3]])
    rhs = (I(dist, [[0], [1]])
           + I(dist, [[0], [2, 3]])
           + 3 * I(dist, [[2], [3]], [0])
           + I(dist, [[2], [3]], [1]))
    assert lhs <= rhs + epsilon
@given(dist=markov_chains(alphabets=((2, 4),) * 3))
def test_data_processing_inequality(dist):
    """
    given X - Y - Z:
    I(X:Z) <= I(X:Y)
    """
    near = I(dist, [[0], [1]])
    far = I(dist, [[0], [2]])
    assert far <= near + epsilon
@given(dist=markov_chains(alphabets=((2, 4),) * 3))
def test_data_processing_inequality_mc(dist):
    """
    given X - Y - Z:
    rho(X:Z) <= rho(X:Y)
    """
    near = maximum_correlation(dist, [[0], [1]])
    far = maximum_correlation(dist, [[0], [2]])
    assert far <= near + epsilon
@given(dist=markov_chains(alphabets=((2, 4),) * 3))
def test_data_processing_inequality_gk(dist):
    """
    given X - Y - Z:
    K(X:Z) <= K(X:Y)
    """
    near = K(dist, [[0], [1]])
    far = K(dist, [[0], [2]])
    assert far <= near + epsilon
@pytest.mark.slow
@pytest.mark.flaky(reruns=5)
@given(dist=markov_chains(alphabets=(2,) * 3))
@settings(max_examples=5)
def test_data_processing_inequality_wyner(dist):
    """
    given X - Y - Z:
    C(X:Z) <= C(X:Y)
    """
    near = C(dist, [[0], [1]], niter=100)
    far = C(dist, [[0], [2]], niter=100)
    # Wider slack: the optimization is approximate.
    assert far <= near + 10 * epsilon
@pytest.mark.slow
@pytest.mark.flaky(reruns=5)
@given(dist=markov_chains(alphabets=(2,) * 3))
@settings(max_examples=5)
def test_data_processing_inequality_exact(dist):
    """
    given X - Y - Z:
    G(X:Z) <= G(X:Y)
    """
    near = G(dist, [[0], [1]], niter=100)
    far = G(dist, [[0], [2]], niter=100)
    # Wider slack: the optimization is approximate.
    assert far <= near + 10 * epsilon
@given(dist=distributions(alphabets=((2, 4),) * 2))
def test_max_correlation_mutual_information(dist):
    """
    (p_min * rho(X:Y))^2 <= (2 ln 2)I(X:Y)
    """
    smallest_mass = dist.marginal([0]).pmf.min()
    correlation = maximum_correlation(dist, [[0], [1]])
    mutual_info = I(dist, [[0], [1]])
    assert (smallest_mass * correlation) ** 2 <= (2 * np.log(2)) * mutual_info + epsilon
@given(dist1=distributions(alphabets=(10,)), dist2=distributions(alphabets=(10,)))
def test_hellinger_variational(dist1, dist2):
    """
    H^2(p||q) <= V(p||q) <= sqrt(2)*H(p||q)
    """
    hellinger = hellinger_distance(dist1, dist2)
    variational = variational_distance(dist1, dist2)
    assert hellinger ** 2 <= variational + epsilon
    assert variational <= np.sqrt(2) * hellinger + epsilon
@given(dist1=distributions(alphabets=(10,), nondegenerate=True),
       dist2=distributions(alphabets=(10,), nondegenerate=True))
def test_chernoff_inequalities(dist1, dist2):
    """
    1/8 sum p_i ((q_i - p_i)/max(p_i, q_i))^2 <= 1 - 2^(-C)
                 <= 1/8 sum p_i ((q_i - p_i)/min(p_i, q_i))^2
    """
    p, q = normalize_pmfs(dist1, dist2)
    stacked = np.vstack([p, q])
    chernoff = chernoff_information(dist1, dist2)
    middle = 1 - 2 ** (-chernoff)
    lower = (p * ((q - p) / stacked.max(axis=0)) ** 2).sum() / 8
    upper = (p * ((q - p) / stacked.min(axis=0)) ** 2).sum() / 8
    assert lower <= middle + epsilon
    assert middle <= upper + epsilon
@pytest.mark.slow
@given(dist=markov_chains(alphabets=(2,) * 3))
def test_mi_hc(dist):
    """
    given U - X - Y:
    I[U:Y] <= s*(X||Y)*I[U:X]
    """
    lhs = I(dist, [[0], [2]])
    coefficient = hypercontractivity_coefficient(dist, [[1], [2]], niter=20)
    rhs = coefficient * I(dist, [[0], [1]])
    assert lhs <= rhs + epsilon
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import concurrent.futures
from oslo_config import cfg
import six.moves
from testtools import matchers
import oslo_messaging
from oslo_messaging.tests.functional import utils
class CallTestCase(utils.SkipIfNoTransportURL):
    """Functional tests for two-way (call) RPC invocations."""

    def setUp(self):
        super(CallTestCase, self).setUp(conf=cfg.ConfigOpts())
        self.conf.prog = "test_prog"
        self.conf.project = "test_project"
        # Disable rabbit heartbeats so short-lived test connections are
        # not torn down mid-test.
        self.config(heartbeat_timeout_threshold=0,
                    group='oslo_messaging_rabbit')

    def test_specific_server(self):
        # Only the targeted server (index 1) should accumulate state.
        group = self.useFixture(utils.RpcServerGroupFixture(
            self.conf, self.url)
        )
        client = group.client(1)
        client.append(text='open')
        self.assertEqual('openstack', client.append(text='stack'))
        client.add(increment=2)
        self.assertEqual(12, client.add(increment=10))
        self.assertEqual(9, client.subtract(increment=3))
        self.assertEqual('openstack', group.servers[1].endpoint.sval)
        self.assertEqual(9, group.servers[1].endpoint.ival)
        for i in [0, 2]:
            self.assertEqual('', group.servers[i].endpoint.sval)
            self.assertEqual(0, group.servers[i].endpoint.ival)

    def test_server_in_group(self):
        # Round-robin-style delivery: every server should see some of the
        # appended characters, and together they see all of them.
        group = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url)
        )
        client = group.client()
        data = [c for c in 'abcdefghijklmn']
        for i in data:
            client.append(text=i)
        for s in group.servers:
            self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
        actual = [[c for c in s.endpoint.sval] for s in group.servers]
        self.assertThat(actual, utils.IsValidDistributionOf(data))

    def test_different_exchanges(self):
        # If the different exchanges are not honoured, then the
        # teardown may hang unless we broadcast all control messages
        # to each server
        group1 = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url,
                                        use_fanout_ctrl=True))
        group2 = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url, exchange="a",
                                        use_fanout_ctrl=True))
        group3 = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url, exchange="b",
                                        use_fanout_ctrl=True))
        client1 = group1.client(1)
        data1 = [c for c in 'abcdefghijklmn']
        for i in data1:
            client1.append(text=i)
        client2 = group2.client()
        data2 = [c for c in 'opqrstuvwxyz']
        for i in data2:
            client2.append(text=i)
        # group1: only the addressed server (index 1) received data1.
        actual1 = [[c for c in s.endpoint.sval] for s in group1.servers]
        self.assertThat(actual1, utils.IsValidDistributionOf(data1))
        actual1 = [c for c in group1.servers[1].endpoint.sval]
        self.assertThat([actual1], utils.IsValidDistributionOf(data1))
        for s in group1.servers:
            expected = len(data1) if group1.servers.index(s) == 1 else 0
            self.assertEqual(expected, len(s.endpoint.sval))
            self.assertEqual(0, s.endpoint.ival)
        # group2: data2 was spread over its servers; group3 saw nothing.
        actual2 = [[c for c in s.endpoint.sval] for s in group2.servers]
        for s in group2.servers:
            self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
            self.assertEqual(0, s.endpoint.ival)
        self.assertThat(actual2, utils.IsValidDistributionOf(data2))
        for s in group3.servers:
            self.assertEqual(0, len(s.endpoint.sval))
            self.assertEqual(0, s.endpoint.ival)

    def test_timeout(self):
        # A call to a topic nobody serves must raise MessagingTimeout.
        transport = self.useFixture(
            utils.TransportFixture(self.conf, self.url)
        )
        target = oslo_messaging.Target(topic="no_such_topic")
        c = utils.ClientStub(transport.transport, target, timeout=1)
        self.assertThat(c.ping,
                        matchers.raises(oslo_messaging.MessagingTimeout))

    def test_exception(self):
        # Server-side exceptions propagate back to the caller.
        group = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url)
        )
        client = group.client(1)
        client.add(increment=2)
        self.assertRaises(ValueError, client.subtract, increment=3)

    def test_timeout_with_concurrently_queues(self):
        # A long-running call timing out must not disturb short calls
        # issued concurrently over the same transport.
        transport = self.useFixture(
            utils.TransportFixture(self.conf, self.url)
        )
        target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()),
                                       server="server_" + str(uuid.uuid4()))
        server = self.useFixture(
            utils.RpcServerFixture(self.conf, self.url, target,
                                   executor="threading"))
        client = utils.ClientStub(transport.transport, target,
                                  cast=False, timeout=5)

        def short_periodical_tasks():
            for i in range(10):
                client.add(increment=1)
                time.sleep(1)

        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            future = executor.submit(client.long_running_task, seconds=10)
            executor.submit(short_periodical_tasks)
            self.assertRaises(oslo_messaging.MessagingTimeout, future.result)
            self.assertEqual(10, server.endpoint.ival)
class CastTestCase(utils.SkipIfNoTransportURL):
    """Functional tests for one-way (cast) RPC invocations."""

    # Note: casts return immediately, so these tests utilise a special
    # internal sync() cast to ensure prior casts are complete before
    # making the necessary assertions.

    def test_specific_server(self):
        # Only the targeted server (index 1) should accumulate state.
        group = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url)
        )
        client = group.client(1, cast=True)
        client.append(text='open')
        client.append(text='stack')
        client.add(increment=2)
        client.add(increment=10)
        client.sync()
        group.sync(1)
        self.assertEqual('openstack', group.servers[1].endpoint.sval)
        self.assertEqual(12, group.servers[1].endpoint.ival)
        for i in [0, 2]:
            self.assertEqual('', group.servers[i].endpoint.sval)
            self.assertEqual(0, group.servers[i].endpoint.ival)

    def test_server_in_group(self):
        # 20 increments are spread across the group; each server gets
        # some but not all, and the total adds up.
        if self.url.startswith("amqp:"):
            self.skipTest("QPID-6307")
        group = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url)
        )
        client = group.client(cast=True)
        for i in range(20):
            client.add(increment=1)
        for i in range(len(group.servers)):
            # expect each server to get a sync
            client.sync()
        group.sync(server="all")
        total = 0
        for s in group.servers:
            ival = s.endpoint.ival
            self.assertThat(ival, matchers.GreaterThan(0))
            self.assertThat(ival, matchers.LessThan(20))
            total += ival
        self.assertEqual(20, total)

    def test_fanout(self):
        # Fanout casts reach every server in the group.
        group = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.url)
        )
        client = group.client('all', cast=True)
        client.append(text='open')
        client.append(text='stack')
        client.add(increment=2)
        client.add(increment=10)
        client.sync()
        group.sync(server='all')
        for s in group.servers:
            self.assertEqual('openstack', s.endpoint.sval)
            self.assertEqual(12, s.endpoint.ival)
class NotifyTestCase(utils.SkipIfNoTransportURL):
    """Functional tests for the notification listener machinery."""

    # NOTE(sileht): Each test must not use the same topics
    # to be run in parallel

    def test_simple(self):
        # One notifier, one listener: the event arrives intact as a
        # (priority, event_type, payload, publisher) tuple.
        listener = self.useFixture(
            utils.NotificationFixture(self.conf, self.url, ['test_simple']))
        notifier = listener.notifier('abc')
        notifier.info({}, 'test', 'Hello World!')
        event = listener.events.get(timeout=1)
        self.assertEqual('info', event[0])
        self.assertEqual('test', event[1])
        self.assertEqual('Hello World!', event[2])
        self.assertEqual('abc', event[3])

    def test_multiple_topics(self):
        # One listener on two topics receives from both publishers.
        listener = self.useFixture(
            utils.NotificationFixture(self.conf, self.url, ['a', 'b']))
        a = listener.notifier('pub-a', topic='a')
        b = listener.notifier('pub-b', topic='b')
        sent = {
            'pub-a': [a, 'test-a', 'payload-a'],
            'pub-b': [b, 'test-b', 'payload-b']
        }
        for e in sent.values():
            e[0].info({}, e[1], e[2])
        received = {}
        while len(received) < len(sent):
            e = listener.events.get(timeout=1)
            received[e[3]] = e
        for key in received:
            actual = received[key]
            expected = sent[key]
            self.assertEqual('info', actual[0])
            self.assertEqual(expected[1], actual[1])
            self.assertEqual(expected[2], actual[2])

    def test_multiple_servers(self):
        # Two listeners on the same topic share the event stream: each
        # gets some events and together they get all of them.
        if self.url.startswith("amqp:"):
            self.skipTest("QPID-6307")
        if self.url.startswith("zmq:"):
            self.skipTest("ZeroMQ-PUB-SUB")
        listener_a = self.useFixture(
            utils.NotificationFixture(self.conf, self.url, ['test-topic']))
        listener_b = self.useFixture(
            utils.NotificationFixture(self.conf, self.url, ['test-topic']))
        n = listener_a.notifier('pub')
        events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh']
        for event_type, payload in events_out:
            n.info({}, event_type, payload)
        events_in = [[(e[1], e[2]) for e in listener_a.get_events()],
                     [(e[1], e[2]) for e in listener_b.get_events()]]
        self.assertThat(events_in, utils.IsValidDistributionOf(events_out))
        for stream in events_in:
            self.assertThat(len(stream), matchers.GreaterThan(0))

    def test_independent_topics(self):
        # Listeners on distinct topics see only their own publisher's
        # events, in order.
        listener_a = self.useFixture(
            utils.NotificationFixture(self.conf, self.url, ['1']))
        listener_b = self.useFixture(
            utils.NotificationFixture(self.conf, self.url, ['2']))
        a = listener_a.notifier('pub-1', topic='1')
        b = listener_b.notifier('pub-2', topic='2')
        a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh']
        for event_type, payload in a_out:
            a.info({}, event_type, payload)
        b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop']
        for event_type, payload in b_out:
            b.info({}, event_type, payload)
        for expected in a_out:
            actual = listener_a.events.get(timeout=0.5)
            self.assertEqual('info', actual[0])
            self.assertEqual(expected[0], actual[1])
            self.assertEqual(expected[1], actual[2])
            self.assertEqual('pub-1', actual[3])
        for expected in b_out:
            actual = listener_b.events.get(timeout=0.5)
            self.assertEqual('info', actual[0])
            self.assertEqual(expected[0], actual[1])
            self.assertEqual(expected[1], actual[2])
            self.assertEqual('pub-2', actual[3])

    def test_all_categories(self):
        # Every notification priority level is delivered.
        listener = self.useFixture(utils.NotificationFixture(
            self.conf, self.url, ['test_all_categories']))
        n = listener.notifier('abc')
        cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical']
        events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats]
        for e in events:
            e[0]({}, e[2], e[3])
        # order between events with different categories is not guaranteed
        received = {}
        for expected in events:
            e = listener.events.get(timeout=1)
            received[e[0]] = e
        for expected in events:
            actual = received[expected[1]]
            self.assertEqual(expected[1], actual[0])
            self.assertEqual(expected[2], actual[1])
            self.assertEqual(expected[3], actual[2])

    def test_simple_batch(self):
        # 205 notifications with batch_size=100 arrive as 100 + 100 + 5.
        listener = self.useFixture(
            utils.BatchNotificationFixture(self.conf, self.url,
                                           ['test_simple_batch'],
                                           batch_size=100, batch_timeout=2))
        notifier = listener.notifier('abc')
        for i in six.moves.range(0, 205):
            notifier.info({}, 'test%s' % i, 'Hello World!')
        events = listener.get_events(timeout=3)
        self.assertEqual(3, len(events), events)
        self.assertEqual(100, len(events[0][1]))
        self.assertEqual(100, len(events[1][1]))
        self.assertEqual(5, len(events[2][1]))
|
|
# -*- coding: utf-8 -*-
"""
Django settings for contmon project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import os
import environ
ROOT_DIR = environ.Path(__file__) - 3  # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('contmon')  # project application package

env = environ.Env()  # reads configuration from environment variables

# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'crispy_forms',  # Form layouts
    'allauth',  # registration
    'allauth.account',  # registration
    'allauth.socialaccount',  # registration
    'haystack',
    'rest_framework',
    'reversion',
    'reversion_compare',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'contmon.users',
    'contmon.content',
    'contmon.scraper',
    # Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    # Make sure djangosecure.middleware.SecurityMiddleware is listed first
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'reversion.middleware.RevisionMiddleware',
)

# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'contmon.contrib.sites.migrations'
}

# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)

# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)

# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')

# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("""daniel""", 'daniel@brandverity.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
    'default': env.db("DATABASE_URL", default="mysql://root@localhost:3306/contmon"),
}
# Wrap every request in a transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True

# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'allauth.account.context_processors.account',
                'allauth.socialaccount.context_processors.socialaccount',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]

# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'

# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'

# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'

# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'

# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'content:card-reviewer'
LOGIN_URL = 'account_login'

# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'

# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Your common stuff: Below this line define 3rd party library settings
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
    },
}
REST_FRAMEWORK = {
    # 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
    'PAGE_SIZE': 150
}
|
|
#!/usr/bin/env python
import numpy as np
from dynamics import LinearDynamics
from distance import EuclideanDist
class FeatureBase(object):
    """Abstract interface for trajectory cost/reward features.

    Subclasses supply the feature value ``f`` together with its first and
    second derivatives; calling an instance delegates straight to ``f``.
    """

    def __init__(self):
        pass

    def _abstract(self):
        # Shared failure path for all unimplemented interface methods.
        raise Exception("method must be implemented by derived classes!")

    def __call__(self, *args):
        # Instances are callable and evaluate the feature itself.
        return self.f(*args)

    def f(self, *args):
        self._abstract()

    def grad(self, *args):
        self._abstract()

    def hessian(self, *args):
        self._abstract()
class Velocity(FeatureBase):
    """Quadratic control-effort feature: the sum of squared controls."""

    def f(self, x, u, xr, ur):
        """Return sum of squared control inputs.

        :param x: Tx(|A|x|X|) matrix (unused)
        :param u: Tx(|A|x|U|) matrix
        """
        return np.sum(u * u)

    def grad(self, x, u, xr, ur):
        """
        :return: Tx|A|x|U| gradient vector with respect to u
        """
        return 2.0 * np.ravel(u)

    def hessian(self, x, u, xr, ur):
        """
        :return: (Tx|A|x|U|)^2 constant Hessian with respect to u
        """
        return np.diag(np.full(u.size, 2.0))
class Acceleration(FeatureBase):
    """Quadratic acceleration penalty over a control trajectory.

    Accelerations are finite differences of successive controls; the first
    step is differenced against the fixed previous control ``u0``.
    """

    def __init__(self, u0, dt):
        super(Acceleration, self).__init__()
        self.u0 = u0  # control applied just before the horizon starts
        self.dt = dt  # time step used for the finite differences

    def _accels(self, u):
        """Return (inner, first): within-horizon and initial accelerations."""
        return np.diff(u, axis=0) / self.dt, (u[0] - self.u0) / self.dt

    def f(self, x, u, xr, ur):
        """Sum of squared accelerations, including the initial step."""
        inner, first = self._accels(u)
        return np.sum(np.square(inner)) + np.sum(np.square(first))

    def grad(self, x, u, xr, ur):
        """Gradient of ``f`` with respect to the flattened controls."""
        inner, first = self._accels(u)
        rows = np.vstack((first - inner[0], -np.diff(inner, axis=0), inner[-1]))
        return 2.0 / self.dt * rows.flatten()

    def hessian(self, x, u, xr, ur):
        """Constant banded Hessian of ``f`` with respect to the controls."""
        steps, width = u.shape
        span = (steps - 1) * width
        # main diagonal: 4/dt^2 everywhere except 2/dt^2 on the last block
        diag = np.hstack((np.full(span, 4.0), np.full(width, 2.0))) / self.dt**2
        # off diagonals one control-block away
        off = np.full(span, -2.0) / self.dt**2
        return np.diag(diag) + np.diag(off, k=width) + np.diag(off, k=-width)
class GoalReward(FeatureBase):
    """Exponentially decaying reward centered at each agent's goal."""

    def __init__(self, dyn, x_goal, R):
        """
        :param dyn: dynamics object (dyn.compute() is assumed to be called already)
        :param x_goal: |A|x|X| matrix of per-agent goals
        :param R: decay radius of the reward
        """
        super(GoalReward, self).__init__()
        self.dyn = dyn
        self.x_goal = x_goal
        self.R2 = R**2
        self.nA, self.nX = x_goal.shape
        self.T = None
        # per-(time, agent) reward values, cached by f() for the derivatives
        self.r_matrix = None

    def _cols(self, a):
        # Column range of agent ``a`` inside the stacked state vector.
        return slice(a * self.nX, (a + 1) * self.nX)

    def f(self, x, u, xr, ur):
        self.T = x.shape[0]
        self.r_matrix = np.zeros((self.T, self.nA))
        for a in range(self.nA):
            offset = x[:, self._cols(a)] - self.x_goal[a]
            self.r_matrix[:, a] = np.exp(-np.sum(np.square(offset), axis=1) / self.R2)
        return np.sum(self.r_matrix)

    def grad(self, x, u, xr, ur):
        # Chain rule through the dynamics Jacobian.
        J = self.dyn.jacobian()
        return np.dot(J.transpose(), self.grad_x(x, u, xr, ur))

    def hessian(self, x, u, xr, ur):
        J = self.dyn.jacobian()
        return np.dot(J.transpose(), np.dot(self.hessian_x(x, u, xr, ur), J))

    def grad_x(self, x, u, xr, ur):
        # refresh the cached reward matrix before differentiating
        self.f(x, u, xr, ur)
        grad = np.zeros_like(x)
        for a in range(self.nA):
            cols = self._cols(a)
            toward_goal = self.x_goal[a] - x[:, cols]
            grad[:, cols] = self.r_matrix[:, a:(a + 1)] * (2.0 / self.R2 * toward_goal)
        return grad.flatten()

    def hessian_x(self, x, u, xr, ur):
        # refresh the cached reward matrix before differentiating
        self.f(x, u, xr, ur)
        hess = np.zeros((x.size, x.size))
        width = self.nA * self.nX
        for t in range(len(x)):
            for a in range(self.nA):
                h0 = t * width + a * self.nX
                offset = x[t, self._cols(a)] - self.x_goal[a]
                block = self.r_matrix[t, a] * 4.0 / self.R2**2 * np.outer(offset, offset)
                block += -2.0 / self.R2 * self.r_matrix[t, a] * np.eye(self.nX)
                hess[h0:h0 + self.nX, h0:h0 + self.nX] = block
        return hess
class GoalRewardLinear(FeatureBase):
    """Linear goal cost: the sum of Euclidean distances to each agent's goal."""

    def __init__(self, dyn, x_goal):
        """
        :param dyn: dynamics object (dyn.compute() is assumed to be called already)
        :param x_goal: |A|x|X| matrix of per-agent goals
        """
        super(GoalRewardLinear, self).__init__()
        self.dyn = dyn
        self.x_goal = x_goal
        self.nA, self.nX = x_goal.shape
        self.T = None
        # kept for interface parity with GoalReward; unused here
        self.r_matrix = None

    def goal_dists(self, x):
        """Return the Tx|A| matrix of per-agent distances to goal."""
        self.T = x.shape[0]
        dists = np.zeros((self.T, self.nA))
        for a in range(self.nA):
            cols = slice(a * self.nX, (a + 1) * self.nX)
            dists[:, a] = np.linalg.norm(x[:, cols] - self.x_goal[a], axis=1)
        return dists

    def f(self, x, u, xr, ur):
        return np.sum(self.goal_dists(x))

    def grad(self, x, u, xr, ur):
        # Chain rule through the dynamics Jacobian.
        J = self.dyn.jacobian()
        return np.dot(J.transpose(), self.grad_x(x, u, xr, ur))

    def hessian(self, x, u, xr, ur):
        J = self.dyn.jacobian()
        return np.dot(J.transpose(), np.dot(self.hessian_x(x, u, xr, ur), J))

    def grad_x(self, x, u, xr, ur):
        # NOTE: a distance of exactly zero makes this division blow up;
        # callers are expected to stay off the goal point.
        dists = self.goal_dists(x)
        grad = np.zeros_like(x)
        for a in range(self.nA):
            cols = slice(a * self.nX, (a + 1) * self.nX)
            grad[:, cols] = (x[:, cols] - self.x_goal[a]) / dists[:, a:(a + 1)]
        return grad.flatten()

    def hessian_x(self, x, u, xr, ur):
        dists = self.goal_dists(x)
        hess = np.zeros((x.size, x.size))
        width = self.nA * self.nX
        for t in range(len(x)):
            for a in range(self.nA):
                h0 = t * width + a * self.nX
                offset = x[t, a * self.nX:(a + 1) * self.nX] - self.x_goal[a]
                block = -np.outer(offset, offset) / dists[t, a]**3
                block += 1.0 / dists[t, a] * np.eye(self.nX)
                hess[h0:h0 + self.nX, h0:h0 + self.nX] = block
        return hess
class TerminationReward(FeatureBase):
    """Terminal cost: distance between the final state and the goals."""

    def __init__(self, dyn, x_goal):
        """
        :param dyn: dynamics object (dyn.compute() is assumed to be called already)
        :param x_goal: |A|x|X| matrix of per-agent goals
        """
        super(TerminationReward, self).__init__()
        self.dyn = dyn
        self.x_goal = x_goal
        self.nA, self.nX = x_goal.shape
        self.T = None

    def f(self, x, u, xr, ur):
        # Norm of the offset between the last state and the goal matrix.
        # NOTE(review): the broadcast only lines up when nA == 1 — confirm.
        final = x[x.shape[0] - 1]
        return np.linalg.norm(final - self.x_goal)

    def grad(self, x, u, xr, ur):
        horizon = x.shape[0]
        grad_x = np.zeros_like(x[horizon - 1])
        for a in range(self.nA):
            cols = slice(a * self.nX, (a + 1) * self.nX)
            offset = x[horizon - 1, cols] - self.x_goal[a]
            grad_x[cols] = offset / np.linalg.norm(offset)
        # Only the Jacobian rows of the final time step contribute.
        J = self.dyn.jacobian()
        rows = slice(self.nX * self.nA * (horizon - 1), self.nX * self.nA * horizon)
        return np.dot(J[rows].transpose(), grad_x)

    def hessian(self, x, u, xr, ur):
        horizon = x.shape[0]
        hess_x = np.eye(self.nA * self.nX)
        for a in range(self.nA):
            cols = slice(self.nX * a, self.nX * (a + 1))
            offset = x[horizon - 1, cols] - self.x_goal[a]
            dist = np.linalg.norm(offset)
            block = -np.outer(offset, offset) / dist**3
            block += 1.0 / dist * np.eye(self.nX)
            hess_x[cols, cols] = block
        J = self.dyn.jacobian()
        Jt = J[(self.nX * self.nA * (horizon - 1)):(self.nX * self.nA * horizon)]
        return np.dot(Jt.transpose(), np.dot(hess_x, Jt))
class CollisionHR(FeatureBase):
    """Human-robot collision avoidance feature.

    Penalizes proximity between every human agent and the robot by summing
    the inverse distances 1/d over the horizon; the distance metric itself
    is pluggable via ``dist_func``.
    """

    def __init__(self, dist_func, dyn):
        """
        :param dist_func: distance object exposing compute()/grad()/hessian()
        :param dyn: dynamics object (dyn.compute() is assumed to be called already)
        """
        super(CollisionHR, self).__init__()
        self.dist_func = dist_func
        self.dyn = dyn
        # caches filled in by f() / grad_dist() / grad_x()
        self.dists = None
        self.grad_d = None
        self.grad_d_x = None
        # trajectory dimensions, set lazily from the inputs
        self.T = None
        self.nA = None
        self.nX = None

    def f(self, x, u, xr, ur):
        """Sum of inverse human-robot distances.

        :param x: Tx(|A|x|X|) human state matrix
        :param xr: Tx|X| robot state matrix
        """
        self.T, self.nX = xr.shape
        # Floor division so |A| stays an int on Python 3 as well — true
        # division would yield a float and break range(self.nA) below.
        self.nA = x.shape[1] // self.nX
        self.dists = self.dist_func.compute(x, xr)
        return np.sum(1.0 / self.dists)

    def grad(self, x, u, xr, ur):
        # Chain rule through the dynamics Jacobian.
        return np.dot(self.dyn.jacobian().transpose(), self.grad_x(x, u, xr, ur))

    def hessian(self, x, u, xr, ur):
        return np.dot(self.dyn.jacobian().transpose(),
                      np.dot(self.hessian_x(x, u, xr, ur), self.dyn.jacobian()))

    def grad_dist(self, x, u, xr, ur):
        """d(1/d)/dd = -1/d^2, per (time, agent) pair."""
        self.dists = self.dist_func.compute(x, xr)
        return -1.0 / self.dists**2

    def hessian_dist(self, x, u, xr, ur):
        """d^2(1/d)/dd^2 = 2/d^3, per (time, agent) pair."""
        self.dists = self.dist_func.compute(x, xr)
        return 2.0 / self.dists**3

    def grad_x(self, x, u, xr, ur):
        """Chain rule: df/dx = (df/dd) * (dd/dx)."""
        grad = np.zeros_like(x)
        self.grad_d = self.grad_dist(x, u, xr, ur)
        self.grad_d_x = self.dist_func.grad()
        if self.nA is None:
            self.T, self.nX = xr.shape
            self.nA = x.shape[1] // self.nX  # floor division, see f()
        for a in range(self.nA):
            grad[:, a*self.nX:(a+1)*self.nX] = \
                self.grad_d[:, a:(a+1)] * self.grad_d_x[:, a*self.nX:(a+1)*self.nX]
        return grad.flatten()

    def hessian_x(self, x, u, xr, ur):
        # make sure that intermediate calculation is there
        self.grad_d = self.grad_dist(x, u, xr, ur)
        # calculate the block-diagonal Hessian via the chain rule
        hess = np.zeros((x.size, x.size))
        hess_d = self.hessian_dist(x, u, xr, ur)
        hess_d_x = self.dist_func.hessian()
        for t in range(self.T):
            for a in range(self.nA):
                hx = t * (self.nA * self.nX) + a * self.nX
                grad_dx_ta = self.grad_d_x[t, a*self.nX:(a+1)*self.nX]
                hess_dx_ta = hess_d_x[t*self.nX:(t+1)*self.nX, a*self.nX:(a+1)*self.nX]
                hess[hx:hx+self.nX, hx:hx+self.nX] = \
                    np.outer(grad_dx_ta, grad_dx_ta) * hess_d[t, a] + \
                    hess_dx_ta * self.grad_d[t, a]
        return hess
# TODO: implement human-human collision avoidance feature
# TODO: implement human-static obstacle collision avoidance feature
# test the features
if __name__ == "__main__":
    # Smoke test: straight-line human and robot motions, then print each
    # feature's value, gradient and Hessian. (Python 2 print statements.)
    # generate a set of motions
    x0_human = np.array([0.0, 0.0])
    x_goal_human = np.array([[0.0, 7.5]])
    x0_robot = np.array([-2.0, 4.0])
    x_goal_robot = np.array([3.0, 4.0])
    dt = 1.0
    t_end = 10.0
    T = int(t_end / dt)
    # constant-speed controls that reach each goal exactly at t_end
    vel_human = np.linalg.norm(x_goal_human - x0_human) / t_end
    vel_robot = np.linalg.norm(x_goal_robot - x0_robot) / t_end
    u_h = vel_human * np.hstack((np.zeros((T, 1)), np.ones((T, 1))))
    u_r = vel_robot * np.hstack((np.ones((T, 1)), np.zeros((T, 1))))
    # roll the controls through the linear dynamics to get state trajectories
    dyn = LinearDynamics(dt)
    dyn_r = LinearDynamics(dt)
    dyn.compute(x0_human, u_h)
    dyn_r.compute(x0_robot, u_r)
    xh = dyn.traj()
    xr = dyn_r.traj()
    np.set_printoptions(precision=3)
    np.set_printoptions(linewidth=np.nan)
    # create the features
    # velocity feature
    f_vel = Velocity()
    print "velocity feature: ", f_vel(xh, u_h, xr, u_r)
    print "velocity feature gradient: \n", f_vel.grad(xh, u_h, xr, u_r)
    print "velocity feature Hessian: \n", f_vel.hessian(xh, u_h, xr, u_r)
    # goal reward, with the decay radius set to the full travel distance
    R = np.linalg.norm(x_goal_human - x0_human)
    f_goal = GoalReward(dyn, x_goal_human, R)
    print "goal reward feature: ", f_goal(xh, u_h, xr, u_r)
    print "goal reward feature gradient: \n", f_goal.grad(xh, u_h, xr, u_r)
    print "goal reward feature Hessian: \n", f_goal.hessian(xh, u_h, xr, u_r)
    # collision avoidance
    dist_func = EuclideanDist()
    f_collision = CollisionHR(dist_func, dyn)
    print "collision feature: ", f_collision(xh, u_h, xr, u_r)
    print "collision feature gradient: \n", f_collision.grad(xh, u_h, xr, u_r)
    print "collision feature Hessian: \n", f_collision.hessian(xh, u_h, xr, u_r)
|
|
# -*- coding: UTF-8 -*-
## Base modules
import numpy as np
import os, re
import pandas as pd
import zipfile
## Load the training data straight out of the zip archive.
zf = zipfile.ZipFile( './train.csv.zip' )
df = pd.read_csv( zf.open( 'train.csv' ) )#, nrows = 1000 )
## Take a random small subsample of the train data
## NOTE(review): sklearn.cross_validation / grid_search are pre-0.18 modules;
## modern scikit-learn moved both into sklearn.model_selection.
from sklearn.cross_validation import train_test_split
## Get the full train data and split it in halves
## (column 0 is the row id, the last column is the class label).
X_train_full, y_train_full = df[df.columns[1:-1]], df[df.columns[-1]]
X_train_0, X_train_1, y_train_0, y_train_1 = train_test_split( X_train_full, y_train_full, train_size = 0.50 )
#### Making an ensemble classifier
# from sklearn.metrics import log_loss
from sklearn.grid_search import GridSearchCV
def scores_to_df(grid):
    """Flatten GridSearchCV.grid_scores_ into a DataFrame.

    Each entry of ``grid`` is a (parameters-dict, score, ...) tuple; the
    result has one column per hyper-parameter plus a 'metric' column.
    """
    frame = pd.DataFrame([entry[0] for entry in grid])
    frame['metric'] = [entry[1] for entry in grid]
    return frame
## The strategy is to use as many classifiers as possible in hope that each one could
## capture some hidden structural aspects of the data so that together the classifiers
## would have better performance.
ensemble_clf = list( )
## 1. LogisticRegression
from sklearn.linear_model import LogisticRegression
logreg_grid = GridSearchCV( LogisticRegression( multi_class = "ovr" ), param_grid = {
        "C" : np.logspace( -1, 2, num = 4 ),
    }, cv = 10, n_jobs = -1, verbose = 50, scoring = "log_loss" ).fit( X_train_0, y_train_0 )
ensemble_clf.append( ( "Logistic", logreg_grid.best_estimator_ ) )
scores_to_df( logreg_grid.grid_scores_ )
## 2.Random Forest
from sklearn.ensemble import RandomForestClassifier
rf_grid = GridSearchCV( RandomForestClassifier( n_estimators = 256 ), param_grid = {
        ## max_depth -- the maximum allowed number of levels in the decision tree.
        "max_depth" : [ 1, 3, 5, 7, 10, ],
    }, cv = 10, scoring = 'log_loss', verbose = 10 ).fit( X_train_0, y_train_0 )
ensemble_clf.append( ( "Forest", rf_grid.best_estimator_ ) )
scores_to_df( rf_grid.grid_scores_ )
## 3. k - nearest neighbour
from sklearn.neighbors import KNeighborsClassifier
## NOTE(review): this single fit is immediately shadowed by the list below.
knn_clf = KNeighborsClassifier( n_neighbors = 2 ).fit( X_train_0, y_train_0 )
## Survey the data landscape with the Nearest Neighbours Classifiers
knn_clf = [
    ( "knn-%d" % ( n_neighbors, ), KNeighborsClassifier( n_neighbors = n_neighbors ).fit( X_train_0, y_train_0 ) )
        for n_neighbors in [ 2, 8, 32, 128, 512, ] ]
ensemble_clf.extend( knn_clf )
## 4. XGboost  (instantiated but never fitted or added to the ensemble)
from xgboost import XGBClassifier
xgb_clf = XGBClassifier( )
## show log loss
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer( ).fit( df[ df.columns[ -1 ] ] )
y_test_ovr = lb.transform( y_train_1 )
for name, clf in ensemble_clf :
    theta = clf.predict_proba( X_train_1 )
    ## A_{jk} = \sum_i t_{ij} \log \theta_{ik}; the diagonal collects the
    ## log-likelihood of the true classes. Probabilities are clipped away
    ## from 0/1 so the log stays finite.
    A = np.tensordot( y_test_ovr, np.log( np.clip( theta, 1e-15, 1-1e-15) ), ( 0, 0 ) )
    logloss = -np.sum( np.diag( A ), dtype = np.float ) / y_test_ovr.shape[ 0 ]
    print "%s: logLoss %.5f" % ( name, logloss, )
## Make a cube of predicted probabilities NxKxC
proba_cube = np.zeros( ( y_train_1.shape[0], len( lb.classes_ ), len( ensemble_clf ), ), dtype = np.float )
for k, ( name, clf ) in enumerate( ensemble_clf[:2] ) :
    proba_cube[:,:,k] = clf.predict_proba( X_train_1 )
    pass
print proba_cube.sum( axis = 1 )
## Combine the classifiers by a uniform weight
weight = np.asarray( [ .5, .5 ], dtype = np.float )
theta = np.tensordot( weight, proba_cube, (0, 2) )
## NOTE(review): `A` here is stale -- it still holds the last classifier's
## tensor from the loop above, not one recomputed from this blended `theta`.
print -np.sum( np.diag( A ), dtype = np.float ) / y_test_ovr.shape[ 0 ]
####################################################################################################
# from sklearn.metrics import log_loss
## ## Encode the class assignments as a 1-of-all vector
## from sklearn.preprocessing import LabelBinarizer
## lb = LabelBinarizer( ).fit( df[ df.columns[ -1 ] ] )
## ## Compute the logloss
## e_hat = lb.transform( y_hat )
## A = np.tensordot( e_hat, np.log( proba ), ( 0, 0 ) )
## logloss = -np.sum( np.diag( A ) / e_hat.shape[ 0 ] )
## Predict the class labels
## NOTE(review): `lreg`, `X_test`, `y_test` are defined further down this
## scratch file -- these cells were evidently run out of order.
proba = lreg.predict_proba( X_test )
## Compute the logloss
e_test = lb.transform( y_test )
## Logloss = -\frac{1}{N} \sum_{i=1}^N \sum_{k=1}^K t_{ik} \log \hat{p}_{ik}
##         = - \sum_{k=1}^K \frac{1}{N} A_{kk}, where
##     A_{jk} =\sum_{i=1}^N t_{ij} \log \hat{p}_{ik}
A = np.tensordot( e_test, np.log( proba ), ( 0, 0 ) )
logloss = -np.sum( np.diag( A ) / e_test.shape[ 0 ] )
####################################################################################################
## Encode the class assignments as a 1-of-all vector
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer( ).fit( df[ df.columns[ -1 ] ] )
## Train a set of logistic regressions with 1-vs-ALL classification
from sklearn.linear_model import LogisticRegression
lreg = LogisticRegression( multi_class = 'ovr' ).fit( X_train_full, y_train_full )
## Load the test sample
zf = zipfile.ZipFile( './test.csv.zip' )
df_test = pd.read_csv( zf.open( 'test.csv' ) ) #, nrows = 100 )
## Get the independent variables
X_test = df_test[df.columns[1:-1]]
proba = lreg.predict_proba( X_test )
y_hat = lreg.predict( X_test )
## Compute the logloss (against the model's own hard predictions)
e_hat = lb.transform( y_hat )
A = np.tensordot( e_hat, np.log( proba ), ( 0, 0 ) )
logloss = -np.sum( np.diag( A ) / e_hat.shape[ 0 ] )
## Write the one-hot hard predictions as the submission file.
result = pd.DataFrame( { df.columns[ 0 ] : df_test[ df.columns[ 0 ] ] } )
result[ lb.classes_ ] = pd.DataFrame( { k : e_hat[:,j] for j, k in enumerate( lb.classes_, 0 ) } )
result.to_csv( "./sampleSubmission.csv", index = False )
####################################################################################################
from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import AdaBoostClassifier
from sklearn.grid_search import GridSearchCV
rf_grid = GridSearchCV( RandomForestClassifier( n_estimators = 256 ), cv = 10,
    param_grid = { "max_depth": [ 3, 5, 12, 25, ], }, verbose = 10, n_jobs = -1 ).fit( X_train, y_train )
clf = rf_grid.best_estimator_.fit( X_train, y_train )
y_hat = clf.predict( X_test )
##################
zf = zipfile.ZipFile( './test.csv.zip' )
df_test = pd.read_csv( zf.open( 'test.csv' ) ) #, nrows = 100 )
y_test_hat = clf.predict( df_test[df.columns[1:-1]] )
result = pd.DataFrame( { df.columns[ 0 ] : df_test[ df.columns[ 0 ] ] } )
class_labels = np.unique( df[ "target" ].values )
## One-hot encode the hard class predictions into the submission columns.
result[ class_labels ] = pd.DataFrame( { k : (y_test_hat == k)*1 for k in class_labels } )
result.to_csv( "./out.csv", index = False )
###################
##  EM algorithm ##
import scipy as sp
## Random column-stochastic initial guess for the per-class feature weights.
theta_init = np.random.gamma( 1.0, size = ( X_train.shape[ 1 ], 9 ) )
theta_init /= np.sum( theta_init, axis = 0 ).reshape( ( 1, -1 ) )
##
sp.special.gammaln( .5 )
####################################################################################################
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer( ).fit( df[df.columns[-1]] )
## Convert to 1-of-all encoding
t_train = lb.transform( y_train )
t_test = lb.transform( y_test )
## Class-conditional feature counts and empirical class priors.
class_freq = np.tensordot( X_train, t_train, ( 0, 0 ) )
pi = np.sum( t_train, axis = 0, dtype = np.float ).reshape( ( 1, -1 ) ) / t_train.shape[ 0 ]
################################# (Categorical) Multinomial model #################################
theta = class_freq / np.sum( class_freq, axis = 0, dtype = np.float ).reshape( ( 1, -1 ) )
ll_mult = np.dot( X_test, np.log( theta ) ) + np.log( pi )
t_hat_mult = np.argmax( ll_mult, axis = 1 )
#################################      Poisson model       #################################
feature_rate = class_freq / np.sum( t_train, axis = 0, dtype = np.float ).reshape( ( 1, -1 ) )
ll_pois = np.dot( X_test, np.log( feature_rate ) ) - np.sum( feature_rate, axis = 0 ) + np.log( pi )
t_hat_pois = np.argmax( ll_pois, axis = 1 )
cl_hat_pois = lb.classes_[t_hat_pois]
np.sum( cl_hat_pois == y_test )
####################################################################################################
np.unique( t_hat_mult, return_counts = True )
np.unique( t_hat_pois, return_counts = True )
####
from sklearn.neighbors import KNeighborsClassifier
from sklearn.grid_search import GridSearchCV
knn_grid = GridSearchCV( KNeighborsClassifier( ), cv = 10, n_jobs = -1, verbose = 10,
    param_grid = { "n_neighbors" : [ 1, 3, 5, 12, 25, 60 ] } ).fit( X_train, y_train )
knn_clf = knn_grid.best_estimator_.fit( X_train, y_train )
y_hat = knn_clf.predict( X_test )
## Make the confusion matrix (rows: predicted class, columns: true class)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder( ).fit( df[df.columns[-1]].values )
i_test, i_hat = le.transform( y_test ), le.transform( y_hat )
confusion = np.zeros( 2 * [ len( le.classes_ ) ], dtype = np.int )
for i in range( confusion.shape[0] ) :
    j, f = np.unique( i_test[ i_hat == i ], return_counts = True )
    confusion[ i, j ] = f
print confusion
from sklearn.pipeline import Pipeline, FeatureUnion
## Survey the data landscape with the Nearest Neighbours Classifiers
knn_clf = [
    ( "knn-%d" % ( n_neighbors, ), KNeighborsClassifier( n_neighbors = n_neighbors ).fit( X_train, y_train ) )
        for n_neighbors in [ 2, 4, 8, 16, 32, 64, 128, 256 ] ]
## Append each kNN's hard predictions to the design matrix as extra features.
nn_features = pd.DataFrame( { name : knn.predict( X_train ) for name, knn in knn_clf } )
X_train[nn_features.columns] = nn_features
## NOTE(review): `combined_features` and `SVC` are never defined in this
## file; this pipeline sketch raises a NameError if executed as-is.
pipeline = Pipeline( [
    ( "features", combined_features ),
    ## Use rbf-kernel svm
    # ( "svm", SVC( kernel = "linear" ) ),
    ( "svm", SVC( kernel = "rbf" ) ),
    # ( "forest", RandomForestClassifier( ) ),
    ] )
le = LabelEncoder( ).fit( df[df.columns[-1]].values )
## Confusion matrix of the 256-NN feature (column 8) against the true labels.
i_train, i_hat = le.transform( y_train ), le.transform( nn_features.values[:,8] )
confusion = np.zeros( 2 * [ len( le.classes_ ) ], dtype = np.int )
for i in range( confusion.shape[0] ) :
    j, f = np.unique( i_train[ i_hat == i ], return_counts = True )
    confusion[ i, j ] = f
print confusion
import numpy as np
import scipy.sparse as sp
## Toy directed graph as a sparse adjacency matrix (row -> column edges);
## vertex 5 is a dangling vertex (no out-links).
A = sp.csr_matrix( [
    [0,1,0,0,1,0],
    [0,0,1,0,0,1],
    [0,0,0,1,1,0],
    [0,0,0,0,1,0],
    [0,1,0,0,0,0],
    [0,0,0,0,0,0], ], shape = ( 6, 6 ), dtype = np.float )
## Out-degree
out_deg = np.asarray( A.sum( axis = 1 ).getA1( ), dtype = np.float )
dangling = np.where( out_deg == 0 )[ 0 ]
## Dangling vertices get a fake out-degree of one so the row
## normalization below is well defined.
out_deg[ dangling ] = 1.0
beta = 0.85
## Uniform teleport distribution over the vertices.
E = np.full( A.shape[ 0 ], 1.0 / A.shape[ 0 ], dtype = np.float )
## Since matrix is a linear operator and the eigenvalue we seek is one, the requirement
## that the scores vector sum to one is automatically satisfied once it has been imposed.
x_0 = E.copy( )
x_0 = beta * ( x_0 / out_deg ) * A + beta * np.sum( x_0[ dangling ] ) * E + ( 1 - beta ) * np.sum( x_0[ 0 ] ) * E
## D = \text{diag}\bigl( \delta^+_v + 1_{\delta^+_v=0} \bigr)_{v\in V}\,,
## d = \bigl( 1_{\delta^+_v=0} \bigr)_{v\in V}\,, the indicator vector of dangling vertices
## Q = D^{-1} A + d ( 1'1 )^{-1} 1'\,,
## The matrix with teleportation option:
##   M = \beta Q + ( 1 - \beta ) 1 ( 1'1 )^{-1} 1'\,.
## For a personalized pagerank for w\in V use e_w = ( 1_{v=w} )_{v\in V}\,,
##   ... + ( 1 - \beta ) 1 (1' e_w)^{-1} e_w'\,.
## Y = X M = \beta X Q + (1-\beta) X 1 (1'1)^{-1} 1'
##         = \beta X D^{-1} A + \beta X d ( 1'1 )^{-1} 1' + (1-\beta) X 1 (1'1)^{-1} 1'
def sparse_pagerank( A, beta = 0.85, one = None, niter = 1000, rel_eps = 1e-6 ) :
    """Power-iteration PageRank for a sparse adjacency matrix.

    Args:
        A       -- NxN scipy.sparse adjacency matrix (A[i, j] != 0 for edge i->j)
        beta    -- damping factor: probability of following an out-link
        one     -- optional MxN matrix of teleport distributions, one per row
                   (dense or sparse); defaults to the uniform distribution,
                   i.e. classic (non-personalized) PageRank. Rows are
                   normalized to sum to one.
        niter   -- maximum number of power iterations
        rel_eps -- relative L1 convergence tolerance

    Returns:
        (pi, status, kiter) -- MxN ndarray of rankings (rows sum to one),
        status 0 on convergence / -1 if the iteration limit was reached,
        and the number of iterations performed.

    Fixes over the original version: `np.float` (removed in NumPy >= 1.24)
    is replaced by the builtin float; the Python-2 `print` statement becomes
    a call; and the in-place `pi *= P` / row-division tricks, which relied
    on legacy np.matrix/sparse operator fallbacks, are spelled out with
    explicit sparse-times-dense products.
    """
    N = A.shape[ 0 ]
    # Teleport distribution(s): default to uniform, then normalize each row.
    if one is None :
        one = np.ones( ( 1, N ), dtype = float )
    if sp.issparse( one ) :
        one = one.toarray( )
    one = np.asarray( one, dtype = float )
    one = one / one.sum( axis = 1 ).reshape( ( -1, 1 ) )
    # Get the out-degree; dangling vertices receive a fake degree of one so
    # the row normalization is well defined (their rank mass is explicitly
    # redistributed inside the loop instead).
    out = np.asarray( A.sum( axis = 1 ) ).ravel( ).astype( float )
    dangling = np.where( out == 0.0 )[ 0 ]
    out[ dangling ] = 1.0
    # Row-stochastic transition matrix of the underlying random walk.
    P = sp.diags( 1.0 / out, 0 ).dot( A ).tocsc( )
    # Start every teleport row from the uniform ranking.
    pi = np.full( ( one.shape[ 0 ], N ), 1.0 / N )
    kiter, status = 0, -1
    while kiter < niter :
        ## keep a copy of the current ranking estimates
        pi_last = pi.copy( )
        ## The random walk part (sparse @ dense keeps everything an ndarray) ...
        pi = beta * ( P.T.dot( pi.T ) ).T
        ## ... the teleportation part ...
        pi += ( 1 - beta ) * one
        ## ... and the redistributed mass of the dangling vertices.
        if len( dangling ) > 0 :
            pi += beta * one * np.sum( pi_last[ :, dangling ], axis = 1 ).reshape( ( -1, 1 ) )
        ## Normalize each row and test for convergence in relative L1 norm.
        pi /= np.sum( pi, axis = 1 ).reshape( ( -1, 1 ) )
        if np.sum( np.abs( pi - pi_last ) ) <= one.shape[ 0 ] * rel_eps * np.sum( np.abs( pi_last ) ) :
            status = 0
            break
        ## Next iteration
        kiter += 1
        if kiter % 10 == 0 :
            print( kiter )
    return pi, status, kiter
pi1, s, k = sparse_pagerank( A, one = None )
## Personalized pagerank: one teleport distribution per row of `one`.
one = sp.eye( A.shape[ 0 ] ).tocsr()[:100]
pi2, s, k = sparse_pagerank( A, one = one )
one = sp.eye( A.shape[ 0 ] ).tocsr()[:100]
pi3, s, k = sparse_pagerank( A, one = one )
## Dense fixed-iteration-count sanity check of the same recursion.
x0 = np.full( A.shape, 1.0 / A.shape[ 0 ], dtype = np.float )
for i in range( 550 ):
    x0 = beta * ( x0 / out ) * A + ( beta * np.sum( x0[ :, dangling ], axis = 1 ).reshape( ( -1, 1 ) ) + ( 1 - beta ) ) * one
## One iteration of the power iterations method
## NOTE(review): the next line is syntactically invalid (`axis =` has no
## value) and `out` / `d` are undefined here -- scratch code kept verbatim.
x1 = beta * ( x0 / out ) * A + ( beta * np.sum( x0[ dangling ], axis = ) + ( 1 - beta ) ) * one
x1 = beta * ( x0 / out ) * A + ( beta * np.dot( x0.T, d ) + ( 1 - beta ) * np.dot( x0.T, one ) ) * one.T
pass
## Cross-check the rankings against networkx's implementation.
import networkx as nx
G = nx.from_scipy_sparse_matrix( A, create_using = nx.DiGraph( ) )
nx.pagerank_scipy( G )
|
|
#!/usr/bin/env python
# This script is used to update the version of mbed-os used within a specified set of example
# applications. The list of examples to be updated lives in the examples.json file and is
# shared with the examples.py script. Logging is used to provide varying levels of output
# during execution.
#
# There are two modes that can be used:
# 1) Update the ARMmbed/master branch of the specified example
#
# This is done by updating a user fork of the example and then raising a pull request
# against ARMmbed/master.
#
# 2) Update a different ARMmbed branch of the specified example
#
# A branch to update is specified. If it doesn't already exist then it is first created.
# This branch will be updated and the change automatically pushed. The new branch will
# be created from the specified source branch.
#
# The modes are controlled via configuration data in the json file.
# E.g.
#
# "update-config" : {
# "help" : "Update each example repo with a version of mbed-os identified by the tag",
# "via-fork" : {
# "help" : "-f cmd line option. Update a fork",
# "github-user" : "adbridge"
# },
# "via-branch" : {
# "help" : "-b cmd line option. Update dst branch, created from src branch",
# "src-branch" : "mbed-os-5.5.0-rc1-oob",
# "dst-branch" : "mbed-os-5.5.0-rc2-oob"
# },
# "tag" : "mbed-os-5.5.0-rc2"
#
#
# Command usage:
#
# update.py -c <config file> - T <github_token> -l <logging level> -f -b
#
# Where:
# -c <config file> - Optional path to an examples file.
# If not proved the default is 'examples.json'
# -T <github_token> - GitHub token for secure access (required)
# -l <logging level> - Optional Level for providing logging output. Can be one of,
# CRITICAL, ERROR, WARNING, INFO, DEBUG
# If not provided the default is 'INFO'
# -f - Update forked repos. This will use the 'github-user' parameter in
# the 'via-fork' section.
# -b - Update branched repos. This will use the "src-branch" and
# "dst-branch" parameters in the 'via-branch' section. The destination
# branch is created from the source branch (if it doesn't already exist).
#
# The options -f and -b are mutually exlusive. Only one can be specified.
#
#
import os
from os.path import dirname, abspath, basename, join
import sys
import logging
import argparse
import json
import subprocess
import shutil
import stat
import re
from github import Github, GithubException
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
ROOT = abspath(dirname(dirname(dirname(dirname(__file__)))))
sys.path.insert(0, ROOT)
import examples_lib as lib
from examples_lib import SUPPORTED_TOOLCHAINS
def run_cmd(command, exit_on_failure=False):
    """ Run a system command and return its exit status

    Description:

    Passes a command to the system and returns the process exit code once
    the command has been executed. Commands are passed as a list of tokens.
    E.g. The command 'git remote -v' would be passed in as ['git', 'remote', '-v']

    Args:
    command - system command as a list of tokens
    exit_on_failure - If True exit the program on failure (default = False)

    Returns:
    return_code - process exit code: 0 on success, non-zero on failure

    """
    update_log.debug('[Exec] %s', ' '.join(command))

    # Run the tokenized command directly (shell=False). Combining a token
    # list with shell=True is broken on POSIX: only the first token would
    # be executed, with the remaining tokens passed to the shell itself.
    return_code = subprocess.call(command)

    if return_code:
        update_log.warning("Command '%s' failed with return code: %s",
                           ' '.join(command), return_code)
        if exit_on_failure:
            sys.exit(1)

    return return_code
def run_cmd_with_output(command, exit_on_failure=False):
    """ Run a system command and return the exit status plus output

    Description:

    Passes a command to the system and returns the process exit code once
    the command has been executed, together with whatever the command wrote
    to its standard output. Commands are passed as a list of tokens.
    E.g. The command 'git remote -v' would be passed in as ['git', 'remote', '-v']

    Args:
    command - system command as a list of tokens
    exit_on_failure - If True exit the program on failure (default = False)

    Returns:
    returncode - process exit code: 0 on success, non-zero on failure
    output - The output of the command if it was successful, else empty string

    """
    update_log.debug('[Exec] %s', ' '.join(command))
    returncode = 0
    output = ""
    try:
        # Run the tokenized command directly (shell=False): combining a token
        # list with shell=True only executes the first token on POSIX.
        # universal_newlines=True makes the captured output text rather than
        # bytes on Python 3, so callers can do substring checks against it.
        output = subprocess.check_output(command, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        update_log.warning("Command '%s' failed with return code: %s",
                           ' '.join(command), e.returncode)
        returncode = e.returncode
        if exit_on_failure:
            sys.exit(1)
    return returncode, output
def rmtree_readonly(directory):
    """ Delete a directory tree that may contain read-only entries.

    Args:
    directory - tree to delete

    """
    def _force_remove(action, target, _exc_info):
        # Some platforms refuse to delete read-only entries: clear the
        # read-only flag and retry the failed operation once.
        os.chmod(target, stat.S_IWRITE)
        action(target)

    shutil.rmtree(directory, onerror=_force_remove)
def find_all_examples(path):
    """ Search the path for example folders

    Description:

    Walks the tree under `path` and collects every directory that contains
    an mbed-os.lib file, i.e. every (sub-)example.

    Args:
    path - path to search.

    Returns:
    examples - list of paths to example directories, in os.walk order.

    """
    return [folder
            for folder, _subdirs, files in os.walk(path)
            if 'mbed-os.lib' in files]
def upgrade_single_example(example, tag, directory, ref):
    """ Update the mbed-os version for a single example

    Description:

    Updates the mbed-os.lib file in the example specified to correspond to the
    version specified by the GitHub tag supplied. Also deals with
    multiple sub-examples in the GitHub repo, updating them in the same way.

    Args:
    example - json example object containing the GitHub repo to update.
    tag - GitHub tag corresponding to a version of mbed-os to upgrade to.
    directory - directory path for the example.
    ref - SHA corresponding to the supplied tag
    returns - True if the upgrade was successful, False otherwise.

    """
    cwd = os.getcwd()
    os.chdir(directory)
    return_code = False
    # Back up the current lib file so it can be rewritten line by line.
    if os.path.isfile("mbed-os.lib"):
        # Rename command will fail on some OS's if the target file already exist,
        # so ensure if it does, it is deleted first.
        if os.path.isfile("mbed-os.lib_bak"):
            os.remove("mbed-os.lib_bak")
        os.rename("mbed-os.lib", "mbed-os.lib_bak")
    else:
        update_log.error("Failed to backup mbed-os.lib prior to updating.")
        return False
    # mbed-os.lib file contains one line with the following format
    # e.g. https://github.com/ARMmbed/mbed-os/#0789928ee7f2db08a419fa4a032fffd9bd477aa7
    lib_re = re.compile('https://github.com/ARMmbed/mbed-os/#[A-Za-z0-9]+')
    updated = False
    # Scan through mbed-os.lib line by line
    with open('mbed-os.lib_bak', 'r') as ip, open('mbed-os.lib', 'w') as op:
        for line in ip:
            opline = line
            regexp = lib_re.match(line)
            if regexp:
                # Point the mbed-os reference at the SHA of the new tag.
                opline = 'https://github.com/ARMmbed/mbed-os/#' + ref
                updated = True
            op.write(opline)
    if updated:
        # Setup and run the git add command
        cmd = ['git', 'add', 'mbed-os.lib']
        return_code = run_cmd(cmd)
    os.chdir(cwd)
    # NOTE(review): when no line matched, return_code stays False and the
    # function returns True -- i.e. "nothing to update" counts as success.
    return not return_code
def prepare_fork(arm_example):
    """ Synchronises a cloned fork to ensure it is up to date with the original.

    Description:

    This function sets a fork of an ARMmbed repo to be up to date with the
    repo it was forked from. It does this by hard resetting to the ARMmbed
    master branch. Assumes the current working directory is the clone of
    the fork; exits the program if any git step fails.

    Args:
    arm_example - Full GitHub repo path for original example

    """
    logstr = "In: " + os.getcwd()
    update_log.debug(logstr)
    # Add the upstream ARMmbed repo as a remote, hard reset the local
    # checkout to its master, then force-push the result to the fork.
    for cmd in [['git', 'remote', 'add', 'armmbed', arm_example],
                ['git', 'fetch', 'armmbed'],
                ['git', 'reset', '--hard', 'armmbed/master'],
                ['git', 'push', '-f', 'origin']]:
        run_cmd(cmd, exit_on_failure=True)
def prepare_branch(src, dst):
    """ Set up a branch ready for use in updating examples

    Description:

    This function checks whether or not the supplied dst branch exists.
    If it does not, the branch is created from the src and pushed to the origin.
    The branch is then switched to. Assumes the current working directory is
    the example clone; exits the program if any git step fails.

    Args:
    src - branch to create the dst branch from
    dst - branch to update

    """
    update_log.debug("Preparing branch: %s", dst)
    # Check if branch already exists or not.
    # (Substring match against the local `git branch` listing.)
    cmd = ['git', 'branch']
    _, output = run_cmd_with_output(cmd, exit_on_failure=True)
    if not dst in output:
        # OOB branch does not exist thus create it, first ensuring we are on
        # the src branch and then check it out
        for cmd in [['git', 'checkout', src],
                    ['git', 'checkout', '-b', dst],
                    ['git', 'push', '-u', 'origin', dst]]:
            run_cmd(cmd, exit_on_failure=True)
    else:
        # Branch already exists -- just switch to it.
        cmd = ['git', 'checkout', dst]
        run_cmd(cmd, exit_on_failure=True)
def upgrade_example(github, example, tag, ref, user, src, dst, template):
    """ Upgrade all versions of mbed-os.lib found in the specified example repo

    Description:
    Clone a version of the example specified and upgrade all versions of
    mbed-os.lib found within its tree. The version cloned and how it
    is upgraded depends on the user, src and dst settings.
    1) user == None
    The destination branch will be updated with the version of mbed-os
    idenfied by the tag. If the destination branch does not exist then it
    will be created from the source branch.
    2) user != None
    The master branch of a fork of the example will be updated with the
    version of mbed-os identified by the tag.

    Args:
    github - GitHub instance to allow internal git commands to be run
    example - json example object containing the GitHub repo to update.
    tag - GitHub tag corresponding to a version of mbed-os to upgrade to.
    ref - SHA corresponding to the tag
    user - GitHub user name
    src - branch to create the dst branch from
    dst - branch to update
    template - directory containing the 'pr.tmpl' jinja template for PR bodies

    returns True if the upgrade was successful, False otherwise
    """
    # If a user has not been specified then branch update will be used and thus
    # the git user will be ARMmbed.
    if not user:
        user = 'ARMmbed'
    ret = False
    update_log.info("Updating example '%s'", example['name'])
    update_log.debug("User: %s", user)
    update_log.debug("Src branch: %s", (src or "None"))
    update_log.debug("Dst branch: %s", (dst or "None"))
    # Remember the starting directory so it can be restored before returning.
    cwd = os.getcwd()
    update_repo = "https://github.com/" + user + '/' + example['name']
    update_log.debug("Update repository: %s", update_repo)
    # Clone the example repo
    clone_cmd = ['git', 'clone', update_repo]
    return_code = run_cmd(clone_cmd)
    if not return_code:
        # Find all examples
        example_directories = find_all_examples(example['name'])
        os.chdir(example['name'])
        # If the user is ARMmbed then a branch is used.
        if user == 'ARMmbed':
            prepare_branch(src, dst)
        else:
            prepare_fork(example['github'])
        for example_directory in example_directories:
            if not upgrade_single_example(example, tag, os.path.relpath(example_directory, example['name']), ref):
                os.chdir(cwd)
                return False
        # Setup the default commit message
        commit_message = 'Updating mbed-os to ' + tag
        # Setup and run the commit command
        commit_cmd = ['git', 'commit', '-m', commit_message]
        return_code = run_cmd(commit_cmd)
        if not return_code:
            # Setup and run the push command
            push_cmd = ['git', 'push', 'origin']
            return_code = run_cmd(push_cmd)
            if not return_code:
                # If the user is not ARMmbed then a fork is being used
                if user != 'ARMmbed':
                    upstream_repo = 'ARMmbed/'+ example['name']
                    update_log.debug("Upstream repository: %s", upstream_repo)
                    # Check access to mbed-os repo
                    try:
                        repo = github.get_repo(upstream_repo, False)
                    except:
                        update_log.error("Upstream repo: %s, does not exist - skipping", upstream_repo)
                        # NOTE(review): this early return skips the os.chdir(cwd)
                        # below, leaving the process inside the example checkout.
                        return False
                    jinja_loader = FileSystemLoader(template)
                    jinja_environment = Environment(loader=jinja_loader,
                                                    undefined=StrictUndefined)
                    pr_body = jinja_environment.get_template("pr.tmpl").render(tag=tag)
                    # Raise a PR from release-candidate to master
                    user_fork = user + ':master'
                    try:
                        pr = repo.create_pull(title='Updating mbed-os to ' + tag, head=user_fork, base='master', body=pr_body)
                        ret = True
                    except GithubException as e:
                        # Default to False
                        update_log.error("Pull request creation failed with error: %s", e)
                else:
                    ret = True
            else:
                update_log.error("Git push command failed.")
        else:
            update_log.error("Git commit command failed.")
    else:
        update_log.error("Git clone %s failed", update_repo)
    os.chdir(cwd)
    return ret
def create_work_directory(path):
    """Create the directory *path*, replacing any pre-existing one.

    Args:
    path - directory path to be created.
    """
    stale = os.path.exists(path)
    if stale:
        # Remove the old tree first (handles read-only entries as well).
        update_log.info("'%s' directory already exists. Deleting...", path)
        rmtree_readonly(path)
    os.makedirs(path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-c', '--config_file', help="Path to the configuration file (default is 'examples.json')", default='examples.json')
    parser.add_argument('-T', '--github_token', help="GitHub token for secure access")
    parser.add_argument('-l', '--log-level',
                        help="Level for providing logging output",
                        default='INFO')
    exclusive = parser.add_mutually_exclusive_group(required=True)
    exclusive.add_argument('-f', '--fork', help="Update a fork", action='store_true')
    exclusive.add_argument('-b', '--branch', help="Update a branch", action='store_true')
    args = parser.parse_args()

    # Fall back to INFO when an unrecognised log level name is supplied.
    default = getattr(logging, 'INFO')
    level = getattr(logging, args.log_level.upper(), default)

    # Set logging level
    logging.basicConfig(level=level)
    update_log = logging.getLogger("Update")

    # Load the config file. open() itself raises if the file is missing, so
    # no extra existence check is needed (the old `if not config:` test on the
    # file object was dead code - an open file object is always truthy).
    with open(os.path.join(os.path.dirname(__file__), args.config_file)) as config:
        json_data = json.load(config)

    # Create working directory
    create_work_directory('examples')

    github = Github(args.github_token)
    config = json_data['update-config']
    tag = config['tag']

    user = None
    src = "master"
    dst = None

    if args.fork:
        user = config['via-fork']['github-user']
    elif args.branch:
        src = config['via-branch']['src-branch']
        dst = config['via-branch']['dst-branch']
    else:
        # Unreachable in practice (the exclusive group is required=True) but
        # kept as a guard. Fixed: previously called the undefined name
        # 'userlog' and the bare builtin exit().
        update_log.error("Must specify either -f or -b command line option")
        sys.exit(1)

    # Get the github sha corresponding to the specified mbed-os tag
    cmd = ['git', 'rev-list', '-1', tag]
    return_code, ref = run_cmd_with_output(cmd)
    if return_code:
        update_log.error("Could not obtain SHA for tag: %s", tag)
        sys.exit(1)

    # Loop through the examples
    failures = []
    successes = []
    template = dirname(abspath(__file__))

    os.chdir('examples')
    for example in json_data['examples']:
        # Determine if this example should be updated and if so update any found
        # mbed-os.lib files.
        result = upgrade_example(github, example, tag, ref, user, src, dst, template)
        if result:
            successes += [example['name']]
        else:
            failures += [example['name']]
    os.chdir('../')

    # Finish the script and report the results
    update_log.info("Finished updating examples")
    if successes:
        for success in successes:
            update_log.info(" SUCCEEDED: %s", success)
    if failures:
        for fail in failures:
            update_log.info(" FAILED: %s", fail)
|
|
from builtins import hex
from builtins import object
import json
import os
from random import SystemRandom
import shutil
from uuid import UUID
from devp2p.service import BaseService
from ethereum.tools import keys
from ethereum.slogging import get_logger
from ethereum.utils import privtopub # this is different than the one used in devp2p.crypto
from ethereum.utils import sha3, is_string, encode_hex, remove_0x_head, to_string
from rlp.utils import decode_hex
from pyethapp.utils import MinType
log = get_logger('accounts')
DEFAULT_COINBASE = decode_hex('de0b295669a9fd93d5f28d9ec85e40f4cb697bae')
random = SystemRandom()
def mk_privkey(seed):
    """Deterministically derive a private key as ``sha3(seed)``."""
    return sha3(seed)
def mk_random_privkey():
    """Generate a random 256-bit private key from the OS CSPRNG.

    :returns: the key as 32 raw bytes (decoded from 64 hex characters)
    """
    # Format straight to a zero-padded 64-digit hex string. The previous
    # `hex(...)[2:-1]` assumed Python 2's trailing 'L', but `from builtins
    # import hex` gives Python-3 semantics (no suffix), so the slice was
    # silently dropping the final hex digit of the key.
    k = '{0:064x}'.format(random.getrandbits(256))
    assert len(k) == 64
    return decode_hex(k)
class Account(object):
    """Represents an account.

    :ivar keystore: the key store as a dictionary (as decoded from json)
    :ivar locked: `True` if the account is locked and neither private nor public keys can be
                  accessed, otherwise `False`
    :ivar path: absolute path to the associated keystore file (`None` for in-memory accounts)
    """

    def __init__(self, keystore, password=None, path=None):
        """Wrap a keystore dictionary, optionally unlocking it immediately.

        :param keystore: the keystore as a dictionary (as decoded from json)
        :param password: if not `None`, the account is unlocked with it right away
        :param path: location of the keystore file; stored as an absolute path
        """
        self.keystore = keystore
        try:
            self._address = decode_hex(self.keystore['address'])
        except KeyError:
            # no address stored in the keystore; it may still be derived from
            # the private key once the account is unlocked (see `address`)
            self._address = None
        self.locked = True
        if password is not None:
            self.unlock(password)
        if path is not None:
            self.path = os.path.abspath(path)
        else:
            self.path = None

    @classmethod
    def new(cls, password, key=None, uuid=None, path=None):
        """Create a new account.

        Note that this creates the account in memory and does not store it on disk.

        :param password: the password used to encrypt the private key
        :param key: the private key, or `None` to generate a random one
        :param uuid: an optional id
        :param path: an optional path to the keystore file
        """
        if key is None:
            key = mk_random_privkey()
        # [NOTE]: key and password should be bytes
        if not is_string(key):
            key = to_string(key)
        if not is_string(password):
            password = to_string(password)
        keystore = keys.make_keystore_json(key, password)
        keystore['id'] = uuid
        return Account(keystore, password, path)

    @classmethod
    def load(cls, path, password=None):
        """Load an account from a keystore file.

        :param path: full path to the keyfile
        :param password: the password to decrypt the key file or `None` to leave it encrypted
        :raises ValueError: if the file content is not a valid keystore
        """
        with open(path) as f:
            keystore = json.load(f)
        if not keys.check_keystore_json(keystore):
            raise ValueError('Invalid keystore file')
        return Account(keystore, password, path=path)

    def dump(self, include_address=True, include_id=True):
        """Dump the keystore for later disk storage.

        The result inherits the entries `'crypto'` and `'version`' from `account.keystore`, and
        adds `'address'` and `'id'` in accordance with the parameters `'include_address'` and
        `'include_id`'.

        If address or id are not known, they are not added, even if requested.

        :param include_address: flag denoting if the address should be included or not
        :param include_id: flag denoting if the id should be included or not
        :returns: a json encoded string
        """
        d = {}
        d['crypto'] = self.keystore['crypto']
        d['version'] = self.keystore['version']
        if include_address and self.address is not None:
            d['address'] = encode_hex(self.address)
        if include_id and self.uuid is not None:
            d['id'] = str(self.uuid)
        return json.dumps(d)

    def unlock(self, password):
        """Unlock the account with a password.

        If the account is already unlocked, nothing happens, even if the password is wrong.

        :raises: :exc:`ValueError` (originating in ethereum.keys) if the password is wrong (and the
                 account is locked)
        """
        if self.locked:
            self._privkey = keys.decode_keystore_json(self.keystore, password)
            self.locked = False
            self.address  # get address such that it stays accessible after a subsequent lock

    def lock(self):
        """Relock an unlocked account.

        This method sets `account.privkey` to `None` (unlike `account.address` which is preserved).
        After calling this method, both `account.privkey` and `account.pubkey` are `None`.
        `account.address` stays unchanged, even if it has been derived from the private key.
        """
        self._privkey = None
        self.locked = True

    @property
    def privkey(self):
        """The account's private key or `None` if the account is locked"""
        if not self.locked:
            return self._privkey
        else:
            return None

    @property
    def pubkey(self):
        """The account's public key or `None` if the account is locked"""
        if not self.locked:
            return privtopub(self.privkey)
        else:
            return None

    @property
    def address(self):
        """The account's address or `None` if the address is not stored in the key file and cannot
        be reconstructed (because the account is locked)
        """
        if self._address:
            # already cached by __init__ or a previous access
            pass
        elif 'address' in self.keystore:
            self._address = decode_hex(self.keystore['address'])
        elif not self.locked:
            # derive (and cache) the address from the unlocked private key
            self._address = keys.privtoaddr(self.privkey)
        else:
            return None
        return self._address

    @property
    def uuid(self):
        """An optional unique identifier, formatted according to UUID version 4, or `None` if the
        account does not have an id
        """
        try:
            return self.keystore['id']
        except KeyError:
            return None

    @uuid.setter
    def uuid(self, value):
        """Set the UUID. Set it to `None` in order to remove it."""
        if value is not None:
            self.keystore['id'] = value
        elif 'id' in self.keystore:
            self.keystore.pop('id')

    def sign_tx(self, tx):
        """Sign a Transaction with the private key of this account.

        If the account is unlocked, this is equivalent to ``tx.sign(account.privkey)``.

        :param tx: the :class:`ethereum.transactions.Transaction` to sign
        :raises: :exc:`ValueError` if the account is locked
        """
        if self.privkey:
            log.info('signing tx', tx=tx, account=self)
            tx.sign(self.privkey)
        else:
            raise ValueError('Locked account cannot sign tx')

    def __repr__(self):
        if self.address is not None:
            address = encode_hex(self.address)
        else:
            address = '?'
        return '<Account(address={address}, id={id})>'.format(address=address, id=self.uuid)
class AccountsService(BaseService):
    """Service that manages accounts.

    At initialization, this service collects the accounts stored as key files in the keystore
    directory (config option `accounts.keystore_dir`) and below.

    To add more accounts, use :method:`add_account`.

    :ivar accounts: the :class:`Account`s managed by this service, sorted by the paths to their
                    keystore files
    :ivar keystore_dir: absolute path to the keystore directory
    """

    name = 'accounts'
    default_config = dict(accounts=dict(keystore_dir='keystore', must_include_coinbase=True))

    def __init__(self, app):
        """Collect all valid keystore files found below the configured keystore directory."""
        super(AccountsService, self).__init__(app)
        self.keystore_dir = app.config['accounts']['keystore_dir']
        if not os.path.isabs(self.keystore_dir):
            # relative keystore dirs are resolved against the app's data_dir
            self.keystore_dir = os.path.abspath(os.path.join(app.config['data_dir'],
                                                             self.keystore_dir))
        assert os.path.isabs(self.keystore_dir)
        self.accounts = []
        if not os.path.exists(self.keystore_dir):
            log.warning('keystore directory does not exist', directory=self.keystore_dir)
        elif not os.path.isdir(self.keystore_dir):
            log.error('configured keystore directory is a file, not a directory',
                      directory=self.keystore_dir)
        else:
            # traverse file tree rooted at keystore_dir
            log.info('searching for key files', directory=self.keystore_dir)
            for dirpath, _, filenames in os.walk(self.keystore_dir):
                for filename in [os.path.join(dirpath, filename) for filename in filenames]:
                    try:
                        self.accounts.append(Account.load(filename))
                    except ValueError:
                        log.warning('invalid file skipped in keystore directory',
                                    path=filename)
        self.accounts.sort(key=lambda account: account.path)  # sort accounts by path
        if not self.accounts:
            log.warn('no accounts found')
        else:
            log.info('found account(s)', accounts=self.accounts)

    @property
    def coinbase(self):
        """Return the address that should be used as coinbase for new blocks.

        The coinbase address is given by the config field pow.coinbase_hex. If this does not exist
        or is `None`, the address of the first account is used instead. If there are no accounts,
        the coinbase is `DEFAULT_COINBASE`.

        :raises: :exc:`ValueError` if the coinbase is invalid (no string, wrong length) or there is
                 no account for it and the config flag `accounts.check_coinbase` is set (does not
                 apply to the default coinbase)
        """
        cb_hex = self.app.config.get('pow', {}).get('coinbase_hex')
        if cb_hex is None:
            if not self.accounts_with_address:
                return DEFAULT_COINBASE
            cb = self.accounts_with_address[0].address
        else:
            # [NOTE]: check it!
            # if not is_string(cb_hex):
            if not isinstance(cb_hex, str):
                raise ValueError('coinbase must be string')
            try:
                cb = decode_hex(remove_0x_head(cb_hex))
            except (ValueError, TypeError):
                raise ValueError('invalid coinbase')
        if len(cb) != 20:
            raise ValueError('wrong coinbase length')
        # Fixed: was `self.config`, inconsistent with the `self.app.config`
        # access above (BaseService exposes the config via the app).
        if self.app.config['accounts']['must_include_coinbase']:
            if cb not in (acct.address for acct in self.accounts):
                raise ValueError('no account for coinbase')
        return cb

    def add_account(self, account, store=True, include_address=True, include_id=True):
        """Add an account.

        If `store` is true the account will be stored as a key file at the location given by
        `account.path`. If this is `None` a :exc:`ValueError` is raised. `include_address` and
        `include_id` determine if address and id should be removed for storage or not.

        This method will raise a :exc:`ValueError` if the new account has the same UUID as an
        account already known to the service. Note that address collisions do not result in an
        exception as those may slip through anyway for locked accounts with hidden addresses.
        """
        log.info('adding account', account=account)
        if account.uuid is not None:
            if len([acct for acct in self.accounts if acct.uuid == account.uuid]) > 0:
                log.error('could not add account (UUID collision)', uuid=account.uuid)
                raise ValueError('Could not add account (UUID collision)')
        if store:
            if account.path is None:
                raise ValueError('Cannot store account without path')
            assert os.path.isabs(account.path), account.path
            if os.path.exists(account.path):
                log.error('File does already exist', path=account.path)
                raise IOError('File does already exist')
            assert account.path not in [acct.path for acct in self.accounts]
            try:
                directory = os.path.dirname(account.path)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                with open(account.path, 'w') as f:
                    f.write(account.dump(include_address, include_id))
            except IOError as e:
                log.error('Could not write to file', path=account.path, message=e.strerror,
                          errno=e.errno)
                raise
        self.accounts.append(account)
        # in-memory accounts (path is None) sort before stored ones
        min_value = MinType()
        self.accounts.sort(key=lambda account: min_value if account.path is None else account.path)

    def update_account(self, account, new_password, include_address=True, include_id=True):
        """Replace the password of an account.

        The update is carried out in three steps:

        1) the old keystore file is renamed
        2) the new keystore file is created at the previous location of the old keystore file
        3) the old keystore file is removed

        In this way, at least one of the keystore files exists on disk at any time and can be
        recovered if the process is interrupted.

        :param account: the :class:`Account` which must be unlocked, stored on disk and included in
                        :attr:`AccountsService.accounts`.
        :param include_address: forwarded to :meth:`add_account` during step 2
        :param include_id: forwarded to :meth:`add_account` during step 2
        :raises: :exc:`ValueError` if the account is locked, if it is not added to the account
                 manager, or if it is not stored
        """
        if account not in self.accounts:
            raise ValueError('Account not managed by account service')
        if account.locked:
            raise ValueError('Cannot update locked account')
        if account.path is None:
            raise ValueError('Account not stored on disk')
        assert os.path.isabs(account.path)

        # create new account
        log.debug('creating new account')
        new_account = Account.new(new_password, key=account.privkey, uuid=account.uuid)
        new_account.path = account.path

        # generate unique path and move old keystore file there
        backup_path = account.path + '~'
        i = 1
        while os.path.exists(backup_path):
            backup_path = backup_path[:backup_path.rfind('~') + 1] + str(i)
            i += 1
        assert not os.path.exists(backup_path)
        log.info('moving old keystore file to backup location', **{'from': account.path,
                                                                   'to': backup_path})
        try:
            shutil.move(account.path, backup_path)
        except:
            log.error('could not backup keystore, stopping account update',
                      **{'from': account.path, 'to': backup_path})
            raise
        assert os.path.exists(backup_path)
        assert not os.path.exists(new_account.path)
        account.path = backup_path

        # remove old account from manager (not from disk yet) and add new account
        self.accounts.remove(account)
        assert account not in self.accounts
        try:
            # Fixed: these flags were previously passed positionally, mapping
            # include_address onto add_account's `store` parameter and
            # include_id onto `include_address`.
            self.add_account(new_account, include_address=include_address,
                             include_id=include_id)
        except:
            log.error('adding new account failed, recovering from backup')
            shutil.move(backup_path, new_account.path)
            self.accounts.append(account)
            self.accounts.sort(key=lambda account: account.path)
            raise
        assert os.path.exists(new_account.path)
        assert new_account in self.accounts

        # everything was successful (we are still here), so delete old keystore file
        log.info('deleting backup of old keystore', path=backup_path)
        try:
            os.remove(backup_path)
        except:
            log.error('failed to delete no longer needed backup of old keystore',
                      path=account.path)
            raise

        # set members of account to values of new_account
        account.keystore = new_account.keystore
        account.path = new_account.path
        assert account.__dict__ == new_account.__dict__

        # replace new_account by old account in account list
        self.accounts.append(account)
        self.accounts.remove(new_account)
        self.accounts.sort(key=lambda account: account.path)
        log.debug('account update successful')

    @property
    def accounts_with_address(self):
        """Return a list of accounts whose address is known."""
        return [account for account in self if account.address]

    @property
    def unlocked_accounts(self):
        """Return a list of all unlocked accounts."""
        return [account for account in self if not account.locked]

    def find(self, identifier):
        """Find an account by either its address, its id or its index as string.

        Example identifiers:

        - '9c0e0240776cfbe6fa1eb37e57721e1a88a563d1' (address)
        - '0x9c0e0240776cfbe6fa1eb37e57721e1a88a563d1' (address with 0x prefix)
        - '01dd527b-f4a5-4b3c-9abb-6a8e7cd6722f' (UUID)
        - '3' (index)

        :param identifier: the accounts hex encoded, case insensitive address (with optional 0x
                           prefix), its UUID or its index (as string, >= 1) in
                           `account_service.accounts`
        :raises: :exc:`ValueError` if the identifier could not be interpreted
        :raises: :exc:`KeyError` if the identified account is not known to the account_service
        """
        try:
            uuid = UUID(identifier)
        except ValueError:
            pass
        else:
            return self.get_by_id(uuid.hex)

        try:
            index = int(identifier, 10)
        except ValueError:
            pass
        else:
            if index <= 0:
                raise ValueError('Index must be 1 or greater')
            try:
                return self.accounts[index - 1]
            except IndexError as e:
                # Fixed: `e.message` does not exist on Python 3 exceptions.
                raise KeyError(str(e))

        if identifier[:2] == '0x':
            identifier = identifier[2:]
        try:
            address = decode_hex(identifier)
        except (TypeError, ValueError):
            # Not valid hex; fall through to the generic error below.
            # (Fixed: a ValueError from decode_hex previously escaped here.)
            pass
        else:
            if len(address) == 20:
                return self[address]
        raise ValueError('Could not interpret account identifier')

    def get_by_id(self, id):
        """Return the account with a given id.

        Note that accounts are not required to have an id.

        :raises: `KeyError` if no matching account can be found
        """
        # Skip accounts without an id (their uuid is None); previously those
        # crashed the UUID() conversion with a TypeError.
        accts = [acct for acct in self.accounts
                 if acct.uuid is not None and UUID(acct.uuid) == UUID(id)]
        if len(accts) == 0:
            raise KeyError('account with id {} unknown'.format(id))
        elif len(accts) > 1:
            # Previously unreachable because of a contradictory assert above.
            log.warning('multiple accounts with same UUID found', uuid=id)
        return accts[0]

    def get_by_address(self, address):
        """Get an account by its address.

        Note that even if an account with the given address exists, it might not be found if it is
        locked. Also, multiple accounts with the same address may exist, in which case the first
        one is returned (and a warning is logged).

        :raises: `KeyError` if no matching account can be found
        """
        assert len(address) == 20
        accounts = [account for account in self.accounts if account.address == address]
        if len(accounts) == 0:
            raise KeyError('account with address {} not found'.format(encode_hex(address)))
        elif len(accounts) > 1:
            log.warning('multiple accounts with same address found', address=encode_hex(address))
        return accounts[0]

    def sign_tx(self, address, tx):
        """Sign *tx* with the account belonging to *address* (must be unlocked)."""
        self.get_by_address(address).sign_tx(tx)

    def propose_path(self, address):
        """Return the default keystore file path for a new account with *address*."""
        return os.path.join(self.keystore_dir, encode_hex(address))

    def __contains__(self, address):
        assert len(address) == 20
        return address in [a.address for a in self.accounts]

    def __getitem__(self, address_or_idx):
        """Look an account up by raw 20-byte address or by integer list index."""
        if isinstance(address_or_idx, bytes):
            address = address_or_idx
            assert len(address) == 20
            for a in self.accounts:
                if a.address == address:
                    return a
            raise KeyError
        else:
            assert isinstance(address_or_idx, int)
            return self.accounts[address_or_idx]

    def __iter__(self):
        return iter(self.accounts)

    def __len__(self):
        return len(self.accounts)
"""
--import-key = key.json
--unlock <password dialog>
--password passwordfile
--newkey <password dialog>
"""
|
|
"""
Tests for `kolibri` module.
"""
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import kolibri
import mock
from kolibri.utils import version
#: Because we don't want to call the original (decorated function), it uses
#: caching and will return the result of the first call always. We call
#: the wrapped function `__wrapped__` directly.
get_version = version.get_version.__wrapped__ # @UndefinedVariable
def dont_call_me_maybe(msg):
    """Raise ``AssertionError(msg)`` unconditionally (canary for calls that must never happen)."""
    failure = AssertionError(msg)
    raise failure
class TestKolibriVersion(unittest.TestCase):
def test_version(self):
"""
Test that the major version is set as expected
"""
major_version_tuple = "{}.{}".format(*kolibri.VERSION[0:2])
self.assertIn(major_version_tuple, kolibri.__version__)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
def test_alpha_0_version(self, file_mock, describe_mock):
"""
Test that when doing something with a 0th alpha doesn't provoke any
hickups with ``git describe --tag``.
"""
v = get_version((0, 1, 0, "alpha", 0))
self.assertIn("0.1.0.dev", v)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
def test_alpha_1_version(self, file_mock, describe_mock):
"""
Test some normal alpha version, but don't assert that the
``git describe --tag`` is consistent (it will change in future test
runs)
"""
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_alpha_1_version_no_git(self, describe_mock):
"""
Not running from git and no VERSION file.
"""
# Simple mocking
get_version_file = version.get_version_file
version.get_version_file = lambda: None
try:
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
finally:
version.get_version_file = get_version_file
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0a1")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_alpha_1_version_file(self, describe_mock, file_mock):
"""
Test that a simple 0.1a1 works when loaded from a VERSION file
"""
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0a1\n")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_version_file_linebreaks(self, describe_mock, file_mock):
"""
Test that line breaks don't get included in the final version
See: https://github.com/learningequality/kolibri/issues/2464
"""
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.7.1b1.dev+git-2-gfd48a7a")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_version_file_local_git_version(self, describe_mock, file_mock):
"""
Test that a version file with git describe output is correctly parsed
"""
v = get_version((0, 7, 1, "beta", 1))
self.assertIn("0.7.1b1.dev+git-2-gfd48a7a", v)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_alpha_0_inconsistent_version_file(self, get_git_changeset_mock, describe_mock):
"""
Test that inconsistent file data also just fails
"""
# Simple mocking
get_version_file = version.get_version_file
inconsistent_versions = ("0.2.0a1", "0.1.1a1", "0.1.0")
for v in inconsistent_versions:
version.get_version_file = lambda: v
try:
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 0)
)
finally:
version.get_version_file = get_version_file
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_alpha_1_inconsistent_version_file(self, get_git_changeset_mock, describe_mock):
"""
Test that inconsistent file data also just fails
"""
# Simple mocking
get_version_file = version.get_version_file
inconsistent_versions = ("0.2.0a1", "0.1.1a1", "0.1.0")
for v in inconsistent_versions:
version.get_version_file = lambda: v
try:
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 1)
)
finally:
version.get_version_file = get_version_file
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0b1")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_alpha_0_consistent_version_file(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION file can overwrite an alpha-0 (dev) state.
Because a prerelease can be made with a version file.
"""
assert get_version((0, 1, 0, "alpha", 0)) == "0.1.0b1"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0b2")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_beta_1_consistent_version_file(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION file can overwrite an beta-1 state in case the
version was bumped in ``kolibri.VERSION``.
"""
assert get_version((0, 1, 0, "beta", 1)) == "0.1.0b2"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.7.1b1.dev+git-12-g2a8fe31")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_beta_1_consistent_dev_release_version_file(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION file can overwrite an beta-1 state in case the
version was bumped in ``kolibri.VERSION``.
"""
assert get_version((0, 7, 1, "alpha", 0)) == "0.7.1b1.dev+git-12-g2a8fe31"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0b1")
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.0.1")
@mock.patch('kolibri.utils.version.get_git_changeset', return_value="+git123")
def test_version_file_ignored(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that the VERSION file is NOT used where git data is available
"""
assert get_version((0, 1, 0, "alpha", 0)) == "0.1.0.dev+git123"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_version_file_final(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION specifying a final version will work when the
kolibri.VERSION tuple is consistent.
"""
assert get_version((0, 1, 0, "final", 0)) == "0.1.0"
def test_alpha_1_inconsistent_git(self):
"""
Test that we fail when git returns inconsistent data
"""
# Simple mocking
git_describe = version.get_git_describe
try:
version.get_git_describe = lambda *x: 'v0.2.0-beta1'
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 1)
)
version.get_git_describe = lambda *x: 'v0.2.0-beta2'
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "beta", 0)
)
version.get_git_describe = lambda *x: 'v0.1.0'
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 0)
)
finally:
version.get_git_describe = git_describe
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.1.0-beta1-123-abcdfe12")
def test_alpha_1_consistent_git(self, describe_mock):
"""
Test that we get the git describe data when it's there
"""
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0b1.dev+git-123-abcdfe12"
@mock.patch('subprocess.Popen')
def test_git_describe_parser(self, popen_mock):
"""
Test that we get the git describe data when it's there
"""
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('v0.1.0-beta1-123-abcdfe12', '')}
process_mock.configure_mock(**attrs)
popen_mock.return_value = process_mock
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0b1.dev+git-123-abcdfe12"
@mock.patch('subprocess.Popen')
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
def test_git_random_tag(self, file_mock, popen_mock):
"""
Test that we don't fail if some random tag appears
"""
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('foobar', '')}
process_mock.configure_mock(**attrs)
popen_mock.return_value = process_mock
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0a1"
@mock.patch('subprocess.Popen', side_effect=EnvironmentError())
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0a2")
def test_prerelease_no_git(self, file_mock, popen_mock):
"""
Test that we don't fail and that the version file is used
"""
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0a2"
@mock.patch('kolibri.utils.version.get_complete_version', side_effect=lambda x: x if x else (0, 2, 0, 'alpha', 2))
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.2.0-beta1")
def test_beta_1_git(self, describe_mock, complete_mock):
"""
Test that we use git tag data when our version is alpha
"""
self.assertEqual(
get_version(),
'0.2.0b1'
)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_final(self, describe_mock):
"""
Test that the major version is set as expected on a final release
"""
v = get_version((0, 1, 0, "final", 0))
self.assertEqual(v, "0.1.0")
assert describe_mock.call_count == 0
@mock.patch('kolibri.utils.version.get_git_describe')
def test_final_patch(self, describe_mock):
"""
Test that the major version is set as expected on a final release
"""
v = get_version((0, 1, 1, "final", 0))
self.assertEqual(v, "0.1.1")
assert describe_mock.call_count == 0
@mock.patch('kolibri.utils.version.get_git_describe')
def test_final_post(self, describe_mock):
"""
Test that the major version is set as expected on a final release
"""
v = get_version((0, 1, 1, "final", 1))
self.assertEqual(v, "0.1.1.post1")
assert describe_mock.call_count == 0
def test_version_compat(self):
"""
Test that our version glue works for some really old releases of
setuptools, like the one in Ubuntu 14.04.
We don't have a reference implementation, but parse_version will return
a tuple, and this is from a live system::
test@test-VirtualBox:~$ python
Python 2.7.6 (default, Jun 22 2015, 17:58:13)
[GCC 4.8.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> from pkg_resources import parse_version
>>> parse_version("1.2.3")
('00000001', '00000002', '00000003', '*final')
>>> parse_version("1.2.3.dev0")
('00000001', '00000002', '00000003', '*@', '*final')
>>> parse_version("1.2.3a1")
('00000001', '00000002', '00000003', '*a', '00000001', '*final')
>>> parse_version("1.2.3a0")
('00000001', '00000002', '00000003', '*a', '*final')
>>> parse_version("1.2.3b1")
('00000001', '00000002', '00000003', '*b', '00000001', '*final')
>>> parse_version("1.2.3b1+git-123")
('00000001', '00000002', '00000003', '*b', '00000001', '*+', '*git', '*final-', '00000123', '*final')
"""
from kolibri.utils.compat import VersionCompat
assert VersionCompat(
('00000001', '00000002', '00000003', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*@', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*a', '00000001', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*b', '00000001', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*b', '00000001', '*+', '*git', '*final-', '00000123', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000000', '00000002', '00000003', '*b', '00000001', '*+', '*git', '*final-', '00000123', '*final')
).base_version == "0.2.3"
|
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Side Effects Penalties.
Abstract class for implementing a side effects (impact measure) penalty,
and various concrete penalties deriving from it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import enum
import random
import numpy as np
import six
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow.compat.v1 as tf
class Actions(enum.IntEnum):
  """Enum for actions the agent can take."""
  UP = 0
  DOWN = 1
  LEFT = 2
  RIGHT = 3
  NOOP = 4  # Do-nothing action; used throughout to simulate inaction baselines.
@six.add_metaclass(abc.ABCMeta)
class Baseline(object):
  """Base class for baseline states.

  A baseline tracks a counterfactual state (e.g. "what if the agent had done
  nothing") against which the impact of the agent's actions is measured.
  """
  def __init__(self, start_timestep, exact=False, env=None,
               timestep_to_state=None):
    """Create a baseline.

    Args:
      start_timestep: starting state timestep
      exact: whether to use an exact or approximate baseline
      env: a copy of the environment (used to simulate exact baselines)
      timestep_to_state: a function that turns timesteps into states
    """
    self._exact = exact
    self._env = env
    self._timestep_to_state = timestep_to_state
    self._start_timestep = start_timestep
    self._baseline_state = self._timestep_to_state(self._start_timestep)
    # _inaction_next[s][s'] counts observed NOOP transitions s -> s';
    # used by sample() to draw approximate inaction outcomes.
    self._inaction_next = collections.defaultdict(
        lambda: collections.defaultdict(lambda: 0))
  @abc.abstractmethod
  def calculate(self):
    """Update and return the baseline state."""
  def sample(self, state):
    """Sample the outcome of a noop in `state`.

    Draws a successor state with probability proportional to the observed
    NOOP transition counts out of `state`.
    """
    d = self._inaction_next[state]
    counts = np.array(list(d.values()))
    # Normalize counts into a probability distribution over successors.
    index = np.random.choice(a=len(counts), p=counts/sum(counts))
    return list(d.keys())[index]
  def reset(self):
    """Signal start of new episode."""
    self._baseline_state = self._timestep_to_state(self._start_timestep)
    if self._exact:
      self._env.reset()
  @abc.abstractproperty
  def rollout_func(self):
    """Function to compute a rollout chain, or None if n/a."""
  @property
  def baseline_state(self):
    # The current baseline state used for penalty computation.
    return self._baseline_state
class StartBaseline(Baseline):
  """Starting state baseline: always compare against the episode start."""
  def calculate(self, *_unused):
    """Return the (fixed) starting baseline state."""
    return self._baseline_state
  @property
  def rollout_func(self):
    """No rollout function applies to the start baseline."""
    return None
class InactionBaseline(Baseline):
  """Inaction baseline: the state resulting from taking no-ops from start."""
  def calculate(self, prev_state, action, current_state):
    """Advance the baseline by one simulated (exact) or sampled no-op step."""
    if self._exact:
      # Exact mode: step the environment copy with a no-op.
      noop_timestep = self._env.step(Actions.NOOP)
      self._baseline_state = self._timestep_to_state(noop_timestep)
      return self._baseline_state
    # Approximate mode: record observed no-op transitions, then sample one.
    if action == Actions.NOOP:
      self._inaction_next[prev_state][current_state] += 1
    if self._baseline_state in self._inaction_next:
      self._baseline_state = self.sample(self._baseline_state)
    return self._baseline_state
  @property
  def rollout_func(self):
    """The inaction baseline does not use rollouts."""
    return None
class StepwiseBaseline(Baseline):
  """Stepwise baseline: the state one no-op after the previous state."""
  def __init__(self, start_timestep, exact=False, env=None,
               timestep_to_state=None, use_rollouts=True):
    """Create a stepwise baseline.

    Args:
      start_timestep: starting state timestep
      exact: whether to use an exact or approximate baseline
      env: a copy of the environment (used to simulate exact baselines)
      timestep_to_state: a function that turns timesteps into states
      use_rollouts: whether to use inaction rollouts
    """
    super(StepwiseBaseline, self).__init__(
        start_timestep, exact, env, timestep_to_state)
    self._rollouts = use_rollouts
  def calculate(self, prev_state, action, current_state):
    """Update and return the baseline state.

    Args:
      prev_state: the state in which `action` was taken
      action: the action just taken
      current_state: the state resulting from taking `action`
    Returns:
      the baseline state, for computing the penalty for this transition
    """
    if self._exact:
      if prev_state in self._inaction_next:
        # Reuse previously observed no-op outcomes instead of re-simulating.
        self._baseline_state = self.sample(prev_state)
      else:
        # Simulate a no-op from prev_state in a deep copy of the environment
        # so the real env copy is not disturbed; cache the outcome.
        inaction_env = copy.deepcopy(self._env)
        timestep_inaction = inaction_env.step(Actions.NOOP)
        self._baseline_state = self._timestep_to_state(timestep_inaction)
        self._inaction_next[prev_state][self._baseline_state] += 1
      # Keep the internal environment copy in sync with the actual episode.
      timestep_action = self._env.step(action)
      assert current_state == self._timestep_to_state(timestep_action)
    else:
      if action == Actions.NOOP:
        self._inaction_next[prev_state][current_state] += 1
      if prev_state in self._inaction_next:
        self._baseline_state = self.sample(prev_state)
      else:
        # No observed no-op data from prev_state: fall back to prev_state.
        self._baseline_state = prev_state
    return self._baseline_state
  def _inaction_rollout(self, state):
    """Compute an (approximate) inaction rollout from a state."""
    # Follow sampled no-op transitions until a state repeats.
    chain = []
    st = state
    while st not in chain:
      chain.append(st)
      if st in self._inaction_next:
        st = self.sample(st)
    return chain
  def parallel_inaction_rollouts(self, s1, s2):
    """Compute (approximate) parallel inaction rollouts from two states."""
    # Roll both states forward in lockstep until the state pair repeats.
    chain = []
    states = (s1, s2)
    while states not in chain:
      chain.append(states)
      s1, s2 = states
      states = (self.sample(s1) if s1 in self._inaction_next else s1,
                self.sample(s2) if s2 in self._inaction_next else s2)
    return chain
  @property
  def rollout_func(self):
    # Rollouts are optional; disabled via use_rollouts=False.
    return self._inaction_rollout if self._rollouts else None
@six.add_metaclass(abc.ABCMeta)
class DeviationMeasure(object):
  """Base class for deviation measures.

  A deviation measure quantifies how different the current state is from a
  baseline state, and is updated online from observed transitions.
  """
  @abc.abstractmethod
  def calculate(self):
    """Calculate the deviation between two states."""
  @abc.abstractmethod
  def update(self):
    """Update any models after seeing a state transition."""
class ReachabilityMixin(object):
  """Class for computing reachability deviation measure.

  Computes the relative/un- reachability given a dictionary of
  reachability scores for pairs of states.
  Expects _reachability, _discount, and _dev_fun attributes to exist in the
  inheriting class.
  """
  def calculate(self, current_state, baseline_state, rollout_func=None):
    """Calculate relative/un- reachability between particular states."""
    # relative reachability case
    if self._dev_fun:
      if rollout_func:
        # Use discount-weighted rollout values instead of point estimates.
        curr_values = self._rollout_values(rollout_func(current_state))
        base_values = self._rollout_values(rollout_func(baseline_state))
      else:
        curr_values = self._reachability[current_state]
        base_values = self._reachability[baseline_state]
      # Average the deviation over every state either side has a score for.
      all_s = set(list(curr_values.keys()) + list(base_values.keys()))
      total = 0
      for s in all_s:
        diff = base_values[s] - curr_values[s]
        total += self._dev_fun(diff)
      d = total / len(all_s)
    # unreachability case
    else:
      assert rollout_func is None
      d = 1 - self._reachability[current_state][baseline_state]
    return d
  def _rollout_values(self, chain):
    """Compute stepwise rollout values for the relative reachability penalty.

    Args:
      chain: chain of states in an inaction rollout starting with the state for
        which to compute the rollout values

    Returns:
      a dictionary of the form:
      { s : (1-discount) sum_{k=0}^inf discount^k R_s(S_k) }
      where S_k is the k-th state in the inaction rollout from 'state',
      s is a state, and
      R_s(S_k) is the reachability of s from S_k.
    """
    rollout_values = collections.defaultdict(lambda: 0)
    coeff = 1
    for st in chain:
      for s, rch in six.iteritems(self._reachability[st]):
        rollout_values[s] += coeff * rch * (1.0 - self._discount)
      coeff *= self._discount
    # Assign the remaining discounted mass (coeff) to the final chain state.
    last_state = chain[-1]
    for s, rch in six.iteritems(self._reachability[last_state]):
      rollout_values[s] += coeff * rch
    return rollout_values
class Reachability(ReachabilityMixin, DeviationMeasure):
  """Approximate (relative) (un)reachability deviation measure.

  Unreachability (the default, when `dev_fun=None`) uses the length (say, n)
  of the shortest path (sequence of actions) from the current state to the
  baseline state. The reachability score is value_discount ** n.
  Unreachability is then 1.0 - the reachability score.

  Relative reachability (when `dev_fun` is not `None`) considers instead the
  difference in reachability of all other states from the current state
  versus from the baseline state.

  We approximate reachability by only considering state transitions
  that have been observed. Add transitions using the `update` function.
  """
  def __init__(self, value_discount=1.0, dev_fun=None, discount=None):
    """Create the measure.

    Args:
      value_discount: per-step discount applied along observed paths.
      dev_fun: deviation function for relative reachability; None selects
        plain unreachability.
      discount: rollout discount (see ReachabilityMixin._rollout_values).
    """
    self._value_discount = value_discount
    self._dev_fun = dev_fun
    self._discount = discount
    # _reachability[s1][s2] approximates value_discount ** (shortest
    # observed path length from s1 to s2); defaults to 0 (unknown).
    self._reachability = collections.defaultdict(
        lambda: collections.defaultdict(lambda: 0))
  def update(self, prev_state, current_state, action=None):
    """Record transition prev_state -> current_state and propagate scores."""
    del action  # Unused.
    # Every state trivially reaches itself with score 1.
    self._reachability[prev_state][prev_state] = 1
    self._reachability[current_state][current_state] = 1
    if self._reachability[prev_state][current_state] < self._value_discount:
      # Transitive update: any s1 that reaches prev_state can now reach
      # anything current_state reaches, paying one extra value_discount
      # factor for the new edge.
      for s1 in self._reachability.keys():
        if self._reachability[s1][prev_state] > 0:
          for s2 in self._reachability[current_state].keys():
            if self._reachability[current_state][s2] > 0:
              self._reachability[s1][s2] = max(
                  self._reachability[s1][s2],
                  self._reachability[s1][prev_state] * self._value_discount *
                  self._reachability[current_state][s2])
  @property
  def discount(self):
    # Rollout discount, consumed by the mixin and SideEffectPenalty.
    return self._discount
class UVFAReachability(ReachabilityMixin, DeviationMeasure):
  """Approximate relative reachability deviation measure using UVFA.

  We approximate reachability using a neural network only trained on state
  transitions that have been observed. For each (s0, action, s1) transition,
  we update the reachability estimate for (s0, action, s) towards the
  reachability estimate between s1 and s, for each s in a random sample of
  size update_sample_size. In particular, the loss for the neural network
  reachability estimate (NN) is
    sum_s(max_a(NN(s1, a, s)) * value_discount - NN(s0, action, s)),
  where the sum is over all sampled s, the max is taken over all actions a.
  At evaluation time, the reachability difference is calculated with respect
  to a randomly sampled set of states of size calc_sample_size.
  """
  def __init__(
      self,
      value_discount=0.95,
      dev_fun=None,
      discount=0.95,
      state_size=36,  # Sokoban default
      num_actions=5,
      update_sample_size=10,
      calc_sample_size=10,
      hidden_size=50,
      representation_size=5,
      num_layers=1,
      base_loss_coeff=0.1,
      num_stored=100):
    # Create networks to generate state representations. To get a reachability
    # estimate, take the dot product of the origin network output and the goal
    # network output, then pass it through a sigmoid function to constrain it to
    # between 0 and 1.
    output_sizes = [hidden_size] * num_layers + [representation_size]
    self._origin_network = snt.nets.MLP(
        output_sizes=output_sizes,
        activation=tf.nn.relu,
        activate_final=False,
        name='origin_network')
    self._goal_network = snt.nets.MLP(
        output_sizes=output_sizes,
        activation=tf.nn.relu,
        activate_final=False,
        name='goal_network')
    self._value_discount = value_discount
    self._dev_fun = dev_fun
    self._discount = discount
    self._state_size = state_size
    self._num_actions = num_actions
    self._update_sample_size = update_sample_size
    self._calc_sample_size = calc_sample_size
    self._num_stored = num_stored
    # Pool of observed states (stored as flat tuples) from which training
    # and evaluation samples are drawn.
    self._stored_states = set()
    # Placeholders for one transition (s0, a, s1) plus sampled state batches.
    self._state_0_placeholder = tf.placeholder(tf.float32, shape=(state_size))
    self._state_1_placeholder = tf.placeholder(tf.float32, shape=(state_size))
    self._action_placeholder = tf.placeholder(tf.float32, shape=(num_actions))
    self._update_sample_placeholder = tf.placeholder(
        tf.float32, shape=(update_sample_size, state_size))
    self._calc_sample_placeholder = tf.placeholder(
        tf.float32, shape=(calc_sample_size, state_size))
    # Trained to estimate reachability = value_discount ^ distance.
    self._sample_loss = self._get_state_action_loss(
        self._state_0_placeholder,
        self._state_1_placeholder,
        self._action_placeholder,
        self._update_sample_placeholder)
    # Add additional loss to force observed transitions towards value_discount.
    self._base_reachability = self._get_state_sample_reachability(
        self._state_0_placeholder,
        tf.expand_dims(self._state_1_placeholder, axis=0),
        action=self._action_placeholder)
    self._base_case_loss = tf.keras.losses.MSE(self._value_discount,
                                               self._base_reachability)
    self._opt = tf.train.AdamOptimizer().minimize(self._sample_loss +
                                                  base_loss_coeff *
                                                  self._base_case_loss)
    # Graph for evaluation: reachability of the sample from current state
    # (fed via state_0) and from baseline state (fed via state_1).
    current_state_reachability = self._get_state_sample_reachability(
        self._state_0_placeholder, self._calc_sample_placeholder)
    baseline_state_reachability = self._get_state_sample_reachability(
        self._state_1_placeholder, self._calc_sample_placeholder)
    self._reachability_calculation = [
        tf.reshape(baseline_state_reachability, [-1]),
        tf.reshape(current_state_reachability, [-1])
    ]
    init = tf.global_variables_initializer()
    self._sess = tf.Session()
    self._sess.run(init)
  def calculate(self, current_state, baseline_state, rollout_func=None):
    """Compute the reachability penalty between two states."""
    current_state = np.array(current_state).flatten()
    baseline_state = np.array(baseline_state).flatten()
    sample = self._sample_n_states(self._calc_sample_size)
    # Run if there are enough states to draw a correctly-sized sample from.
    if sample:
      base, curr = self._sess.run(
          self._reachability_calculation,
          feed_dict={
              self._state_0_placeholder: current_state,
              self._state_1_placeholder: baseline_state,
              self._calc_sample_placeholder: sample
          })
      return sum(map(self._dev_fun, base - curr)) / self._calc_sample_size
    else:
      return 0
  def _sample_n_states(self, n):
    """Return n distinct stored states, or None if fewer than n are stored."""
    # NOTE(review): random.sample on a set is deprecated since Python 3.9 and
    # removed in 3.11; this would need list(self._stored_states) there.
    try:
      return random.sample(self._stored_states, n)
    except ValueError:
      return None
  def update(self, prev_state, current_state, action):
    """Train the reachability network on one observed transition."""
    prev_state = np.array(prev_state).flatten()
    current_state = np.array(current_state).flatten()
    one_hot_action = np.zeros(self._num_actions)
    one_hot_action[action] = 1
    sample = self._sample_n_states(self._update_sample_size)
    # Maintain a bounded pool of stored states: fill up to num_stored, then
    # occasionally (1%) evict an arbitrary state for a new one.
    if self._num_stored is None or len(self._stored_states) < self._num_stored:
      self._stored_states.add(tuple(prev_state))
      self._stored_states.add(tuple(current_state))
    elif (np.random.random() < 0.01 and
          tuple(current_state) not in self._stored_states):
      self._stored_states.pop()
      self._stored_states.add(tuple(current_state))
    # If there aren't enough states to get a full sample, do nothing.
    if sample:
      self._sess.run([self._opt], feed_dict={
          self._state_0_placeholder: prev_state,
          self._state_1_placeholder: current_state,
          self._action_placeholder: one_hot_action,
          self._update_sample_placeholder: sample
      })
  def _get_state_action_loss(self, prev_state, current_state, action, sample):
    """Get the loss from differences in state reachability estimates."""
    # Calculate NN(s0, action, s) for all s in sample.
    prev_state_reachability = self._get_state_sample_reachability(
        prev_state, sample, action=action)
    # Calculate max_a(NN(s1, a, s)) for all s in sample and all actions a.
    # stop_gradient: the target side of this TD-style update is held fixed.
    current_state_reachability = tf.stop_gradient(
        self._get_state_sample_reachability(current_state, sample))
    # Combine to return loss.
    return tf.keras.losses.MSE(
        current_state_reachability * self._value_discount,
        prev_state_reachability)
  def _get_state_sample_reachability(self, state, sample, action=None):
    """Calculate reachability from a state to each item in a sample."""
    if action is None:
      # No action specified: evaluate all actions and take the max below.
      state_options = self._tile_with_all_actions(state)
    else:
      state_options = tf.expand_dims(tf.concat([state, action], axis=0), axis=0)
    goal_representations = self._goal_network(sample)
    # Reachability of sampled states by taking actions
    reach_result = tf.sigmoid(
        tf.reduce_max(
            tf.matmul(
                goal_representations,
                self._origin_network(state_options),
                transpose_b=True),
            axis=1))
    if action is None:
      # Return 1 if sampled state is already reached (equal to state)
      reach_no_action = tf.cast(tf.reduce_all(tf.equal(sample, state), axis=1),
                                dtype=tf.float32)
      reach_result = tf.maximum(reach_result, reach_no_action)
    return reach_result
  def _tile_with_all_actions(self, state):
    """Returns tensor with all state/action combinations."""
    state_tiled = tf.tile(tf.expand_dims(state, axis=0), [self._num_actions, 1])
    all_actions_tiled = tf.one_hot(
        tf.range(self._num_actions), depth=self._num_actions)
    return tf.concat([state_tiled, all_actions_tiled], axis=1)
class AttainableUtilityMixin(object):
  """Class for computing attainable utility measure.

  Computes attainable utility (averaged over a set of utility functions)
  given value functions for each utility function.
  Expects _u_values, _discount, _value_discount, and _dev_fun attributes to
  exist in the inheriting class.
  """
  def calculate(self, current_state, baseline_state, rollout_func=None):
    """Return the mean deviation in attainable utility between two states."""
    if rollout_func:
      current_values = self._rollout_values(rollout_func(current_state))
      baseline_values = self._rollout_values(rollout_func(baseline_state))
    else:
      current_values = [u_val[current_state] for u_val in self._u_values]
      baseline_values = [u_val[baseline_state] for u_val in self._u_values]
    # (1 - value_discount) rescales discounted values to a per-step scale.
    penalties = [self._dev_fun(base_val - cur_val) * (1. - self._value_discount)
                 for base_val, cur_val in zip(baseline_values, current_values)]
    return sum(penalties) / len(penalties)
  def _rollout_values(self, chain):
    """Compute stepwise rollout values for the attainable utility penalty.

    Args:
      chain: chain of states in an inaction rollout starting with the state
        for which to compute the rollout values

    Returns:
      a list containing
        (1-discount) sum_{k=0}^inf discount^k V_u(S_k)
      for each utility function u,
      where S_k is the k-th state in the inaction rollout from 'state'.
    """
    rollout_values = [0 for _ in self._u_values]
    coeff = 1
    for st in chain:
      rollout_values = [rv + coeff * u_val[st] * (1.0 - self._discount)
                        for rv, u_val in zip(rollout_values, self._u_values)]
      coeff *= self._discount
    # Assign the remaining discounted mass (coeff) to the final chain state.
    last_state = chain[-1]
    rollout_values = [rv + coeff * u_val[last_state]
                      for rv, u_val in zip(rollout_values, self._u_values)]
    return rollout_values
  def _set_util_funs(self, util_funs):
    """Set up this instance's utility functions.

    Args:
      util_funs: either a number of functions to generate or a list of
        pre-defined utility functions, represented as dictionaries
        over states: util_funs[i][s] = u_i(s), the utility of s
        according to u_i.
    """
    if isinstance(util_funs, int):
      # Generate `util_funs` utility functions, populated lazily by _utility.
      self._util_funs = [
          collections.defaultdict(float) for _ in range(util_funs)
      ]
    else:
      self._util_funs = util_funs
  def _utility(self, u, state):
    """Apply a random utility function, generating its value if necessary."""
    if state not in u:
      u[state] = np.random.random()
    return u[state]
class AttainableUtility(AttainableUtilityMixin, DeviationMeasure):
  """Approximate attainable utility deviation measure."""
  def __init__(self, value_discount=0.99, dev_fun=np.abs, util_funs=10,
               discount=None):
    """Create the measure.

    Args:
      value_discount: discount used in the value backups; must be < 1.0.
      dev_fun: function applied to each utility difference (e.g. np.abs).
      util_funs: number of random utility functions to generate, or a list
        of pre-defined utility functions (see _set_util_funs).
      discount: rollout discount (see AttainableUtilityMixin).
    """
    assert value_discount < 1.0  # AU does not converge otherwise
    self._value_discount = value_discount
    self._dev_fun = dev_fun
    self._discount = discount
    self._set_util_funs(util_funs)
    # u_values[i][s] = V_{u_i}(s), the (approximate) value of s according to u_i
    self._u_values = [
        collections.defaultdict(float) for _ in range(len(self._util_funs))
    ]
    # predecessors[s] = set of states known to lead, by some action, to s
    self._predecessors = collections.defaultdict(set)
  def update(self, prev_state, current_state, action=None):
    """Update predecessors and attainable utility estimates.

    Performs asynchronous value backups from `current_state` backwards
    through known predecessors until values stop improving.

    Args:
      prev_state: state the observed transition started from.
      current_state: state the observed transition arrived at.
      action: unused; accepted for interface compatibility.
    """
    del action  # Unused.
    self._predecessors[current_state].add(prev_state)
    seen = set()
    # Use a deque: popleft() is O(1), whereas list.pop(0) is O(n) per pop.
    queue = collections.deque([current_state])
    while queue:
      s_to = queue.popleft()
      seen.add(s_to)
      for u, u_val in zip(self._util_funs, self._u_values):
        for s_from in self._predecessors[s_to]:
          # Bellman backup: reaching s_to may improve the value of s_from.
          v = self._utility(u, s_from) + self._value_discount * u_val[s_to]
          if u_val[s_from] < v:
            u_val[s_from] = v
            if s_from not in seen:
              queue.append(s_from)
class NoDeviation(DeviationMeasure):
  """Dummy deviation measure corresponding to no impact penalty."""
  def calculate(self, *unused_args):
    """Always report zero deviation."""
    return 0
  def update(self, *unused_args):
    """Nothing to learn for the null measure."""
class SideEffectPenalty(object):
  """Impact penalty: combines a baseline with a deviation measure."""
  def __init__(
      self, baseline, dev_measure, beta=1.0, nonterminal_weight=0.01,
      use_inseparable_rollout=False):
    """Make an object to calculate the impact penalty.

    Args:
      baseline: object for calculating the baseline state
      dev_measure: object for calculating the deviation between states
      beta: weight (scaling factor) for the impact penalty
      nonterminal_weight: penalty weight on nonterminal states.
      use_inseparable_rollout:
        whether to compute the penalty as the average of deviations over
        parallel inaction rollouts from the current and baseline states (True)
        otherwise just between the current state and baseline state (or by
        whatever rollout value is provided in the baseline) (False)
    """
    self._baseline = baseline
    self._dev_measure = dev_measure
    self._beta = beta
    self._nonterminal_weight = nonterminal_weight
    self._use_inseparable_rollout = use_inseparable_rollout
  def calculate(self, prev_state, action, current_state):
    """Calculate the penalty associated with a transition, and update models."""
    def compute_penalty(current_state, baseline_state):
      """Compute beta-scaled deviation of current_state from baseline_state."""
      if self._use_inseparable_rollout:
        penalty = self._rollout_value(current_state, baseline_state,
                                      self._dev_measure.discount,
                                      self._dev_measure.calculate)
      else:
        penalty = self._dev_measure.calculate(current_state, baseline_state,
                                              self._baseline.rollout_func)
      return self._beta * penalty
    if current_state:  # not a terminal state
      # Update the deviation model and baseline before scoring this step.
      self._dev_measure.update(prev_state, current_state, action)
      baseline_state =\
          self._baseline.calculate(prev_state, action, current_state)
      penalty = compute_penalty(current_state, baseline_state)
      # Nonterminal steps are down-weighted relative to the terminal penalty.
      return self._nonterminal_weight * penalty
    else:  # terminal state
      # At termination, compare the last real state to the current baseline.
      penalty = compute_penalty(prev_state, self._baseline.baseline_state)
      return penalty
  def reset(self):
    """Signal start of new episode."""
    self._baseline.reset()
  def _rollout_value(self, cur_state, base_state, discount, func):
    """Compute stepwise rollout value for unreachability."""
    # Returns (1-discount) sum_{k=0}^inf discount^k R(S_{t,t+k}, S'_{t,t+k}),
    # where S_{t,t+k} is k-th state in the inaction rollout from current state,
    # S'_{t,t+k} is k-th state in the inaction rollout from baseline state,
    # and R is the reachability function.
    chain = self._baseline.parallel_inaction_rollouts(cur_state, base_state)
    coeff = 1
    rollout_value = 0
    for states in chain:
      rollout_value += (coeff * func(states[0], states[1]) * (1.0 - discount))
      coeff *= discount
    # Assign the remaining discounted mass (coeff) to the final state pair.
    last_states = chain[-1]
    rollout_value += coeff * func(last_states[0], last_states[1])
    return rollout_value
  @property
  def beta(self):
    # Scaling factor applied to every computed penalty.
    return self._beta
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.banded_triangular_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class BandedTriangularSolveOpTest(test.TestCase):
def _verifySolveAllWays(self, x, y, dtypes, batch_dims=None):
for lower in (False,):
for adjoint in (False, True):
for use_placeholder in True, False:
self._verifySolve(
x,
y,
lower=lower,
adjoint=adjoint,
batch_dims=batch_dims,
use_placeholder=use_placeholder,
dtypes=dtypes)
def _verifySolveAllWaysReal(self, x, y, batch_dims=None):
self._verifySolveAllWays(x, y, (np.float32, np.float64), batch_dims)
def _verifySolveAllWaysComplex(self, x, y, batch_dims=None):
self._verifySolveAllWays(x, y, (np.complex64, np.complex128), batch_dims)
def _verifySolve(self,
x,
y,
lower=True,
adjoint=False,
batch_dims=None,
use_placeholder=False,
dtypes=(np.float32, np.float64)):
for np_type in dtypes:
a = x.astype(np_type)
b = y.astype(np_type)
# Now we need to convert a to a dense triangular matrix.
def make_diags(diags, lower=True):
n = len(diags[0])
a = np.zeros(n * n, dtype=diags.dtype)
if lower:
for i, diag in enumerate(diags):
a[n * i:n * n:n + 1] = diag[i:]
else:
diags_flip = np.flip(diags, 0)
for i, diag in enumerate(diags_flip):
a[i:(n - i) * n:n + 1] = diag[:(n - i)]
return a.reshape(n, n)
# For numpy.solve we have to explicitly zero out the strictly
# upper or lower triangle.
if a.size > 0:
a_np = make_diags(a, lower=lower)
else:
a_np = a
if adjoint:
a_np = np.conj(np.transpose(a_np))
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
with self.cached_session():
a_tf = a
b_tf = b
if use_placeholder:
a_tf = array_ops.placeholder_with_default(a_tf, shape=None)
b_tf = array_ops.placeholder_with_default(b_tf, shape=None)
tf_ans = linalg_ops.banded_triangular_solve(
a_tf, b_tf, lower=lower, adjoint=adjoint)
tf_val = self.evaluate(tf_ans)
np_ans = np.linalg.solve(a_np, b)
self.assertEqual(np_ans.shape, tf_val.shape)
self.assertAllClose(np_ans, tf_val)
@test_util.run_deprecated_v1
def testSolve(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1]])
rhs0 = np.array([[1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
# 2x2 matrix with 2 bands, single right-hand side.
# Corresponds to the lower triangular
# [[1., 0.], [3., 4.]]
# and upper triangular
# [[2., 1.], [0., 3.]]
matrix = np.array([[1., 4.], [2., 3.]])
rhs0 = np.array([[1.], [1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
# 2x2 matrix with 2 bands, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolveAllWaysReal(matrix, rhs1)
# 4 x 4 matrix with 2 bands, 3 right hand sides.
# Corresponds to the lower triangular
# [[1., 0., 0., 0.],
# [-1., 2., 0., 0.],
# [0., -2., 3., 0.],
# [0., 0., -3., 4.]]
# and upper triangular
# [[1., 1., 0., 0.],
# [0., -1., 2., 0.],
# [0., 0., -2., 3.],
# [0., 0., 0., -3.]]
matrix = np.array([[1., 2., 3., 4.], [1., -1., -2., -3.]])
rhs0 = np.array([[1., 0., 1.], [0., 1., 1.], [-1., 2., 1.], [0., -1., -1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
def testSolveBandSizeSmaller(self):
rhs0 = np.random.randn(6, 4)
# 6 x 6 matrix with 2 bands. Ensure all non-zero entries.
matrix = 2. * np.random.uniform(size=[3, 6]) + 1.
self._verifySolveAllWaysReal(matrix, rhs0)
# 6 x 6 matrix with 3 bands. Ensure all non-zero entries.
matrix = 2. * np.random.uniform(size=[3, 6]) + 1.
self._verifySolveAllWaysReal(matrix, rhs0)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="ROCm does not support BLAS operations for complex types")
@test_util.run_deprecated_v1
def testSolveComplex(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1 + 1j * 0.1]])
rhs0 = np.array([[1. + 1j]])
self._verifySolveAllWaysComplex(matrix, rhs0)
# 2x2 matrix with 2 bands, single right-hand side.
# Corresponds to
# [[1. + 1j, 0.], [4 + 1j, 2 + 1j]]
matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64)
matrix += 1j * matrix
rhs0 = np.array([[1.], [1.]]).astype(np.complex64)
rhs0 += 1j * rhs0
self._verifySolveAllWaysComplex(matrix, rhs0)
# 2x2 matrix with 2 bands, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64)
rhs1 += 1j * rhs1
self._verifySolveAllWaysComplex(matrix, rhs1)
@test_util.run_deprecated_v1
def testSolveBatch(self):
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[3, 2])
matrix = np.array([[1., 2., 3., 4.], [-1., -2., -3., -4.],
[-1., 1., 2., 3.]])
rhs = np.array([[-1., 2.], [1., 1.], [0., 1.], [2., 3.]])
# Batch of 2x3x4x4 matrices with 3 bands, 2x3x4x2 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x4x4 matrices with 3 bands, 3x2x4x2 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[3, 2])
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="ROCm does not support BLAS operations for complex types")
@test_util.run_deprecated_v1
def testSolveBatchComplex(self):
matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64)
matrix += 1j * matrix
rhs = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64)
rhs += 1j * rhs
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[3, 2])
@test_util.run_deprecated_v1
def testWrongDimensions(self):
    """Shape mismatches must raise ValueError, batched or not."""
    # Case 1: matrix has more rows than the right-hand side.
    # Case 2: number of bands exceeds the matrix dimension.
    bad_cases = [
        (np.array([[1., 1.], [1., 1.]]), np.array([[1., 0.]])),
        (np.ones((6, 4)), np.ones((4, 2))),
    ]
    for bad_matrix, bad_rhs in bad_cases:
        with self.cached_session():
            with self.assertRaises(ValueError):
                self._verifySolve(bad_matrix, bad_rhs)
            with self.assertRaises(ValueError):
                self._verifySolve(bad_matrix, bad_rhs, batch_dims=[2, 3])
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA cannot throw assertion errors during a kernel.")
def testNotInvertible(self):
    """A singular input must be rejected with the invertibility error."""
    # Singular because the equivalent dense matrix has a zero diagonal entry.
    # FIXME(rmlarsen): The GPU kernel does not check for singularity.
    singular_matrix = np.array([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
    error_regex = "Input matrix is not invertible."
    with self.cached_session():
        with self.assertRaisesOpError(error_regex):
            self._verifySolve(singular_matrix, singular_matrix)
        with self.assertRaisesOpError(error_regex):
            self._verifySolve(singular_matrix, singular_matrix,
                              batch_dims=[2, 3])
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    test.main()
|
|
import libvirt
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models.signals import post_delete
from django.template.loader import render_to_string

from insekta.common.virt import connections
from insekta.network.models import Address
# Lifecycle states of a VirtualMachine, as (value, label) choices for the
# `state` model field below.
RUN_STATE_CHOICES = (
    ('disabled', 'VM is not created yet'),
    ('preparing', 'Preparing'),
    ('started', 'VM started'),
    ('suspended', 'VM suspended'),
    ('stopped', 'VM stopped'),
    ('error', 'VM has weird error')
)
class VirtualMachineError(Exception):
    """Raised when a libvirt operation on a virtual machine fails."""
class BaseImage(models.Model):
    """A disk image that scenario virtual machines are cloned from."""
    # Human-readable name; also used as the libvirt volume name.
    name = models.CharField(max_length=80)
    # Content hash of the image (40 chars, presumably SHA-1 -- TODO confirm).
    hash = models.CharField(max_length=40)

    def get_pool(self, node):
        """Return the pool where volume of the scenario image is stored.

        :param node: A libvirt node, e.g. 'mynode'
        :rtype: :class:`libvirt.virStoragePool`
        """
        pool_name = settings.LIBVIRT_STORAGE_POOLS[node]
        return connections[node].storagePoolLookupByName(pool_name)

    def get_volume(self, node):
        """Return the volume where the image of this scenario is stored.

        :param node: A libvirt node, e.g. 'mynode'.
        :rtype: :class:`libvirt.virStorageVol`

        >>> scenario = Scenario.objects.get(pk=1)
        >>> vol = scenario.get_volume('mynode')
        >>> print(vol.path()) # Prints /dev/insekta/simple-buffer-overflow
        """
        pool = self.get_pool(node)
        return pool.storageVolLookupByName(self.name)

    def __unicode__(self):
        try:
            scenario = self.scenario
            return u'Image for "{0}"'.format(scenario.title)
        # BUG FIX: was a bare ``except:`` which swallowed *every* exception
        # (including KeyboardInterrupt). The reverse one-to-one accessor
        # raises a DoesNotExist, a subclass of ObjectDoesNotExist.
        except ObjectDoesNotExist:
            if not self.virtualmachine_set.count():
                return u'No longer used'
            else:
                return u'Deprecated image, but still in use'
class VirtualMachine(models.Model):
    """A libvirt domain backing one scenario run."""
    # Memory size; multiplied by 1024 when rendered into the domain XML,
    # so presumably MiB here and KiB in the template -- TODO confirm.
    memory = models.IntegerField()
    base_image = models.ForeignKey(BaseImage)
    # Name of the libvirt node hosting this machine.
    node = models.CharField(max_length=80)
    address = models.OneToOneField(Address)
    state = models.CharField(max_length=10, default='disabled',
                             choices=RUN_STATE_CHOICES)

    def __unicode__(self):
        scenario_run = self.scenariorun
        return u'VM for scenario "{0}" played by {1}'.format(
                scenario_run.scenario.title,
                scenario_run.user.username)

    def start(self):
        """Start the virtual machine."""
        self._do_vm_action('create', 'started')

    def stop(self):
        """Stop the virtual machine."""
        self._do_vm_action('destroy', 'stopped')

    def suspend(self):
        """Suspend the virtual machine."""
        self._do_vm_action('suspend', 'suspended')

    def resume(self):
        """Resume the virtual machine."""
        self._do_vm_action('resume', 'started')

    def destroy(self):
        """Destroy this scenario run including virtual machine."""
        try:
            self.stop()
        except VirtualMachineError:
            # Already stopped; go on and tear the domain down anyway.
            pass
        self.destroy_domain()
        self.delete()

    def refresh_state(self):
        """Fetch the state from libvirt and assign it to ``self.state``.

        NOTE(review): this only assigns the attribute; callers must
        ``save()`` themselves (``_do_vm_action`` does so in ``finally``).
        """
        try:
            domain = self.get_domain()
        except libvirt.libvirtError:
            # Domain lookup failed -> never created or already undefined.
            self.state = 'disabled'
        else:
            try:
                state, _reason = domain.state(flags=0)
            except libvirt.libvirtError:
                new_state = 'error'
            else:
                # Map libvirt's numeric domain states onto our choices.
                new_state = {
                    libvirt.VIR_DOMAIN_NOSTATE: 'error',
                    libvirt.VIR_DOMAIN_RUNNING: 'started',
                    libvirt.VIR_DOMAIN_BLOCKED: 'error',
                    libvirt.VIR_DOMAIN_PAUSED: 'suspended',
                    libvirt.VIR_DOMAIN_SHUTDOWN: 'error',
                    libvirt.VIR_DOMAIN_SHUTOFF: 'stopped'
                }.get(state, 'error')
            self.state = new_state

    def create_domain(self):
        """Define a libvirt domain for this scenario run.

        This includes the following:

        * Cloning the volume of the scenario
        * Creating (defining) a new domain using the cloned volume as disk

        The domain is *defined but not started* (the old docstring claimed
        it was started); state becomes 'stopped' and is saved.

        :rtype: :class:`libvirt.virDomain`.
        """
        volume = self._create_volume()
        xml_desc = self._build_domain_xml(volume)
        domain = connections[self.node].defineXML(xml_desc)
        self.state = 'stopped'
        self.save()
        return domain

    def destroy_domain(self):
        """Destroy the domain of this scenario run.

        This includes the following:

        * Killing the domain if it is running
        * Undefining the domain
        * Deleting the volume of the domain
        """
        try:
            self._do_vm_action('destroy', 'stopped')
        except VirtualMachineError:
            # It is already stopped, just ignore exception
            pass
        self._do_vm_action('undefine', 'disabled')
        self.get_volume().delete(flags=0)

    def get_domain(self):
        """Return the domain of this scenario run.

        :rtype: :class:`libvirt.virDomain`.
        """
        conn = connections[self.node]
        return conn.lookupByName('scenarioRun{0}'.format(self.pk))

    def get_volume(self):
        """Return the volume where this scenario run stores its data.

        :rtype: :class:`libvirt.virStorageVol`.
        """
        pool = self.base_image.get_pool(self.node)
        return pool.storageVolLookupByName('scenarioRun{0}'.format(self.pk))

    def _create_volume(self):
        """Create a new volume backed by the base image.

        Any stale volume left over from a previous run is deleted first.

        :rtype: :class:`libvirt.virStorageVol`
        """
        try:
            self.get_volume().delete(flags=0)
        except libvirt.libvirtError:
            # No leftover volume to clean up.
            pass
        pool = self.base_image.get_pool(self.node)
        base_volume = self.base_image.get_volume(self.node)
        # info() index 1 is the capacity -- TODO confirm against libvirt docs.
        capacity = base_volume.info()[1]
        xmldesc = render_to_string('vm/volume.xml', {
            'id': self.pk,
            'capacity': capacity,
            'backing_image': base_volume.path()
        })
        return pool.createXML(xmldesc, flags=0)

    def _build_domain_xml(self, volume):
        """Render the libvirt domain XML for this run from its template."""
        scenario_run = self.scenariorun
        return render_to_string('vm/domain.xml', {
            'id': self.pk,
            'user': scenario_run.user,
            'scenario': scenario_run.scenario,
            'memory': self.memory * 1024,
            'volume': volume.path(),
            'mac': self.address.mac,
            'bridge': settings.VM_BRIDGE
        })

    def _do_vm_action(self, action, new_state):
        """Invoke a libvirt domain method and record the resulting state.

        After executing the action, the scenario run is in the state
        `new_state`.

        If it fails, it will reread the state from libvirt, since this is
        mostly the cause for failing, and raise VirtualMachineError.
        The model is saved in every case.

        :param action: One of 'create', 'destroy', 'suspend', 'resume' and
                       'undefine' (the old docstring said 'start', but
                       callers pass 'create')
        """
        try:
            domain = self.get_domain()
            getattr(domain, action)()
            self.state = new_state
        # BUG FIX: ``except libvirt.libvirtError, e:`` is Python-2-only
        # syntax; ``as e`` works on both Python 2.6+ and 3.
        except libvirt.libvirtError as e:
            self.refresh_state()
            raise VirtualMachineError(unicode(e))
        finally:
            self.save()
def _delete_image(sender, instance, **kwargs):
    """post_delete handler: remove the image's volume on every node.

    Deletion errors (e.g. the volume never existed on a node) are
    deliberately ignored -- this is best-effort cleanup.
    """
    # Iterating the dict directly yields its keys; ``.keys()`` was redundant.
    for node in settings.LIBVIRT_NODES:
        try:
            instance.get_volume(node).delete(flags=0)
        except libvirt.libvirtError:
            pass


# Run the cleanup whenever a BaseImage row is deleted.
post_delete.connect(_delete_image, BaseImage)
|
|
#!/usr/bin/python3
"""Statistics module
"""
from db_queries import query_stats_module, query_get_nature
from experience import Experience
class Statistics(Experience):
    """The Statistics Class.

    Computes a pokemon's six battle stats from its base stats, IVs, EVs,
    nature and level.

    Parameters
    ----------
    dex_no : integer, required
        The national dex number for the pokemon you are calculating stats for.
    form : string, optional (default=None)
        The form you want to calculate for, default is the most base form.
        In unique cases like with Meowstic and Basculin, the default form is
        just whichever one I decided to list first.
    nature : string, optional (default='serious')
        The nature you want to have an effect on the stats. Default is serious
        because that one doesn't do anything.
    evs : list, optional (default=(0, 0, 0, 0, 0, 0))
        The list of EVs in the standard order.
    ivs : list, optional (default=(0, 0, 0, 0, 0, 0))
        The list of IVs in the standard order.
    current_exp : integer, optional (default=None)
        The current exp, is used to calculate level if level is not given
    level : integer, optional (default=100)
        The level for the pokemon. If both exp and level are entered, exp
        is prioritized.  (Docstring previously claimed default=None; the
        signature default is 100.)
    """

    def __init__(self, dex_no, form=None, nature='serious', evs=(0, 0, 0, 0, 0, 0),
                 ivs=(0, 0, 0, 0, 0, 0), current_exp=None, level=100):
        self.__pull_data(dex_no, form)
        super().__init__(exp_group=self.exp_group, current_exp=current_exp, level=level)
        self.evs = evs
        self.ivs = ivs
        self.nature = query_get_nature(nature)
        self.get_stats()

    def get_stats(self):
        """
        Sets self.stats based on base, ivs, evs, and nature.

        Index 0 (HP) uses a different formula: no nature multiplier,
        plus the level and a flat 10.

        Returns
        -------
        self : object
            Returns self.
        """
        stats = [0, 0, 0, 0, 0, 0]
        for stat in range(6):
            if stat == 0:
                # HP formula: floor(((2*base + iv + ev//4) * level) / 100) + level + 10
                stats[stat] = int((((2 * self.base[stat] + self.ivs[stat] + (self.evs[stat]//4)) * \
                    self.current_level) / 100)+ self.current_level + 10)
            else:
                # Other stats: (floor(...) + 5) scaled by the nature modifier.
                stats[stat] = int((((((2 * self.base[stat] + self.ivs[stat] + \
                    (self.evs[stat] // 4)) * self.current_level) / 100) + 5) * self.nature[stat]))
        self.stats = stats
        return self

    def set_evs(self, evs):
        """
        Sets evs to the new list and recalculates stats.

        Parameters
        ----------
        evs : list, required
            The list of evs you want to set.

        Returns
        -------
        self : object
            Returns self.
        """
        self.evs = evs
        self.get_stats()
        return self

    def set_ivs(self, ivs):
        """
        Sets ivs to the new list and recalculates stats.

        Parameters
        ----------
        ivs : list, required
            The list of ivs you want to set.

        Returns
        -------
        self : object
            Returns self.
        """
        self.ivs = ivs
        self.get_stats()
        return self

    def set_nature(self, nature):
        """
        Sets nature to the new nature and recalculates stats.

        Parameters
        ----------
        nature : string, required
            The nature you want to set (looked up via query_get_nature).

        Returns
        -------
        self : object
            Returns self.
        """
        self.nature = query_get_nature(nature)
        self.get_stats()
        return self

    def change_form(self, new_form):
        """
        Sets form to the new form and recalculates stats.

        Parameters
        ----------
        new_form : string, required
            The new form.

        Returns
        -------
        self : object
            Returns self.
        """
        self.__pull_data(self.dex_no, new_form)
        self.get_stats()
        return self

    def change_pokemon(self, dex_no, form=None):
        """
        Changes the pokemon this instance of the object refers to.

        Parameters
        ----------
        dex_no : integer, required
            The national dex number for the pokemon you are calculating stats for.
        form : string, optional (default=None)
            The form you want to calculate for.

        Returns
        -------
        self : object
            Returns self.
        """
        self.__pull_data(dex_no, form)
        # BUG FIX: re-run the Experience initialisation *before* computing
        # stats, so current_level reflects the new pokemon's exp group;
        # previously stats were computed with the stale level.
        super().__init__(exp_group=self.exp_group, current_exp=int(self.current_exp),
                         level=self.current_level)
        self.get_stats()
        return self

    def __pull_data(self, dex_no, form):
        """
        Private Method

        Pulls data from the database and cuts it up into the relevant pieces.

        Parameters
        ----------
        dex_no : integer, required
            The national dex number for the pokemon you are calculating stats for.
        form : string, optional (default=None)
            The form you want to calculate for.

        Returns
        -------
        self : object
            Returns self.
        """
        pull = query_stats_module(dex_no, form)
        self.dex_no = dex_no
        # Row layout: 6 base stats, then exp group, form and name.
        self.base = pull[:6]
        self.exp_group = pull[6]
        self.form = pull[7]
        self.name = pull[8]
        return self
# Demo: exercise the stats calculator when this file is run as a script.
if __name__ == '__main__':
    a = Statistics(dex_no=3, nature='timid', evs=[252, 0, 0, 252, 0, 252],
                   ivs=[31, 31, 31, 31, 31, 31])
    print(a.current_level)
    print(a.stats)
    # Changing the nature recalculates stats in place.
    a.set_nature('modest')
    print(a.stats)
    # Switching form changes the base stats.
    a.change_form('mega')
    print(a.stats)
    # Switch to a different pokemon entirely (dex no. 25).
    a.change_pokemon(25)
    print(a.stats)
    print(a.name)
|
|
""":mod:`sass` --- Binding of ``libsass``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This simple C extension module provides a very simple binding of ``libsass``,
which is written in C/C++. It contains only one function and one exception
type.
>>> import sass
>>> sass.compile(string='a { b { color: blue; } }')
'a b {\n color: blue; }\n'
"""
from __future__ import absolute_import
import collections
import inspect
import io
import os
import os.path
import re
import sys
import warnings
from six import string_types, text_type, PY2
import _sass
from sassutils._compat import collections_abc
#: Names exported by ``from sass import *``.
__all__ = (
    'MODES', 'OUTPUT_STYLES', 'SOURCE_COMMENTS', 'CompileError', 'SassColor',
    'SassError', 'SassFunction', 'SassList', 'SassMap', 'SassNumber',
    'SassWarning', 'and_join', 'compile', 'libsass_version',
)
#: Version of this binding package.
__version__ = '0.20.1'
#: Version string of the underlying libsass, as reported by the C extension.
libsass_version = _sass.libsass_version
#: (:class:`collections.abc.Mapping`) The dictionary of output styles.
#: Keys are output name strings, and values are flag integers.
OUTPUT_STYLES = _sass.OUTPUT_STYLES
#: (:class:`collections.abc.Mapping`) The dictionary of source comments styles.
#: Keys are mode names, and values are corresponding flag integers.
#:
#: .. versionadded:: 0.4.0
#:
#: .. deprecated:: 0.6.0
SOURCE_COMMENTS = {'none': 0, 'line_numbers': 1, 'default': 1, 'map': 2}
#: (:class:`frozenset`) The set of keywords :func:`compile()` can take.
MODES = frozenset(('string', 'filename', 'dirname'))
def to_native_s(s):
    """Coerce *s* to the interpreter's native string type.

    On Python 3 ``bytes`` are decoded to ``str``; on Python 2 unicode
    text is encoded to a byte string.  Anything else passes through
    unchanged.
    """
    if PY2:
        if isinstance(s, text_type):  # pragma: no cover (py2)
            return s.encode('UTF-8')
        return s
    if isinstance(s, bytes):  # pragma: no cover (py3)
        return s.decode('UTF-8')
    return s
class CompileError(ValueError):
    """Raised by :func:`compile()` when compilation fails.

    It is a subtype of :exc:`exceptions.ValueError`, so existing
    ``except ValueError`` handlers keep working.
    """

    def __init__(self, msg):
        # libsass may hand us bytes; normalise to the native string type.
        super(CompileError, self).__init__(to_native_s(msg))
def mkdirp(path):
    """Create *path* and any missing parents, ignoring "already a dir".

    Any other :exc:`OSError` (permissions, a regular file in the way,
    ...) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
class SassFunction(object):
    """Custom function exposed to Sass code. It can be instantiated using
    :meth:`from_lambda()` and :meth:`from_named_function()` as well.

    :param name: the function name
    :type name: :class:`str`
    :param arguments: the argument names
    :type arguments: :class:`collections.abc.Sequence`
    :param callable_: the actual function to be called
    :type callable_: :class:`collections.abc.Callable`

    .. versionadded:: 0.7.0

    """

    __slots__ = 'name', 'arguments', 'callable_'

    @classmethod
    def from_lambda(cls, name, lambda_):
        """Make a :class:`SassFunction` object from the given ``lambda_``
        function.  Lambdas carry no usable name, so ``name`` must be
        supplied; the argument list is discovered via :mod:`inspect`.

        :param name: the function name
        :type name: :class:`str`
        :param lambda_: the actual lambda function to be called
        :type lambda_: :class:`types.LambdaType`
        :returns: a custom function wrapper of the ``lambda_`` function
        :rtype: :class:`SassFunction`
        """
        if PY2:  # pragma: no cover
            spec = inspect.getargspec(lambda_)
            has_extras = spec.varargs or spec.keywords or spec.defaults
        else:  # pragma: no cover
            spec = inspect.getfullargspec(lambda_)
            has_extras = (
                spec.varargs or spec.varkw or spec.defaults or spec.kwonlyargs
            )
        if has_extras:
            raise TypeError(
                'functions cannot have starargs or defaults: {} {}'.format(
                    name, lambda_,
                ),
            )
        return cls(name, spec.args, lambda_)

    @classmethod
    def from_named_function(cls, function):
        """Make a :class:`SassFunction` object from the named ``function``.
        Function name and arguments are automatically inspected.

        :param function: the named function to be called
        :type function: :class:`types.FunctionType`
        :returns: a custom function wrapper of the ``function``
        :rtype: :class:`SassFunction`
        """
        if not getattr(function, '__name__', ''):
            raise TypeError('function must be named')
        return cls.from_lambda(function.__name__, function)

    def __init__(self, name, arguments, callable_):
        if not isinstance(name, string_types):
            raise TypeError('name must be a string, not ' + repr(name))
        if not isinstance(arguments, collections_abc.Sequence):
            raise TypeError(
                'arguments must be a sequence, not ' +
                repr(arguments),
            )
        if not callable(callable_):
            raise TypeError(repr(callable_) + ' is not callable')
        self.name = name
        # Sass argument names always carry a leading '$'; add it if missing.
        prefixed = []
        for arg in arguments:
            prefixed.append(arg if arg.startswith('$') else '$' + arg)
        self.arguments = tuple(prefixed)
        self.callable_ = callable_

    @property
    def signature(self):
        """Signature string of the function."""
        return '{}({})'.format(self.name, ', '.join(self.arguments))

    def __call__(self, *args, **kwargs):
        return self.callable_(*args, **kwargs)

    def __str__(self):
        return self.signature
def _normalize_importer_return_value(result):
# An importer must return an iterable of iterables of 1-3 stringlike
# objects
if result is None:
return result
def _to_importer_result(single_result):
single_result = tuple(single_result)
if len(single_result) not in (1, 2, 3):
raise ValueError(
'Expected importer result to be a tuple of length (1, 2, 3) '
'but got {}: {!r}'.format(len(single_result), single_result),
)
def _to_bytes(obj):
if not isinstance(obj, bytes):
return obj.encode('UTF-8')
else:
return obj
return tuple(_to_bytes(s) for s in single_result)
return tuple(_to_importer_result(x) for x in result)
def _importer_callback_wrapper(func):
    """Adapt a user importer callback for the C extension.

    Decodes the byte-string arguments coming from libsass, supports both
    the one-argument ``func(path)`` and two-argument ``func(path, prev)``
    signatures (detected on the first call and cached on the wrapper),
    and normalises the return value.
    """
    def inner(path, prev):
        path = path.decode('UTF-8')
        prev = prev.decode('UTF-8')
        num_args = getattr(inner, '_num_args', None)
        if num_args == 2:
            ret = func(path, prev)
        elif num_args is not None:
            ret = func(path)
        else:
            # First call: probe the two-argument form, remember the arity.
            try:
                ret = func(path, prev)
            except TypeError:
                inner._num_args = 1
                ret = func(path)
            else:
                inner._num_args = 2
        return _normalize_importer_return_value(ret)
    return inner
def _validate_importers(importers):
    """Validate importer entries and wrap their callbacks.

    ``None`` is allowed and means "no importers".  Every entry must be a
    ``(priority, callable)`` pair; the callable is wrapped with our
    argument/return-value adapter.  The result is a tuple of tuples,
    which later code relies on.
    """
    if importers is None:
        return None
    validated = []
    for priority, func in importers:
        assert isinstance(priority, int), priority
        assert callable(func), func
        validated.append((priority, _importer_callback_wrapper(func)))
    return tuple(validated)
def _raise(e):
    # Re-raise helper; used as the ``onerror`` callback for os.walk in
    # compile_dirname so traversal errors are not silently swallowed.
    raise e
def compile_dirname(
    search_path, output_path, output_style, source_comments, include_paths,
    precision, custom_functions, importers, source_map_contents,
    source_map_embed, omit_source_map_url, source_map_root,
):
    """Compile a directory tree of ``.scss``/``.sass`` files.

    Walks ``search_path``, compiles every non-partial Sass file (names
    starting with ``_`` are skipped) and writes the resulting ``.css``
    files under ``output_path``, mirroring the directory structure.

    :returns: ``(True, None)`` on success, or ``(False, error)`` with
              the error from the first file that failed to compile
    """
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
    for dirpath, _, filenames in os.walk(search_path, onerror=_raise):
        filenames = [
            filename for filename in filenames
            if filename.endswith(('.scss', '.sass')) and
            not filename.startswith('_')
        ]
        for filename in filenames:
            input_filename = os.path.join(dirpath, filename)
            relpath_to_file = os.path.relpath(input_filename, search_path)
            output_filename = os.path.join(output_path, relpath_to_file)
            # BUG FIX: the pattern was '.s[ac]ss$' with an unescaped regex
            # dot that matched any character before 's[ac]ss'; escape it
            # (and use a raw string) so only a literal '.' extension matches.
            output_filename = re.sub(r'\.s[ac]ss$', '.css', output_filename)
            input_filename = input_filename.encode(fs_encoding)
            s, v, _ = _sass.compile_filename(
                input_filename, output_style, source_comments, include_paths,
                precision, None, custom_functions, importers, None,
                source_map_contents, source_map_embed, omit_source_map_url,
                source_map_root,
            )
            if s:
                v = v.decode('UTF-8')
                mkdirp(os.path.dirname(output_filename))
                with io.open(
                    output_filename, 'w', encoding='UTF-8', newline='',
                ) as output_file:
                    output_file.write(v)
            else:
                # Stop at the first failure and surface its error message.
                return False, v
    return True, None
def _check_no_remaining_kwargs(func, kwargs):
if kwargs:
raise TypeError(
'{}() got unexpected keyword argument(s) {}'.format(
func.__name__,
', '.join("'{}'".format(arg) for arg in sorted(kwargs)),
),
)
def compile(**kwargs):
r"""There are three modes of parameters :func:`compile()` can take:
``string``, ``filename``, and ``dirname``.
The ``string`` parameter is the most basic way to compile Sass.
It simply takes a string of Sass code, and then returns a compiled
CSS string.
:param string: Sass source code to compile. it's exclusive to
``filename`` and ``dirname`` parameters
:type string: :class:`str`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param source_map_contents: embed include contents in map
:type source_map_contents: :class:`bool`
:param source_map_embed: embed sourceMappingUrl as data URI
:type source_map_embed: :class:`bool`
:param omit_source_map_url: omit source map URL comment from output
:type omit_source_map_url: :class:`bool`
:param source_map_root: base path, will be emitted in source map as is
:type source_map_root: :class:`str`
:param include_paths: an optional list of paths to find ``@import``\ ed
Sass/CSS source files
:type include_paths: :class:`collections.abc.Sequence`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions_>`_ description
:type custom_functions: :class:`set`,
:class:`collections.abc.Sequence`,
:class:`collections.abc.Mapping`
:param custom_import_extensions: (ignored, for backward compatibility)
:param indented: optional declaration that the string is Sass, not SCSS
formatted. :const:`False` by default
:type indented: :class:`bool`
:returns: the compiled CSS string
:param importers: optional callback functions.
see also below `importer callbacks
<importer-callbacks_>`_ description
:type importers: :class:`collections.abc.Callable`
:rtype: :class:`str`
:raises sass.CompileError: when it fails for any reason
(for example the given Sass has broken syntax)
The ``filename`` is the most commonly used way. It takes a string of
Sass filename, and then returns a compiled CSS string.
:param filename: the filename of Sass source code to compile.
it's exclusive to ``string`` and ``dirname`` parameters
:type filename: :class:`str`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param source_map_filename: use source maps and indicate the source map
output filename. :const:`None` means not
using source maps. :const:`None` by default.
:type source_map_filename: :class:`str`
:param source_map_contents: embed include contents in map
:type source_map_contents: :class:`bool`
:param source_map_embed: embed sourceMappingUrl as data URI
:type source_map_embed: :class:`bool`
:param omit_source_map_url: omit source map URL comment from output
:type omit_source_map_url: :class:`bool`
:param source_map_root: base path, will be emitted in source map as is
:type source_map_root: :class:`str`
:param include_paths: an optional list of paths to find ``@import``\ ed
Sass/CSS source files
:type include_paths: :class:`collections.abc.Sequence`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions_>`_ description
:type custom_functions: :class:`set`,
:class:`collections.abc.Sequence`,
:class:`collections.abc.Mapping`
:param custom_import_extensions: (ignored, for backward compatibility)
:param importers: optional callback functions.
see also below `importer callbacks
<importer-callbacks_>`_ description
:type importers: :class:`collections.abc.Callable`
:returns: the compiled CSS string, or a pair of the compiled CSS string
and the source map string if ``source_map_filename`` is set
:rtype: :class:`str`, :class:`tuple`
:raises sass.CompileError: when it fails for any reason
(for example the given Sass has broken syntax)
:raises exceptions.IOError: when the ``filename`` doesn't exist or
cannot be read
The ``dirname`` is useful for automation. It takes a pair of paths.
The first of the ``dirname`` pair refers the source directory, contains
several Sass source files to compiled. Sass source files can be nested
in directories. The second of the pair refers the output directory
that compiled CSS files would be saved. Directory tree structure of
the source directory will be maintained in the output directory as well.
If ``dirname`` parameter is used the function returns :const:`None`.
:param dirname: a pair of ``(source_dir, output_dir)``.
it's exclusive to ``string`` and ``filename``
parameters
:type dirname: :class:`tuple`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param source_map_contents: embed include contents in map
:type source_map_contents: :class:`bool`
:param source_map_embed: embed sourceMappingUrl as data URI
:type source_map_embed: :class:`bool`
:param omit_source_map_url: omit source map URL comment from output
:type omit_source_map_url: :class:`bool`
:param source_map_root: base path, will be emitted in source map as is
:type source_map_root: :class:`str`
:param include_paths: an optional list of paths to find ``@import``\ ed
Sass/CSS source files
:type include_paths: :class:`collections.abc.Sequence`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions_>`_ description
:type custom_functions: :class:`set`,
:class:`collections.abc.Sequence`,
:class:`collections.abc.Mapping`
:param custom_import_extensions: (ignored, for backward compatibility)
:raises sass.CompileError: when it fails for any reason
(for example the given Sass has broken syntax)
.. _custom-functions:
The ``custom_functions`` parameter can take three types of forms:
:class:`~set`/:class:`~collections.abc.Sequence` of \
:class:`SassFunction`\ s
It is the most general form. Although pretty verbose, it can take
any kind of callables like type objects, unnamed functions,
and user-defined callables.
.. code-block:: python
sass.compile(
...,
custom_functions={
sass.SassFunction('func-name', ('$a', '$b'), some_callable),
...
}
)
:class:`~collections.abc.Mapping` of names to functions
Less general, but easier-to-use form. Although it's not it can take
any kind of callables, it can take any kind of *functions* defined
using :keyword:`def`/:keyword:`lambda` syntax.
It cannot take callables other than them since inspecting arguments
is not always available for every kind of callables.
.. code-block:: python
sass.compile(
...,
custom_functions={
'func-name': lambda a, b: ...,
...
}
)
:class:`~set`/:class:`~collections.abc.Sequence` of \
named functions
Not general, but the easiest-to-use form for *named* functions.
It can take only named functions, defined using :keyword:`def`.
It cannot take lambdas sinc names are unavailable for them.
.. code-block:: python
def func_name(a, b):
return ...
sass.compile(
...,
custom_functions={func_name}
)
.. _importer-callbacks:
Newer versions of ``libsass`` allow developers to define callbacks to be
called and given a chance to process ``@import`` directives. You can
define yours by passing in a list of callables via the ``importers``
parameter. The callables must be passed as 2-tuples in the form:
.. code-block:: python
(priority_int, callback_fn)
A priority of zero is acceptable; priority determines the order callbacks
are attempted.
These callbacks can accept one or two string arguments. The first argument
is the path that was passed to the ``@import`` directive; the second
(optional) argument is the previous resolved path, where the ``@import``
directive was found. The callbacks must either return ``None`` to
indicate the path wasn't handled by that callback (to continue with others
or fall back on internal ``libsass`` filesystem behaviour) or a list of
one or more tuples, each in one of three forms:
* A 1-tuple representing an alternate path to handle internally; or,
* A 2-tuple representing an alternate path and the content that path
represents; or,
* A 3-tuple representing the same as the 2-tuple with the addition of a
"sourcemap".
All tuple return values must be strings. As a not overly realistic
example:
.. code-block:: python
def my_importer(path, prev):
return [(path, '#' + path + ' { color: red; }')]
sass.compile(
...,
importers=[(0, my_importer)]
)
Now, within the style source, attempting to ``@import 'button';`` will
instead attach ``color: red`` as a property of an element with the
imported name.
.. versionadded:: 0.4.0
Added ``source_comments`` and ``source_map_filename`` parameters.
.. versionchanged:: 0.6.0
The ``source_comments`` parameter becomes to take only :class:`bool`
instead of :class:`str`.
.. deprecated:: 0.6.0
Values like ``'none'``, ``'line_numbers'``, and ``'map'`` for
the ``source_comments`` parameter are deprecated.
.. versionadded:: 0.7.0
Added ``precision`` parameter.
.. versionadded:: 0.7.0
Added ``custom_functions`` parameter.
.. versionadded:: 0.11.0
``source_map_filename`` no longer implies ``source_comments``.
.. versionadded:: 0.17.0
Added ``source_map_contents``, ``source_map_embed``,
``omit_source_map_url``, and ``source_map_root`` parameters.
.. versionadded:: 0.18.0
The importer callbacks can now take a second argument, the previously-
resolved path, so that importers can do relative path resolution.
"""
modes = set()
for mode_name in MODES:
if mode_name in kwargs:
modes.add(mode_name)
if not modes:
raise TypeError('choose one at least in ' + and_join(MODES))
elif len(modes) > 1:
raise TypeError(
and_join(modes) + ' are exclusive each other; '
'cannot be used at a time',
)
precision = kwargs.pop('precision', 5)
output_style = kwargs.pop('output_style', 'nested')
if not isinstance(output_style, string_types):
raise TypeError(
'output_style must be a string, not ' +
repr(output_style),
)
try:
output_style = OUTPUT_STYLES[output_style]
except KeyError:
raise CompileError(
'{} is unsupported output_style; choose one of {}'
''.format(output_style, and_join(OUTPUT_STYLES)),
)
source_comments = kwargs.pop('source_comments', False)
if source_comments in SOURCE_COMMENTS:
if source_comments == 'none':
deprecation_message = (
'you can simply pass False to '
"source_comments instead of 'none'"
)
source_comments = False
elif source_comments in ('line_numbers', 'default'):
deprecation_message = (
'you can simply pass True to '
"source_comments instead of " +
repr(source_comments)
)
source_comments = True
else:
deprecation_message = (
"you don't have to pass 'map' to "
'source_comments but just need to '
'specify source_map_filename'
)
source_comments = False
warnings.warn(
"values like 'none', 'line_numbers', and 'map' for "
'the source_comments parameter are deprecated; ' +
deprecation_message,
FutureWarning,
)
if not isinstance(source_comments, bool):
raise TypeError(
'source_comments must be bool, not ' +
repr(source_comments),
)
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def _get_file_arg(key):
ret = kwargs.pop(key, None)
if ret is not None and not isinstance(ret, string_types):
raise TypeError('{} must be a string, not {!r}'.format(key, ret))
elif isinstance(ret, text_type):
ret = ret.encode(fs_encoding)
if ret and 'filename' not in modes:
raise CompileError(
'{} is only available with filename= keyword argument since '
'has to be aware of it'.format(key),
)
return ret
source_map_filename = _get_file_arg('source_map_filename')
output_filename_hint = _get_file_arg('output_filename_hint')
source_map_contents = kwargs.pop('source_map_contents', False)
source_map_embed = kwargs.pop('source_map_embed', False)
omit_source_map_url = kwargs.pop('omit_source_map_url', False)
source_map_root = kwargs.pop('source_map_root', None)
if isinstance(source_map_root, text_type):
source_map_root = source_map_root.encode('utf-8')
# #208: cwd is always included in include paths
include_paths = (os.getcwd(),)
include_paths += tuple(kwargs.pop('include_paths', ()) or ())
include_paths = os.pathsep.join(include_paths)
if isinstance(include_paths, text_type):
include_paths = include_paths.encode(fs_encoding)
custom_functions = kwargs.pop('custom_functions', ())
if isinstance(custom_functions, collections_abc.Mapping):
custom_functions = [
SassFunction.from_lambda(name, lambda_)
for name, lambda_ in custom_functions.items()
]
elif isinstance(
custom_functions,
(collections_abc.Set, collections_abc.Sequence),
):
custom_functions = [
func if isinstance(func, SassFunction)
else SassFunction.from_named_function(func)
for func in custom_functions
]
else:
raise TypeError(
'custom_functions must be one of:\n'
'- a set/sequence of {0.__module__}.{0.__name__} objects,\n'
'- a mapping of function name strings to lambda functions,\n'
'- a set/sequence of named functions,\n'
'not {1!r}'.format(SassFunction, custom_functions),
)
if kwargs.pop('custom_import_extensions', None) is not None:
warnings.warn(
'`custom_import_extensions` has no effect and will be removed in '
'a future version.',
FutureWarning,
)
importers = _validate_importers(kwargs.pop('importers', None))
if 'string' in modes:
string = kwargs.pop('string')
if isinstance(string, text_type):
string = string.encode('utf-8')
indented = kwargs.pop('indented', False)
if not isinstance(indented, bool):
raise TypeError(
'indented must be bool, not ' +
repr(source_comments),
)
_check_no_remaining_kwargs(compile, kwargs)
s, v = _sass.compile_string(
string, output_style, source_comments, include_paths, precision,
custom_functions, indented, importers,
source_map_contents, source_map_embed, omit_source_map_url,
source_map_root,
)
if s:
return v.decode('utf-8')
elif 'filename' in modes:
filename = kwargs.pop('filename')
if not isinstance(filename, string_types):
raise TypeError('filename must be a string, not ' + repr(filename))
elif not os.path.isfile(filename):
raise IOError('{!r} seems not a file'.format(filename))
elif isinstance(filename, text_type):
filename = filename.encode(fs_encoding)
_check_no_remaining_kwargs(compile, kwargs)
s, v, source_map = _sass.compile_filename(
filename, output_style, source_comments, include_paths, precision,
source_map_filename, custom_functions, importers,
output_filename_hint,
source_map_contents, source_map_embed, omit_source_map_url,
source_map_root,
)
if s:
v = v.decode('utf-8')
if source_map_filename:
source_map = source_map.decode('utf-8')
v = v, source_map
return v
elif 'dirname' in modes:
try:
search_path, output_path = kwargs.pop('dirname')
except ValueError:
raise ValueError(
'dirname must be a pair of (source_dir, '
'output_dir)',
)
_check_no_remaining_kwargs(compile, kwargs)
s, v = compile_dirname(
search_path, output_path, output_style, source_comments,
include_paths, precision, custom_functions, importers,
source_map_contents, source_map_embed, omit_source_map_url,
source_map_root,
)
if s:
return
else:
raise TypeError('something went wrong')
assert not s
raise CompileError(v)
def and_join(strings):
    """Join *strings* with commas, putting an Oxford-style ``'and '``
    before the final item.
    >>> and_join(['Korea', 'Japan', 'China', 'Taiwan'])
    'Korea, Japan, China, and Taiwan'
    :param strings: a list of words to join
    :type strings: :class:`collections.abc.Sequence`
    :returns: a joined string
    :rtype: :class:`str`, :class:`basestring`
    """
    count = len(strings)
    if count == 0:
        return ''
    if count == 1:
        return strings[0]
    # Prefix only the last element with 'and ', then comma-join everything.
    parts = list(strings[:-1])
    parts.append('and ' + strings[-1])
    return ', '.join(parts)
"""
This module provides datatypes to be used in custom sass functions.
The following mappings from sass types to python types are used:
SASS_NULL: ``None``
SASS_BOOLEAN: ``True`` or ``False``
SASS_STRING: class:`str`
SASS_NUMBER: class:`SassNumber`
SASS_COLOR: class:`SassColor`
SASS_LIST: class:`SassList`
SASS_MAP: class:`dict` or class:`SassMap`
SASS_ERROR: class:`SassError`
SASS_WARNING: class:`SassWarning`
"""
class SassNumber(collections.namedtuple('SassNumber', ('value', 'unit'))):
    """A sass number: a float ``value`` paired with a text ``unit``."""

    def __new__(cls, value, unit):
        # Normalize on construction: the value is always a float and the
        # unit is always text (bytes are decoded as UTF-8).
        coerced = float(value)
        if not isinstance(unit, text_type):
            unit = unit.decode('UTF-8')
        return super(SassNumber, cls).__new__(cls, coerced, unit)
class SassColor(collections.namedtuple('SassColor', ('r', 'g', 'b', 'a'))):
    """An RGBA color; every channel is stored as a float."""

    def __new__(cls, r, g, b, a):
        # Coerce all four channels to float before building the tuple.
        channels = (float(r), float(g), float(b), float(a))
        return super(SassColor, cls).__new__(cls, *channels)
# Singleton sentinel objects marking how a SassList is separated.  Each is
# the sole instance of a zero-field namedtuple type, giving a unique,
# hashable marker value.
SASS_SEPARATOR_COMMA = collections.namedtuple('SASS_SEPARATOR_COMMA', ())()
SASS_SEPARATOR_SPACE = collections.namedtuple('SASS_SEPARATOR_SPACE', ())()
# All valid separators; SassList.__new__ asserts membership in this set.
SEPARATORS = frozenset((SASS_SEPARATOR_COMMA, SASS_SEPARATOR_SPACE))
class SassList(
    collections.namedtuple('SassList', ('items', 'separator', 'bracketed')),
):
    """A sass list: a tuple of ``items``, a separator sentinel (one of
    ``SEPARATORS``), and a ``bracketed`` flag.
    """

    def __new__(cls, items, separator, bracketed=False):
        # Freeze the items so the list is immutable and hashable.
        frozen = tuple(items)
        assert separator in SEPARATORS, separator
        assert isinstance(bracketed, bool), bracketed
        return super(SassList, cls).__new__(cls, frozen, separator, bracketed)
class SassError(collections.namedtuple('SassError', ('msg',))):
    """An error value with a text ``msg``; bytes are decoded as UTF-8."""

    def __new__(cls, msg):
        if isinstance(msg, text_type):
            return super(SassError, cls).__new__(cls, msg)
        return super(SassError, cls).__new__(cls, msg.decode('UTF-8'))
class SassWarning(collections.namedtuple('SassWarning', ('msg',))):
    """A warning value with a text ``msg``; bytes are decoded as UTF-8."""

    def __new__(cls, msg):
        text = msg if isinstance(msg, text_type) else msg.decode('UTF-8')
        return super(SassWarning, cls).__new__(cls, text)
class SassMap(collections_abc.Mapping):
    """An immutable, hashable mapping.
    Sass maps can have mapping types as keys, and plain :class:`dict`
    objects are unhashable, so this dedicated type is needed.
    .. versionadded:: 0.7.0
    """

    __slots__ = '_dict', '_hash'

    def __init__(self, *args, **kwargs):
        self._dict = dict(*args, **kwargs)
        # Computing the hash eagerly also asserts that every key and
        # value is itself hashable.
        self._hash = hash(frozenset(self._dict.items()))

    # Mapping interface

    def __getitem__(self, key):
        return self._dict[key]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    # Our interface

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, frozenset(self.items()))

    def __hash__(self):
        return self._hash

    def _reject_mutation(self, *_):
        raise TypeError('SassMaps are immutable.')

    __setitem__ = __delitem__ = _reject_mutation
|
|
# -*- coding: utf-8 -*-
"""
Base settings file, common to all environments.
These settings can be overridden in local.py.
"""
import datetime
import os
import json
import hashlib
from datetime import timedelta
from collections import OrderedDict
# Convenience alias for the process environment mapping.
os_env = os.environ
def parent_dir(path):
    '''Return the parent of a directory.'''
    # Append ".." and normalize to an absolute path.
    up_one = os.path.join(path, os.pardir)
    return os.path.abspath(up_one)
# Filesystem layout, derived from this settings module's own location.
HERE = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = parent_dir(HERE)  # website/ directory
APP_PATH = parent_dir(BASE_PATH)
ADDON_PATH = os.path.join(BASE_PATH, 'addons')
STATIC_FOLDER = os.path.join(BASE_PATH, 'static')
STATIC_URL_PATH = '/static'
ASSET_HASH_PATH = os.path.join(APP_PATH, 'webpack-assets.json')
ROOT = os.path.join(BASE_PATH, '..')
BCRYPT_LOG_ROUNDS = 12
# Application version, read once from package.json at import time.
with open(os.path.join(APP_PATH, 'package.json'), 'r') as fobj:
    VERSION = json.load(fobj)['version']
# Expiration time for verification key (values are in minutes)
EXPIRATION_TIME_DICT = {
    'password': 24 * 60,  # 24 hours in minutes for forgot and reset password
    'confirm': 24 * 60,  # 24 hours in minutes for confirm account and email
    'claim': 30 * 24 * 60  # 30 days in minutes for claim contributor-ship
}
CITATION_STYLES_PATH = os.path.join(BASE_PATH, 'static', 'vendor', 'bower_components', 'styles')
# Minimum seconds between forgot password email attempts
SEND_EMAIL_THROTTLE = 30
# Hours before pending embargo/retraction/registration automatically becomes active
RETRACTION_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_TERMINATION_PENDING_TIME = datetime.timedelta(days=2)
REGISTRATION_APPROVAL_TIME = datetime.timedelta(days=2)
# Date range for embargo periods
EMBARGO_END_DATE_MIN = datetime.timedelta(days=2)
EMBARGO_END_DATE_MAX = datetime.timedelta(days=1460)  # Four years
# Question titles to be removed for anonymized VOL
ANONYMIZED_TITLES = ['Authors']
LOAD_BALANCER = False
PROXY_ADDRS = []
# May set these to True in local.py for development
DEV_MODE = False
DEBUG_MODE = False
SECURE_MODE = not DEBUG_MODE  # Set secure cookie
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
API_DOMAIN = PROTOCOL + 'localhost:8000/'
# External Ember App Local Development
USE_EXTERNAL_EMBER = False
EXTERNAL_EMBER_APPS = {}
LOG_PATH = os.path.join(APP_PATH, 'logs')
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
ANALYTICS_PATH = os.path.join(BASE_PATH, 'analytics')
# User management & registration
CONFIRM_REGISTRATIONS_BY_EMAIL = True
ALLOW_REGISTRATION = True
ALLOW_LOGIN = True
# Search backend configuration
SEARCH_ENGINE = 'elastic'  # Can be 'elastic', or None
ELASTIC_URI = 'localhost:9200'
ELASTIC_TIMEOUT = 10
ELASTIC_INDEX = 'website'
# Sessions
COOKIE_NAME = 'osf'
# TODO: Override OSF_COOKIE_DOMAIN in local.py in production
OSF_COOKIE_DOMAIN = None
# server-side verification timeout
OSF_SESSION_TIMEOUT = 30 * 24 * 60 * 60  # 30 days in seconds
# TODO: Override SECRET_KEY in local.py in production
SECRET_KEY = 'CHANGEME'
SESSION_COOKIE_SECURE = SECURE_MODE
SESSION_COOKIE_HTTPONLY = True
# local path to private key and cert for local development using https, overwrite in local.py
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None
# Change if using `scripts/cron.py` to manage crontab
CRON_USER = None
# External services
USE_CDN_FOR_CLIENT_LIBS = True
USE_EMAIL = True
FROM_EMAIL = 'openscienceframework-noreply@osf.io'
SUPPORT_EMAIL = 'support@osf.io'
# SMTP Settings
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = ''  # Set this in local.py
# OR, if using Sendgrid's API
SENDGRID_API_KEY = None
# Mailchimp
MAILCHIMP_API_KEY = None
MAILCHIMP_WEBHOOK_SECRET_KEY = 'CHANGEME'  # OSF secret key to ensure webhook is secure
ENABLE_EMAIL_SUBSCRIPTIONS = True
MAILCHIMP_GENERAL_LIST = 'Open Science Framework General'
# Triggered emails: delays before each automated mail is sent
OSF_HELP_LIST = 'Open Science Framework Help'
WAIT_BETWEEN_MAILS = timedelta(days=7)
NO_ADDON_WAIT_TIME = timedelta(weeks=8)
NO_LOGIN_WAIT_TIME = timedelta(weeks=4)
WELCOME_OSF4M_WAIT_TIME = timedelta(weeks=2)
NO_LOGIN_OSF4M_WAIT_TIME = timedelta(weeks=6)
NEW_PUBLIC_PROJECT_WAIT_TIME = timedelta(hours=24)
WELCOME_OSF4M_WAIT_TIME_GRACE = timedelta(days=12)
# TODO: Override in local.py
MAILGUN_API_KEY = None
# TODO: Override in local.py in production
UPLOADS_PATH = os.path.join(BASE_PATH, 'uploads')
MFR_CACHE_PATH = os.path.join(BASE_PATH, 'mfrcache')
MFR_TEMP_PATH = os.path.join(BASE_PATH, 'mfrtemp')
# Use Celery for file rendering
USE_CELERY = True
# File rendering timeout (in ms)
MFR_TIMEOUT = 30000
# Database connection; port can come from the OSF_DB_PORT env var.
# TODO: Override in local.py in production
DB_HOST = 'localhost'
DB_PORT = os_env.get('OSF_DB_PORT', 27017)
DB_NAME = 'osf20130903'
DB_USER = None
DB_PASS = None
# Cache settings
SESSION_HISTORY_LENGTH = 5
# URLs matching any of these predicates are excluded from session history.
SESSION_HISTORY_IGNORE_RULES = [
    lambda url: '/static/' in url,
    lambda url: 'favicon' in url,
    lambda url: url.startswith('/api/'),
]
# TODO: Configuration should not change between deploys - this should be dynamic.
CANONICAL_DOMAIN = 'openscienceframework.org'
COOKIE_DOMAIN = '.openscienceframework.org'  # Beaker
SHORT_DOMAIN = 'osf.io'
# TODO: Combine Python and JavaScript config
COMMENT_MAXLENGTH = 500
# Profile image options (sizes in pixels)
PROFILE_IMAGE_LARGE = 70
PROFILE_IMAGE_MEDIUM = 40
PROFILE_IMAGE_SMALL = 20
# Conference options
CONFERENCE_MIN_COUNT = 5
# Whitelist of HTML tags, attributes, and inline styles allowed in wiki content.
WIKI_WHITELIST = {
    'tags': [
        'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'blockquote', 'br',
        'center', 'cite', 'code',
        'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'embed', 'font',
        'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
        'kbd', 'li', 'object', 'ol', 'param', 'pre', 'p', 'q',
        's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup',
        'table', 'tbody', 'td', 'th', 'thead', 'tr', 'tt', 'ul', 'u',
        'var', 'wbr',
    ],
    'attributes': [
        'align', 'alt', 'border', 'cite', 'class', 'dir',
        'height', 'href', 'id', 'src', 'style', 'title', 'type', 'width',
        'face', 'size',  # font tags
        'salign', 'align', 'wmode', 'target',
    ],
    # Styles currently used in Reproducibility Project wiki pages
    'styles' : [
        'top', 'left', 'width', 'height', 'position',
        'background', 'font-size', 'text-align', 'z-index',
        'list-style',
    ]
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
NODE_CATEGORY_MAP = OrderedDict([
    ('analysis', 'Analysis'),
    ('communication', 'Communication'),
    ('data', 'Data'),
    ('hypothesis', 'Hypothesis'),
    ('instrumentation', 'Instrumentation'),
    ('methods and measures', 'Methods and Measures'),
    ('procedure', 'Procedure'),
    ('project', 'Project'),
    ('software', 'Software'),
    ('other', 'Other'),
    ('', 'Uncategorized')
])
# Add-ons
# Load addons from addons.json (read once at import time).
with open(os.path.join(ROOT, 'addons.json')) as fp:
    addon_settings = json.load(fp)
ADDONS_REQUESTED = addon_settings['addons']
ADDONS_ARCHIVABLE = addon_settings['addons_archivable']
ADDONS_COMMENTABLE = addon_settings['addons_commentable']
ADDONS_BASED_ON_IDS = addon_settings['addons_based_on_ids']
ADDONS_DESCRIPTION = addon_settings['addons_description']
ADDONS_URL = addon_settings['addons_url']
ADDON_CATEGORIES = [
    'documentation',
    'storage',
    'bibliography',
    'other',
    'security',
    'citations',
]
# Add-ons attached automatically, keyed by owner kind ('user' or 'node').
SYSTEM_ADDED_ADDONS = {
    # 'user': ['badges'],
    'user': [],
    'node': [],
}
# Keen analytics project credentials (public and private projects).
KEEN = {
    'public': {
        'project_id': None,
        'master_key': 'changeme',
        'write_key': '',
        'read_key': '',
    },
    'private': {
        'project_id': '',
        'write_key': '',
        'read_key': '',
    },
}
SENTRY_DSN = None
SENTRY_DSN_JS = None
# TODO: Delete me after merging GitLab
MISSING_FILE_NAME = 'untitled'
# Project Organizer
ALL_MY_PROJECTS_ID = '-amp'
ALL_MY_REGISTRATIONS_ID = '-amr'
ALL_MY_PROJECTS_NAME = 'All my projects'
ALL_MY_REGISTRATIONS_NAME = 'All my registrations'
# Most Popular and New and Noteworthy Nodes
POPULAR_LINKS_NODE = None  # TODO Override in local.py in production.
POPULAR_LINKS_REGISTRATIONS = None  # TODO Override in local.py in production.
NEW_AND_NOTEWORTHY_LINKS_NODE = None  # TODO Override in local.py in production.
MAX_POPULAR_PROJECTS = 10
NEW_AND_NOTEWORTHY_CONTRIBUTOR_BLACKLIST = []  # TODO Override in local.py in production.
# FOR EMERGENCIES ONLY: Setting this to True will disable forks, registrations,
# and uploads in order to save disk space.
DISK_SAVING_MODE = False
# Seconds before another notification email can be sent to a contributor when added to a project
CONTRIBUTOR_ADDED_EMAIL_THROTTLE = 24 * 3600
# Google Analytics
GOOGLE_ANALYTICS_ID = None
GOOGLE_SITE_VERIFICATION = None
# Pingdom
PINGDOM_ID = None
DEFAULT_HMAC_SECRET = 'changeme'
DEFAULT_HMAC_ALGORITHM = hashlib.sha256
WATERBUTLER_URL = 'http://localhost:7777'
WATERBUTLER_ADDRS = ['127.0.0.1']
# Test identifier namespaces
DOI_NAMESPACE = 'doi:10.5072/FK2'
ARK_NAMESPACE = 'ark:99999/fk4'
EZID_USERNAME = 'changeme'
EZID_PASSWORD = 'changeme'
# Format for DOIs and ARKs
EZID_FORMAT = '{namespace}osf.io/{guid}'
SHARE_REGISTRATION_URL = ''
SHARE_URL = None
SHARE_API_TOKEN = None  # Required to send project updates to SHARE
CAS_SERVER_URL = 'http://localhost:8080'
MFR_SERVER_URL = 'http://localhost:7778'
###### ARCHIVER ###########
ARCHIVE_PROVIDER = 'osfstorage'
MAX_ARCHIVE_SIZE = 5 * 1024 ** 3  # 5 GiB (1024 ** 3 bytes per GiB)
MAX_FILE_SIZE = MAX_ARCHIVE_SIZE  # TODO limit file size?
ARCHIVE_TIMEOUT_TIMEDELTA = timedelta(1)  # 24 hours
ENABLE_ARCHIVER = True
JWT_SECRET = 'changeme'
JWT_ALGORITHM = 'HS256'
##### CELERY #####
# Queue names; tasks are routed by module via the sets below
# (see CELERY_ROUTES).
DEFAULT_QUEUE = 'celery'
LOW_QUEUE = 'low'
MED_QUEUE = 'med'
HIGH_QUEUE = 'high'
LOW_PRI_MODULES = {
    'framework.analytics.tasks',
    'framework.celery_tasks',
    'scripts.osfstorage.usage_audit',
    'scripts.osfstorage.glacier_inventory',
    'scripts.analytics.tasks',
    'scripts.osfstorage.files_audit',
    'scripts.osfstorage.glacier_audit',
    'scripts.populate_new_and_noteworthy_projects',
    'scripts.populate_popular_projects_and_registrations',
    'website.search.elastic_search',
}
MED_PRI_MODULES = {
    'framework.email.tasks',
    'scripts.send_queued_mails',
    'scripts.triggered_mails',
    'website.mailchimp_utils',
    'website.notifications.tasks',
}
HIGH_PRI_MODULES = {
    'scripts.approve_embargo_terminations',
    'scripts.approve_registrations',
    'scripts.embargo_registrations',
    'scripts.refresh_addon_tokens',
    'scripts.retract_registrations',
    'website.archiver.tasks',
}
# Queue definitions are only configured when kombu is importable.
try:
    from kombu import Queue, Exchange
except ImportError:
    pass
else:
    CELERY_QUEUES = (
        Queue(LOW_QUEUE, Exchange(LOW_QUEUE), routing_key=LOW_QUEUE,
              consumer_arguments={'x-priority': -1}),
        Queue(DEFAULT_QUEUE, Exchange(DEFAULT_QUEUE), routing_key=DEFAULT_QUEUE,
              consumer_arguments={'x-priority': 0}),
        Queue(MED_QUEUE, Exchange(MED_QUEUE), routing_key=MED_QUEUE,
              consumer_arguments={'x-priority': 1}),
        Queue(HIGH_QUEUE, Exchange(HIGH_QUEUE), routing_key=HIGH_QUEUE,
              consumer_arguments={'x-priority': 10}),
    )
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_ROUTES = ('framework.celery_tasks.routers.CeleryRouter', )
CELERY_IGNORE_RESULT = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
# Modules to import when celery launches
CELERY_IMPORTS = (
    'framework.celery_tasks',
    'framework.celery_tasks.signals',
    'framework.email.tasks',
    'website.mailchimp_utils',
    'website.notifications.tasks',
    'website.archiver.tasks',
    'website.search.search',
    'website.project.tasks',
    'scripts.populate_new_and_noteworthy_projects',
    'scripts.populate_popular_projects_and_registrations',
    'scripts.refresh_addon_tokens',
    'scripts.retract_registrations',
    'scripts.embargo_registrations',
    'scripts.approve_registrations',
    'scripts.approve_embargo_terminations',
    'scripts.triggered_mails',
    'scripts.send_queued_mails',
    'scripts.analytics.run_keen_summaries',
    'scripts.analytics.run_keen_snapshots',
    'scripts.analytics.run_keen_events',
)
# Modules that need metrics and release requirements
# CELERY_IMPORTS += (
#     'scripts.osfstorage.glacier_inventory',
#     'scripts.osfstorage.glacier_audit',
#     'scripts.osfstorage.usage_audit',
#     'scripts.osfstorage.files_audit',
#     'scripts.analytics.tasks',
#     'scripts.analytics.upload',
# )
# celery.schedule will not be installed when running invoke requirements the first time.
try:
    from celery.schedules import crontab
except ImportError:
    pass
else:
    # Setting up a scheduler, essentially replaces an independent cron job
    CELERYBEAT_SCHEDULE = {
        '5-minute-emails': {
            'task': 'website.notifications.tasks.send_users_email',
            'schedule': crontab(minute='*/5'),
            'args': ('email_transactional',),
        },
        'daily-emails': {
            'task': 'website.notifications.tasks.send_users_email',
            'schedule': crontab(minute=0, hour=0),
            'args': ('email_digest',),
        },
        'refresh_addons': {
            'task': 'scripts.refresh_addon_tokens',
            'schedule': crontab(minute=0, hour= 2),  # Daily 2:00 a.m
            # Per-addon refresh intervals in days; see each provider's OAuth docs.
            'kwargs': {'dry_run': False, 'addons': {
                'box': 60,  # https://docs.box.com/docs/oauth-20#section-6-using-the-access-and-refresh-tokens
                'googledrive': 14,  # https://developers.google.com/identity/protocols/OAuth2#expiration
                'mendeley': 14  # http://dev.mendeley.com/reference/topics/authorization_overview.html
            }},
        },
        'retract_registrations': {
            'task': 'scripts.retract_registrations',
            'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'embargo_registrations': {
            'task': 'scripts.embargo_registrations',
            'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'approve_registrations': {
            'task': 'scripts.approve_registrations',
            'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'approve_embargo_terminations': {
            'task': 'scripts.approve_embargo_terminations',
            'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'triggered_mails': {
            'task': 'scripts.triggered_mails',
            'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
            'kwargs': {'dry_run': False},
        },
        'send_queued_mails': {
            'task': 'scripts.send_queued_mails',
            'schedule': crontab(minute=0, hour=12),  # Daily 12 p.m.
            'kwargs': {'dry_run': False},
        },
        'new-and-noteworthy': {
            'task': 'scripts.populate_new_and_noteworthy_projects',
            'schedule': crontab(minute=0, hour=2, day_of_week=6),  # Saturday 2:00 a.m.
            'kwargs': {'dry_run': False}
        },
        'update_popular_nodes': {
            'task': 'scripts.populate_popular_projects_and_registrations',
            'schedule': crontab(minute=0, hour=2),  # Daily 2:00 a.m.
            'kwargs': {'dry_run': False}
        },
        'run_keen_summaries': {
            'task': 'scripts.analytics.run_keen_summaries',
            'schedule': crontab(minute=00, hour=2),  # Daily 2:00 a.m.
            'kwargs': {'yesterday': True}
        },
        'run_keen_snapshots': {
            'task': 'scripts.analytics.run_keen_snapshots',
            'schedule': crontab(minute=0, hour=3),  # Daily 3:00 a.m.
        },
        'run_keen_events': {
            'task': 'scripts.analytics.run_keen_events',
            'schedule': crontab(minute=0, hour=4),  # Daily 4:00 a.m.
            'kwargs': {'yesterday': True}
        }
    }
# Tasks that need metrics and release requirements
#    CELERYBEAT_SCHEDULE.update({
#        'usage_audit': {
#            'task': 'scripts.osfstorage.usage_audit',
#            'schedule': crontab(minute=0, hour=0),  # Daily 12 a.m
#            'kwargs': {'send_mail': True},
#        },
#        'glacier_inventory': {
#            'task': 'scripts.osfstorage.glacier_inventory',
#            'schedule': crontab(minute=0, hour= 0, day_of_week=0),  # Sunday 12:00 a.m.
#            'args': (),
#        },
#        'glacier_audit': {
#            'task': 'scripts.osfstorage.glacier_audit',
#            'schedule': crontab(minute=0, hour=6, day_of_week=0),  # Sunday 6:00 a.m.
#            'kwargs': {'dry_run': False},
#        },
#        'files_audit_0': {
#            'task': 'scripts.osfstorage.files_audit.0',
#            'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
#            'kwargs': {'num_of_workers': 4, 'dry_run': False},
#        },
#        'files_audit_1': {
#            'task': 'scripts.osfstorage.files_audit.1',
#            'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
#            'kwargs': {'num_of_workers': 4, 'dry_run': False},
#        },
#        'files_audit_2': {
#            'task': 'scripts.osfstorage.files_audit.2',
#            'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
#            'kwargs': {'num_of_workers': 4, 'dry_run': False},
#        },
#        'files_audit_3': {
#            'task': 'scripts.osfstorage.files_audit.3',
#            'schedule': crontab(minute=0, hour=2, day_of_week=0),  # Sunday 2:00 a.m.
#            'kwargs': {'num_of_workers': 4, 'dry_run': False},
#        },
#        'analytics': {
#            'task': 'scripts.analytics.tasks',
#            'schedule': crontab(minute=0, hour=2),  # Daily 2:00 a.m.
#            'kwargs': {}
#        },
#        'analytics-upload': {
#            'task': 'scripts.analytics.upload',
#            'schedule': crontab(minute=0, hour=6),  # Daily 6:00 a.m.
#            'kwargs': {}
#        },
#    })
# waterbutler JWE/JWT signing settings
WATERBUTLER_JWE_SALT = 'yusaltydough'
WATERBUTLER_JWE_SECRET = 'CirclesAre4Squares'
WATERBUTLER_JWT_SECRET = 'ILiekTrianglesALot'
WATERBUTLER_JWT_ALGORITHM = 'HS256'
WATERBUTLER_JWT_EXPIRATION = 15
SENSITIVE_DATA_SALT = 'yusaltydough'
SENSITIVE_DATA_SECRET = 'TrainglesAre5Squares'
DRAFT_REGISTRATION_APPROVAL_PERIOD = datetime.timedelta(days=10)
# Import-time sanity check on the relationship between the two periods.
assert (DRAFT_REGISTRATION_APPROVAL_PERIOD > EMBARGO_END_DATE_MIN), 'The draft registration approval period should be more than the minimum embargo end date.'
PREREG_ADMIN_TAG = "prereg_admin"
ENABLE_INSTITUTIONS = False
ENABLE_VARNISH = False
ENABLE_ESI = False
VARNISH_SERVERS = []  # This should be set in local.py or cache invalidation won't work
ESI_MEDIA_TYPES = {'application/vnd.api+json', 'application/json'}
# Used for gathering meta information about the current build
GITHUB_API_TOKEN = None
# External Identity Provider
EXTERNAL_IDENTITY_PROFILE = {
    'OrcidProfile': 'ORCID',
}
# Source: https://github.com/maxd/fake_email_validator/blob/master/config/fake_domains.list
BLACKLISTED_DOMAINS = [
'0-mail.com',
'0815.ru',
'0815.su',
'0clickemail.com',
'0wnd.net',
'0wnd.org',
'10mail.org',
'10minut.com.pl',
'10minutemail.cf',
'10minutemail.co.uk',
'10minutemail.co.za',
'10minutemail.com',
'10minutemail.de',
'10minutemail.eu',
'10minutemail.ga',
'10minutemail.gq',
'10minutemail.info',
'10minutemail.ml',
'10minutemail.net',
'10minutemail.org',
'10minutemail.ru',
'10minutemail.us',
'10minutesmail.co.uk',
'10minutesmail.com',
'10minutesmail.eu',
'10minutesmail.net',
'10minutesmail.org',
'10minutesmail.ru',
'10minutesmail.us',
'123-m.com',
'15qm-mail.red',
'15qm.com',
'1chuan.com',
'1mail.ml',
'1pad.de',
'1usemail.com',
'1zhuan.com',
'20mail.in',
'20mail.it',
'20minutemail.com',
'2prong.com',
'30minutemail.com',
'30minutesmail.com',
'33mail.com',
'3d-painting.com',
'3mail.ga',
'4mail.cf',
'4mail.ga',
'4warding.com',
'4warding.net',
'4warding.org',
'5mail.cf',
'5mail.ga',
'60minutemail.com',
'675hosting.com',
'675hosting.net',
'675hosting.org',
'6ip.us',
'6mail.cf',
'6mail.ga',
'6mail.ml',
'6paq.com',
'6url.com',
'75hosting.com',
'75hosting.net',
'75hosting.org',
'7mail.ga',
'7mail.ml',
'7mail7.com',
'7tags.com',
'8mail.cf',
'8mail.ga',
'8mail.ml',
'99experts.com',
'9mail.cf',
'9ox.net',
'a-bc.net',
'a45.in',
'abcmail.email',
'abusemail.de',
'abyssmail.com',
'acentri.com',
'advantimo.com',
'afrobacon.com',
'agedmail.com',
'ajaxapp.net',
'alivance.com',
'ama-trade.de',
'amail.com',
'amail4.me',
'amilegit.com',
'amiri.net',
'amiriindustries.com',
'anappthat.com',
'ano-mail.net',
'anobox.ru',
'anonbox.net',
'anonmails.de',
'anonymail.dk',
'anonymbox.com',
'antichef.com',
'antichef.net',
'antireg.ru',
'antispam.de',
'antispammail.de',
'appixie.com',
'armyspy.com',
'artman-conception.com',
'asdasd.ru',
'azmeil.tk',
'baxomale.ht.cx',
'beddly.com',
'beefmilk.com',
'beerolympics.se',
'bestemailaddress.net',
'bigprofessor.so',
'bigstring.com',
'binkmail.com',
'bio-muesli.net',
'bladesmail.net',
'bloatbox.com',
'bobmail.info',
'bodhi.lawlita.com',
'bofthew.com',
'bootybay.de',
'bossmail.de',
'boun.cr',
'bouncr.com',
'boxformail.in',
'boximail.com',
'boxtemp.com.br',
'breakthru.com',
'brefmail.com',
'brennendesreich.de',
'broadbandninja.com',
'bsnow.net',
'bspamfree.org',
'buffemail.com',
'bugmenot.com',
'bumpymail.com',
'bund.us',
'bundes-li.ga',
'burnthespam.info',
'burstmail.info',
'buymoreplays.com',
'buyusedlibrarybooks.org',
'byom.de',
'c2.hu',
'cachedot.net',
'card.zp.ua',
'casualdx.com',
'cbair.com',
'cdnqa.com',
'cek.pm',
'cellurl.com',
'cem.net',
'centermail.com',
'centermail.net',
'chammy.info',
'cheatmail.de',
'chewiemail.com',
'childsavetrust.org',
'chogmail.com',
'choicemail1.com',
'chong-mail.com',
'chong-mail.net',
'chong-mail.org',
'clixser.com',
'clrmail.com',
'cmail.net',
'cmail.org',
'coldemail.info',
'consumerriot.com',
'cool.fr.nf',
'correo.blogos.net',
'cosmorph.com',
'courriel.fr.nf',
'courrieltemporaire.com',
'crapmail.org',
'crazymailing.com',
'cubiclink.com',
'curryworld.de',
'cust.in',
'cuvox.de',
'd3p.dk',
'dacoolest.com',
'daintly.com',
'dandikmail.com',
'dayrep.com',
'dbunker.com',
'dcemail.com',
'deadaddress.com',
'deadfake.cf',
'deadfake.ga',
'deadfake.ml',
'deadfake.tk',
'deadspam.com',
'deagot.com',
'dealja.com',
'delikkt.de',
'despam.it',
'despammed.com',
'devnullmail.com',
'dfgh.net',
'digitalsanctuary.com',
'dingbone.com',
'dingfone.com',
'discard.cf',
'discard.email',
'discard.ga',
'discard.gq',
'discard.ml',
'discard.tk',
'discardmail.com',
'discardmail.de',
'dispomail.eu',
'disposable-email.ml',
'disposable.cf',
'disposable.ga',
'disposable.ml',
'disposableaddress.com',
'disposableemailaddresses.com',
'disposableinbox.com',
'dispose.it',
'disposeamail.com',
'disposemail.com',
'dispostable.com',
'divermail.com',
'dodgeit.com',
'dodgemail.de',
'dodgit.com',
'dodgit.org',
'dodsi.com',
'doiea.com',
'domozmail.com',
'donemail.ru',
'dontmail.net',
'dontreg.com',
'dontsendmespam.de',
'dotmsg.com',
'drdrb.com',
'drdrb.net',
'droplar.com',
'dropmail.me',
'duam.net',
'dudmail.com',
'dump-email.info',
'dumpandjunk.com',
'dumpmail.de',
'dumpyemail.com',
'duskmail.com',
'e-mail.com',
'e-mail.org',
'e4ward.com',
'easytrashmail.com',
'ee1.pl',
'ee2.pl',
'eelmail.com',
'einmalmail.de',
'einrot.com',
'einrot.de',
'eintagsmail.de',
'email-fake.cf',
'email-fake.com',
'email-fake.ga',
'email-fake.gq',
'email-fake.ml',
'email-fake.tk',
'email60.com',
'email64.com',
'emailage.cf',
'emailage.ga',
'emailage.gq',
'emailage.ml',
'emailage.tk',
'emaildienst.de',
'emailgo.de',
'emailias.com',
'emailigo.de',
'emailinfive.com',
'emaillime.com',
'emailmiser.com',
'emailproxsy.com',
'emails.ga',
'emailsensei.com',
'emailspam.cf',
'emailspam.ga',
'emailspam.gq',
'emailspam.ml',
'emailspam.tk',
'emailtemporanea.com',
'emailtemporanea.net',
'emailtemporar.ro',
'emailtemporario.com.br',
'emailthe.net',
'emailtmp.com',
'emailto.de',
'emailwarden.com',
'emailx.at.hm',
'emailxfer.com',
'emailz.cf',
'emailz.ga',
'emailz.gq',
'emailz.ml',
'emeil.in',
'emeil.ir',
'emeraldwebmail.com',
'emil.com',
'emkei.cf',
'emkei.ga',
'emkei.gq',
'emkei.ml',
'emkei.tk',
'emz.net',
'enterto.com',
'ephemail.net',
'ero-tube.org',
'etranquil.com',
'etranquil.net',
'etranquil.org',
'evopo.com',
'example.com',
'explodemail.com',
'express.net.ua',
'eyepaste.com',
'facebook-email.cf',
'facebook-email.ga',
'facebook-email.ml',
'facebookmail.gq',
'facebookmail.ml',
'fake-box.com',
'fake-mail.cf',
'fake-mail.ga',
'fake-mail.ml',
'fakeinbox.cf',
'fakeinbox.com',
'fakeinbox.ga',
'fakeinbox.ml',
'fakeinbox.tk',
'fakeinformation.com',
'fakemail.fr',
'fakemailgenerator.com',
'fakemailz.com',
'fammix.com',
'fansworldwide.de',
'fantasymail.de',
'fastacura.com',
'fastchevy.com',
'fastchrysler.com',
'fastkawasaki.com',
'fastmazda.com',
'fastmitsubishi.com',
'fastnissan.com',
'fastsubaru.com',
'fastsuzuki.com',
'fasttoyota.com',
'fastyamaha.com',
'fatflap.com',
'fdfdsfds.com',
'fightallspam.com',
'fiifke.de',
'filzmail.com',
'fivemail.de',
'fixmail.tk',
'fizmail.com',
'fleckens.hu',
'flurre.com',
'flurred.com',
'flurred.ru',
'flyspam.com',
'footard.com',
'forgetmail.com',
'forward.cat',
'fr33mail.info',
'frapmail.com',
'free-email.cf',
'free-email.ga',
'freemails.cf',
'freemails.ga',
'freemails.ml',
'freundin.ru',
'friendlymail.co.uk',
'front14.org',
'fuckingduh.com',
'fudgerub.com',
'fux0ringduh.com',
'fyii.de',
'garliclife.com',
'gehensiemirnichtaufdensack.de',
'gelitik.in',
'germanmails.biz',
'get-mail.cf',
'get-mail.ga',
'get-mail.ml',
'get-mail.tk',
'get1mail.com',
'get2mail.fr',
'getairmail.cf',
'getairmail.com',
'getairmail.ga',
'getairmail.gq',
'getairmail.ml',
'getairmail.tk',
'getmails.eu',
'getonemail.com',
'getonemail.net',
'gfcom.com',
'ghosttexter.de',
'giantmail.de',
'girlsundertheinfluence.com',
'gishpuppy.com',
'gmial.com',
'goemailgo.com',
'gorillaswithdirtyarmpits.com',
'gotmail.com',
'gotmail.net',
'gotmail.org',
'gowikibooks.com',
'gowikicampus.com',
'gowikicars.com',
'gowikifilms.com',
'gowikigames.com',
'gowikimusic.com',
'gowikinetwork.com',
'gowikitravel.com',
'gowikitv.com',
'grandmamail.com',
'grandmasmail.com',
'great-host.in',
'greensloth.com',
'grr.la',
'gsrv.co.uk',
'guerillamail.biz',
'guerillamail.com',
'guerillamail.de',
'guerillamail.net',
'guerillamail.org',
'guerillamailblock.com',
'guerrillamail.biz',
'guerrillamail.com',
'guerrillamail.de',
'guerrillamail.info',
'guerrillamail.net',
'guerrillamail.org',
'guerrillamailblock.com',
'gustr.com',
'h8s.org',
'hacccc.com',
'haltospam.com',
'haqed.com',
'harakirimail.com',
'hartbot.de',
'hat-geld.de',
'hatespam.org',
'headstrong.de',
'hellodream.mobi',
'herp.in',
'hidemail.de',
'hideme.be',
'hidzz.com',
'hiru-dea.com',
'hmamail.com',
'hochsitze.com',
'hopemail.biz',
'hot-mail.cf',
'hot-mail.ga',
'hot-mail.gq',
'hot-mail.ml',
'hot-mail.tk',
'hotpop.com',
'hulapla.de',
'hushmail.com',
'ieatspam.eu',
'ieatspam.info',
'ieh-mail.de',
'ihateyoualot.info',
'iheartspam.org',
'ikbenspamvrij.nl',
'imails.info',
'imgof.com',
'imgv.de',
'imstations.com',
'inbax.tk',
'inbox.si',
'inboxalias.com',
'inboxclean.com',
'inboxclean.org',
'inboxproxy.com',
'incognitomail.com',
'incognitomail.net',
'incognitomail.org',
'ineec.net',
'infocom.zp.ua',
'inoutmail.de',
'inoutmail.eu',
'inoutmail.info',
'inoutmail.net',
'insorg-mail.info',
'instant-mail.de',
'instantemailaddress.com',
'instantlyemail.com',
'ip6.li',
'ipoo.org',
'irish2me.com',
'iwi.net',
'jetable.com',
'jetable.fr.nf',
'jetable.net',
'jetable.org',
'jnxjn.com',
'jourrapide.com',
'junk1e.com',
'junkmail.com',
'junkmail.ga',
'junkmail.gq',
'jupimail.com',
'kasmail.com',
'kaspop.com',
'keepmymail.com',
'killmail.com',
'killmail.net',
'kimsdisk.com',
'kingsq.ga',
'kiois.com',
'kir.ch.tc',
'klassmaster.com',
'klassmaster.net',
'klzlk.com',
'kook.ml',
'koszmail.pl',
'kulturbetrieb.info',
'kurzepost.de',
'l33r.eu',
'labetteraverouge.at',
'lackmail.net',
'lags.us',
'landmail.co',
'lastmail.co',
'lawlita.com',
'lazyinbox.com',
'legitmail.club',
'letthemeatspam.com',
'lhsdv.com',
'libox.fr',
'lifebyfood.com',
'link2mail.net',
'litedrop.com',
'loadby.us',
'login-email.cf',
'login-email.ga',
'login-email.ml',
'login-email.tk',
'lol.ovpn.to',
'lolfreak.net',
'lookugly.com',
'lopl.co.cc',
'lortemail.dk',
'lovemeleaveme.com',
'lr78.com',
'lroid.com',
'lukop.dk',
'm21.cc',
'm4ilweb.info',
'maboard.com',
'mail-filter.com',
'mail-temporaire.fr',
'mail.by',
'mail.mezimages.net',
'mail.zp.ua',
'mail114.net',
'mail1a.de',
'mail21.cc',
'mail2rss.org',
'mail333.com',
'mail4trash.com',
'mailbidon.com',
'mailbiz.biz',
'mailblocks.com',
'mailblog.biz',
'mailbucket.org',
'mailcat.biz',
'mailcatch.com',
'mailde.de',
'mailde.info',
'maildrop.cc',
'maildrop.cf',
'maildrop.ga',
'maildrop.gq',
'maildrop.ml',
'maildu.de',
'maildx.com',
'maileater.com',
'mailed.ro',
'maileimer.de',
'mailexpire.com',
'mailfa.tk',
'mailforspam.com',
'mailfree.ga',
'mailfree.gq',
'mailfree.ml',
'mailfreeonline.com',
'mailfs.com',
'mailguard.me',
'mailhazard.com',
'mailhazard.us',
'mailhz.me',
'mailimate.com',
'mailin8r.com',
'mailinater.com',
'mailinator.com',
'mailinator.gq',
'mailinator.net',
'mailinator.org',
'mailinator.us',
'mailinator2.com',
'mailinator2.net',
'mailincubator.com',
'mailismagic.com',
'mailjunk.cf',
'mailjunk.ga',
'mailjunk.gq',
'mailjunk.ml',
'mailjunk.tk',
'mailmate.com',
'mailme.gq',
'mailme.ir',
'mailme.lv',
'mailme24.com',
'mailmetrash.com',
'mailmoat.com',
'mailms.com',
'mailnator.com',
'mailnesia.com',
'mailnull.com',
'mailorg.org',
'mailpick.biz',
'mailproxsy.com',
'mailquack.com',
'mailrock.biz',
'mailscrap.com',
'mailshell.com',
'mailsiphon.com',
'mailslapping.com',
'mailslite.com',
'mailspeed.ru',
'mailtemp.info',
'mailtome.de',
'mailtothis.com',
'mailtrash.net',
'mailtv.net',
'mailtv.tv',
'mailzilla.com',
'mailzilla.org',
'mailzilla.orgmbx.cc',
'makemetheking.com',
'mallinator.com',
'manifestgenerator.com',
'manybrain.com',
'mbx.cc',
'mciek.com',
'mega.zik.dj',
'meinspamschutz.de',
'meltmail.com',
'messagebeamer.de',
'mezimages.net',
'mfsa.ru',
'mierdamail.com',
'migmail.pl',
'migumail.com',
'mindless.com',
'ministry-of-silly-walks.de',
'mintemail.com',
'misterpinball.de',
'mjukglass.nu',
'moakt.com',
'mobi.web.id',
'mobileninja.co.uk',
'moburl.com',
'mohmal.com',
'moncourrier.fr.nf',
'monemail.fr.nf',
'monmail.fr.nf',
'monumentmail.com',
'msa.minsmail.com',
'mt2009.com',
'mt2014.com',
'mt2015.com',
'mx0.wwwnew.eu',
'my10minutemail.com',
'myalias.pw',
'mycard.net.ua',
'mycleaninbox.net',
'myemailboxy.com',
'mymail-in.net',
'mymailoasis.com',
'mynetstore.de',
'mypacks.net',
'mypartyclip.de',
'myphantomemail.com',
'mysamp.de',
'myspaceinc.com',
'myspaceinc.net',
'myspaceinc.org',
'myspacepimpedup.com',
'myspamless.com',
'mytemp.email',
'mytempemail.com',
'mytempmail.com',
'mytrashmail.com',
'nabuma.com',
'neomailbox.com',
'nepwk.com',
'nervmich.net',
'nervtmich.net',
'netmails.com',
'netmails.net',
'netzidiot.de',
'neverbox.com',
'nice-4u.com',
'nincsmail.com',
'nincsmail.hu',
'nmail.cf',
'nnh.com',
'no-spam.ws',
'noblepioneer.com',
'nobulk.com',
'noclickemail.com',
'nogmailspam.info',
'nomail.pw',
'nomail.xl.cx',
'nomail2me.com',
'nomorespamemails.com',
'nonspam.eu',
'nonspammer.de',
'noref.in',
'nospam.ze.tc',
'nospam4.us',
'nospamfor.us',
'nospammail.net',
'nospamthanks.info',
'notmailinator.com',
'notsharingmy.info',
'nowhere.org',
'nowmymail.com',
'nurfuerspam.de',
'nus.edu.sg',
'nwldx.com',
'objectmail.com',
'obobbo.com',
'odaymail.com',
'odnorazovoe.ru',
'one-time.email',
'oneoffemail.com',
'oneoffmail.com',
'onewaymail.com',
'onlatedotcom.info',
'online.ms',
'oopi.org',
'opayq.com',
'opentrash.com',
'ordinaryamerican.net',
'otherinbox.com',
'ourklips.com',
'outlawspam.com',
'ovpn.to',
'owlpic.com',
'pancakemail.com',
'paplease.com',
'pepbot.com',
'pfui.ru',
'pimpedupmyspace.com',
'pjjkp.com',
'plexolan.de',
'poczta.onet.pl',
'politikerclub.de',
'poofy.org',
'pookmail.com',
'pop3.xyz',
'postalmail.biz',
'privacy.net',
'privatdemail.net',
'privy-mail.com',
'privymail.de',
'proxymail.eu',
'prtnx.com',
'prtz.eu',
'pubmail.io',
'punkass.com',
'putthisinyourspamdatabase.com',
'pwrby.com',
'q314.net',
'qisdo.com',
'qisoa.com',
'qoika.com',
'qq.com',
'quickinbox.com',
'quickmail.nl',
'rainmail.biz',
'rcpt.at',
're-gister.com',
'reallymymail.com',
'realtyalerts.ca',
'recode.me',
'reconmail.com',
'recursor.net',
'recyclemail.dk',
'regbypass.com',
'regbypass.comsafe-mail.net',
'rejectmail.com',
'reliable-mail.com',
'remail.cf',
'remail.ga',
'renraku.in',
'rhyta.com',
'rklips.com',
'rmqkr.net',
'royal.net',
'rppkn.com',
'rtrtr.com',
's0ny.net',
'safe-mail.net',
'safersignup.de',
'safetymail.info',
'safetypost.de',
'sandelf.de',
'sayawaka-dea.info',
'saynotospams.com',
'scatmail.com',
'schafmail.de',
'schrott-email.de',
'secretemail.de',
'secure-mail.biz',
'secure-mail.cc',
'selfdestructingmail.com',
'selfdestructingmail.org',
'sendspamhere.com',
'senseless-entertainment.com',
'services391.com',
'sharedmailbox.org',
'sharklasers.com',
'shieldedmail.com',
'shieldemail.com',
'shiftmail.com',
'shitmail.me',
'shitmail.org',
'shitware.nl',
'shmeriously.com',
'shortmail.net',
'showslow.de',
'sibmail.com',
'sinnlos-mail.de',
'siteposter.net',
'skeefmail.com',
'slapsfromlastnight.com',
'slaskpost.se',
'slipry.net',
'slopsbox.com',
'slowslow.de',
'slushmail.com',
'smashmail.de',
'smellfear.com',
'smellrear.com',
'smoug.net',
'snakemail.com',
'sneakemail.com',
'sneakmail.de',
'snkmail.com',
'sofimail.com',
'sofort-mail.de',
'softpls.asia',
'sogetthis.com',
'soisz.com',
'solvemail.info',
'soodonims.com',
'spam.la',
'spam.su',
'spam4.me',
'spamail.de',
'spamarrest.com',
'spamavert.com',
'spambob.com',
'spambob.net',
'spambob.org',
'spambog.com',
'spambog.de',
'spambog.net',
'spambog.ru',
'spambooger.com',
'spambox.info',
'spambox.irishspringrealty.com',
'spambox.us',
'spambpg.com',
'spamcannon.com',
'spamcannon.net',
'spamcero.com',
'spamcon.org',
'spamcorptastic.com',
'spamcowboy.com',
'spamcowboy.net',
'spamcowboy.org',
'spamday.com',
'spamex.com',
'spamfighter.cf',
'spamfighter.ga',
'spamfighter.gq',
'spamfighter.ml',
'spamfighter.tk',
'spamfree.eu',
'spamfree24.com',
'spamfree24.de',
'spamfree24.eu',
'spamfree24.info',
'spamfree24.net',
'spamfree24.org',
'spamgoes.in',
'spamgourmet.com',
'spamgourmet.net',
'spamgourmet.org',
'spamherelots.com',
'spamhereplease.com',
'spamhole.com',
'spamify.com',
'spaminator.de',
'spamkill.info',
'spaml.com',
'spaml.de',
'spammotel.com',
'spamobox.com',
'spamoff.de',
'spamsalad.in',
'spamslicer.com',
'spamsphere.com',
'spamspot.com',
'spamstack.net',
'spamthis.co.uk',
'spamthisplease.com',
'spamtrail.com',
'spamtroll.net',
'speed.1s.fr',
'spikio.com',
'spoofmail.de',
'spybox.de',
'squizzy.de',
'ssoia.com',
'startkeys.com',
'stexsy.com',
'stinkefinger.net',
'stop-my-spam.cf',
'stop-my-spam.com',
'stop-my-spam.ga',
'stop-my-spam.ml',
'stop-my-spam.tk',
'streetwisemail.com',
'stuffmail.de',
'super-auswahl.de',
'supergreatmail.com',
'supermailer.jp',
'superrito.com',
'superstachel.de',
'suremail.info',
'sute.jp',
'svk.jp',
'sweetxxx.de',
'tafmail.com',
'tagyourself.com',
'talkinator.com',
'tapchicuoihoi.com',
'teewars.org',
'teleworm.com',
'teleworm.us',
'temp-mail.com',
'temp-mail.net',
'temp-mail.org',
'temp-mail.ru',
'temp15qm.com',
'tempail.com',
'tempalias.com',
'tempe-mail.com',
'tempemail.biz',
'tempemail.co.za',
'tempemail.com',
'tempemail.net',
'tempemail.org',
'tempinbox.co.uk',
'tempinbox.com',
'tempmail.de',
'tempmail.eu',
'tempmail.it',
'tempmail2.com',
'tempmaildemo.com',
'tempmailer.com',
'tempmailer.de',
'tempomail.fr',
'temporarily.de',
'temporarioemail.com.br',
'temporaryemail.net',
'temporaryemail.us',
'temporaryforwarding.com',
'temporaryinbox.com',
'temporarymailaddress.com',
'tempsky.com',
'tempthe.net',
'tempymail.com',
'test.com',
'thanksnospam.info',
'thankyou2010.com',
'thc.st',
'thecloudindex.com',
'thisisnotmyrealemail.com',
'thismail.net',
'thismail.ru',
'throam.com',
'throwam.com',
'throwawayemailaddress.com',
'throwawaymail.com',
'tilien.com',
'tittbit.in',
'tizi.com',
'tmail.ws',
'tmailinator.com',
'tmpeml.info',
'toiea.com',
'tokenmail.de',
'toomail.biz',
'topranklist.de',
'tormail.net',
'tormail.org',
'tradermail.info',
'trash-amil.com',
'trash-mail.at',
'trash-mail.cf',
'trash-mail.com',
'trash-mail.de',
'trash-mail.ga',
'trash-mail.gq',
'trash-mail.ml',
'trash-mail.tk',
'trash-me.com',
'trash2009.com',
'trash2010.com',
'trash2011.com',
'trashdevil.com',
'trashdevil.de',
'trashemail.de',
'trashmail.at',
'trashmail.com',
'trashmail.de',
'trashmail.me',
'trashmail.net',
'trashmail.org',
'trashmail.ws',
'trashmailer.com',
'trashymail.com',
'trashymail.net',
'trayna.com',
'trbvm.com',
'trialmail.de',
'trickmail.net',
'trillianpro.com',
'tryalert.com',
'turual.com',
'twinmail.de',
'twoweirdtricks.com',
'tyldd.com',
'ubismail.net',
'uggsrock.com',
'umail.net',
'unlimit.com',
'unmail.ru',
'upliftnow.com',
'uplipht.com',
'uroid.com',
'us.af',
'valemail.net',
'venompen.com',
'vermutlich.net',
'veryrealemail.com',
'vidchart.com',
'viditag.com',
'viewcastmedia.com',
'viewcastmedia.net',
'viewcastmedia.org',
'viralplays.com',
'vmail.me',
'voidbay.com',
'vomoto.com',
'vpn.st',
'vsimcard.com',
'vubby.com',
'w3internet.co.uk',
'walala.org',
'walkmail.net',
'watchever.biz',
'webemail.me',
'webm4il.info',
'webuser.in',
'wee.my',
'weg-werf-email.de',
'wegwerf-email-addressen.de',
'wegwerf-email.at',
'wegwerf-emails.de',
'wegwerfadresse.de',
'wegwerfemail.com',
'wegwerfemail.de',
'wegwerfmail.de',
'wegwerfmail.info',
'wegwerfmail.net',
'wegwerfmail.org',
'wem.com',
'wetrainbayarea.com',
'wetrainbayarea.org',
'wh4f.org',
'whatiaas.com',
'whatpaas.com',
'whatsaas.com',
'whopy.com',
'whyspam.me',
'wickmail.net',
'wilemail.com',
'willhackforfood.biz',
'willselfdestruct.com',
'winemaven.info',
'wmail.cf',
'writeme.com',
'wronghead.com',
'wuzup.net',
'wuzupmail.net',
'wwwnew.eu',
'wzukltd.com',
'xagloo.com',
'xemaps.com',
'xents.com',
'xmaily.com',
'xoxy.net',
'xww.ro',
'xyzfree.net',
'yapped.net',
'yep.it',
'yogamaven.com',
'yomail.info',
'yopmail.com',
'yopmail.fr',
'yopmail.gq',
'yopmail.net',
'yopmail.org',
'yoru-dea.com',
'you-spam.com',
'youmail.ga',
'yourdomain.com',
'ypmail.webarnak.fr.eu.org',
'yuurok.com',
'yyhmail.com',
'z1p.biz',
'za.com',
'zebins.com',
'zebins.eu',
'zehnminuten.de',
'zehnminutenmail.de',
'zetmail.com',
'zippymail.info',
'zoaxe.com',
'zoemail.com',
'zoemail.net',
'zoemail.org',
'zomg.info',
'zxcv.com',
'zxcvbnm.com',
'zzz.com',
]
# reCAPTCHA API
# Both keys default to None; presumably reCAPTCHA verification is a no-op
# until a deployment supplies them — confirm against the code that reads them.
RECAPTCHA_SITE_KEY = None
RECAPTCHA_SECRET_KEY = None
# Server-side endpoint for verifying reCAPTCHA responses.
RECAPTCHA_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'
# akismet spam check
AKISMET_APIKEY = None
SPAM_CHECK_ENABLED = False
# NOTE(review): name suggests only public content is spam-checked when True —
# verify against the spam-check call sites.
SPAM_CHECK_PUBLIC_ONLY = True
SPAM_ACCOUNT_SUSPENSION_ENABLED = False
# Time window used by the account-suspension logic (24 hours).
SPAM_ACCOUNT_SUSPENSION_THRESHOLD = timedelta(hours=24)
SPAM_FLAGGED_MAKE_NODE_PRIVATE = False
SPAM_FLAGGED_REMOVE_FROM_SEARCH = False
SHARE_API_TOKEN = None
# number of nodes that need to be affiliated with an institution before the institution logo is shown on the dashboard
INSTITUTION_DISPLAY_NODE_THRESHOLD = 5
# refresh campaign every 5 minutes
CAMPAIGN_REFRESH_THRESHOLD = 5 * 60  # 5 minutes in seconds
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export a TensorFlow model.
See: go/tf-exporter
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import six
from google.protobuf.any_pb2 import Any
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import gc
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
def gfile_copy_callback(files_to_copy, export_dir_path):
  """Callback to copy files using `gfile.Copy` to an export directory.
  This method is used as the default `assets_callback` in `Exporter.init` to
  copy assets from the `assets_collection`. It can also be invoked directly to
  copy additional supplementary files into the export directory (in which case
  it is not a callback).
  Args:
    files_to_copy: A dictionary that maps original file paths to desired
      basename in the export directory.
    export_dir_path: Directory to copy the files to.
  """
  # Fixed typo in the log message ("assest" -> "assets").
  logging.info("Write assets into: %s using gfile_copy.", export_dir_path)
  gfile.MakeDirs(export_dir_path)
  for source_filepath, basename in files_to_copy.items():
    new_path = os.path.join(
        compat.as_bytes(export_dir_path), compat.as_bytes(basename))
    logging.info("Copying asset %s to path %s.", source_filepath, new_path)
    if gfile.Exists(new_path):
      # Guard against being restarted while copying assets, and the file
      # existing and being in an unknown state.
      # TODO(b/28676216): Do some file checks before deleting.
      logging.info("Removing file %s.", new_path)
      gfile.Remove(new_path)
    gfile.Copy(source_filepath, new_path)
def regression_signature(input_tensor, output_tensor):
  """Creates a regression signature.
  Args:
    input_tensor: Tensor specifying the input to a graph.
    output_tensor: Tensor specifying the output of a graph.
  Returns:
    A Signature message.
  """
  sig = manifest_pb2.Signature()
  reg = sig.regression_signature
  # Bind the graph tensors to the signature by name.
  reg.input.tensor_name = input_tensor.name
  reg.output.tensor_name = output_tensor.name
  return sig
def classification_signature(input_tensor,
                             classes_tensor=None,
                             scores_tensor=None):
  """Creates a classification signature.
  Args:
    input_tensor: Tensor specifying the input to a graph.
    classes_tensor: Tensor specifying the output classes of a graph.
    scores_tensor: Tensor specifying the scores of the output classes.
  Returns:
    A Signature message.
  """
  sig = manifest_pb2.Signature()
  cls_sig = sig.classification_signature
  cls_sig.input.tensor_name = input_tensor.name
  # Classes and scores are each optional; only set what the caller provided.
  if classes_tensor is not None:
    cls_sig.classes.tensor_name = classes_tensor.name
  if scores_tensor is not None:
    cls_sig.scores.tensor_name = scores_tensor.name
  return sig
def generic_signature(name_tensor_map):
  """Creates a generic signature of name to Tensor name.
  Args:
    name_tensor_map: Map from logical name to Tensor.
  Returns:
    A Signature message.
  """
  sig = manifest_pb2.Signature()
  generic_map = sig.generic_signature.map
  for logical_name, tensor in six.iteritems(name_tensor_map):
    # Record only the tensor's name; the graph itself is packaged separately.
    generic_map[logical_name].tensor_name = tensor.name
  return sig
class Exporter(object):
  """Exporter helps package a TensorFlow model for serving.
  Args:
    saver: Saver object.
  """
  def __init__(self, saver):
    # Makes a copy of the saver-def and disables garbage-collection, since the
    # exporter enforces garbage-collection independently. Specifically, since
    # the exporter performs atomic copies of the saver output, it is required
    # that garbage-collection via the underlying saver be disabled.
    saver_def = saver.as_saver_def()
    saver_def.ClearField("max_to_keep")
    self._saver = tf_saver.Saver(saver_def=saver_def)
    # Set True by init(); export() raises until then.
    self._has_init = False
    # Maps original asset file path -> basename inside the export directory.
    self._assets_to_copy = {}
  def init(self,
           graph_def=None,
           init_op=None,
           clear_devices=False,
           default_graph_signature=None,
           named_graph_signatures=None,
           assets_collection=None,
           assets_callback=gfile_copy_callback):
    """Initialization.
    Args:
      graph_def: A GraphDef message of the graph to be used in inference.
        GraphDef of default graph is used when None.
      init_op: Op to be used in initialization.
      clear_devices: If device info of the graph should be cleared upon export.
      default_graph_signature: Default signature of the graph.
      named_graph_signatures: Map of named input/output signatures of the graph.
      assets_collection: A collection of constant asset filepath tensors. If set
        the assets will be exported into the asset directory.
      assets_callback: callback with two argument called during export with the
        list of files to copy and the asset path.
    Raises:
      RuntimeError: if init is called more than once.
      TypeError: if init_op is not an Operation or None.
      ValueError: if asset file path tensors are not non-empty constant string
        scalar tensors.
    """
    # Avoid Dangerous default value []
    if named_graph_signatures is None:
      named_graph_signatures = {}
    assets = []
    if assets_collection:
      for asset_tensor in assets_collection:
        # Each asset tensor must be a non-empty constant string scalar
        # (enforced by _file_path_value, which raises TypeError otherwise).
        asset_filepath = self._file_path_value(asset_tensor)
        if not asset_filepath:
          raise ValueError("invalid asset filepath tensor %s" % asset_tensor)
        basename = os.path.basename(asset_filepath)
        assets.append((basename, asset_tensor))
        self._assets_to_copy[asset_filepath] = basename
    if self._has_init:
      raise RuntimeError("init should be called only once")
    self._has_init = True
    if graph_def or clear_devices:
      copy = graph_pb2.GraphDef()
      if graph_def:
        copy.CopyFrom(graph_def)
      else:
        # No explicit GraphDef given: snapshot the default graph instead.
        copy.CopyFrom(ops.get_default_graph().as_graph_def())
      if clear_devices:
        for node in copy.node:
          node.device = ""
      # Protos are stashed in graph collections wrapped in Any messages.
      graph_any_buf = Any()
      graph_any_buf.Pack(copy)
      ops.add_to_collection(constants.GRAPH_KEY, graph_any_buf)
    if init_op:
      if not isinstance(init_op, ops.Operation):
        raise TypeError("init_op needs to be an Operation: %s" % init_op)
      ops.add_to_collection(constants.INIT_OP_KEY, init_op)
    signatures_proto = manifest_pb2.Signatures()
    if default_graph_signature:
      signatures_proto.default_signature.CopyFrom(default_graph_signature)
    for signature_name, signature in six.iteritems(named_graph_signatures):
      signatures_proto.named_signatures[signature_name].CopyFrom(signature)
    signatures_any_buf = Any()
    signatures_any_buf.Pack(signatures_proto)
    ops.add_to_collection(constants.SIGNATURES_KEY, signatures_any_buf)
    # One AssetFile entry per asset, binding the export-dir filename to the
    # tensor that holds the original path.
    for filename, tensor in assets:
      asset = manifest_pb2.AssetFile()
      asset.filename = filename
      asset.tensor_binding.tensor_name = tensor.name
      asset_any_buf = Any()
      asset_any_buf.Pack(asset)
      ops.add_to_collection(constants.ASSETS_KEY, asset_any_buf)
    self._assets_callback = assets_callback
  def export(self,
             export_dir_base,
             global_step_tensor,
             sess=None,
             exports_to_keep=None):
    """Exports the model.
    Args:
      export_dir_base: A string path to the base export dir.
      global_step_tensor: An Tensor or tensor name providing the
        global step counter to append to the export directory path and set
        in the manifest version.
      sess: A Session to use to save the parameters.
      exports_to_keep: a gc.Path filter function used to determine the set of
        exports to keep. If set to None, all versions will be kept.
    Returns:
      The string path to the exported directory.
    Raises:
      RuntimeError: if init is not called.
      RuntimeError: if the export would overwrite an existing directory.
    """
    if not self._has_init:
      raise RuntimeError("init must be called first")
    # Export dir must not end with / or it will break exports to keep. Strip /.
    if export_dir_base.endswith("/"):
      export_dir_base = export_dir_base[:-1]
    global_step = training_util.global_step(sess, global_step_tensor)
    export_dir = os.path.join(
        compat.as_bytes(export_dir_base),
        compat.as_bytes(constants.VERSION_FORMAT_SPECIFIER % global_step))
    # Prevent overwriting on existing exports which could lead to bad/corrupt
    # storage and loading of models. This is an important check that must be
    # done before any output files or directories are created.
    if gfile.Exists(export_dir):
      raise RuntimeError("Overwriting exports can cause corruption and are "
                         "not allowed. Duplicate export dir: %s" % export_dir)
    # Output to a temporary directory which is atomically renamed to the final
    # directory when complete.
    tmp_export_dir = compat.as_text(export_dir) + "-tmp"
    gfile.MakeDirs(tmp_export_dir)
    self._saver.save(sess,
                     os.path.join(
                         compat.as_text(tmp_export_dir),
                         compat.as_text(constants.EXPORT_BASE_NAME)),
                     meta_graph_suffix=constants.EXPORT_SUFFIX_NAME)
    # Run the asset callback.
    if self._assets_callback and self._assets_to_copy:
      assets_dir = os.path.join(
          compat.as_bytes(tmp_export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY))
      gfile.MakeDirs(assets_dir)
      self._assets_callback(self._assets_to_copy, assets_dir)
    # TODO(b/27794910): Delete *checkpoint* file before rename.
    gfile.Rename(tmp_export_dir, export_dir)
    if exports_to_keep:
      # create a simple parser that pulls the export_version from the directory.
      def parser(path):
        # NOTE(review): export_dir_base is interpolated into the regex
        # unescaped; a base path containing regex metacharacters may
        # mis-match here — confirm callers only pass plain paths.
        match = re.match("^" + export_dir_base + "/(\\d{8})$", path.path)
        if not match:
          return None
        return path._replace(export_version=int(match.group(1)))
      # Delete every export that the keep-filter does NOT select.
      paths_to_delete = gc.negation(exports_to_keep)
      for p in paths_to_delete(gc.get_paths(export_dir_base, parser=parser)):
        gfile.DeleteRecursively(p.path)
    return export_dir
  def _file_path_value(self, path_tensor):
    """Returns the filepath value stored in constant `path_tensor`."""
    if not isinstance(path_tensor, ops.Tensor):
      raise TypeError("tensor is not a Tensor")
    if path_tensor.op.type != "Const":
      raise TypeError("Only constants tensor are supported")
    if path_tensor.dtype != dtypes.string:
      raise TypeError("File paths should be string")
    str_value = path_tensor.op.get_attr("value").string_val
    if len(str_value) != 1:
      raise TypeError("Only scalar tensors are supported")
    return str_value[0]
|
|
from __future__ import annotations
from collections.abc import Callable # noqa: PDF001
import re
import textwrap
import unicodedata
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.missing as libmissing
import pandas._libs.ops as libops
from pandas._typing import (
Dtype,
Scalar,
)
from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.missing import isna
from pandas.core.strings.base import BaseStringArrayMethods
class ObjectStringArrayMixin(BaseStringArrayMethods):
    """
    String Methods operating on object-dtype ndarrays.
    """
    # Fill value used for missing entries; StringArray subclasses override
    # this with pd.NA.
    _str_na_value = np.nan
    def __len__(self):
        # For typing, _str_map relies on the object being sized.
        raise NotImplementedError
    def _str_map(
        self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
    ):
        """
        Map a callable over valid element of the array.
        Parameters
        ----------
        f : Callable
            A function to call on each non-NA element.
        na_value : Scalar, optional
            The value to set for NA values. Might also be used for the
            fill value if the callable `f` raises an exception.
            This defaults to ``self._str_na_value`` which is ``np.nan``
            for object-dtype and Categorical and ``pd.NA`` for StringArray.
        dtype : Dtype, optional
            The dtype of the result array.
        convert : bool, default True
            Whether to call `maybe_convert_objects` on the resulting ndarray
        """
        if dtype is None:
            dtype = np.dtype("object")
        if na_value is None:
            na_value = self._str_na_value
        if not len(self):
            # error: Argument 1 to "ndarray" has incompatible type "int";
            # expected "Sequence[int]"
            return np.ndarray(0, dtype=dtype)  # type: ignore[arg-type]
        arr = np.asarray(self, dtype=object)
        mask = isna(arr)
        # Only ask map_infer_mask to convert when at least one value is non-NA.
        map_convert = convert and not np.all(mask)
        try:
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert)
        except (TypeError, AttributeError) as e:
            # Reraise the exception if callable `f` got wrong number of args.
            # The user may want to be warned by this, instead of getting NaN
            p_err = (
                r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
                r"(?(3)required )positional arguments?"
            )
            if len(e.args) >= 1 and re.search(p_err, e.args[0]):
                # FIXME: this should be totally avoidable
                raise e
            def g(x):
                # This type of fallback behavior can be removed once
                # we remove object-dtype .str accessor.
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value
            # Retry element-wise with the forgiving wrapper.
            return self._str_map(g, na_value=na_value, dtype=dtype)
        if not isinstance(result, np.ndarray):
            return result
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
        if convert and result.dtype == object:
            result = lib.maybe_convert_objects(result)
        return result
    def _str_count(self, pat, flags=0):
        regex = re.compile(pat, flags=flags)
        f = lambda x: len(regex.findall(x))
        return self._str_map(f, dtype="int64")
    def _str_pad(self, width, side="left", fillchar=" "):
        # "left" pads on the left of the text, i.e. right-justifies it.
        if side == "left":
            f = lambda x: x.rjust(width, fillchar)
        elif side == "right":
            f = lambda x: x.ljust(width, fillchar)
        elif side == "both":
            f = lambda x: x.center(width, fillchar)
        else:  # pragma: no cover
            raise ValueError("Invalid side")
        return self._str_map(f)
    def _str_contains(self, pat, case=True, flags=0, na=np.nan, regex: bool = True):
        if regex:
            if not case:
                flags |= re.IGNORECASE
            pat = re.compile(pat, flags=flags)
            f = lambda x: pat.search(x) is not None
        else:
            if case:
                f = lambda x: pat in x
            else:
                # Case-insensitive literal search via uppercasing both sides.
                upper_pat = pat.upper()
                f = lambda x: upper_pat in x.upper()
        return self._str_map(f, na, dtype=np.dtype("bool"))
    def _str_startswith(self, pat, na=None):
        f = lambda x: x.startswith(pat)
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))
    def _str_endswith(self, pat, na=None):
        f = lambda x: x.endswith(pat)
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))
    def _str_replace(
        self,
        pat: str | re.Pattern,
        repl: str | Callable,
        n: int = -1,
        case: bool = True,
        flags: int = 0,
        regex: bool = True,
    ):
        if case is False:
            # add case flag, if provided
            flags |= re.IGNORECASE
        if regex or flags or callable(repl):
            if not isinstance(pat, re.Pattern):
                if regex is False:
                    # Literal replacement routed through re: escape the pattern.
                    pat = re.escape(pat)
                pat = re.compile(pat, flags=flags)
            # re.sub uses count=0 for "replace all", unlike str.replace's -1.
            n = n if n >= 0 else 0
            f = lambda x: pat.sub(repl=repl, string=x, count=n)
        else:
            f = lambda x: x.replace(pat, repl, n)
        return self._str_map(f, dtype=str)
    def _str_repeat(self, repeats):
        if is_scalar(repeats):
            def scalar_rep(x):
                # Works for both bytes and str elements.
                try:
                    return bytes.__mul__(x, repeats)
                except TypeError:
                    return str.__mul__(x, repeats)
            return self._str_map(scalar_rep, dtype=str)
        else:
            from pandas.core.arrays.string_ import StringArray
            from pandas.core.arrays.string_arrow import ArrowStringArray
            def rep(x, r):
                if x is libmissing.NA:
                    return x
                try:
                    return bytes.__mul__(x, r)
                except TypeError:
                    return str.__mul__(x, r)
            repeats = np.asarray(repeats, dtype=object)
            result = libops.vec_binop(np.asarray(self), repeats, rep)
            if isinstance(self, (StringArray, ArrowStringArray)):
                # Not going through map, so we have to do this here.
                result = type(self)._from_sequence(result)
            return result
    def _str_match(
        self, pat: str, case: bool = True, flags: int = 0, na: Scalar = None
    ):
        if not case:
            flags |= re.IGNORECASE
        regex = re.compile(pat, flags=flags)
        f = lambda x: regex.match(x) is not None
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))
    def _str_fullmatch(
        self,
        pat: str | re.Pattern,
        case: bool = True,
        flags: int = 0,
        na: Scalar = None,
    ):
        if not case:
            flags |= re.IGNORECASE
        regex = re.compile(pat, flags=flags)
        f = lambda x: regex.fullmatch(x) is not None
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))
    def _str_encode(self, encoding, errors="strict"):
        f = lambda x: x.encode(encoding, errors=errors)
        return self._str_map(f, dtype=object)
    def _str_find(self, sub, start=0, end=None):
        return self._str_find_(sub, start, end, side="left")
    def _str_rfind(self, sub, start=0, end=None):
        return self._str_find_(sub, start, end, side="right")
    def _str_find_(self, sub, start, end, side):
        # Shared implementation for find/rfind; returns -1 where not found.
        if side == "left":
            method = "find"
        elif side == "right":
            method = "rfind"
        else:  # pragma: no cover
            raise ValueError("Invalid side")
        if end is None:
            f = lambda x: getattr(x, method)(sub, start)
        else:
            f = lambda x: getattr(x, method)(sub, start, end)
        return self._str_map(f, dtype="int64")
    def _str_findall(self, pat, flags=0):
        regex = re.compile(pat, flags=flags)
        return self._str_map(regex.findall, dtype="object")
    def _str_get(self, i):
        def f(x):
            if isinstance(x, dict):
                return x.get(i)
            elif len(x) > i >= -len(x):
                # Index is within bounds (including negative indexing).
                return x[i]
            return self._str_na_value
        return self._str_map(f)
    def _str_index(self, sub, start=0, end=None):
        # NOTE(review): both branches are identical; upstream pandas calls
        # x.index(sub, start) in the else branch. Behavior is the same here
        # because end is None in that branch — confirm before "fixing".
        if end:
            f = lambda x: x.index(sub, start, end)
        else:
            f = lambda x: x.index(sub, start, end)
        return self._str_map(f, dtype="int64")
    def _str_rindex(self, sub, start=0, end=None):
        # NOTE(review): same dead-identical branching as _str_index above.
        if end:
            f = lambda x: x.rindex(sub, start, end)
        else:
            f = lambda x: x.rindex(sub, start, end)
        return self._str_map(f, dtype="int64")
    def _str_join(self, sep):
        return self._str_map(sep.join)
    def _str_partition(self, sep, expand):
        result = self._str_map(lambda x: x.partition(sep), dtype="object")
        return result
    def _str_rpartition(self, sep, expand):
        return self._str_map(lambda x: x.rpartition(sep), dtype="object")
    def _str_len(self):
        return self._str_map(len, dtype="int64")
    def _str_slice(self, start=None, stop=None, step=None):
        obj = slice(start, stop, step)
        return self._str_map(lambda x: x[obj])
    def _str_slice_replace(self, start=None, stop=None, repl=None):
        if repl is None:
            repl = ""
        def f(x):
            # If the slice is empty, insert at `start` rather than replace.
            if x[start:stop] == "":
                local_stop = start
            else:
                local_stop = stop
            y = ""
            if start is not None:
                y += x[:start]
            y += repl
            if stop is not None:
                y += x[local_stop:]
            return y
        return self._str_map(f)
    def _str_split(self, pat=None, n=-1, expand=False):
        if pat is None:
            if n is None or n == 0:
                n = -1
            f = lambda x: x.split(pat, n)
        else:
            if len(pat) == 1:
                # Single-character separator: plain str.split is sufficient.
                if n is None or n == 0:
                    n = -1
                f = lambda x: x.split(pat, n)
            else:
                # Multi-character pattern: treat as a regular expression.
                if n is None or n == -1:
                    n = 0
                regex = re.compile(pat)
                f = lambda x: regex.split(x, maxsplit=n)
        return self._str_map(f, dtype=object)
    def _str_rsplit(self, pat=None, n=-1):
        if n is None or n == 0:
            n = -1
        f = lambda x: x.rsplit(pat, n)
        return self._str_map(f, dtype="object")
    def _str_translate(self, table):
        return self._str_map(lambda x: x.translate(table))
    def _str_wrap(self, width, **kwargs):
        kwargs["width"] = width
        tw = textwrap.TextWrapper(**kwargs)
        return self._str_map(lambda s: "\n".join(tw.wrap(s)))
    def _str_get_dummies(self, sep="|"):
        from pandas import Series
        arr = Series(self).fillna("")
        try:
            # Surround each value with sep so every tag is sep-delimited.
            arr = sep + arr + sep
        except TypeError:
            arr = sep + arr.astype(str) + sep
        tags: set[str] = set()
        for ts in Series(arr).str.split(sep):
            tags.update(ts)
        tags2 = sorted(tags - {""})
        dummies = np.empty((len(arr), len(tags2)), dtype=np.int64)
        for i, t in enumerate(tags2):
            pat = sep + t + sep
            dummies[:, i] = lib.map_infer(arr.to_numpy(), lambda x: pat in x)
        return dummies, tags2
    def _str_upper(self):
        return self._str_map(lambda x: x.upper())
    def _str_isalnum(self):
        return self._str_map(str.isalnum, dtype="bool")
    def _str_isalpha(self):
        return self._str_map(str.isalpha, dtype="bool")
    def _str_isdecimal(self):
        return self._str_map(str.isdecimal, dtype="bool")
    def _str_isdigit(self):
        return self._str_map(str.isdigit, dtype="bool")
    def _str_islower(self):
        return self._str_map(str.islower, dtype="bool")
    def _str_isnumeric(self):
        return self._str_map(str.isnumeric, dtype="bool")
    def _str_isspace(self):
        return self._str_map(str.isspace, dtype="bool")
    def _str_istitle(self):
        return self._str_map(str.istitle, dtype="bool")
    def _str_isupper(self):
        return self._str_map(str.isupper, dtype="bool")
    def _str_capitalize(self):
        return self._str_map(str.capitalize)
    def _str_casefold(self):
        return self._str_map(str.casefold)
    def _str_title(self):
        return self._str_map(str.title)
    def _str_swapcase(self):
        return self._str_map(str.swapcase)
    def _str_lower(self):
        return self._str_map(str.lower)
    def _str_normalize(self, form):
        f = lambda x: unicodedata.normalize(form, x)
        return self._str_map(f)
    def _str_strip(self, to_strip=None):
        return self._str_map(lambda x: x.strip(to_strip))
    def _str_lstrip(self, to_strip=None):
        return self._str_map(lambda x: x.lstrip(to_strip))
    def _str_rstrip(self, to_strip=None):
        return self._str_map(lambda x: x.rstrip(to_strip))
    def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
        regex = re.compile(pat, flags=flags)
        na_value = self._str_na_value
        if not expand:
            # Single-group extraction: return just the first group per element.
            def g(x):
                m = regex.search(x)
                return m.groups()[0] if m else na_value
            return self._str_map(g, convert=False)
        # Expanded extraction: one row of group values per element.
        empty_row = [na_value] * regex.groups
        def f(x):
            if not isinstance(x, str):
                return empty_row
            m = regex.search(x)
            if m:
                return [na_value if item is None else item for item in m.groups()]
            else:
                return empty_row
        return [f(val) for val in np.asarray(self)]
|
|
import pdb
import os
#===============================================================================
# Application-Specific Utilities
#===============================================================================
import ssa.globals as g
def isverbose(verbose=False):
 """True if verbosity was requested explicitly or via the parsed CLI args."""
 if verbose:
  return True
 return g.args.verbose if g.args else False
#===============================================================================
# Basic Utilities
#===============================================================================
def panic(msg, exitcode=1):
 """Write every line of msg to stderr (newline-terminated), then exit."""
 err = sys.stderr.write
 for line in listify(msg):
  err(line if line.endswith('\n') else line + '\n')
 sys.exit(exitcode)
def asserting(val):
 """Like assert, but drops into the pdb debugger first when val is falsy."""
 if not val:
  pdb.set_trace()
 assert val
def reverse(seq):
 """Return a reversed copy of seq, preserving its sequence type."""
 rev = seq[::-1]
 return rev
def contains(xs, elem):
 """True if the string xs contains elem as a substring."""
 return xs.find(elem) != -1
def no(xs):
 """True for falsy or zero-length values (None, '', [], {}, ...)."""
 if not xs:
  return True
 return len(xs) <= 0

empty = no
def is_str(x):
 """Exact-type check for str/unicode (subclasses are excluded on purpose)."""
 if type(x) is str:
  return True
 return type(x) is unicode
def is_iterable(x):
 """True if x is iterable; strings are deliberately treated as scalars.

 Fixed: use the 'except TypeError:' form - the old 'except TypeError, te'
 spelling is a syntax error under Python 3 (and 'te' was never used).
 """
 if is_str(x):
  return False
 try:
  iter(x)
  return True
 except TypeError:
  return False
def listify(x):
 """Coerce x to a list: iterables are copied, falsy -> [], scalar -> [x]."""
 if is_iterable(x):
  return list(x)
 return [x] if x else list()
def flatten_once(xs):
 """Flatten one nesting level; scalar members pass through via listify."""
 out = []
 for sub in xs:
  out.extend(listify(sub))
 return out
def flatten(xs, recursive=True):
 """Flatten xs one level, or fully when recursive is True."""
 ys = flatten_once(xs)
 if recursive and any(is_iterable(y) for y in ys):
  # Still nested somewhere: flatten the partially-flattened list again.
  return flatten(ys, recursive=True)
 return ys
#===============================================================================
# Other Utilities
#===============================================================================
import platform
def iswindows():
 """True when running on a Windows platform."""
 return 'windows' in platform.system().lower()
def wait_any_key():
 # Block until the user presses any key (Windows/msvcrt) or Enter (other
 # platforms). NOTE(review): Python 2 only - uses the print statement and
 # raw_input; returns whatever the platform read call returns.
 if iswindows():
  print "Press any key to continue..."
  import msvcrt as m
  return m.getch()
 else:
  return raw_input("Press enter to continue...")
def increase_stackoverflow_limit(limit=10**6):
 """Raise the interpreter recursion limit (default: one million frames)."""
 import resource
 import sys
 sys.setrecursionlimit(limit)
#===============================================================================
# Filesystem Utilities
#===============================================================================
class Path(object):
 """Normalized filesystem path with value semantics (eq/hash on abspath)."""
 def __init__(self, path):
  assert(isinstance(path, str) or isinstance(path, unicode))
  self._path = path
 @property
 def path(self):
  """The stored path, normpath- and normcase-normalized."""
  return os.path.normcase(os.path.normpath(self._path))
 @property
 def abspath(self):
  """Absolute form of the normalized path."""
  return os.path.abspath(self.path)
 @property
 def isfile(self):
  """True if the path exists and is a regular file."""
  return os.path.isfile(self.path)
 @property
 def isdir(self):
  """True if the path exists and is a directory."""
  return os.path.isdir(self.path)
 @property
 def exists(self):
  """True if anything exists at the path."""
  return os.path.exists(self.path)
 def __eq__(self, other):
  return self.abspath == mkpath(other).abspath
 def __hash__(self):
  return hash(self.abspath)
 def __str__(self):
  return self.path
 def __repr__(self):
  return repr(self.path)
def mkpath(filepath):
 """Coerce filepath to a Path, asserting it is non-empty first."""
 asserting(filepath)
 return filepath if isinstance(filepath, Path) else Path(filepath)
def getpath(filepath):
 """Coerce a Path (or plain string) to a plain string path."""
 return str(filepath) if isinstance(filepath, Path) else filepath
class FileSet(object):
def __init__(self, filepaths=[]):
self.added = set()
self.files = []
self.add(filepaths)
def add(self, x):
if not x:
return
if isinstance(x, FileSet):
return self.add(x.files)
for filepath in listify(x):
path = mkpath(filepath)
if not path.exists:
print "Path doesn't exist: %s" % path
return
if not path.isfile:
pdb.set_trace()
print "Path isn't a file: %s" % path
return
# if it was already added, ignore it.
if path in self.added:
return
self.added.add(path)
self.files.append(path)
def __contains__(self, filepath):
path = mkpath(filepath)
return path in self.added
def __repr__(self):
return repr(self.files)
def __str__(self):
return repr(self.files)
import fnmatch
def patmatch(name, patterns, ignorecase=False):
 """True if name matches any of the fnmatch-style patterns."""
 if ignorecase:
  name = name.lower()
 for pat in listify(patterns):
  if fnmatch.fnmatch(name, pat.lower() if ignorecase else pat):
   return True
 return False
#
# adapted from http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
#
import os
def find_files_in(directory, patterns=[], ignore_patterns=[], ignorecase=True, verbose=False):
 # Yield files under 'directory' that match any of 'patterns' and none of
 # 'ignore_patterns'. 'directory' may itself be a single file.
 # NOTE(review): the mutable defaults are harmless here - listify() copies
 # before the in-place '+=' below.
 patterns = listify(patterns)
 ignore_patterns = listify(ignore_patterns)
 # if no patterns, then match everything.
 if len(patterns) <= 0:
  patterns += ['*']
 if os.path.isfile(directory):
  # if the 'directory' is actually a file, then allow it if it
  # matches.
  if patmatch(directory, patterns, ignorecase=ignorecase):
   if not patmatch(directory, ignore_patterns, ignorecase=ignorecase):
    yield directory
 else:
  for root, dirs, files in os.walk(directory):
   if isverbose(verbose):
    print 'find_files_in(%s): trying %d files' % (repr(root), len(files))
   for basename in files:
    if patmatch(basename, patterns, ignorecase=ignorecase):
     if not patmatch(basename, ignore_patterns, ignorecase=ignorecase):
      filename = os.path.join(root, basename)
      yield filename
def find_files(paths_or_patterns, patterns=['*'], ignore_patterns=[], ignorecase=True, verbose=False):
 # Split the first argument into literal paths and glob patterns (any entry
 # containing '*'), then yield each matching file at most once - 'found'
 # (a FileSet) de-duplicates across the searched paths.
 patterns = list(listify(patterns))
 ignore_patterns = list(listify(ignore_patterns))
 paths = []
 for arg in paths_or_patterns:
  if arg.find('*') >= 0:
   # get all patterns that were passed into the first param.
   patterns.append(arg)
  else:
   # get all paths that were passed into the first param.
   paths.append(arg)
 # search for matching files under each path.
 found = FileSet()
 for path in paths:
  path = mkpath(path)
  # skip bogus dirs.
  if not path.exists:
   print "find_files(): Doesn't exist: %s" % path
   continue
  if isverbose(verbose):
   print 'find_files(%s)' % repr(str(path))
  for filepath in find_files_in(str(path), patterns=patterns, ignore_patterns=ignore_patterns, ignorecase=ignorecase, verbose=verbose):
   if filepath not in found:
    found.add(filepath)
    yield filepath
|
|
#
# Written by Maxim Khitrov (July 2011)
#
from gzip import GzipFile
from . import conf
from .util import *
import functools
import hashlib
import logging
import os
import shutil
import sqlite3
import time
import traceback
log = logging.getLogger('gmdb.db')
# Signed <-> unsigned conversion for 64-bit INTEGER columns (msg_id and thr_id)
def int64(v):
 """Fold an unsigned 64-bit id into SQLite's signed INTEGER range."""
 return v if v < 2**63 else v - 2**64
def uint64(v):
 """Inverse of int64: restore the unsigned 64-bit id."""
 return v if v >= 0 else v + 2**64
os_path = os.path
#
### Controller ###############################################################
#
class DBError(Exception):
 """Generic database-layer failure raised by gmdb.db components."""
class DBControl:
 """SQLite database controller.

 Owns the connection, an exclusive lock file, the in-memory 'conf' table
 and the on-disk message store (FileDB). Use as a context manager.
 """
 def __init__(self, root):
  # Relative roots are resolved against the configured ROOT_DIR.
  if not os_path.isabs(root):
   root = os_path.join(conf.ROOT_DIR, root)
  self.root = os_path.realpath(root)
  # The lock file guarantees exclusive access to the database directory.
  self.lock = LockFile(self._path('~lock'))
  self._reset()
 def __enter__(self):
  """Open the database with exclusive access."""
  if not os_path.isdir(self.root):
   log.info('Creating database directory')
   os.makedirs(self.root, conf.DIR_MODE)
  self.lock.create()
  try:
   self._open()
   return self
  except Exception:
   # Never leave the lock behind if the open failed.
   self.lock.remove()
   raise
 def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
  """Close the database and release exclusive access."""
  try:
   # Finish any still-active operation, recording how it ended.
   if self.op and self.op.active:
    if exc_type:
     res = 'abort' if exc_type is KeyboardInterrupt else 'error'
     self.op.finish(res, traceback.format_exc())
    else:
     self.op.finish()
    self.save_conf(False)
  except Exception:
   log.exception('Operation not finished')
   # Do not re-raise
  finally:
   self._close()
   self.lock.remove()
 def __contains__(self, msg_id):
  """Check if the given msg_id is present in the database."""
  return self.get_digest(msg_id) in self.files
 def begin_op(self, name, *args, **kwargs):
  """Begin new operation."""
  if self.op and self.op.active:
   raise DBError('previous operation is not finished')
  if self.cn.in_transaction:
   raise DBError('previous transaction is not finished')
  self.op = Op(self, name, *args, **kwargs)
  return self.op
 def history(self, name=None):
  """Get a complete history of all operations."""
  if name is None:
   sql = 'SELECT * FROM op ORDER BY op_id'
   cur = self.cn.execute(sql)
  else:
   sql = 'SELECT * FROM op WHERE name = ? ORDER BY op_id'
   cur = self.cn.execute(sql, (name,))
  return [dict(row) for row in cur]
 def get_attrs(self, msg_id, labels=None):
  """Load message attributes for the given msg_id."""
  sql = 'SELECT * FROM msg_view WHERE msg_id = ?'
  for attrs in map(dict, self.cn.execute(sql, (int64(msg_id),))):
   if labels is None:
    attrs['labels'] = qstr_split(attrs['labels'] or '')
   else:
    attrs['labels'] = labels
   attrs['flags'] = (attrs['flags'] or '').split()
   # Ids are stored signed in SQLite; expose them unsigned.
   for k in ('msg_id', 'thr_id'):
    attrs[k] = uint64(attrs[k])
   return attrs
  return None
 def get_digest(self, msg_id):
  """Get file digest for the given msg_id."""
  sql = 'SELECT digest FROM msg, file USING (file_id) WHERE msg_id = ?'
  for digest, in self.cn.execute(sql, (int64(msg_id),)):
   return digest
  return None
 def get_body(self, msg_id):
  """Load message body for the given msg_id."""
  return self.files.read(self.get_digest(msg_id))
 def get_labels(self, msg_id=None):
  """Get labels of a specific message or all labels in the database."""
  if msg_id is not None:
   sql = 'SELECT labels FROM lbl, msg USING (lbl_id) WHERE msg_id = ?'
   cur = self.cn.execute(sql, (int64(msg_id),))
  else:
   cur = self.cn.execute('SELECT labels FROM lbl')
  lbls = set()
  for labels, in cur:
   lbls.update(qstr_split(labels))
  return lbls
 def save_conf(self, reload=True):
  """Save current configuration to the database."""
  # Generator of (value, key) pairs, consumed inside the transaction.
  items = ((v, k) for k, v in self.conf.items())
  self.cn.execute('BEGIN')
  with self.cn:
   self.cn.executemany('UPDATE conf SET value=? WHERE key = ?', items)
  if reload:
   self._load_conf()
 def attach_index(self, use_current=True):
  """Attach the index database to the current connection."""
  cn = self.cn
  fts_ver = self._fts_version()
  idx_path = self._path(conf.DB_IDX_NAME)
  attached = True
  indexed = self.conf['indexed']
  log.debug('Attaching index database...')
  cn.execute('ATTACH ? AS idx', (idx_path,))
  try:
   # (Re)create the index database, if needed
   uv, = next(cn.execute('PRAGMA idx.user_version'))
   if uv != conf.DB_IDX_VER:
    attached = False
    cn.execute('DETACH idx')
    if use_current:
     log.error("Index must be rebuilt (see 'index' command)")
     return False
    os.unlink(idx_path)
    cn.execute('ATTACH ? AS idx', (idx_path,))
    attached = True
    indexed = False
   self._run_script(cn, 'init.sql', db='idx', sync=conf.DB_SYNC)
   if uv != conf.DB_IDX_VER:
    self._run_script(cn, 'index.sql', uv=conf.DB_IDX_VER,
     fts_ver=fts_ver, tok=conf.DB_FTS_TOK)
   # Update message index
   if not indexed:
    if use_current:
     log.warn("Index is out of date (see 'index' command)")
     return True
    start = walltime()
    self._update_index()
    log.debug('Index update took {:.3f} sec', walltime() - start)
    self.conf['indexed'] = 1
    self.save_conf(False)
   return True
  except Exception:
   # Detach on failure so the connection is left in a clean state.
   if attached:
    cn.execute('DETACH idx')
   raise
 def cleanup(self):
  """Remove old and unreferenced entries from the database."""
  cn = self.cn
  cn.execute('BEGIN')
  with cn:
   # Delete old operations
   if self.conf['retention']:
    lim = (unixtime() - self.conf['retention'],)
    sql = ' FROM op WHERE start < ?'
    num = next(cn.execute('SELECT COUNT(op_id)' + sql, lim))[0]
    if num:
     date = time.strftime('%Y-%m-%d', time.localtime(lim[0]))
     log.info('Removing {} operation(s) before {}', num, date)
     cn.execute('DELETE' + sql, lim)
   # Delete unreferenced files from disk and database
   sql = '''
   SELECT file_id, digest FROM file LEFT JOIN msg USING (file_id)
   WHERE msg.file_id IS NULL
   '''
   rm = self.files.remove
   for file_id, digest in cn.execute(sql):
    cn.execute('DELETE FROM file WHERE file_id = ?', (file_id,))
    rm(digest)
   # Delete unreferenced flags and labels
   for table in ('flag', 'lbl'):
    cn.execute('''
    DELETE FROM {0} WHERE {0}_id IN (
     SELECT {0}_id FROM {0}
     LEFT JOIN msg USING ({0}_id)
     WHERE msg.{0}_id IS NULL
    );
    '''.format(table))
  cn.executescript('VACUUM; ANALYZE;')
 def _path(self, *args):
  """Create a full path relative to the database root."""
  return os_path.join(self.root, *args)
 def _open(self):
  """Open SQLite database."""
  db_path = self._path(conf.DB_NAME)
  is_new = not (os_path.isfile(db_path) and os_path.getsize(db_path))
  if getattr(conf, 'db_backup', False) and not is_new:
   log.info('Creating database backup: {}.bak', db_path)
   shutil.copy2(db_path, db_path + '.bak')
  log.debug('Opening database: {}', db_path)
  self.cn = sqlite3.connect(db_path, 1.0, sqlite3.PARSE_DECLTYPES, None)
  try:
   os.chmod(db_path, conf.FILE_MODE)
   self._init(self.cn, is_new)
   self._load_conf()
  except Exception:
   self._close()
   raise
 def _close(self):
  """Close SQLite database."""
  if self.cn:
   try:
    log.debug('Closing database')
    self.cn.close()
    # Remove empty journal files left behind by SQLite.
    for name in (conf.DB_NAME, conf.DB_IDX_NAME):
     journal = self._path(name + '-journal')
     if os_path.isfile(journal) and not os_path.getsize(journal):
      log.debug('Removing {} journal', name)
      os.unlink(journal)
   except Exception:
    log.exception('Failed to close database')
    # Do not re-raise
   finally:
    self._reset()
 def _reset(self):
  """Reset internal attributes."""
  self.cn = None
  self.conf = None
  self.files = None
  self.op = None
 def _init(self, cn, is_new):
  """Initialize database connection prior to use."""
  cn.row_factory = sqlite3.Row
  self._run_script(cn, 'init.sql', db='main', sync=conf.DB_SYNC)
  if is_new:
   log.debug('Creating database structure [ver={}]', conf.DB_VER)
   self._run_script(cn, 'create.sql', uv=conf.DB_VER)
  # Check database status
  uv, = next(cn.execute('PRAGMA user_version'))
  fk, = next(cn.execute('PRAGMA foreign_keys'), [None])
  if uv != conf.DB_VER:
   raise DBError('unexpected database version')
  if fk != 1:
   raise DBError('foreign key support is disabled')
  for row in cn.execute('SELECT op_id FROM op WHERE result IS NULL'):
   log.warn('Database contains an unfinished operation')
   break
 def _run_script(self, cn, name, **kwargs):
  """Execute a SQL script from the 'misc' directory."""
  with open(os_path.join(conf.PKG_DIR, 'misc', name)) as fd:
   script = fd.read()
  if kwargs:
   script = script.format(**kwargs)
  try:
   cn.executescript(script)
  except Exception:
   try:
    # cn.in_transaction is always False here for some reason
    cn.execute('ROLLBACK')
   except sqlite3.OperationalError:
    pass
   raise
 def _load_conf(self):
  """Load 'conf' table into memory and create file db interface."""
  db_conf = dict(self.cn.execute('SELECT key, value FROM conf'))
  # Promote numeric strings to ints for convenient comparisons.
  for k, v in db_conf.items():
   if v and v.isdigit():
    db_conf[k] = int(v)
  self.conf = db_conf
  self.files = FileDB(self.root, db_conf)
 def _fts_version(self):
  """Determine which full-text search module is supported."""
  cn = self.cn
  if conf.DB_FTS_EXT:
   cn.enable_load_extension(True)
   cn.load_extension(conf.DB_FTS_EXT)
  cn.execute('ATTACH ":memory:" AS fts')
  try:
   # Probe each candidate FTS module by trying to create a table with it.
   for ver in conf.DB_FTS_VER:
    try:
     cn.execute('CREATE VIRTUAL TABLE fts.temp USING ' + ver)
     return ver
    except sqlite3.OperationalError:
     pass
   raise DBError('FTS module is not available')
  finally:
   cn.execute('DETACH fts')
 def _update_index(self):
  """Populate or update message index."""
  cn = self.cn
  # Delete messages no longer present in 'file'
  cn.execute('BEGIN')
  with cn:
   cn.executescript('''
   DELETE FROM map WHERE digest NOT IN (SELECT digest FROM file);
   DELETE FROM fts WHERE docid NOT IN (SELECT docid FROM map);
   ''')
  # Count the total number of new messages that need to be indexed
  sql = '''
  SELECT {} FROM file LEFT JOIN map USING (digest)
  WHERE map.digest IS NULL
  '''
  total, = next(cn.execute(sql.format('COUNT(digest)')))
  if not total:
   return
  # Index new messages
  from . import email
  log.info('Indexing {} new message(s)', total)
  sql_ins_map = 'INSERT INTO map (digest, date) VALUES (:digest, :date)'
  sql_ins_fts = '''
  INSERT INTO fts (docid, "from", "to", subject, body)
  VALUES (:docid, :from, :to, :subject, :body)
  '''
  cn.execute('BEGIN')
  err = log.exception if conf.verbose >= 2 else log.warn
  with cn:
   for n, (digest,) in enumerate(cn.execute(sql.format('digest')), 1):
    if n % 1000 == 0:
     log.debug('{} / {} ({:.3f})', n, total, n / total * 100.0)
    fd = self.files.open(digest)
    if fd is None:
     log.warn('Failed to open message file [digest={}]', digest)
     continue
    try:
     with fd:
      msg = email.parse(fd)
    except Exception:
     err('Failed to parse message [digest={}]', digest)
     continue
    msg['digest'] = digest
    msg['docid'] = cn.execute(sql_ins_map, msg).lastrowid
    cn.execute(sql_ins_fts, msg)
  log.debug('{0} / {0} (100.000%)', total)
#
### Operations ###############################################################
#
class Op(metaclass=RegisteredType):
 """Operation base class."""
 def __new__(cls, db, name, *args, **kwargs):
  # Dispatch Op(db, 'backup', ...) to the subclass registered under name.
  if cls is Op:
   cls = Op.registry[name]
   if cls.__new__ is not Op.__new__:
    return cls.__new__(cls, db, name, *args, **kwargs)
  return object.__new__(cls)
 def __init__(self, db, name, *, temp=False):
  start = unixtime()
  # Temporary operations (op_id 0) are not recorded in the 'op' table.
  if temp:
   op_id = 0
  else:
   sql = 'INSERT INTO op (name, start) VALUES (?,?)'
   op_id = db.cn.execute(sql, (name, start)).lastrowid
  self.db = db # DBControl instance
  self.name = name # Operation name ('backup', 'restore', etc.)
  self.id = op_id # Operation ID
  self.start = start # Start time
  self.stop = None # Stop time
  self.result = None # Final result ('ok', 'abort', 'error')
  self.info = None # Additional result information
  if not temp:
   self.begin()
   log.info('Operation {} started ({})', op_id, name)
 def __enter__(self):
  return self
 def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
  self.rollback() if exc_type else self.commit()
 def begin(self):
  # Start a transaction unless one is already open.
  self.db.cn.in_transaction or self.db.cn.execute('BEGIN')
 def commit(self):
  self.db.cn.in_transaction and self.db.cn.commit()
 def rollback(self):
  self.db.cn.in_transaction and self.db.cn.rollback()
 def finish(self, result='ok', info=None):
  # Commit or roll back, record the outcome, and detach from the db.
  if not self.active:
   return
  if result not in ('ok', 'abort', 'error'):
   raise ValueError('invalid operation result {!a}'.format(result))
  self.commit() if result == 'ok' else self.rollback()
  # Break reference cycle when db is no longer needed
  cn = self.db.cn
  self.db = None
  self.stop = unixtime()
  self.result = result
  self.info = str(info) if info else None
  if self.id:
   sql = 'UPDATE op SET stop=?, result=?, info=? WHERE op_id = ?'
   cn.execute(sql, (self.stop, self.result, self.info, self.id))
  log.info('Operation {} finished in {} sec ({})', self.id,
   self.duration, self.result)
 @property
 def active(self):
  """Operation status."""
  return self.db is not None
 @property
 def duration(self):
  """Operation run time in seconds."""
  return (unixtime() if self.stop is None else self.stop) - self.start
# Backup SQL statements
# (module-level so the immediate and delayed execution paths share them)
_update_sql = 'UPDATE msg SET op_id=?, flag_id=?, lbl_id=? WHERE msg_id = ?'
_insert_sql = '''
REPLACE INTO msg (msg_id, op_id, file_id, flag_id, lbl_id, thr_id, idate)
VALUES (?,?,?,?,?,?,?)
'''
class backup(Op):
 """Backup operation."""
 def __init__(self, db, name):
  # Prevent backups of multiple accounts into the same database
  if conf.account != db.conf['account']:
   if db.conf['account'] is not None:
    raise DBError('account name mismatch')
   db.conf['account'] = conf.account
  # Index needs to be updated
  db.conf['indexed'] = 0
  db.save_conf(False)
  # Delayed 'msg' table insert/update queues (conf.DB_DELAYED_OPS)
  self.msg_insert = deque()
  self.msg_update = deque()
  super().__init__(db, name)
  if conf.filter:
   sql = 'UPDATE op SET filter=? WHERE op_id = ?'
   db.cn.execute(sql, (conf.filter, self.id))
 def commit(self):
  """Execute delayed operations and commit the current transaction."""
  if not self.db.cn.in_transaction:
   return
  if self.msg_update:
   self.db.cn.executemany(_update_sql, self.msg_update)
   self.msg_update.clear()
  if self.msg_insert:
   self.db.cn.executemany(_insert_sql, self.msg_insert)
   self.msg_insert.clear()
  self.db.cn.commit()
 def rollback(self):
  """Cancel current transaction."""
  if self.db.cn.in_transaction:
   self.msg_insert.clear()
   self.msg_update.clear()
   self.db.cn.rollback()
 def store(self, msg):
  """Add a new message to the database."""
  msg_id = int64(msg['msg_id'])
  thr_id = int64(msg['thr_id'])
  idate = msg['idate']
  file_id = self._store_file(msg['body'])
  flag_id = self._store_flags(msg['flags'])
  lbl_id = self._store_labels(msg['labels'])
  entry = (msg_id, self.id, file_id, flag_id, lbl_id, thr_id, idate)
  if conf.DB_DELAYED_OPS:
   self.msg_insert.append(entry)
  else:
   self.db.cn.execute(_insert_sql, entry)
 def update(self, msg):
  """Update the mutable attributes of an existing message."""
  msg_id = int64(msg['msg_id'])
  flag_id = self._store_flags(msg['flags'])
  lbl_id = self._store_labels(msg['labels'])
  entry = (self.id, flag_id, lbl_id, msg_id)
  if conf.DB_DELAYED_OPS:
   self.msg_update.append(entry)
  else:
   self.db.cn.execute(_update_sql, entry)
 def finish(self, result='ok', info=None):
  # Keep a local db reference: super().finish() sets self.db to None.
  db = self.db
  super().finish(result, info)
  log.debug('Flag cache: {}', self._flag_id.cache_info())
  log.debug('Label cache: {}', self._lbl_id.cache_info())
  if result == 'ok':
   log.info('Performing database maintenance...')
   start = walltime()
   db.cleanup()
   log.info('Maintenance finished in {:.3f} sec', walltime() - start)
 def _store_file(self, data):
  """Write message data to disk and add/update its database entry."""
  digest = self.db.files.write(data)
  # Use REPLACE to avoid multiple msg references to the same file entry
  sql = 'INSERT OR REPLACE INTO file (op_id, digest, size) VALUES (?,?,?)'
  return self.db.cn.execute(sql, (self.id, digest, len(data))).lastrowid
 def _store_flags(self, flags):
  """Store the list of flags in the database and return its ID."""
  if flags:
   flags.sort()
   return self._flag_id(' '.join(flags))
  return None
 def _store_labels(self, labels):
  """Store the list of labels in the database and return its ID."""
  if labels:
   labels.sort()
   quote = lambda lbl: qstr(lbl, False)
   return self._lbl_id(' '.join(map(quote, labels)))
  return None
 @functools.lru_cache(None)
 def _flag_id(self, flags):
  """Flag list cache."""
  # NOTE(review): lru_cache on a method keys on self and keeps the
  # instance alive for the cache's lifetime; presumably acceptable since
  # backup operations are short-lived - worth confirming.
  sql = 'SELECT flag_id FROM flag WHERE flags = ?'
  for flag_id, in self.db.cn.execute(sql, (flags,)):
   return flag_id
  sql = 'INSERT INTO flag (flags) VALUES (?)'
  return self.db.cn.execute(sql, (flags,)).lastrowid
 @functools.lru_cache(500)
 def _lbl_id(self, labels):
  """Label list cache."""
  sql = 'SELECT lbl_id FROM lbl WHERE labels = ?'
  for lbl_id, in self.db.cn.execute(sql, (labels,)):
   return lbl_id
  sql = 'INSERT INTO lbl (labels) VALUES (?)'
  return self.db.cn.execute(sql, (labels,)).lastrowid
class restore(Op):
 """Restore operation."""
 def __init__(self, db, name, ids=None, pri=None, **kwargs):
  op_id, start = self._find_start(db)
  # Map lower-cased label -> priority index; unlisted labels sort last.
  pri = dict((lbl.lower(), n) for n, lbl in enumerate(pri or ()))
  last = len(pri)
  pkey = lambda lbl: pri.get(lbl, last)
  ymd = time.strftime('GMDB-%Y%m%d', time.localtime(start))
  super().__init__(db, name, **kwargs)
  self.msg_ids = ids # Message IDs to restore (None = all)
  self.start_op = op_id # First usable backup operation (0 = all)
  self.pri_key = pkey # Label priority sort key
  self.bkp_lbl = ymd # Default label added to all messages
 def count(self):
  """Get the total number of messages to be restored."""
  if self.msg_ids:
   return len(self.msg_ids)
  sql = 'SELECT COUNT(msg_id) FROM msg WHERE op_id >= ?'
  for count, in self.db.cn.execute(sql, (self.start_op,)):
   return count
 def mbox_map(self, srv_type):
  """Create a map of mailbox names to final message labels."""
  is_gmail = srv_type == 'gmail' # Server type
  mbox_map = defaultdict(list) # mbox->[(lbl_id, labels), ...]
  # The UNION ALL row stands in for messages without any labels.
  sql = 'SELECT lbl_id, labels FROM lbl UNION ALL SELECT NULL, ""'
  for lbl_id, labels in self.db.cn.execute(sql):
   mbox, labels = self._mbox_labels(qstr_split(labels), is_gmail)
   mbox_map[mbox].append((lbl_id, labels))
  return mbox_map
 def attrs(self, mbox_map):
  """Generator of message attributes.
  The generator yields (mbox, msg) tuples, where mbox is the destination
  mailbox and msg is a dictionary of message attributes as returned by
  DBControl.get_attrs, but with an updated list of labels. The tuples are
  grouped by mailbox name. When restoring to Gmail, mbox will be one of
  'allmail', 'spam', or 'trash'. Otherwise, mbox is the name of an actual
  mailbox, which may need to be created.
  """
  db = self.db
  ids = self.msg_ids
  sql_id = 'SELECT msg_id FROM msg WHERE op_id >= ? AND lbl_id = ?'
  sql_null = 'SELECT msg_id FROM msg WHERE op_id >= ? AND lbl_id IS NULL'
  for mbox, entries in mbox_map.items():
   for lbl_id, labels in entries:
    if lbl_id is None:
     sql, args = sql_null, (self.start_op,)
    else:
     sql, args = sql_id, (self.start_op, lbl_id)
    for msg_id, in db.cn.execute(sql, args):
     if not ids or msg_id in ids:
      yield (mbox, db.get_attrs(msg_id, labels))
 def body(self, msg):
  """Load message body."""
  return self.db.get_body(msg['msg_id'])
 def _find_start(self, db):
  """Find the ID and timestamp of the first usable backup operation."""
  sql = 'SELECT MAX(op_id) FROM op WHERE name="backup" AND result="ok"'
  for op_id, in db.cn.execute(sql):
   if op_id is not None:
    break
  else:
   # No successful backups, start with the first failed one
   sql = 'SELECT MIN(op_id) FROM op WHERE name="backup"'
   for op_id, in db.cn.execute(sql):
    if op_id is not None:
     log.warn('Database does not contain any successful backups')
     break
   else:
    raise DBError('database does not contain any backups')
  # Get the start time of the first usable backup
  sql = 'SELECT start FROM op WHERE op_id = ?'
  for start, in db.cn.execute(sql, (op_id,)):
   return (0 if conf.all or conf.ids else op_id, start)
 def _mbox_labels(self, orig, is_gmail):
  """Get destination mailbox and final labels for restoration."""
  mbox = 'allmail'
  base = conf.set_label or orig
  # OrderedDict keyed on lower-cased label de-duplicates case variants.
  lbls = OrderedDict((lbl.lower(), lbl) for lbl in base if lbl)
  # Merge additional labels
  if conf.add_label:
   lbls.update((lbl.lower(), lbl) for lbl in conf.add_label if lbl)
  # Extract ^Spam and ^Trash labels, and update destination mailbox
  for k in ('^spam', '^trash'):
   if k in lbls:
    del lbls[k]
    mbox = k[1:]
  # Convert lbls dict to a tuple
  if is_gmail:
   lbls = tuple(lbls.values()) + (self.bkp_lbl,)
  elif lbls:
   # Strip leading backslash from special Gmail labels
   for k in map(str.lower, conf.GMAIL_SPECIAL):
    if k in lbls:
     lbls[k[1:]] = lbls.pop(k)[1:]
   # Add the default 'All Mail' label
   lbls['all mail'] = 'All Mail'
   # Sort labels by their priority
   lbls = tuple(lbls[l] for l in sorted(lbls, key=self.pri_key))
  else:
   lbls = ('All Mail',)
  # Prepend prefix
  if conf.prefix:
   prefix = conf.prefix
   lbls = tuple(prefix + lbl for lbl in lbls)
  # Determine the final location
  if is_gmail:
   return (mbox, lbls)
  if mbox == 'allmail':
   return (lbls[0], lbls)
  prefix = conf.prefix or ''
  return (prefix + mbox.capitalize(), lbls)
#
### File database ############################################################
#
class FileDB(metaclass=RegisteredType):
 """Class for storing and loading messages using their digests.

 Messages live under root in a hash-fanout directory tree; the concrete
 subclass (selected by the 'comp_method' conf key) decides how files are
 opened (plain or compressed).
 """
 _registry_key = 'comp_method'
 def __new__(cls, root, db_conf):
  # Dispatch to the subclass registered for the configured comp_method.
  if cls is FileDB:
   cls = FileDB.registry.get(db_conf['comp_method'])
   if cls is None:
    # Fix: the .get() default used to be the *string* 'none', which is
    # not a class; fall back to the registered 'none' subclass instead.
    cls = FileDB.registry['none']
   if cls.__new__ is not FileDB.__new__:
    return cls.__new__(cls, root, db_conf)
  return object.__new__(cls)
 def __init__(self, root, db_conf):
  self.root = os_path.realpath(root)
  self.tmp = os_path.join(self.root, 'tmp')
  # Digest algorithm and directory fan-out come from the conf table.
  self.hash = getattr(hashlib, db_conf.get('digest', 'sha1'))
  self.dir_levels = max(1, min(db_conf.get('dir_levels', 1), 8))
  self.comp_level = db_conf.get('comp_level', 6)
  # Create a directory for temporary files (root must already exist)
  if not os_path.isdir(self.tmp):
   os.mkdir(self.tmp, conf.DIR_MODE)
 def __contains__(self, digest):
  """Check if the given digest exists."""
  return digest and os_path.isfile(self._path(digest))
 def __iter__(self):
  """Iterate over all available digests."""
  ls = os.listdir
  join = os_path.join
  b16 = frozenset('0123456789abcdef')
  # Depth-first walk of the hex fan-out directories, yielding leaf names.
  tree = deque(((0, ''),))
  while tree:
   n, d = tree.pop()
   path = join(self.root, d)
   if n < self.dir_levels:
    n += 1
    tree.extend((n, join(d, s)) for s in ls(path) if s in b16)
   else:
    for digest in ls(path):
     yield digest
 def digest(self, data):
  """Calculate message digest."""
  return self.hash(data).hexdigest()
 def open(self, digest):
  """Open a message file for reading."""
  if digest:
   path = self._path(digest)
   if os_path.isfile(path):
    return self._open(path, 'rb')
  return None
 def read(self, digest):
  """Read message contents from disk (None when the file is missing)."""
  fd = self.open(digest)
  if fd is not None:
   with fd:
    return fd.read()
 def write(self, data, digest=None, overwrite=False):
  """Write message contents to disk."""
  if not digest:
   digest = self.digest(data)
  path = self._path(digest)
  exists = os_path.isfile(path)
  tmp = os_path.join(self.tmp, digest)
  if exists and not overwrite:
   log.warn('Duplicate digest: {}', digest)
   return digest
  try:
   # Write to tmp first so a crash never leaves a partial message file.
   with self._open(tmp, 'wb') as fd:
    os.chmod(tmp, conf.FILE_MODE)
    fd.write(data)
   if not exists:
    parent = os_path.dirname(path)
    if not os_path.isdir(parent):
     os.makedirs(parent, conf.DIR_MODE)
   elif os.name == 'nt':
    os.unlink(path) # os.rename doesn't overwrite on Windows
   os.rename(tmp, path)
   return digest
  except Exception:
   if os_path.isfile(tmp):
    os.unlink(tmp)
   raise
 def remove(self, digest):
  """Remove message contents from disk."""
  if digest:
   path = self._path(digest)
   if os_path.isfile(path):
    os.unlink(path)
    try:
     # Prune now-empty fan-out directories.
     os.removedirs(os_path.dirname(path))
    except OSError:
     pass
 @property
 def empty(self):
  """Database is empty flag."""
  for digest in self:
   return False
  return True
 def _path(self, digest):
  """Get full path to the given digest."""
  steps = list(digest[:self.dir_levels])
  steps.append(digest)
  return os_path.join(self.root, *steps)
 def _open(self, path, mode='rb'):
  """Open a file for reading or writing."""
  return open(path, mode)
class FileDB_None(FileDB):
 """Plain (uncompressed) message file storage."""
 comp_method = 'none'
class FileDB_Gzip(FileDB):
 """Gzip-compressed message file storage (comp_method 'gzip')."""
 comp_method = 'gzip'
 def _open(self, path, mode='rb'):
  """Open a message file through GzipFile at the configured level."""
  fd = GzipFile(path, mode, self.comp_level)
  fd.read1 = fd.read # Bug fix for email parser
  return fd
|
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.contrib.layers import batch_norm
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.CharacterMapper import get_cm_lp
from util.saver import PrefixSaver
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_shifted_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm = get_cm_lp()
# Additional NaC Channel
nClasses = cm.size() + 1
nEpochs = 15
batchSize = 16
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
numT = 32998
# Fix: use floor division so the step counts stay integers under Python 3
# (plain '/' would yield floats there; '//' is identical for ints in Py2).
stepsPerEpocheTrain = numT // batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) // batchSize
def get_saver_dict(prefix):
 """Map variable names (with 'prefix/' stripped) to the global variables
 collected from that scope.

 Fixed: the local result mapping no longer shadows the builtin 'dict'.
 """
 if prefix[-1] != '/':
  prefix = prefix + '/'
 var_map = {}
 res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
 for t in res:
  key = t.name
  key = key[len(prefix):]
  var_map[str(key)] = t
 return var_map
def inference(images, seqLen, keep_prob, phase_train):
    """Build the forward graph: three conv blocks + bidirectional LSTM + logits.

    Args:
        images: float input batch, shape (batchSize, imgH, imgW, channels).
        seqLen: per-image sequence lengths measured in input columns.
        keep_prob: dropout keep probability for the LSTM outputs.
        phase_train: bool scalar switching batch-norm between train/inference.

    Returns:
        logits3d: time-major logits packed as (steps, batch, nClasses).
        seqLenAfterConv: int32 lengths rescaled to the conv output width,
            as required by the CTC loss/decoder.
    """
    with tf.variable_scope('readPart') as scope:
        with tf.variable_scope('conv1') as scope:
            kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
            ##Weight Decay?
            # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
            # tf.add_to_collection('losses', weight_decay)
            # Stride (4, 3): downsamples height x4 and width x3 immediately.
            conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv1_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN1")
            conv1 = tf.nn.relu(conv1_bn, name=scope.name)
            norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
            # _activation_summary(conv1)
            # norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
        # Track the width shrink of conv1 (stride 3 ~ factor 0.33).
        seqFloat = tf.to_float(seqLen)
        seqL2 = tf.ceil(seqFloat * 0.33)
        with tf.variable_scope('conv2') as scope:
            kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
            ##Weight Decay?
            # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
            # tf.add_to_collection('losses', weight_decay)
            conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv2_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN2")
            conv2 = tf.nn.relu(conv2_bn, name=scope.name)
            norm2 = tf.nn.local_response_normalization(conv2, name='norm2')
            # _activation_summary(conv2)
            # norm2
            # norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
            # Pooling stride (4, 2): another x2 shrink in width.
            pool2 = tf.nn.max_pool(norm2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
        seqL3 = tf.ceil(seqL2 * 0.5)
        with tf.variable_scope('conv3') as scope:
            kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
            ##Weight Decay?
            # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
            # tf.add_to_collection('losses', weight_decay)
            conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv3_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN3")
            conv3 = tf.nn.relu(conv3_bn, name=scope.name)
            norm3 = tf.nn.local_response_normalization(conv3, name='norm3')
            # Pools only over height (ksize/stride 3 in dim 1), keeping width.
            pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
        # NO POOLING HERE -> CTC needs an appropriate length.
        seqLenAfterConv = tf.to_int32(seqL3)
        with tf.variable_scope('RNN_Prep') as scope:
            # (#batch Y X Z) --> (X #batch Y Z)
            rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
            # (X #batch Y Z) --> (X #batch Y*Z)
            shape = rnnIn.get_shape()
            steps = shape[0]
            rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
            # (X #batch Y*Z) --> (X*#batch Y*Z)
            shape = rnnIn.get_shape()
            rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
            # (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
            rnnIn = tf.split(0, steps, rnnIn)
        with tf.variable_scope('BLSTM1') as scope:
            forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
            droppedFW = rnn_cell.DropoutWrapper(forwardH1, output_keep_prob=keep_prob)
            backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
            droppedBW = rnn_cell.DropoutWrapper(backwardH1, output_keep_prob=keep_prob)
            outputs, _, _ = bidirectional_rnn(droppedFW, droppedBW, rnnIn, dtype=tf.float32)
            # Sum the forward and backward outputs instead of concatenating.
            fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
            # outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
            outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
        with tf.variable_scope('LOGIT') as scope:
            weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
                                                             stddev=np.sqrt(2.0 / nHiddenLSTM1)))
            biasesClasses = tf.Variable(tf.zeros([nClasses]))
            logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
            logits3d = tf.pack(logitsFin)
        return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
    """Summed CTC loss over the batch, gated on pending UPDATE_OPS.

    Batch-norm registers its moving-average updates in the UPDATE_OPS
    collection; they must run whenever the loss is evaluated.
    """
    total = tf.reduce_sum(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
    pending = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if not pending:
        return total
    # Force the collected update ops to execute before the loss value is
    # produced.
    return control_flow_ops.with_dependencies([tf.group(*pending)], total)
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
    ####Graph input
    inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
    # The CTC target is a SparseTensor fed as an indices/values/shape triple.
    targetIxs = tf.placeholder(tf.int64)
    targetVals = tf.placeholder(tf.int32)
    targetShape = tf.placeholder(tf.int64)
    targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
    seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
    keep_prob = tf.placeholder(tf.float32)
    # Defaults to False so evaluation runs need not feed the flag.
    trainIN = tf.placeholder_with_default(tf.constant(False), [])
    logits3d, seqAfterConv = inference(inputX, seqLengths, keep_prob, trainIN)
    # NOTE: rebinds the module-level name `loss` from the function above to
    # the resulting loss tensor -- the function cannot be called again.
    loss = loss(logits3d, targetY, seqAfterConv)
    saver = PrefixSaver('readPart', './private/models/lp23/')
    # optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
    optimizer = tf.train.AdamOptimizer().minimize(loss)
    # pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
    pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
    # Character error rate: summed edit distance / number of target labels.
    edist = tf.edit_distance(pred, targetY, normalize=False)
    tgtLens = tf.to_float(tf.size(targetY.values))
    err = tf.reduce_sum(edist) / tgtLens
# Run training: nEpochs passes over a shuffled train list, with a full
# validation pass and a checkpoint after every epoch.
with tf.Session(graph=graph) as session:
    # writer = tf.train.SummaryWriter('./log', session.graph)
    print('Initializing')
    tf.global_variables_initializer().run()
    # ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
    # if ckpt and ckpt.model_checkpoint_path:
    # saver.restore(session, ckpt.model_checkpoint_path)
    # print(ckpt)
    # workList = valList[:]
    # errV = 0
    # lossV = 0
    # timeVS = time.time()
    # cmInv = get_charmap_lp_inv()
    # for bStep in range(stepsPerEpocheVal):
    # bList, workList = workList[:batchSize], workList[batchSize:]
    # batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
    # imgW,
    # mvn=True)
    # feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
    # targetShape: batchTargetShape, seqLengths: batchSeqLengths}
    # lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
    # print(aErr)
    # res = []
    # for idx in p.values:
    # res.append(cmInv[idx])
    # print(res)
    # # print(p)
    # plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
    # plt.show()
    #
    # lossV += lossB
    # errV += aErr
    # print('Val: CTC-loss ', lossV)
    # errVal = errV / stepsPerEpocheVal
    # print('Val: CER ', errVal)
    # print('Val time ', time.time() - timeVS)
    for epoch in range(nEpochs):
        workList = trainList[:]
        shuffle(workList)
        # Cap the epoch at numT samples.
        workList = workList[0:32998]
        print('Epoch', epoch + 1, '...')
        lossT = 0
        errT = 0
        timeTS = time.time()
        for bStep in range(stepsPerEpocheTrain):
            bList, workList = workList[:batchSize], workList[batchSize:]
            batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
                                                                                                             imgW,
                                                                                                             mvn=True)
            # NOTE(review): seqLengths is fed as the constant image width,
            # not the loader's batchSeqLengths -- presumably deliberate for
            # fixed-width input; confirm against get_list_vals.
            feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
                        targetShape: batchTargetShape, seqLengths: imgW*np.ones([batchSize]), keep_prob: 0.5, trainIN: True}
            _, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
            # _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
            lossT += lossB
            # writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
            # writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
            errT += aErr
        print('Train: CTC-loss ', lossT)
        cerT = errT / stepsPerEpocheTrain
        print('Train: CER ', cerT)
        print('Train time ', time.time() - timeTS)
        # Validation pass: no dropout (keep_prob 1.0), batch-norm in
        # inference mode (trainIN False).
        workList = valList[:]
        errV = 0
        lossV = 0
        timeVS = time.time()
        for bStep in range(stepsPerEpocheVal):
            bList, workList = workList[:batchSize], workList[batchSize:]
            batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
                                                                                                             imgW,
                                                                                                             mvn=True)
            feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
                        targetShape: batchTargetShape, seqLengths: imgW*np.ones([batchSize]), keep_prob: 1.0, trainIN: False}
            lossB, aErr = session.run([loss, err], feed_dict=feedDict)
            # lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
            # writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
            # writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
            lossV += lossB
            errV += aErr
        print('Val: CTC-loss ', lossV)
        errVal = errV / stepsPerEpocheVal
        print('Val: CER ', errVal)
        print('Val time ', time.time() - timeVS)
        # Write a checkpoint.
        saver.save(session, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 425823.728893
# Train: CER 0.53363006419
# Train time 1443.70941591
# Val: CTC-loss 7984.01521397
# Val: CER 0.104459231919
# Val time 34.3682131767
# Epoch 2 ...
# Train: CTC-loss 64123.7205703
# Train: CER 0.0713416529448
# Train time 1377.6435709
# Val: CTC-loss 4989.27085871
# Val: CER 0.0612603672307
# Val time 30.7804141045
# Epoch 3 ...
# Train: CTC-loss 52995.6194758
# Train: CER 0.0583216237314
# Train time 1350.16746306
# Val: CTC-loss 4974.11897206
# Val: CER 0.0643494823141
# Val time 30.9492008686
# Epoch 4 ...
# Train: CTC-loss 47650.4380932
# Train: CER 0.0526525123744
# Train time 1352.0392859
# Val: CTC-loss 3726.932432
# Val: CER 0.0427109787071
# Val time 31.1687119007
# Epoch 5 ...
# Train: CTC-loss 42036.9393945
# Train: CER 0.0459474870026
# Train time 1310.07066607
# Val: CTC-loss 3805.60591072
# Val: CER 0.0458993885049
# Val time 30.856719017
# Epoch 6 ...
# Train: CTC-loss 39825.6757735
# Train: CER 0.0435396286173
# Train time 1314.13341784
# Val: CTC-loss 3652.99790461
# Val: CER 0.0441121808328
# Val time 31.0514140129
# Epoch 7 ...
# Train: CTC-loss 38805.7285916
# Train: CER 0.0428731738658
# Train time 1302.10318995
# Val: CTC-loss 3396.00391503
# Val: CER 0.0390877672894
# Val time 31.3051400185
# Epoch 8 ...
# Train: CTC-loss 37655.5376481
# Train: CER 0.040996678151
# Train time 1272.14776897
# Val: CTC-loss 3241.93075249
# Val: CER 0.0379257008961
# Val time 31.1723740101
# Epoch 9 ...
# Train: CTC-loss 35306.6837268
# Train: CER 0.039340560049
# Train time 1255.14130592
# Val: CTC-loss 3298.8084621
# Val: CER 0.0380707961522
# Val time 30.9955301285
# Epoch 10 ...
# Train: CTC-loss 33064.6664063
# Train: CER 0.0367346494644
# Train time 1257.81966996
# Val: CTC-loss 3216.98754848
# Val: CER 0.0373557400317
# Val time 31.348790884
# Epoch 11 ...
# Train: CTC-loss 33231.565251
# Train: CER 0.0367647848037
# Train time 1223.39252186
# Val: CTC-loss 3369.77349287
# Val: CER 0.0383404844196
# Val time 31.4620189667
# Epoch 12 ...
# Train: CTC-loss 31701.1454951
# Train: CER 0.0351086833043
# Train time 1228.37678313
# Val: CTC-loss 3071.02720545
# Val: CER 0.0351393960496
# Val time 20.628880024
# Epoch 13 ...
# Train: CTC-loss 32437.9929693
# Train: CER 0.0362730159413
# Train time 1223.4059422
# Val: CTC-loss 3160.40971142
# Val: CER 0.0356085528445
# Val time 31.3786180019
# Epoch 14 ...
# Train: CTC-loss 30465.1829899
# Train: CER 0.034653937174
# Train time 1197.5064919
# Val: CTC-loss 3055.63726359
# Val: CER 0.0346931330959
# Val time 31.2907111645
# Epoch 15 ...
# Train: CTC-loss 29634.7127888
# Train: CER 0.0332576885411
# Train time 1208.09267807
# Val: CTC-loss 3150.66269298
# Val: CER 0.0346802188741
# Val time 31.6104729176
|
|
#import urllib2
import urllib
import subprocess
import time
import os.path
import sys
import getopt
from Bio.PDB import *
import openbabel
import pybel
import yaml
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
import re
import os
from collections import Counter
import numpy as np
import collections
from math import pi, degrees
from operator import itemgetter, attrgetter, methodcaller
import getopt
import sys
import shutil
# One-letter codes for the 20 standard amino acids.
AA = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
      'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G',
      'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
      'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
      'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V'}
# Residue classes by one-letter code: H-bond donors/acceptors and charges.
HBD = {'H', 'K', 'N', 'Q', 'R', 'S', 'T', 'W', 'Y'}
HBA = {'D', 'E', 'H', 'N', 'Q', 'S', 'T', 'Y'}
NEGATIVE = {'D', 'E'}
POSITIVE = {'H', 'K', 'R'}
AROMATIC = {'TYR', 'TRP', 'PHE', 'HIS'}
CHARGEDAA = {'ARG', 'LYS', 'ASP', 'GLU'}  # skip ,'HIS'
HYDROPHOBIC_AA = {'A', 'C', 'F', 'I', 'L', 'M', 'P', 'V', 'W', 'Y'}
# Working directories, created world-writable so external tools can write.
projectdir = '/tmp/interactions/'
if not os.path.exists(projectdir):
    os.makedirs(projectdir)
    os.chmod(projectdir, 0o777)
tempdir = projectdir + 'temp/'
if not os.path.exists(tempdir):
    os.makedirs(tempdir)
    os.chmod(tempdir, 0o777)
# (The original assigned ignore_het twice with the same value; the
# duplicate assignment has been removed.)
ignore_het = ['NA', 'W']  # ignore sodium and water
# Interaction distance cutoffs in Angstrom.
radius = 5
hydrophob_radius = 4.5
debug = False
def fetch_pdb(id):
    """Download the PDB entry *id* from RCSB and return the file contents.

    Works on both Python 2 (urllib.urlopen) and Python 3
    (urllib.request.urlopen), and closes the response handle.
    """
    url = 'https://www.rcsb.org/pdb/files/%s.pdb' % id
    try:
        urlopen = urllib.urlopen  # Python 2
    except AttributeError:
        from urllib.request import urlopen  # Python 3
    response = urlopen(url)
    try:
        return response.read()
    finally:
        response.close()
def check_unique_ligand_mol(filename):
    """Rewrite *filename* in place so it keeps only the HETATM records of
    the first ligand copy found (first residue number / chain seen)."""
    kept_records = []
    first_resnum = 0
    first_chain = 0
    with open(filename, 'r') as pdb_in:
        for record in pdb_in:
            if not record.startswith('HETATM'):
                continue
            resnum = record[22:26]
            chain_code = record[21]
            # Skip atoms belonging to a different residue or chain than the
            # first HETATM encountered.
            if (resnum != first_resnum and first_resnum != 0) or \
                    (chain_code != first_chain and first_chain != 0):
                continue
            first_resnum = resnum
            first_chain = chain_code
            kept_records.append(record)
    with open(filename, 'w') as pdb_out:
        pdb_out.write(''.join(kept_records))
def check_pdb():
    """Ensure projectdir/pdbs/<pdbname>.pdb exists locally, downloading it
    from RCSB via fetch_pdb() on first use."""
    pdb_dir = projectdir + 'pdbs/'
    if not os.path.exists(pdb_dir):
        os.makedirs(pdb_dir)
    local_path = pdb_dir + pdbname + '.pdb'
    if os.path.isfile(local_path):
        return
    contents = fetch_pdb(pdbname)
    with open(local_path, 'w') as handle:
        handle.write(contents)
def checkdirs():
    """Reset the per-PDB results tree.

    Wipes any previous run for the global *pdbname*, then recreates one
    world-writable sub-directory per artifact type produced later in the
    pipeline.  (Refactored from five copy-pasted mkdir/chmod stanzas.)
    """
    base = projectdir + 'results/' + pdbname
    if os.path.exists(base):
        shutil.rmtree(base)
    for subdir in ('interaction', 'ligand', 'output', 'png', 'fragments'):
        directory = base + '/' + subdir
        if not os.path.exists(directory):
            os.makedirs(directory)
            os.chmod(directory, 0o777)
def find_ligand_full_names():
    """Parse HETSYN records from the current PDB file.

    Returns:
        dict mapping each 3-letter het code to its synonym string.
    """
    pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb'
    names = {}
    # with-block: the original handle was opened but never closed.
    with open(pdbfile, 'r') as f_in:
        for line in f_in:
            if not line.startswith('HETSYN'):
                continue
            # need to fix bad PDB formatting where col4 and col5 are put
            # together for some reason -- usually seen when the id is +1000
            # NOTE: PDB is a fixed-width column format, so this is normal
            m = re.match(r"HETSYN[\s]+([\w]{3})[\s]+(.+)", line)
            if m:
                names[m.group(1)] = m.group(2).strip()
    return names
def fragment_library(ligand, atomvector, atomname, residuenr, chain, typeinteraction):
    """Write a fragment PDB for one interaction and return its file name.

    The fragment contains the ligand atom at *atomvector* plus neighbours up
    to two bonds away, together with the ATOM records of the interacting
    protein residue (*residuenr*/*chain*).  The file name encodes ligand,
    residue, atom name and interaction type.
    """
    #if debug:
    #print "Make fragment pdb file for ligand:", ligand, "atom vector", atomvector, "atomname", atomname, "residuenr from protein", residuenr, typeinteraction, 'chain', chain
    residuename = 'unknown'
    ligand_pdb = projectdir + 'results/' + pdbname + \
        '/ligand/' + ligand + '_' + pdbname + '.pdb'
    # .next(): Python 2 idiom for "first molecule in the file".
    mol = pybel.readfile("pdb", ligand_pdb).next()
    mol.removeh()
    # Collect coordinates of the interacting atom and its neighbours up to
    # two bonds out; these select which HETATM lines are kept below.
    listofvectors = []
    chain = chain.strip()
    if atomvector is not None:
        for atom in mol:
            distance = (Vector(getattr(atom, 'coords')) - atomvector).norm()
            if distance > 0.1:
                continue
            # print "Parent:",getattr(atom,'type'),getattr(atom,'idx')
            # ,Vector(getattr(atom,'coords'))
            listofvectors.append(Vector(getattr(atom, 'coords')))
            for neighbour_atom in openbabel.OBAtomAtomIter(atom.OBAtom):
                # print neighbour_atom.GetAtomicNum()
                neighbor = pybel.Atom(neighbour_atom)
                # print
                # "Neighbour:",neighbour_atom.GetType(),Vector(getattr(neighbor,'coords'))
                listofvectors.append(Vector(getattr(neighbor, 'coords')))
                for neighbour_atom2 in openbabel.OBAtomAtomIter(neighbour_atom):
                    # print neighbour_atom.GetAtomicNum()
                    neighbor2 = pybel.Atom(neighbour_atom2)
                    # print
                    # "Neighbour2:",neighbour_atom2.GetType(),Vector(getattr(neighbor2,'coords'))
                    listofvectors.append(Vector(getattr(neighbor2, 'coords')))
    #if debug:
    #print "vectors:", listofvectors
    pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb'
    f_in = open(pdbfile, 'r')
    tempstr = ''
    for line in f_in:
        if line.startswith('HETATM'):
            # Keep HETATM lines whose coordinates match a collected vector.
            atomvector = Vector(line[30:38], line[38:46], line[46:54])
            residue_number = line[22:26]
            tempchain = line[21]
            skip = 1
            for targetvector in listofvectors:
                distance = (targetvector - atomvector).norm()
                if distance < 0.1:
                    # print "FOUND!"
                    skip = 0
            if skip == 1:
                continue
        elif line.startswith('ATOM'):
            # Keep only the ATOM lines of the interacting residue/chain.
            residue_number = line[22:26].strip()
            tempchain = line[21].strip()
            if residue_number != residuenr:
                continue
            if tempchain != chain:
                continue
            residuenr = residue_number
            chain = tempchain
            residuename = line[17:20].strip()
        else:
            continue  # ignore all other lines
        tempstr += line
    filename = projectdir + 'results/' + pdbname + '/fragments/' + pdbname + "_" + ligand + \
        "_" + residuename + residuenr + chain + "_" + \
        atomname + "_" + typeinteraction + ".pdb"
    # if debug:
    # print filename
    f_in.close()
    f = open(filename, 'w')
    f.write(tempstr)
    f.close()
    # Round-trip through pybel to normalise the PDB formatting.
    mol = pybel.readfile("pdb", filename).next()
    mol.write("pdb", filename, overwrite=True)
    return filename
def fragment_library_aromatic(ligand, atomvectors, residuenr, chain, ringnr):
    """Write a fragment PDB for an aromatic (ring) interaction.

    Keeps the HETATM lines whose coordinates match the ring atom vectors in
    *atomvectors*, plus the ATOM lines of residue *residuenr*/*chain*, and
    returns the written file name.
    """
    # print "Make aromatic fragment pdb file for ligand:",ligand,"atom
    # vectors",atomvectors,"residuenr from protein", residuenr
    chain = chain.strip()
    pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb'
    residuename = ''
    f_in = open(pdbfile, 'r')
    tempstr = ''
    for line in f_in:
        if line.startswith('HETATM'):
            # Keep ring atoms: coordinate match against the ring vectors.
            atomvector = Vector(line[30:38], line[38:46], line[46:54])
            skip = 1
            for targetvector in atomvectors:
                distance = (targetvector - atomvector).norm()
                if distance < 0.1:
                    # print "FOUND!"
                    skip = 0
            if skip == 1:
                continue
        elif line.startswith('ATOM'):
            residue_number = line[22:26].strip()
            tempchain = line[21].strip()
            if residue_number != residuenr:
                continue
            if tempchain != chain:
                continue
            residuename = line[17:20].strip()
            chain = tempchain
        else:
            continue  # ignore all other lines
        tempstr += line
    filename = projectdir + 'results/' + pdbname + '/fragments/' + pdbname + "_" + ligand + \
        "_" + residuename + str(residuenr) + chain + \
        "_aromatic_" + str(ringnr) + ".pdb"
    # print tempstr
    f_in.close()
    f = open(filename, 'w')
    f.write(tempstr)
    f.close()
    return filename
def create_ligands_and_poseview():
    """Extract every het ligand (or the peptide chain) from the structure
    into per-ligand PDB/SDF/InChI files and record SMILES/InChI keys.

    Mutates the globals `inchikeys` and `smiles`.  The poseview rendering
    step at the bottom is disabled (`and 1 == 2`).
    """
    class HetSelect(Select):
        # Selects residues whose name matches HETNAM (a closure variable
        # assigned in the loop below).
        def accept_residue(self, residue):
            if residue.get_resname().strip() == HETNAM:
                return 1
            else:
                return 0
    class ClassSelect(Select):
        # Selects all residues of the peptide-ligand chain.
        def accept_residue(self, residue):
            if residue.get_parent().id == peptideligand:
                return 1
            else:
                return 0
    p = PDBParser(QUIET=True)
    s = p.get_structure(pdbname, projectdir + 'pdbs/' +
                        pdbname + '.pdb')  # Disable warnings
    hetflag_done = {}
    for model in s:
        for chain in model:
            for residue in chain:
                hetresname = residue.get_resname()
                # catch residues with hetflag
                hetflag = residue.get_full_id()[3][0].strip()
                hetflag = hetflag.replace("H_", "").strip()
                #hetflag = hetflag.replace("W","")
                #print(hetflag)
                # Peptide-ligand mode: treat the chosen chain as a single
                # ligand named 'pep' and skip all other chains.
                if peptideligand and chain.id==peptideligand:
                    hetflag= 'pep'
                if peptideligand and chain.id!=peptideligand:
                    continue
                if hetflag and hetflag not in ignore_het:
                    if not hetflag in hetflag_done:
                        hetflag_done[hetflag] = 1
                        HETNAM = hetflag
                        temp_path = projectdir + 'results/' + pdbname + \
                            '/ligand/' + HETNAM + '_' + pdbname + '.sdf'
                        ligand_pdb = projectdir + 'results/' + pdbname + \
                            '/ligand/' + HETNAM + '_' + pdbname + '.pdb'
                        ligand_sdf = projectdir + 'results/' + pdbname + \
                            '/ligand/' + HETNAM + '_' + pdbname + '.sdf'
                        ligand_inchi = projectdir + 'results/' + pdbname + \
                            '/ligand/' + HETNAM + '_' + pdbname + '.inchi'
                        ligand_poseview = projectdir + 'results/' + \
                            pdbname + '/png/' + pdbname + '_' + HETNAM + '.png'
                        ligand_png = projectdir + 'results/' + pdbname + '/png/' + HETNAM + '.png'
                        # if sdf not made, make it #Always make them for now
                        if not os.path.isfile(ligand_pdb) or 1 == 1:
                            io = PDBIO()
                            io.set_structure(s)
                            if peptideligand and chain.id==peptideligand:
                                io.save(ligand_pdb, ClassSelect())
                            else:
                                io.save(ligand_pdb, HetSelect())
                            # Drop all but the first copy of the ligand.
                            check_unique_ligand_mol(ligand_pdb)
                        if len(list(pybel.readfile("pdb", ligand_pdb))) == 0:
                            continue
                        # Derive the InChI key ("K" output option) and
                        # canonical SMILES for this ligand.
                        obConversion = openbabel.OBConversion()
                        obConversion.SetInAndOutFormats("pdb", "inchi")
                        obConversion.SetOptions(
                            "K", obConversion.OUTOPTIONS)
                        mol = openbabel.OBMol()
                        # Open Babel will uncompress automatically
                        obConversion.ReadFile(mol, ligand_pdb)
                        obConversion.WriteFile(mol, ligand_inchi)
                        inchikey = obConversion.WriteString(mol)
                        inchikeys[HETNAM] = inchikey.strip()
                        #smiles[HETNAM] = smile
                        smiles[HETNAM] = pybel.readfile(
                            "pdb", ligand_pdb).next().write("smi").split("\t")[0]
                        # Protonate at pH 7.4 and rewrite the ligand PDB.
                        mol = pybel.readfile("pdb", ligand_pdb).next()
                        mol.OBMol.AddHydrogens(False, True, 7.4)
                        mol.write("pdb", ligand_pdb, overwrite=True)
                        obConversion = openbabel.OBConversion()
                        obConversion.SetInAndOutFormats("pdb", "sdf")
                        mol = openbabel.OBMol()
                        # Open Babel will uncompress automatically
                        obConversion.ReadFile(mol, ligand_pdb)
                        obConversion.WriteFile(mol, ligand_sdf)
                        # if png of ligand not made, make it
                        if not os.path.isfile(ligand_png):
                            m = Chem.MolFromMolFile(ligand_sdf)
                            # Draw.MolToFile(m,ligand_png)
                        # if interaction png not made, make it #SKIP poseview
                        # stuff
                        if not os.path.isfile(ligand_poseview) and 1 == 2:
                            cmd = "poseview -l " + ligand_sdf + " -p " + projectdir + \
                                "pdbs/" + pdbname + ".pdb -o " + ligand_poseview
                            #print('Running cmd ' + cmd)
                            proc = subprocess.Popen(
                                [cmd], stdout=subprocess.PIPE, shell=True)
                            while proc.poll() is None:
                                time.sleep(1)
                            #(out, err) = proc.communicate()
                    else:
                        # print "Already made
                        # Poseview:",pdbname+"_"+HETNAM+".png"
                        continue
    # print "Done "+str(len(hetflag_done))
def addresiduestoligand(ligand, pdb, residuelist):
    """Merge the ATOM records of the residues in *residuelist* into the
    ligand PDB and write the combined file under results/<pdbname>/interaction/.

    residuelist entries are keys of the form resname+resnum+chain
    (e.g. 'ALA10A').
    """
    temp_path = projectdir + 'pdbs/' + pdb + '.pdb'
    f_in = open(temp_path, 'r')
    inserstr = ''
    check = []
    # print filename
    ligandid = 0
    chainid = 0
    # Pass 1: collect the ATOM lines of the requested residues.
    for line in f_in:
        if line.startswith('ATOM'):
            temp = line.split()
            # need to fix bad PDB formatting where col4 and col5 are put
            # together for some reason -- usually seen when the id is +1000
            m = re.match("(\w)(\d+)", temp[4])
            if (m):
                temp[4] = m.group(1)
                temp[5] = m.group(2)
            aaname = temp[3] + temp[5] + temp[4]
            if aaname in residuelist:
                # print aaname
                inserstr += line
    # print inserstr
    f_in.close()
    # ligands/'+hetflag+'_'+pdbname+".pdb")
    temp_path = projectdir + 'results/' + pdbname + \
        '/ligand/' + ligand + '_' + pdb + '.pdb'
    f_in = open(temp_path, 'r')
    tempstr = ''
    inserted = 0
    # Pass 2: copy the ligand PDB, splicing the residue lines in just
    # before the first trailer record (CONECT/MASTER/END).
    for line in f_in:
        if line.startswith('ATOM'):
            temp = line.split()
            if temp[2] == 'H':
                continue  # skip hydrogen in model
        if (line.startswith('CONECT') or line.startswith('MASTER') or line.startswith('END')) and inserted == 0:
            tempstr += inserstr
            inserted = 1
        tempstr += line
    # print tempstr
    # print tempstr
    f_in.close()
    f = open(projectdir + 'results/' + pdbname +
             '/interaction/' + pdb + '_' + ligand + '.pdb', 'w')
    f.write(tempstr)
    f.close()
def get_ring_from_aa(residueid):
    """Return the aromatic rings of protein residue *residueid*.

    Extracts the residue into a temp PDB, then for each aromatic SSSR ring
    returns [atom indices, ring centre (Vector), ring-plane normal (Vector),
    atom type names, atom position Vectors].
    """
    class AAselect(Select):
        # Keep only the residue with the requested sequence id.
        def accept_residue(self, residue):
            # print residue.get_full_id()[3][1],residueid
            if str(residue.get_full_id()[3][1]) == residueid:
                return 1
            else:
                return 0
    ptemp = PDBParser(QUIET=True)  # disable warnings
    stemp = ptemp.get_structure(
        pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
    temp_aa_id = residueid
    io = PDBIO()
    io.set_structure(stemp)
    io.save(projectdir + 'temp/' + residueid + '.pdb', AAselect())
    mol = pybel.readfile("pdb", projectdir + 'temp/' +
                         residueid + '.pdb').next()
    # print hetflag
    # Smallest set of smallest rings via Open Babel.
    rings = getattr(mol, "OBMol").GetSSSR()
    ringlist = []
    for ring in rings:
        center = Vector(0.0, 0.0, 0.0)
        members = ring.Size()
        if ring.IsAromatic():
            atomlist = []
            atomnames = []
            atomvectors = []
            for atom in mol:
                if ring.IsMember(atom.OBAtom):
                    a_vector = Vector(getattr(atom, 'coords'))
                    center += a_vector
                    atomlist.append(atom.idx)
                    atomvectors.append(a_vector)
                    atomnames.append(getattr(atom, 'type'))
            center = center / members
            # NOTE(review): this in-plane vector is immediately overwritten
            # by the cross-product normal below -- dead assignment.
            normal = center - a_vector  # vector in plane
            # Normal = cross product of two independent in-plane vectors.
            normal1 = center - atomvectors[0]
            normal2 = center - atomvectors[2]
            normal = Vector(np.cross([normal1[0],normal1[1],normal1[2]],[normal2[0],normal2[1],normal2[2]]))
            ringlist.append([atomlist, center, normal, atomnames, atomvectors])
    return ringlist
def get_hydrogen_from_aa(residueid):
    """Return H-bond donor atoms of protein residue *residueid*.

    Extracts the residue into a temp PDB, protonates it at pH 7.4, and
    returns a list of [atom type, donor position (Vector), list of attached
    hydrogen position Vectors, is_also_acceptor (bool)].
    """
    class AAselect(Select):
        # Keep only the residue with the requested sequence id.
        def accept_residue(self, residue):
            # print residue.get_full_id()[3][1],residueid
            if str(residue.get_full_id()[3][1]) == residueid:
                return 1
            else:
                return 0
    ptemp = PDBParser(QUIET=True)
    stemp = ptemp.get_structure(
        pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
    temp_aa_id = residueid
    io = PDBIO()
    io.set_structure(stemp)
    io.save(projectdir + 'temp/' + residueid + '.pdb', AAselect())
    mol = pybel.readfile("pdb", projectdir + 'temp/' +
                         residueid + '.pdb').next()
    # Add hydrogens as at physiological pH so donor H positions exist.
    mol.OBMol.AddHydrogens(False, True, 7.4)
    # print hetflag
    donors = []
    for atom in mol:
        if getattr(atom, 'OBAtom').IsHbondDonor():
            chargevector = Vector(getattr(atom, 'coords'))
            # print getattr(atom,'type')," is Donor",chargevector
            temphatoms = []
            for neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom):
                neighbor = pybel.Atom(neighbor)
                if getattr(neighbor, 'type') == "H":
                    # print "neighbor
                    # Atom",getattr(neighbor,'type'),"Coords:",getattr(neighbor,'coords')
                    temphatoms.append(Vector(getattr(neighbor, 'coords')))
            donors.append([getattr(atom, 'type'), chargevector, temphatoms,getattr(atom, 'OBAtom').IsHbondAcceptor()])
        # Acceptor branch only recomputes the coordinate for the (disabled)
        # debug print; it contributes nothing to the return value.
        if getattr(atom, 'OBAtom').IsHbondAcceptor():
            chargevector = Vector(getattr(atom, 'coords'))
            #print getattr(atom, 'type'),chargevector,'acceptor!'
    return donors
def build_ligand_info():
    """Scan the structure's het residues (or the peptide-ligand chain) and
    populate the module-level ligand dictionaries.

    Mutates the globals: hetlist (atoms per ligand), ligand_rings,
    ligand_charged, ligand_donors, ligand_acceptors, ligand_atoms and
    ligandcenter.  Uses the globals pdbname, projectdir, peptideligand and
    ignore_het.
    """
    count_atom_ligand = {}
    p = PDBParser(QUIET=True)
    s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
    for model in s:
        for chain in model:
            for residue in chain:
                hetresname = residue.get_resname()
                # catch residues with hetflag
                hetflag = residue.get_full_id()[3][0].strip()
                hetflag = hetflag.replace("H_", "").strip()
                #hetflag = hetflag.replace("W","")
                # Peptide-ligand mode: the whole chosen chain is treated as
                # one ligand named 'pep'; all other chains are skipped.
                if peptideligand and chain.id==peptideligand:
                    hetflag= 'pep'
                if peptideligand and chain.id!=peptideligand:
                    continue
                if hetflag and hetflag not in ignore_het:
                    # if goodhet!='' and hetflag!=goodhet and
                    # "H_"+goodhet!=hetflag: continue ### Only look at the
                    # ligand that has an image from poseview made for it.
                    if hetflag not in hetlist or (peptideligand and chain.id==peptideligand):
                        if len(list(pybel.readfile("pdb", projectdir + 'results/' + pdbname + '/ligand/' + hetflag + '_' + pdbname + '.pdb'))) == 0:
                            # This ligand has no molecules
                            # print('no info for',hetflag)
                            continue
                        if hetflag not in hetlist: #do not recreate for peptides
                            hetlist[hetflag] = []
                            ligand_charged[hetflag] = []
                            ligand_donors[hetflag] = []
                            ligand_acceptors[hetflag] = []
                            count_atom_ligand[hetflag] = 0
                        mol = pybel.readfile(
                            "pdb", projectdir + 'results/' + pdbname + '/ligand/' + hetflag + '_' + pdbname + ".pdb").next()
                        # print "LIGAND",hetflag
                        # Aromatic rings of the ligand (centre + plane normal).
                        rings = getattr(mol, "OBMol").GetSSSR()
                        # http://python.zirael.org/e-openbabel4.html
                        ringlist = []
                        for ring in rings:
                            center = Vector(0.0, 0.0, 0.0)
                            members = ring.Size()
                            if ring.IsAromatic():
                                # print "Found an aromatic ring"
                                atomlist = []
                                atomnames = []
                                vectorlist = []
                                for atom in mol:
                                    if ring.IsMember(atom.OBAtom):
                                        # print atom.idx,getattr(atom,'type'),
                                        # ring.IsMember( atom.OBAtom)
                                        a_vector = Vector(
                                            getattr(atom, 'coords'))
                                        center += a_vector
                                        atomlist.append(atom.idx)
                                        vectorlist.append(a_vector)
                                        atomnames.append(getattr(atom, 'type'))
                                center = center / members
                                # Overwritten below by the cross-product normal.
                                normal = center - a_vector  # vector in plane
                                #print center - vectorlist[0],center - vectorlist[2]
                                normal1 = center - vectorlist[0]
                                normal2 = center - vectorlist[2]
                                normal = Vector(np.cross([normal1[0],normal1[1],normal1[2]],[normal2[0],normal2[1],normal2[2]]))
                                ringlist.append(
                                    [atomlist, center, normal, atomnames, vectorlist])
                        ligand_rings[hetflag] = ringlist
                        # Classify ligand atoms: formal charges, carboxyl
                        # oxygens (treated as -1), H-bond donors/acceptors.
                        for atom in mol:
                            #print "Atom",getattr(atom,'type'),"Coords:",getattr(atom,'coords'),"FormalCharge:",getattr(atom,'formalcharge'),"PartialCharge",getattr(atom,'partialcharge')
                            if getattr(atom, 'formalcharge') != 0:
                                chargevector = Vector(getattr(atom, 'coords'))
                                ligand_charged[hetflag].append(
                                    [getattr(atom, 'type'), chargevector, getattr(atom, 'formalcharge')])
                            if getattr(atom, 'OBAtom').IsCarboxylOxygen():
                                chargevector = Vector(getattr(atom, 'coords'))
                                # print getattr(atom,'type')," is
                                # CarboxylOxygen",chargevector
                                ligand_charged[hetflag].append(
                                    [getattr(atom, 'type'), chargevector, -1])
                            if getattr(atom, 'OBAtom').IsHbondDonor():
                                chargevector = Vector(getattr(atom, 'coords'))
                                # print getattr(atom,'type')," is
                                # Donor",chargevector
                                temphatoms = []
                                for neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom):
                                    neighbor = pybel.Atom(neighbor)
                                    if getattr(neighbor, 'type') == "H":
                                        # print "neighbor
                                        # Atom",getattr(neighbor,'type'),"Coords:",getattr(neighbor,'coords')
                                        temphatoms.append(
                                            Vector(getattr(neighbor, 'coords')))
                                ligand_donors[hetflag].append(
                                    [getattr(atom, 'type'), chargevector, temphatoms])
                            if getattr(atom, 'OBAtom').IsHbondAcceptor():
                                chargevector = Vector(getattr(atom, 'coords'))
                                # print getattr(atom,'type')," is Acceptor",chargevector
                                ligand_acceptors[hetflag].append([getattr(atom, 'type'), chargevector])
                                # ligand_charged[hetflag].append([getattr(atom,'type'),chargevector,-1])
                        # Function to get ligand centers to maybe skip some
                        # residues
                        check = 0
                        center = Vector(0.0, 0.0, 0.0)
                        if peptideligand and chain.id==peptideligand:
                            # Peptide: accumulate the centre across residues,
                            # resuming from the running total stored earlier.
                            if hetflag in ligandcenter:
                                center = ligandcenter[hetflag][2]
                            for atom in residue:
                                het_atom = atom.name
                                atom_vector = atom.get_vector()
                                center += atom_vector
                                hetlist[hetflag].append(
                                    [hetresname, het_atom, atom_vector])
                                if not hetflag in ligand_atoms:
                                    # make the ligand_atoms ready
                                    ligand_atoms[hetflag] = []
                                ligand_atoms[hetflag].append(
                                    [count_atom_ligand[hetflag], atom_vector, het_atom])
                                count_atom_ligand[hetflag] += 1
                            ligandcenter[hetflag] = [center, count_atom_ligand[hetflag]]
                        else:
                            for atom in residue:
                                if check == 0 and hetflag in ligand_atoms:
                                    continue  # skip when there are many of same ligand
                                het_atom = atom.name
                                check = 1
                                atom_vector = atom.get_vector()
                                center += atom_vector
                                hetlist[hetflag].append(
                                    [hetresname, het_atom, atom_vector])
                                if not hetflag in ligand_atoms:
                                    # make the ligand_atoms ready
                                    ligand_atoms[hetflag] = []
                                ligand_atoms[hetflag].append(
                                    [count_atom_ligand[hetflag], atom_vector, het_atom])
                                count_atom_ligand[hetflag] += 1
                            center2 = center / count_atom_ligand[hetflag]
                            ligandcenter[hetflag] = [
                                center2, count_atom_ligand[hetflag],center]
def remove_hyd(aa, ligand):
    """Drop every recorded hydrophobic ('HYD'/'hyd') interaction of residue
    *aa* from new_results[ligand]['interactions']."""
    surviving = [
        entry for entry in new_results[ligand]['interactions']
        if not (entry[0] == aa and entry[2] in ('HYD', 'hyd'))
    ]
    new_results[ligand]['interactions'] = surviving
def check_other_aromatic(aa, ligand, info):
    """Resolve duplicate aromatic interactions for residue *aa*.

    Compares the candidate interaction *info* against any aromatic
    interaction already stored for the same residue; the one with the
    smaller centre-centre 'Distance' wins and the loser is dropped from
    new_results.  Returns True when the caller should add the new
    interaction, False when the existing one is kept.
    """
    surviving = []
    add_new = True
    for entry in new_results[ligand]['interactions']:
        same_aromatic = (entry[0] == aa and entry[4] == 'aromatic')
        if not same_aromatic:
            surviving.append(entry)
            continue
        if info['Distance'] > entry[6]['Distance']:
            # Existing interaction is closer: keep it, reject the new one.
            surviving.append(entry)
            add_new = False
        else:
            # New interaction is at least as close: drop the old entry.
            add_new = True
    new_results[ligand]['interactions'] = surviving
    return add_new
# LOOP OVER RECEPTOR AND FIND INTERACTIONS
def find_interactions():
    """Scan every receptor residue against every ligand atom in ``hetlist``.

    Fills the module-globals:
      * results          -- per ligand, per residue: raw close atom pairs
      * summary_results  -- per ligand: categorised hits (hydrophobic,
                            aromatic face/edge variants, accessible, ...)
      * new_results      -- per ligand: flat interaction records for output

    Relies on geometry prepared earlier (ligandcenter, ligand_rings,
    ligand_charged) and on the module-globals radius, hydrophob_radius,
    debug and peptideligand.
    """
    global count_calcs, count_skips
    count_atom = 0
    count_skips = 0
    count_calcs = 0
    p = PDBParser(QUIET=True)
    s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
    for model in s:
        for chain in model:
            chainid = chain.get_id()
            # A peptide ligand occupies its own chain; never treat that
            # chain as receptor.
            if peptideligand and chainid==peptideligand:
                continue
            for residue in chain:
                aa_resname = residue.get_resname()
                aa_seqid = str(residue.get_full_id()[3][1])
                hetflagtest = str(residue.get_full_id()[3][0]).strip()
                aaname = aa_resname + aa_seqid + chainid
                hetflagtest = hetflagtest.replace("H_", "")
                #hetflagtest = hetflagtest.replace("W","")
                if hetflagtest:
                    continue # residue is a hetnam
                # NOTE(review): hetflagtest is always '' here (non-empty
                # values continued above), so this check is dead code.
                if hetflagtest in hetlist:
                    continue # residue is a hetnam
                # print "Looking at ",aa_resname,aa_seqid,chainid
                # Remember the atom counter so each ligand below restarts
                # numbering this residue's atoms from the same value.
                countresidue = count_atom
                # print aaname
                # could probably make a check here to see if this residue was
                # anywhere near the ligand, otherwise skip the check per atom
                for hetflag, atomlist in hetlist.iteritems():
                    if not 'CA' in residue: # prevent errors
                        continue
                    ca = residue['CA'].get_vector()
                    # Cheap pre-filter: skip residues whose CA is further
                    # from the ligand center than the stored cutoff
                    # (ligandcenter[hetflag][1] is the ligand atom count --
                    # assumes it over-estimates the reach; TODO confirm).
                    if (ca - ligandcenter[hetflag][0]).norm() > ligandcenter[hetflag][1]:
                        # print "skipping"
                        count_skips += 1
                        continue
                    count_atom = countresidue
                    sum = 0                # heavy-atom contacts within radius
                    hydrophobic_count = 0  # C..C pairs within hydrophob_radius
                    accesible_check = 0    # side-chain atom seen within 5 A
                    # if goodhet!='' and hetflag!=goodhet and
                    # "H_"+goodhet!=hetflag: continue ### Only look at the
                    # ligand that has an image from poseview made for it.
                    tempdistance = radius
                    for atom in atomlist:
                        #print(hetflag,atom)
                        hetresname = atom[0]
                        het_atom = atom[1]
                        het_vector = atom[2]
                        hydrophobic_check = 1  # count at most one C..C per ligand atom
                        aaatomlist = []
                        # NOTE(review): this loop reuses the name 'atom',
                        # shadowing the ligand-atom loop variable above.
                        for atom in residue:
                            count_atom += 1
                            aa_vector = atom.get_vector()
                            aa_atom = atom.name
                            aa_atom_type = atom.element
                            aaatomlist.append([count_atom, aa_vector, aa_atom])
                            d = (het_vector - aa_vector)
                            count_calcs += 1
                            if d.norm() < radius:
                                # First contact for this ligand: create the
                                # result containers.
                                if not hetflag in results:
                                    results[hetflag] = {}
                                    summary_results[hetflag] = {'score': [], 'hbond': [], 'hbondplus': [],
                                                                'hbond_confirmed': [], 'aromatic': [],'aromaticff': [],
                                                                'ionaromatic': [], 'aromaticion': [], 'aromaticef': [],
                                                                'aromaticfe': [], 'hydrophobic': [], 'waals': [], 'accessible':[]}
                                    new_results[hetflag] = {'interactions':[]}
                                if not aaname in results[hetflag]:
                                    results[hetflag][aaname] = []
                                # Skip hydrogens on either side.
                                if not (het_atom[0] == 'H' or aa_atom[0] == 'H' or aa_atom_type=='H'):
                                    #print(aa_atom_type)
                                    results[hetflag][aaname].append([het_atom, aa_atom, round(
                                        d.norm(), 2), het_vector, aa_vector, aa_seqid, chainid])
                                    tempdistance = round(d.norm(), 2)
                                    sum += 1
                            # if both are carbon then we are making a hydrophic
                            # interaction
                            if het_atom[0] == 'C' and aa_atom[0] == 'C' and d.norm() < hydrophob_radius and hydrophobic_check:
                                hydrophobic_count += 1
                                hydrophobic_check = 0
                            # If within 5 angstrom and not a backbone atom (name C, O, N), then indicate as a residue in vicinity of the ligand
                            if d.norm() < 5 and (aa_atom!='C' and aa_atom!='O' and aa_atom!='N'):
                                #print(aa_atom)
                                accesible_check = 1
                    if accesible_check: #if accessible!
                        summary_results[hetflag]['accessible'].append(
                            [aaname])
                        fragment_file = fragment_library(hetflag, None, '',
                                                         aa_seqid, chainid, 'access')
                        new_results[hetflag]['interactions'].append([aaname,fragment_file,'acc','accessible','hidden',''])
                    if hydrophobic_count > 2 and AA[aaname[0:3]] in HYDROPHOBIC_AA: # min 3 c-c interactions
                        summary_results[hetflag]['hydrophobic'].append(
                            [aaname, hydrophobic_count])
                        fragment_file = fragment_library(hetflag, None, '',
                                                         aa_seqid, chainid, 'hydrop')
                        new_results[hetflag]['interactions'].append([aaname,fragment_file,'hyd','hydrophobic','hydrophobic',''])
                    if sum > 1 and aa_resname in AROMATIC:
                        # if debug:
                        # , get_ring_atoms(aaatomlist)
                        # print "Need to analyse aromatic ring in ", aaname
                        aarings = get_ring_from_aa(aa_seqid)
                        if not aarings:
                            # print "Could not find aromatic ring in",aaname
                            continue
                        #print "amount of rings in AA",len(aarings)
                        for aaring in aarings:
                            #aaring = aaring[0] # res_ring
                            center = aaring[1]  # residue-ring center
                            count = 0           # ligand-ring counter
                            #print "AARING",aaring
                            for ring in ligand_rings[hetflag]:
                                # print ring
                                shortest_center_het_ring_to_res_atom = 10
                                shortest_center_aa_ring_to_het_atom = 10
                                # print aaring[4]
                                # print ring[4]
                                # Closest residue-ring atom to the ligand ring
                                # center, and vice versa (edge proximity).
                                for a in aaring[4]:
                                    if (ring[1] - a).norm() < shortest_center_het_ring_to_res_atom:
                                        shortest_center_het_ring_to_res_atom = (ring[1] - a).norm()
                                for a in ring[4]:
                                    if (center - a).norm() < shortest_center_aa_ring_to_het_atom:
                                        shortest_center_aa_ring_to_het_atom = (center - a).norm()
                                count += 1
                                # take vector from two centers, and compare against
                                # vector from center to outer point -- this will
                                # give the perpendicular angel.
                                angle = Vector.angle(center - ring[1], ring[2]) #aacenter to ring center vs ring normal
                                # take vector from two centers, and compare against
                                # vector from center to outer point -- this will
                                # give the perpendicular angel.
                                angle2 = Vector.angle(center - ring[1], aaring[2]) #aacenter to ring center vs AA normal
                                angle3 = Vector.angle(ring[2], aaring[2]) #two normal vectors against eachother
                                #print "angleaa",aaring[2],"anglelig",ring[2]
                                angle_degrees = [
                                    round(degrees(angle), 1), round(degrees(angle2), 1), round(degrees(angle3), 1)]
                                distance = (center - ring[1]).norm()
                                #if debug:
                                #print aaname,"Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', shortest_center_het_ring_to_res_atom, 'Shortest lig->rescenter', shortest_center_aa_ring_to_het_atom
                                # Face-to-face: close centers and near-parallel
                                # ring planes (normals within 20 deg of 0/180).
                                if distance < 5 and (angle_degrees[2]<20 or abs(angle_degrees[2]-180)<20): # poseview uses <5
                                    # print "Ring
                                    # #",count,"Distance:",round(distance,2),
                                    # "Angle:",round(angle_degrees,2)
                                    summary_results[hetflag]['aromatic'].append(
                                        [aaname, count, round(distance, 2), angle_degrees])
                                    fragment_file = fragment_library_aromatic(
                                        hetflag, ring[4], aa_seqid, chainid, count)
                                    if debug:
                                        print aaname,"F2F Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2)
                                    if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}):
                                        new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_ff','aromatic (face-to-face)','aromatic','none',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}])
                                        remove_hyd(aaname,hetflag)
                                # need to be careful for edge-edge
                                elif (shortest_center_aa_ring_to_het_atom < 4.5) and abs(angle_degrees[0]-90)<30 and abs(angle_degrees[2]-90)<30:
                                    summary_results[hetflag]['aromaticfe'].append(
                                        [aaname, count, round(distance, 2), angle_degrees])
                                    fragment_file = fragment_library_aromatic(
                                        hetflag, ring[4], aa_seqid, chainid, count)
                                    if debug:
                                        print aaname,"FE Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2)
                                    if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}):
                                        new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_fe_protein','aromatic (face-to-edge)','aromatic','protein',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}])
                                        remove_hyd(aaname,hetflag)
                                # need to be careful for edge-edge
                                elif (shortest_center_het_ring_to_res_atom < 4.5) and abs(angle_degrees[1]-90)<30 and abs(angle_degrees[2]-90)<30:
                                    summary_results[hetflag]['aromaticef'].append(
                                        [aaname, count, round(distance, 2), angle_degrees])
                                    fragment_file = fragment_library_aromatic(
                                        hetflag, ring[4], aa_seqid, chainid, count)
                                    if debug:
                                        print aaname,"EF Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2)
                                    if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}):
                                        new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_ef_protein','aromatic (edge-to-face)','aromatic','protein',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}])
                                        remove_hyd(aaname,hetflag)
                            # Pi-cation: positively charged ligand atom close
                            # to the residue ring center.
                            for charged in ligand_charged[hetflag]:
                                distance = (center - charged[1]).norm()
                                # needs max 4.2 distance to make aromatic+
                                if distance < 4.2 and charged[2] > 0:
                                    if debug:
                                        # NOTE(review): angle_degrees is a list
                                        # here, so round(angle_degrees, 2)
                                        # raises TypeError when debug is on.
                                        print "Ring #", count, "Distance:", round(distance, 2), "Angle:", round(angle_degrees, 2)
                                    summary_results[hetflag]['aromaticion'].append(
                                        [aaname, count, round(distance, 2), charged])
                                    #FIXME fragment file
                                    new_results[hetflag]['interactions'].append([aaname,'','aro_ion_protein','aromatic (pi-cation)','aromatic','protein',{'Distance':round(distance, 2)}])
                                    remove_hyd(aaname,hetflag)
                    # NOTE(review): stub -- computes distances but records
                    # nothing (body is commented out).
                    if sum > 2 and aa_resname in CHARGEDAA and ligand_rings[hetflag]:
                        # print "check for charged AA to aromatic
                        # rings!",aa_resname,hetflag
                        for atom in residue:
                            aa_vector = atom.get_vector()
                            aa_atom = atom.name
                            for ring in ligand_rings[hetflag]:
                                d = (ring[2] - aa_vector).norm()
                                # if d<10: print
                                # "aa_atom",aa_atom,aaname,"distance to a
                                # ring",d,hetflag,aa_resname
def analyze_interactions():
    """Classify the raw atom-atom contacts collected by find_interactions().

    For every ligand/residue pair the close contacts are interpreted as
    hydrogen bonds (plain, charge-assisted, backbone, or donor/acceptor
    confirmed) or van-der-Waals contacts; per-residue scores are summed into
    a ligand score. Results are written into the module-globals
    summary_results and new_results.
    """
    for ligand, result in results.iteritems():
        # print "AA close to ligands ("+ligand+"): ",list(result.keys())
        # print "Results for"+ligand
        sortedresults = []
        ligscore = 0  # sum of per-residue scores for this ligand
        for residue, interaction in result.iteritems():
            sum = 0    # number of contacts closer than 4.5 A
            score = 0  # accumulated (4.5 - distance) over those contacts
            hbond = []
            hbondplus = []
            type = 'waals'  # default classification for this residue
            for entry in interaction:
                # entry layout (see find_interactions): [het_atom, aa_atom,
                # distance, het_vector, aa_vector, aa_seqid, chainid]
                hbondconfirmed = []
                if entry[2] <= 3.5:
                    # print(entry)
                    # if debug:
                    # print "Likely H-Bond", entry
                    if entry[0][0] == 'C' or entry[1][0] == 'C':
                        continue # If either atom is C then no hydrogen bonding
                    # if entry[1] == 'N': #if residue atom is N, then it is backbone!
                    # print('backbone interaction!')
                    aa_donors = get_hydrogen_from_aa(entry[5])
                    hydrogenmatch = 0
                    res_is_acceptor = False
                    res_is_donor = False
                    # Does the residue atom match a known donor whose explicit
                    # hydrogen geometry fits (distance <= 2.5 A, angle <= 60)?
                    for donor in aa_donors:
                        d = (donor[1] - entry[4]).norm()
                        if d < 0.5:
                            #print 'found donor in residue',residue,entry,donor
                            hydrogens = donor[2]
                            res_is_acceptor = donor[3]
                            res_is_donor = True
                            for hydrogen in hydrogens:
                                hydrogenvector = hydrogen - donor[1]
                                bindingvector = entry[3] - hydrogen
                                angle = round(degrees(Vector.angle(
                                    hydrogenvector, bindingvector)), 2)
                                distance = round(bindingvector.norm(), 2)
                                # print "RESDONOR",residue,"From
                                # ligand",entry[0],"To
                                # AA",entry[1],"HydrogenCheck
                                # angle",angle,"Distance from hydrogen to
                                # acceptor",distance
                                if distance > 2.5:
                                    # print "Too far away"
                                    continue
                                if angle > 60:
                                    # print "Bad angle"
                                    continue
                                hydrogenmatch = 1
                                hbondconfirmed.append(
                                    ["D", entry[0], entry[1], angle, distance])
                    # print "aadonors:",aa_donors
                    # Same geometry test with the ligand atom as the donor.
                    found_donor = 0
                    for donor in ligand_donors[ligand]:
                        d = (donor[1] - entry[3]).norm()
                        # print charged,d,residue,entry
                        if d < 0.5:
                            found_donor = 1
                            hydrogens = donor[2]
                            for hydrogen in hydrogens:
                                hydrogenvector = hydrogen - donor[1]
                                bindingvector = entry[4] - hydrogen
                                angle = round(degrees(Vector.angle(
                                    hydrogenvector, bindingvector)), 2)
                                distance = round(bindingvector.norm(), 2)
                                # print "LIGDONOR",residue,"From
                                # ligand",entry[0],"To
                                # AA",entry[1],"HydrogenCheck
                                # angle",angle,"Distance from hydrogen to
                                # acceptor",distance
                                if distance > 2.5:
                                    # print "Too far away"
                                    continue
                                if angle > 60:
                                    # print "Bad angle"
                                    continue
                                hydrogenmatch = 1
                                hbondconfirmed.append(
                                    ["A", entry[0], entry[1], angle, distance])
                    found_acceptor = 0
                    for acceptor in ligand_acceptors[ligand]:
                        d = (acceptor[1] - entry[3]).norm()
                        # print charged,d,residue,entry
                        if d < 0.5:
                            found_acceptor = 1
                            if found_donor==0 and res_is_donor:
                                hydrogenmatch = 1
                                hbondconfirmed.append(['D']) #set residue as donor
                                #print 'found acceptor which is not donor',residue,entry[0],acceptor
                    if not found_acceptor and found_donor and res_is_acceptor:
                        hydrogenmatch = 1
                        hbondconfirmed.append(['A']) #set residue as acceptor
                        #print 'donor which is not acceptor',residue,entry[0]
                    if found_acceptor and found_donor:
                        # Ligand atom can both donate and accept; use the
                        # residue's capability to decide the direction.
                        if res_is_donor and not res_is_acceptor:
                            hydrogenmatch = 1
                            hbondconfirmed.append(['D'])
                        elif not res_is_donor and res_is_acceptor:
                            hydrogenmatch = 1
                            hbondconfirmed.append(['A'])
                        else:
                            pass
                            #print 'can be both donor and acceptor'
                    chargedcheck = 0
                    charge_value = 0
                    res_charge_value = 0
                    doublechargecheck = 0
                    # Charged ligand atom at this contact?
                    for charged in ligand_charged[ligand]:
                        d = (charged[1] - entry[3]).norm()
                        if d < 0.5:
                            # print 'found charge',residue,d,entry
                            chargedcheck = 1
                            hydrogenmatch = 0 # Replace previous match!
                            charge_value = charged[2]
                    if residue[0:3] in CHARGEDAA:
                        # print "check for hbondplus!",residue,entry
                        # Need to check which atoms, but for now assume charged
                        if chargedcheck:
                            doublechargecheck = 1
                        chargedcheck = 1
                        hydrogenmatch = 0 # Replace previous match!
                        if AA[residue[0:3]] in POSITIVE:
                            res_charge_value = 1
                        elif AA[residue[0:3]] in NEGATIVE:
                            res_charge_value = -1
                    if entry[1] == 'N': #backbone connection!
                        fragment_file = fragment_library(ligand, entry[3], entry[
                            0], entry[5], entry[6], 'HB_backbone')
                        new_results[ligand]['interactions'].append([residue,fragment_file,'polar_backbone','polar (hydrogen bond with backbone)','polar','protein',entry[0],entry[1],entry[2]])
                        remove_hyd(residue,ligand)
                    elif entry[1] == 'O': #backbone connection!
                        fragment_file = fragment_library(ligand, entry[3], entry[
                            0], entry[5], entry[6], 'HB_backbone')
                        new_results[ligand]['interactions'].append([residue,fragment_file,'polar_backbone','polar (hydrogen bond with backbone)','polar','protein',entry[0],entry[1],entry[2]])
                        remove_hyd(residue,ligand)
                    elif hydrogenmatch:
                        found = 0
                        fragment_file = fragment_library(ligand, entry[3], entry[
                            0], entry[5], entry[6], 'HB')
                        # Merge with an already-confirmed H-bond record for
                        # the same residue, if one exists.
                        for x in summary_results[ligand]['hbond_confirmed']:
                            if residue == x[0]:
                                # print "Already key there",residue
                                key = summary_results[ligand][
                                    'hbond_confirmed'].index(x)
                                summary_results[ligand]['hbond_confirmed'][
                                    key][1].extend(hbondconfirmed)
                                found = 1
                        if hbondconfirmed[0][0]=="D":
                            new_results[ligand]['interactions'].append([residue,fragment_file,'polar_donor_protein','polar (hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
                            remove_hyd(residue,ligand)
                        if hbondconfirmed[0][0]=="A":
                            new_results[ligand]['interactions'].append([residue,fragment_file,'polar_acceptor_protein','polar (hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
                            remove_hyd(residue,ligand)
                        if found == 0:
                            summary_results[ligand]['hbond_confirmed'].append(
                                [residue, hbondconfirmed])
                        if chargedcheck:
                            type = 'hbondplus'
                            hbondplus.append(entry)
                    elif chargedcheck:
                        type = 'hbondplus'
                        hbondplus.append(entry)
                        fragment_file = fragment_library(ligand, entry[3], entry[
                            0], entry[5], entry[6], 'HBC')
                        remove_hyd(residue,ligand)
                        if doublechargecheck:
                            # Charged on both sides: charge-charge contact.
                            if (res_charge_value>0):
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_double_pos_protein','polar (charge-charge)','polar','',entry[0],entry[1],entry[2]])
                            elif (res_charge_value<0):
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_double_neg_protein','polar (charge-charge)','polar','',entry[0],entry[1],entry[2]])
                            elif (charge_value>0):
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_pos_ligand','polar (charge-assisted hydrogen bond)','polar','ligand',entry[0],entry[1],entry[2]])
                            elif (charge_value<0):
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_neg_ligand','polar (charge-assisted hydrogen bond)','polar','ligand',entry[0],entry[1],entry[2]])
                        else:
                            if (res_charge_value>0):
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_pos_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
                            elif (res_charge_value<0):
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_neg_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
                            else:
                                new_results[ligand]['interactions'].append([residue,fragment_file,'polar_unknown_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
                    else:
                        type = 'hbond'
                        hbond.append(entry)
                        fragment_file = fragment_library(ligand, entry[3], entry[
                            0], entry[5], entry[6], 'HB')
                        new_results[ligand]['interactions'].append([residue,fragment_file,'polar_unspecified','polar (hydrogen bond)','polar','',entry[0],entry[1],entry[2]])
                        remove_hyd(residue,ligand)
                    #print type,hbondconfirmed
                    entry[3] = ''
                if (entry[2] < 4.5):
                    sum += 1
                    score += 4.5 - entry[2]
            score = round(score, 2)
            if type == 'waals' and score > 2: # mainly no hbond detected
                summary_results[ligand]['waals'].append([residue, score, sum])
            elif type == 'hbond':
                summary_results[ligand]['hbond'].append(
                    [residue, score, sum, hbond])
            elif type == 'hbondplus':
                summary_results[ligand]['hbondplus'].append(
                    [residue, score, sum, hbondplus])
            # elif type == 'hbond_confirmed':
            # summary_results[ligand]['hbond_confirmed'].append([residue,score,sum,hbondconfirmed])
            ligscore += score
            # print "Total <4 (score is combined diff from
            # 4)",sum,"score",score
            sortedresults.append([residue, score, sum, hbond, type])
        summary_results[ligand]['score'].append([ligscore])
        summary_results[ligand]['inchikey'] = inchikeys[ligand]
        summary_results[ligand]['smiles'] = smiles[ligand]
        new_results[ligand]['score'] = ligscore
        new_results[ligand]['inchikey'] = inchikeys[ligand]
        new_results[ligand]['smiles'] = smiles[ligand]
        if ligand in hetlist_display:
            summary_results[ligand]['prettyname'] = hetlist_display[ligand]
            new_results[ligand]['prettyname'] = hetlist_display[ligand]
        # print ligand,"Ligand score:"+str(ligscore)
        sortedresults = sorted(sortedresults, key=itemgetter(1), reverse=True)
def pretty_results():
    """Write one YAML file per ligand from new_results and register binding
    residues via addresiduestoligand().

    Also builds a tab-separated text summary (*output*) from
    summary_results; the summary is assembled in memory only and the YAML
    content is echoed when the module-global debug flag is set.
    """
    for ligand, result in summary_results.iteritems():
        output = ''
        bindingresidues = []
        #output += "Results for "+str(ligand)+"\n"
        for type, typelist in result.iteritems():
            if type == 'waals':
                continue
            output += type + "\n"
            # NOTE(review): dead code -- 'waals' entries were skipped above,
            # so this sort never runs.
            if type == 'waals':
                typelist = sorted(typelist, key=itemgetter(2), reverse=True)
            if type == 'hydrophobic':
                typelist = sorted(typelist, key=itemgetter(1), reverse=True)
            for entry in typelist:
                # NOTE(review): result also holds plain-string entries
                # ('inchikey', 'smiles', 'prettyname'); for those this loop
                # iterates characters and entry[0] is a single character.
                if type != 'score':
                    bindingresidues.append(entry[0])
                if type == 'hbond':
                    output += '\t'.join(map(str, entry[0:1])) + '\n'
                    for bond in entry[3]:
                        output += '\t'.join(map(str, bond[0:3])) + '\n'
                elif type == 'hbondplus':
                    output += '\t'.join(map(str, entry[0:1])) + '\n'
                    for bond in entry[3]:
                        output += '\t'.join(map(str, bond[0:3])) + '\n'
                elif type == 'hbond_confirmed':
                    output += '\t'.join(map(str, entry[0:1])) + '\n'
                    for bond in entry[1]:
                        output += '\t'.join(map(str, bond)) + '\n'
                else:
                    # print entry
                    output += '\t'.join(map(str, entry)) + '\n'
        temp_path = projectdir + 'results/' + pdbname + '/output/' + \
            pdbname + '_' + ligand.replace("H_", "") + '.yaml'
        # yaml.dump(result, open(temp_path, 'w'))
        yaml.dump(new_results[ligand], open(temp_path, 'w'))
        if debug:
            print ligand,'\n',open(temp_path,'r').read()
        addresiduestoligand(ligand, pdbname, bindingresidues)
def calculate_interactions(pdb, session=None, peptide=None):
    """Run the full interaction-detection pipeline for one PDB entry.

    @param pdb      PDB identifier; the structure is read from
                    <projectdir>/pdbs/<pdb>.pdb by find_interactions().
    @param session  optional session id; when given, output goes under
                    /tmp/interactions/<session>/ and the PDB-fetch check
                    (check_pdb) is skipped.
    @param peptide  optional chain id to treat as a peptide ligand.

    Side effects: (re)initialises all module-level result containers and
    writes per-ligand YAML files via pretty_results().
    """
    global pdbname, hetlist, hetlist_display, ligand_atoms, ligand_charged, ligandcenter, ligand_rings, ligand_donors, ligand_acceptors, results, sortedresults, summary_results, inchikeys, smiles, projectdir, new_results, peptideligand
    # Reset module-level state so repeated calls start from a clean slate.
    hetlist = {}
    hetlist_display = {}
    ligand_atoms = {}
    ligand_charged = {}
    ligandcenter = {}
    ligand_rings = {}
    ligand_donors = {}
    ligand_acceptors = {}
    results = {}
    sortedresults = {}
    summary_results = {}
    new_results = {}
    inchikeys = {}
    smiles = {}
    peptideligand = peptide
    pdbname = pdb
    if session:
        # Session runs write into an isolated scratch directory and assume
        # the PDB file is already present there.
        projectdir = '/tmp/interactions/' + session + "/"
    else:
        # print "checking normal ",pdbname
        check_pdb()
    # Shared pipeline (previously duplicated verbatim in both branches).
    checkdirs()
    hetlist_display = find_ligand_full_names()
    create_ligands_and_poseview()
    build_ligand_info()
    find_interactions()
    analyze_interactions()
    pretty_results()
def main(argv):
pdbname = ''
try:
# print 'ARGV :', argv
opts, args = getopt.getopt(argv, "p:s:c:", ["pdb"])
except getopt.GetoptError as err:
print "Remember PDB name -p "
print err
sys.exit(2)
session = None
peptide = None
for opt, arg in opts:
if opt in ("-p"):
pdbname = arg
elif opt in ("-s"):
session = arg
elif opt in ("-c"):
peptide = arg
if not pdbname:
print "Remember PDB name -p "
sys.exit(2)
if session:
calculate_interactions(pdbname, session, peptide=peptide)
else:
calculate_interactions(pdbname, peptide=peptide)
# Script entry point: forward the command-line arguments (minus the program
# name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
#pdbname = '1F88'
# calculate_interactions(pdbname)
|
|
"""
Copyright 2013 OpERA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import math
from abstractDecisionAlgorithm import AbstractDecisionAlgorithm
# ::TODO:: definicao de parametros e metodos.
class Channel(AbstractDecisionAlgorithm):
    """
    Simulated Annealing decision algorithm.

    Explores the transmission-parameter space (power, modulation index,
    bandwidth, TDD, symbol rate, bit-error probability) with a fast
    annealing schedule (Tk = T0 / (k + 1)), perturbing one parameter at a
    time and scoring candidates with the weighted multi-objective function
    five_objective().
    """

    def __init__(self):
        """
        CTOR.

        All weights and bounds start as None; they are populated from the
        parameter dictionary handed to evaluate().
        """
        AbstractDecisionAlgorithm.__init__(self)
        # Objective weights: w1 power, w2 BER, w3 throughput,
        # w4 interference, w5 spectral efficiency, w6 PU rate.
        self.w1 = None
        self.w2 = None
        self.w3 = None
        self.w4 = None
        self.w5 = None
        self.w6 = None
        self.temperature = None  # initial annealing temperature T0
        self.min_bandwidth = None
        self.max_bandwidth = None
        self.min_power = None
        self.max_power = None
        self.maximum_scheme = None  # list of available modulation schemes
        self.maximum_modulation_index = None  # last valid index into maximum_scheme
        self.min_symbol_rate = None
        self.max_symbol_rate = None
        self.min_tdd = None
        self.max_tdd = None
        self.min_pbe = None
        self.max_pbe = None
        self.solution = -1  # best objective value retained across the run
        # Chance of changing a value in simulated annealing
        self.chance = 0.05
        self.pu_rate = None  # primary-user rate term used by five_objective

    def normalize(self, value, mini, maxi, nmin=0, nmax=1):
        """
        Linearly rescale *value* from the range [mini, maxi] to [nmin, nmax].

        @param value value to rescale
        @param mini  lower bound of the source range
        @param maxi  upper bound of the source range
        @param nmin  lower bound of the target range (default 0)
        @param nmax  upper bound of the target range (default 1)
        @return the rescaled value
        NOTE(review): raises ZeroDivisionError when maxi == mini, and under
        Python 2 performs floor division when every operand is an int --
        confirm callers always pass maxi > mini and at least one float.
        """
        delta = maxi - mini
        a = (value - mini) / delta
        #Then scale to [x,y] via:
        range2 = nmax - nmin
        a = (a * range2) + nmin
        return a

    def minimize_power(self, power):
        """
        Power objective: fraction of the maximum allowed power.

        @param power candidate transmit power
        @return power / max_power, or None when max_power is unset
        """
        if self.max_power is not None:
            return power / self.max_power
        else:
            return None

    def min_ber(self, pbe):
        """
        BER objective: log10(0.5) / log10(pbe).

        @param pbe candidate bit-error probability (0 < pbe < 1)
        @return the BER score
        """
        return math.log10(0.5) / math.log10(pbe)

    def max_throughput(self, modulation_index):
        """
        Throughput objective: log2 of the chosen modulation scheme relative
        to log2 of the largest available scheme.

        @param modulation_index index into maximum_scheme
        @return the throughput score, or None when the scheme table is unset
        """
        if self.maximum_scheme is not None and self.maximum_modulation_index is not None:
            return ((math.log(self.maximum_scheme[modulation_index], 2) / math.log(
                self.maximum_scheme[self.maximum_modulation_index], 2)))
        else:
            return None

    def min_interference(self, bandwidth, tdd, power):
        """
        Interference objective built from power, bandwidth and TDD.

        @param bandwidth candidate bandwidth
        @param tdd       candidate TDD value
        @param power     candidate transmit power
        @return the interference score, or None when any bound is unset
        """
        if self.min_power is not None and self.min_bandwidth is not None and self.max_power is not None and \
                self.max_bandwidth is not None and self.max_symbol_rate is not None:
            return ((power + bandwidth + tdd) - (self.min_power + self.min_bandwidth + 1)) / (
                self.max_power + self.max_bandwidth + self.max_symbol_rate)
        else:
            return None

    def max_spectral_eff(self, modulation_index, symbol_rate, bandwidth):
        """
        Spectral-efficiency objective.

        @param modulation_index index into maximum_scheme
        @param symbol_rate      candidate symbol rate
        @param bandwidth        candidate bandwidth
        @return the spectral-efficiency score, or None when any bound is unset
        """
        # Original condition tested "self.maximum_scheme is not None" twice;
        # the duplicate has been removed (same behavior).
        if self.maximum_scheme is not None and self.min_bandwidth is not None and \
                self.maximum_modulation_index is not None and self.max_symbol_rate is not None:
            return (1 - ((self.maximum_scheme[modulation_index] * self.min_bandwidth * symbol_rate) / (
                bandwidth * self.maximum_scheme[self.maximum_modulation_index] * self.max_symbol_rate)))
        else:
            return None

    def five_objective(self, bandwidth, power, modulation_index, symbol_rate, tdd, pbe):
        """
        Weighted sum of the five objectives plus the PU-rate term.

        @param bandwidth        candidate bandwidth
        @param power            candidate transmit power
        @param modulation_index index into maximum_scheme
        @param symbol_rate      candidate symbol rate
        @param tdd              candidate TDD value
        @param pbe              candidate bit-error probability
        @return the combined score, or None when any weight/pu_rate is unset
        """
        fmin_power = self.minimize_power(power)
        fmin_ber = self.min_ber(pbe)
        fmax_throughput = self.max_throughput(modulation_index)
        f_min_interference = self.min_interference(bandwidth, tdd, power)
        f_max_spec_eff = self.max_spectral_eff(modulation_index, symbol_rate, bandwidth)
        # Debug dump of the configured weights. Single-argument print() is
        # valid (and identical) in both Python 2 and Python 3.
        print("\nParametros:::\n")
        print("\nw1 = " + str(self.w1))
        print("\nw2 = " + str(self.w2))
        print("\nw3 = " + str(self.w3))
        print("\nw4 = " + str(self.w4))
        print("\nw5 = " + str(self.w5))
        print("\nw6 = " + str(self.w6))
        print("\npu_rate = " + str(self.pu_rate))
        print("\n")
        if self.w1 is not None and self.w2 is not None and self.w3 is not None and self.w4 is not None and \
                self.w5 is not None and self.w6 is not None and self.pu_rate is not None:
            return (
                (self.w1 * fmin_power) + (self.w2 * fmin_ber) + (self.w3 * fmax_throughput) +
                (self.w4 * f_min_interference) + (self.w5 * f_max_spec_eff) + (self.w6 * self.pu_rate))
        else:
            return None

    def three_objective(self, power, modulation_index, pbe):
        """
        Reduced score using only power, BER and throughput objectives.

        @param power            candidate transmit power
        @param modulation_index index into maximum_scheme
        @param pbe              candidate bit-error probability
        @return the combined score, or None when any weight is unset
        """
        fmin_power = self.minimize_power(power)
        fmin_ber = self.min_ber(pbe)
        fmax_throughput = self.max_throughput(modulation_index)
        if self.w1 is not None and self.w2 is not None and self.w3 is not None:
            return (self.w1 * fmin_power) + (self.w2 * fmin_ber) + (self.w3 * fmax_throughput)
        else:
            return None

    def acceptance_probability(self, energy, new_energy, temperature):
        """
        Metropolis acceptance probability exp((energy - new_energy) / T).

        @param energy      score of the current solution
        @param new_energy  score of the candidate solution
        @param temperature current temperature (> 0)
        @return acceptance probability (may exceed 1 for improving moves)
        """
        return math.exp((energy - new_energy) / temperature)

    def calculate_ber(self, maximum_scheme):
        #::TODO:: implementacao (???) -- placeholder, always returns "nada".
        """
        @param maximum_scheme modulation scheme (unused)
        @return the placeholder string "nada"
        """
        return "nada"

    def head_text(self, number_objectives):
        """
        CSV header matching the objective count.

        @param number_objectives 3 for the reduced score, anything else for five
        @return the header string
        """
        if (number_objectives == 3):
            return "Score, Power, Modulation"
        else:
            return "Score, Power, Modulation, Bandwidth, TDD, symbol_rate"

    def evaluate(self, data):
        """
        Run the annealing loop and return the retained solution score.

        @param data dict holding the SIM_ANNEALING_PARAMETERS sub-dictionary
                    with all weights and bounds (see main() for an example)
        @return The solution.

        Side effects: stores the retained configuration in self.power,
        self.modulation, self.bandwidth, self.TDD, self.symbol_rate,
        self.pbe and self.throughput.
        """
        self.w1 = data[self.SIM_ANNEALING_PARAMETERS]['w1']
        self.w2 = data[self.SIM_ANNEALING_PARAMETERS]['w2']
        self.w3 = data[self.SIM_ANNEALING_PARAMETERS]['w3']
        self.w4 = data[self.SIM_ANNEALING_PARAMETERS]['w4']
        self.w5 = data[self.SIM_ANNEALING_PARAMETERS]['w5']
        self.w6 = data[self.SIM_ANNEALING_PARAMETERS]['w6']
        self.temperature = data[self.SIM_ANNEALING_PARAMETERS]['temperature']
        self.min_bandwidth = data[self.SIM_ANNEALING_PARAMETERS]['min_bandwidth']
        self.max_bandwidth = data[self.SIM_ANNEALING_PARAMETERS]['max_bandwidth']
        self.min_power = data[self.SIM_ANNEALING_PARAMETERS]['min_power']
        self.max_power = data[self.SIM_ANNEALING_PARAMETERS]['max_power']
        self.maximum_scheme = data[self.SIM_ANNEALING_PARAMETERS]['maximum_scheme']
        self.maximum_modulation_index = data[self.SIM_ANNEALING_PARAMETERS]['maximum_modulation_index']
        self.min_symbol_rate = data[self.SIM_ANNEALING_PARAMETERS]['min_symbol_rate']
        self.max_symbol_rate = data[self.SIM_ANNEALING_PARAMETERS]['max_symbol_rate']
        self.min_tdd = data[self.SIM_ANNEALING_PARAMETERS]['min_tdd']
        self.max_tdd = data[self.SIM_ANNEALING_PARAMETERS]['max_tdd']
        self.min_pbe = data[self.SIM_ANNEALING_PARAMETERS]['min_pbe']
        self.max_pbe = data[self.SIM_ANNEALING_PARAMETERS]['max_pbe']
        self.solution = -1
        # Chance of changing a value in simulated
        self.chance = 0.05
        self.pu_rate = data[self.SIM_ANNEALING_PARAMETERS]['pu_rate']
        Tk = self.temperature
        # -----Initial solution -> Random solution
        power = random.uniform(self.min_power, self.max_power)
        sol_power = power
        modulation_index = random.randint(0, self.maximum_modulation_index)
        maximum_scheme = self.maximum_scheme[modulation_index]
        # NOTE(review): initialised with the scheme value but later
        # overwritten with the index -- confirm intended meaning.
        sol_modulation = maximum_scheme
        bandwidth = random.uniform(self.min_bandwidth, self.max_bandwidth)
        sol_bandwidth = bandwidth
        tdd = random.uniform(self.min_tdd, self.max_tdd)
        sol_tdd = tdd
        symbol_rate = random.uniform(self.min_symbol_rate, self.max_symbol_rate)
        sol_symbol_rate = symbol_rate
        pbe = random.uniform(self.min_pbe, self.max_pbe)
        sol_pbe = pbe
        solution = self.five_objective(bandwidth, power, modulation_index, symbol_rate, tdd, pbe)
        #-----
        # Control of number of iterations
        k = 0
        sign = True
        while (Tk > 0.1):
            # T value for each iteration Paper Szu
            Tk = self.temperature / (k + 1.0)
            # Choosing if the next parameter will be modified
            choose_parameter = random.random()
            # Transmit power
            if (choose_parameter <= self.chance):
                ntk = self.normalize(Tk, 0, self.temperature, self.min_power, self.max_power)
                if (sign):
                    power = sol_power + ntk
                else:
                    power = sol_power - ntk
                # Power must not exceed min_power and max_power
                if (power > self.max_power):
                    power = self.max_power
                elif (power < self.min_power):
                    power = self.min_power
            # Choosing if the next parameter will be modified
            choose_parameter = random.random()
            # Bit-error probability
            if (choose_parameter <= self.chance):
                ntk = self.normalize(Tk, 0, self.temperature, self.min_pbe, self.max_pbe)
                if (sign):
                    pbe = sol_pbe + ntk
                else:
                    pbe = sol_pbe - ntk
                # pbe must not exceed min_pbe and max_pbe
                if (pbe > self.max_pbe):
                    pbe = self.max_pbe
                elif (pbe < self.min_pbe):
                    pbe = self.min_pbe
            # Choosing if the next parameter will be modified
            choose_parameter = random.random()
            # Modulation Scheme
            if (choose_parameter <= self.chance):
                # Circular list of modulations
                if (not (sign)):
                    if (modulation_index == 0):
                        modulation_index = self.maximum_modulation_index
                    else:
                        modulation_index -= 1
                else:
                    if (modulation_index == self.maximum_modulation_index):
                        modulation_index = 0
                    else:
                        modulation_index += 1
                maximum_scheme = self.maximum_scheme[modulation_index]
            # Choosing if the next parameter will be modified
            choose_parameter = random.random()
            # Bandwidth
            if (choose_parameter <= self.chance):
                ntk = self.normalize(Tk, 0, self.temperature, self.min_bandwidth, self.max_bandwidth)
                if (sign):
                    bandwidth = sol_bandwidth + ntk
                else:
                    bandwidth = sol_bandwidth - ntk
                # Bandwidth must not exceed min_bandwidth and max_bandwidth
                if (bandwidth > self.max_bandwidth):
                    bandwidth = self.max_bandwidth
                elif (bandwidth < self.min_bandwidth):
                    bandwidth = self.min_bandwidth
            choose_parameter = random.random()
            # TDD
            if (choose_parameter <= self.chance):
                ntk = self.normalize(Tk, 0, self.temperature, self.min_tdd, self.max_tdd)
                if (sign):
                    tdd = sol_tdd + ntk
                else:
                    tdd = sol_tdd - ntk
                # TDD must not exceed min_tdd and max_tdd
                if (tdd > self.max_tdd):
                    tdd = self.max_tdd
                elif (tdd < self.min_tdd):
                    tdd = self.min_tdd
            choose_parameter = random.random()
            # Symbol Rate
            if (choose_parameter <= self.chance):  #(choose_parameter <= 1.00):
                ntk = self.normalize(Tk, 0, self.temperature, self.min_symbol_rate, self.max_symbol_rate)
                if (sign):
                    symbol_rate = sol_symbol_rate + ntk
                else:
                    symbol_rate = sol_symbol_rate - ntk
                # symbol_rate must not exceed min_symbol_rate and max_symbol_rate
                if (symbol_rate > self.max_symbol_rate):
                    symbol_rate = self.max_symbol_rate
                elif (symbol_rate < self.min_symbol_rate):
                    symbol_rate = self.min_symbol_rate
            # New solution
            new_solution = self.five_objective(bandwidth, power, modulation_index, symbol_rate, tdd, pbe)
            # New solution is better than the old solution
            # NOTE(review): the local search accepts *smaller* scores here,
            # while self.solution below retains the *largest* score seen --
            # confirm which direction is intended.
            if (new_solution < solution):
                solution = new_solution
                sol_power = power
                sol_modulation = modulation_index
                sol_bandwidth = bandwidth
                sol_tdd = tdd
                sol_symbol_rate = symbol_rate
                sol_pbe = pbe
                if (new_solution > self.solution):
                    self.solution = new_solution
                    self.power = power
                    self.modulation = modulation_index
                    self.bandwidth = bandwidth
                    self.TDD = tdd
                    self.symbol_rate = symbol_rate
                    self.pbe = sol_pbe
                    self.throughput = self.max_throughput(modulation_index)
            else:
                # Probability of solution exchange, even if new solution is worse
                p = self.acceptance_probability(solution, new_solution, self.temperature)
                gen_prob = random.random()
                if (gen_prob < p):
                    solution = new_solution
            # Controlling iteration and temperature
            k += 1
            #Changing between inc/decrementing vars
            sign = not (sign)
        return self.solution
def main():
    """Evaluate every (link, channel) pair with simulated annealing, rank
    the rewards, and greedily assign each link a distinct channel.

    Fixes over the previous version: ``dict.itervalues()`` and statement-form
    ``print`` were Python-2-only; the loop variable ``l`` shadowed the earlier
    list and is easy to misread; several never-used locals were removed.
    """
    SIM_ANNEALING_PARAMETERS = 14
    # Primary-user rates per channel.
    pu_rate = [1.0, 0.5, 0.1]
    data = {SIM_ANNEALING_PARAMETERS: {}}
    params = data[SIM_ANNEALING_PARAMETERS]
    # Weights for maximizing throughput
    params['w1'] = 0.05
    params['w2'] = 0.05
    params['w3'] = 0.05
    params['w4'] = 0.05
    params['w5'] = 0.05
    params['w6'] = 0.75
    # Transmitted power between 0.158 and 251 mW
    params['min_power'] = 1
    params['max_power'] = 1
    # Modulation Scheme QAM between 2 and 256
    params['maximum_scheme'] = [4]
    params['maximum_modulation_index'] = 0
    # Bandwidth between 2 and 32 MHz
    params['min_bandwidth'] = 200
    params['max_bandwidth'] = 400
    # TDD window bounds
    params['min_tdd'] = 485
    params['max_tdd'] = 490
    # Symbol Rate between 125kbps and 1Mbps
    params['min_symbol_rate'] = 125
    params['max_symbol_rate'] = 1024
    # Bit-error-rate bounds
    params['max_pbe'] = math.pow(10, -6)
    params['min_pbe'] = math.pow(10, -8)
    # Initial annealing temperature
    params['temperature'] = 1000
    # Primary user rate.
    params['pu_rate'] = pu_rate
    # to_sa: one inner list per link of (channel, pu_rate) pairs.
    to_sa = [[(1, 77), (2, 77), (3, 77)], [(1, 79), (2, 79), (3, 79)]]
    results = []
    for link_idx in range(len(to_sa)):
        for chan, rate in to_sa[link_idx]:
            channel = Channel()
            data[channel.SIM_ANNEALING_PARAMETERS]['pu_rate'] = rate
            simu = channel.evaluate(data)
            # Insert keeping `results` sorted ascending by reward (index 2).
            inserted = False
            j = 0
            while not inserted and j < len(results):
                if results[j][2] > simu:
                    results.insert(j, [link_idx, chan, simu])
                    inserted = True
                j += 1
            if not inserted:
                results.append([link_idx, chan, simu])
    # Greedy assignment: walk the sorted results and give each link the first
    # channel that no other link has claimed yet.
    assignment = {}
    for link, chan, _reward in results:
        if link not in assignment and chan not in assignment.values():
            assignment[link] = chan
    print("\nLinks = " + str(to_sa))
    print("\nResults = " + str(results))
    print("\nDict = " + str(assignment))
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
from __future__ import division
import inspect
from decimal import ROUND_DOWN, Decimal
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import F
from django.db.models.signals import class_prepared
from django.utils import translation
from moneyed import Currency, Money
from moneyed.localization import _FORMATTER, format_money
from djmoney import forms
from .._compat import (
BaseExpression,
Expression,
deconstructible,
smart_unicode,
split_expression,
string_types,
)
from ..settings import CURRENCY_CHOICES, DEFAULT_CURRENCY
from ..utils import get_currency_field_name, prepare_expression
# If django-money-rates is installed we can automatically
# perform operations with different currencies
if 'djmoney_rates' in settings.INSTALLED_APPS:
try:
from djmoney_rates.utils import convert_money
AUTO_CONVERT_MONEY = True
except ImportError:
# NOTE. djmoney_rates doesn't support Django 1.9+
AUTO_CONVERT_MONEY = False
else:
AUTO_CONVERT_MONEY = False
__all__ = ('MoneyField', 'NotSupportedLookup')
SUPPORTED_LOOKUPS = ('exact', 'isnull', 'lt', 'gt', 'lte', 'gte')
class NotSupportedLookup(Exception):
    """Raised when a queryset lookup type cannot be used with MoneyField."""

    _message = 'Lookup \'%s\' is not supported for MoneyField'

    def __init__(self, lookup):
        #: The offending lookup name, e.g. ``'contains'``.
        self.lookup = lookup

    def __str__(self):
        return self._message % self.lookup
@deconstructible
class MoneyPatched(Money):
    """``Money`` subclass used by MoneyField.

    Adds automatic currency conversion on +/- (only when django-money-rates
    is installed, see ``AUTO_CONVERT_MONEY``) and locale-aware string
    formatting driven by Django's translation machinery.
    """
    # Set to True or False has a higher priority
    # than USE_L10N == True in the django settings file.
    # The variable "self.use_l10n" has three states:
    #   None  -> defer to settings.USE_L10N
    #   True  -> always localize the formatted output
    #   False -> never localize the formatted output
    use_l10n = None
    def __float__(self):
        # Lossy: collapses the Decimal amount to a float, drops the currency.
        return float(self.amount)
    def _convert_to_local_currency(self, other):
        """
        Converts other Money instances to the local currency, when
        django-money-rates is available; otherwise returns *other* unchanged.
        """
        if AUTO_CONVERT_MONEY:
            return convert_money(other.amount, other.currency, self.currency)
        else:
            return other
    @classmethod
    def _patch_to_current_class(cls, money):
        """
        Wrap a plain ``Money`` result in this (sub)class so arithmetic on
        MoneyPatched keeps returning MoneyPatched.
        """
        return cls(money.amount, money.currency)
    def __pos__(self):
        return MoneyPatched._patch_to_current_class(
            super(MoneyPatched, self).__pos__())
    def __neg__(self):
        return MoneyPatched._patch_to_current_class(
            super(MoneyPatched, self).__neg__())
    def __add__(self, other):
        # Convert first so cross-currency addition works when rates exist.
        other = self._convert_to_local_currency(other)
        return MoneyPatched._patch_to_current_class(
            super(MoneyPatched, self).__add__(other))
    def __sub__(self, other):
        other = self._convert_to_local_currency(other)
        return MoneyPatched._patch_to_current_class(
            super(MoneyPatched, self).__sub__(other))
    def __mul__(self, other):
        return MoneyPatched._patch_to_current_class(
            super(MoneyPatched, self).__mul__(other))
    def __eq__(self, other):
        # NOTE(review): comparing against Money of a *different* currency
        # raises TypeError (with arithmetic-flavoured wording) instead of
        # returning False; comparing to non-Money returns False.
        if hasattr(other, 'currency'):
            if self.currency == other.currency:
                return self.amount == other.amount
            raise TypeError('Cannot add or subtract two Money ' +
                            'instances with different currencies.')
        return False
    def __truediv__(self, other):
        # Money / Money yields a bare ratio (delegated to the base class);
        # Money / scalar stays a MoneyPatched.
        if isinstance(other, Money):
            return super(MoneyPatched, self).__truediv__(other)
        else:
            return self._patch_to_current_class(
                super(MoneyPatched, self).__truediv__(other))
    def __rmod__(self, other):
        return MoneyPatched._patch_to_current_class(
            super(MoneyPatched, self).__rmod__(other))
    def __get_current_locale(self):
        """Return a locale name known to the money formatter, or ''."""
        # get_language can return None starting on django 1.8
        language = translation.get_language() or settings.LANGUAGE_CODE
        locale = translation.to_locale(language)
        if _FORMATTER.get_formatting_definition(locale):
            return locale
        # Fall back to a doubled form like 'de_DE' when only that is defined.
        if _FORMATTER.get_formatting_definition('%s_%s' % (locale, locale)):
            return '%s_%s' % (locale, locale)
        return ''
    def __use_l10n(self):
        """
        Return boolean: whether output should be localized (see ``use_l10n``).
        """
        if self.use_l10n is None:
            return settings.USE_L10N
        return self.use_l10n
    def __unicode__(self):
        if self.__use_l10n():
            locale = self.__get_current_locale()
            if locale:
                return format_money(self, locale=locale)
        return format_money(self)
    # On Python 3, str() therefore also produces the localized format.
    __str__ = __unicode__
    def __repr__(self):
        # Amount is truncated toward zero for the debug representation.
        return '%s %s' % (self.amount.to_integral_value(ROUND_DOWN), self.currency)
def get_value(obj, expr):
    """Resolve *expr* to a concrete value.

    ``F`` expressions are resolved against *obj*'s attribute of the same
    name; wrapper objects exposing a ``value`` attribute are unwrapped;
    anything else is returned as-is.
    """
    if isinstance(expr, F):
        return getattr(obj, expr.name)
    if hasattr(expr, 'value'):
        return expr.value
    return expr
def validate_money_expression(obj, expr):
    """Reject F-expressions that are invalid for MoneyField.

    Forbidden forms:
    - adding/subtracting money with non-money
    - any exponentiation
    - any operation mixing two different currencies
    - *, /, % with Money on both sides of the expression
    """
    left, right = split_expression(expr)
    op = expr.connector
    left = get_value(obj, left)
    right = get_value(obj, right)
    if op == '^' or (op in ('+', '-') and not isinstance(right, Money)):
        raise ValidationError('Invalid F expression for MoneyField.', code='invalid')
    if isinstance(left, Money) and isinstance(right, Money):
        if op in ('*', '/', '^', '%%'):
            raise ValidationError('Invalid F expression for MoneyField.', code='invalid')
        if left.currency != right.currency:
            raise ValidationError('You cannot use F() with different currencies.', code='invalid')
def validate_money_value(value):
    """Validate a raw value destined for a MoneyField.

    Accepted: a single numeric value, a Money instance, or a 2-element
    (amount, currency) pair whose currency is not None. Anything else that
    is list/tuple-shaped raises ValidationError.
    """
    is_pair = isinstance(value, (list, tuple))
    if is_pair and (len(value) != 2 or value[1] is None):
        raise ValidationError(
            'Invalid value for MoneyField: %(value)s.',
            code='invalid',
            params={'value': value},
        )
def get_currency(value):
    """Extract the currency from *value*, or None when it carries none.

    Money instances yield their currency as text; (amount, currency)
    pairs yield the second element; scalars yield None implicitly.
    """
    if isinstance(value, Money):
        return smart_unicode(value.currency)
    if isinstance(value, (list, tuple)):
        return value[1]
    return None
class MoneyFieldProxy(object):
    """Descriptor installed on the model class for each MoneyField.

    Combines the stored amount with its companion ``<name>_currency``
    column into a single MoneyPatched attribute, and splits assignments
    back into the two underlying columns.
    """
    def __init__(self, field):
        self.field = field
        # Name of the companion currency column, e.g. 'price_currency'.
        self.currency_field_name = get_currency_field_name(self.field.name)
    def _money_from_obj(self, obj):
        """Build a MoneyPatched from the instance's raw column values."""
        amount = obj.__dict__[self.field.name]
        currency = obj.__dict__[self.currency_field_name]
        if amount is None:
            return None
        return MoneyPatched(amount=amount, currency=currency)
    def __get__(self, obj, type=None):
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        # F()/expression values pass through untouched until save time.
        if isinstance(obj.__dict__[self.field.name], BaseExpression):
            return obj.__dict__[self.field.name]
        # Lazily replace the raw Decimal with a Money object (cached in
        # the instance dict, so this conversion happens once per load).
        if not isinstance(obj.__dict__[self.field.name], Money):
            obj.__dict__[self.field.name] = self._money_from_obj(obj)
        return obj.__dict__[self.field.name]
    def __set__(self, obj, value): # noqa
        if isinstance(value, BaseExpression):
            validate_money_expression(obj, value)
            prepare_expression(value)
        else:
            validate_money_value(value)
            currency = get_currency(value)
            if currency:
                self.set_currency(obj, currency)
            # Store only the numeric part; the currency lives in its own column.
            value = self.field.to_python(value)
        obj.__dict__[self.field.name] = value
    def set_currency(self, obj, value):
        # we have to determine whether to replace the currency.
        # i.e. if we do the following:
        # .objects.get_or_create(money_currency='EUR')
        # then the currency is already set up, before this code hits
        # __set__ of MoneyField. This is because the currency field
        # has less creation counter than money field.
        object_currency = obj.__dict__[self.currency_field_name]
        default_currency = str(self.field.default_currency)
        if object_currency != value and (object_currency == default_currency or value != default_currency):
            # in other words, update the currency only if it wasn't
            # changed before.
            setattr(obj, self.currency_field_name, value)
class CurrencyField(models.CharField):
    """Companion column holding the ISO currency code for a MoneyField."""

    description = 'A field which stores currency.'

    def __init__(self, price_field=None, verbose_name=None, name=None,
                 default=DEFAULT_CURRENCY, **kwargs):
        if isinstance(default, Currency):
            default = default.code
        # ISO 4217 codes are always three characters.
        kwargs['max_length'] = 3
        # Back-reference to the MoneyField this column belongs to.
        self.price_field = price_field
        self.frozen_by_south = kwargs.pop('frozen_by_south', False)
        super(CurrencyField, self).__init__(verbose_name, name,
                                            default=default, **kwargs)

    def contribute_to_class(self, cls, name):
        # Skip installation when South froze the field or when the model
        # already declares a column of this name.
        taken = [field.name for field in cls._meta.fields]
        if not self.frozen_by_south and name not in taken:
            super(CurrencyField, self).contribute_to_class(cls, name)
class MoneyField(models.DecimalField):
    """Decimal field that also tracks a currency.

    Installs a companion ``CurrencyField`` column and a ``MoneyFieldProxy``
    descriptor so the model attribute reads/writes Money objects.
    """
    description = 'A field which stores both the currency and amount of money.'
    def __init__(self, verbose_name=None, name=None,
                 max_digits=None, decimal_places=None,
                 default=None,
                 default_currency=DEFAULT_CURRENCY,
                 currency_choices=CURRENCY_CHOICES, **kwargs):
        nullable = kwargs.get('null', False)
        if default is None and not nullable:
            # Backwards compatible fix for non-nullable fields
            default = 0.0
        # Normalize the default into a Money instance.
        if isinstance(default, string_types):
            try:
                # handle scenario where default is formatted like:
                # 'amount currency-code'
                amount, currency = default.split(' ')
            except ValueError:
                # a ValueError is raised when the default has no
                # currency part, i.e. just 'amount'
                amount = default
                currency = default_currency
            default = Money(Decimal(amount), Currency(code=currency))
        elif isinstance(default, (float, Decimal, int)):
            default = Money(default, default_currency)
        if not (nullable and default is None) and not isinstance(default, Money):
            raise Exception(
                'default value must be an instance of Money, is: %s' % str(default))
        # Avoid giving the user hard-to-debug errors if they miss required attributes
        if max_digits is None:
            raise Exception(
                'You have to provide a max_digits attribute to Money fields.')
        if decimal_places is None:
            raise Exception(
                'You have to provide a decimal_places attribute to Money fields.')
        if not default_currency:
            default_currency = default.currency
        self.default_currency = default_currency
        self.currency_choices = currency_choices
        self.frozen_by_south = kwargs.pop('frozen_by_south', False)
        super(MoneyField, self).__init__(verbose_name, name, max_digits,
                                         decimal_places, default=default,
                                         **kwargs)
    def to_python(self, value):
        """Reduce Money/pair/float inputs to what DecimalField understands."""
        if isinstance(value, Expression):
            return value
        if isinstance(value, Money):
            value = value.amount
        if isinstance(value, tuple):
            value = value[0]
        if isinstance(value, float):
            # Stringify first so Decimal doesn't inherit float imprecision.
            value = str(value)
        return super(MoneyField, self).to_python(value)
    def contribute_to_class(self, cls, name):
        # Marker consulted by patch_managers (see bottom of module).
        cls._meta.has_money_field = True
        if not self.frozen_by_south:
            self.add_currency_field(cls, name)
        super(MoneyField, self).contribute_to_class(cls, name)
        # Replace the plain attribute with the Money-aware descriptor.
        setattr(cls, self.name, MoneyFieldProxy(self))
    def add_currency_field(self, cls, name):
        """
        Adds CurrencyField instance to a model class.
        """
        currency_field = CurrencyField(
            max_length=3, price_field=self,
            default=self.default_currency, editable=False,
            choices=self.currency_choices
        )
        # Keep the currency column ordered just before this field.
        currency_field.creation_counter = self.creation_counter
        self.creation_counter += 1
        currency_field_name = get_currency_field_name(name)
        cls.add_to_class(currency_field_name, currency_field)
    def get_db_prep_save(self, value, connection):
        # Persist only the numeric amount; currency goes to its own column.
        if isinstance(value, Expression):
            return value
        if isinstance(value, Money):
            value = value.amount
        return super(MoneyField, self).get_db_prep_save(value, connection)
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        if lookup_type not in SUPPORTED_LOOKUPS:
            raise NotSupportedLookup(lookup_type)
        value = self.get_db_prep_save(value, connection)
        return super(MoneyField, self).get_db_prep_lookup(lookup_type, value, connection, prepared)
    def get_default(self):
        if isinstance(self.default, Money):
            # Inspect the caller: South's generic backend needs the bare
            # numeric value rather than a Money instance.
            frm = inspect.stack()[1]
            mod = inspect.getmodule(frm[0])
            # We need to return the numerical value if this is called by south
            if mod.__name__ == 'south.db.generic':
                return float(self.default.amount)
            return self.default
        else:
            return super(MoneyField, self).get_default()
    def formfield(self, **kwargs):
        """Use the djmoney form field, propagating the currency choices."""
        defaults = {'form_class': forms.MoneyField}
        defaults.update(kwargs)
        defaults['currency_choices'] = self.currency_choices
        return super(MoneyField, self).formfield(**defaults)
    def get_south_default(self):
        return '%s' % str(self.default)
    def get_south_default_currency(self):
        return '"%s"' % str(self.default_currency.code)
    def value_to_string(self, obj):
        # NOTE(review): _get_val_from_obj was deprecated/removed in newer
        # Django versions — confirm the supported Django range.
        value = self._get_val_from_obj(obj)
        return self.get_prep_value(value)
    # # South support
    def south_field_triple(self):
        """Returns a suitable description of this field for South."""
        # Note: This method is called automatically by South at
        # schemamigration time.
        from south.modelsinspector import introspector
        field_class = self.__class__.__module__ + '.' + self.__class__.__name__
        args, kwargs = introspector(self)
        # We need to
        # 1. Delete the default, 'cause it's not automatically supported.
        kwargs.pop('default')
        # 2. add the default currency, because it's not picked up from the inspector automatically.
        kwargs['default_currency'] = "'%s'" % self.default_currency
        return field_class, args, kwargs
    # # Django 1.7 migration support
    def deconstruct(self):
        name, path, args, kwargs = super(MoneyField, self).deconstruct()
        if self.default is not None:
            # self.default is always a Money instance here (see __init__).
            kwargs['default'] = self.default.amount
        if self.default_currency != DEFAULT_CURRENCY:
            kwargs['default_currency'] = str(self.default_currency)
        if self.currency_choices != CURRENCY_CHOICES:
            kwargs['currency_choices'] = self.currency_choices
        return name, path, args, kwargs
# Register CurrencyField with South's model inspector when South is
# installed; MoneyField serializes itself via south_field_triple instead.
try:
    from south.modelsinspector import add_introspection_rules
    rules = [
        # MoneyField has its own method.
        ((CurrencyField,),
         [], # No positional args
         {'default': ('default', {'default': DEFAULT_CURRENCY.code}),
          'max_length': ('max_length', {'default': 3})}),
    ]
    # MoneyField implements its serialization in the south_field_triple method
    add_introspection_rules(rules, ['^djmoney\.models\.fields\.CurrencyField'])
except ImportError:
    # South not installed — nothing to register.
    pass
def patch_managers(sender, **kwargs):
    """
    Patches model managers: wraps every manager of a model that declares a
    MoneyField (flagged via ``_meta.has_money_field``) with money_manager
    so money-aware querying works.
    """
    from .managers import money_manager
    if hasattr(sender._meta, 'has_money_field'):
        # NOTE(review): _meta.concrete_managers exists only on older Django
        # versions — confirm the supported Django range.
        sender.copy_managers([
            (_id, name, money_manager(manager))
            for _id, name, manager in sender._meta.concrete_managers
        ])
# Run for every model class as soon as Django finishes preparing it.
class_prepared.connect(patch_managers)
|
|
"""Support for LCN covers."""
import pypck
from homeassistant.components.cover import CoverEntity
from homeassistant.const import CONF_ADDRESS
from . import LcnEntity
from .const import CONF_CONNECTIONS, CONF_MOTOR, CONF_REVERSE_TIME, DATA_LCN
from .helpers import get_connection
PARALLEL_UPDATES = 0
async def async_setup_platform(
    hass, hass_config, async_add_entities, discovery_info=None
):
    """Set up the LCN cover platform from discovery info.

    Each discovered config yields either an output-port-driven cover or a
    relay-driven cover, selected by its CONF_MOTOR value.
    """
    if discovery_info is None:
        return
    devices = []
    for config in discovery_info:
        # config[CONF_ADDRESS] is a (hardware_address, connection_id) pair;
        # hardware_address is splatted into LcnAddr (presumably
        # (segment, address, is_group) — confirm against the config schema).
        address, connection_id = config[CONF_ADDRESS]
        addr = pypck.lcn_addr.LcnAddr(*address)
        connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
        connection = get_connection(connections, connection_id)
        address_connection = connection.get_address_conn(addr)
        if config[CONF_MOTOR] == "OUTPUTS":
            devices.append(LcnOutputsCover(config, address_connection))
        else:  # RELAYS
            devices.append(LcnRelayCover(config, address_connection))
    async_add_entities(devices)
class LcnOutputsCover(LcnEntity, CoverEntity):
    """Representation of a LCN cover connected to output ports."""
    def __init__(self, config, device_connection):
        """Initialize the LCN cover."""
        super().__init__(config, device_connection)
        # Output ports driving the motor: [up, down].
        self.output_ids = [
            pypck.lcn_defs.OutputPort["OUTPUTUP"].value,
            pypck.lcn_defs.OutputPort["OUTPUTDOWN"].value,
        ]
        # Optional pause when reversing motor direction (hardware protection).
        if CONF_REVERSE_TIME in config:
            self.reverse_time = pypck.lcn_defs.MotorReverseTime[
                config[CONF_REVERSE_TIME]
            ]
        else:
            self.reverse_time = None
        # Local state mirror; the real hardware state is assumed
        # (see assumed_state below).
        self._is_closed = False
        self._is_closing = False
        self._is_opening = False
    async def async_added_to_hass(self):
        """Run when entity about to be added to hass."""
        await super().async_added_to_hass()
        # Subscribe to status updates for both motor output ports.
        await self.device_connection.activate_status_request_handler(
            pypck.lcn_defs.OutputPort["OUTPUTUP"]
        )
        await self.device_connection.activate_status_request_handler(
            pypck.lcn_defs.OutputPort["OUTPUTDOWN"]
        )
    @property
    def is_closed(self):
        """Return if the cover is closed."""
        return self._is_closed
    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        return self._is_opening
    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        return self._is_closing
    @property
    def assumed_state(self):
        """Return True if unable to access real state of the entity."""
        return True
    async def async_close_cover(self, **kwargs):
        """Close the cover."""
        state = pypck.lcn_defs.MotorStateModifier.DOWN
        # Bail out without touching local state if the command failed.
        if not await self.device_connection.control_motors_outputs(
            state, self.reverse_time
        ):
            return
        self._is_opening = False
        self._is_closing = True
        self.async_write_ha_state()
    async def async_open_cover(self, **kwargs):
        """Open the cover."""
        state = pypck.lcn_defs.MotorStateModifier.UP
        if not await self.device_connection.control_motors_outputs(
            state, self.reverse_time
        ):
            return
        self._is_closed = False
        self._is_opening = True
        self._is_closing = False
        self.async_write_ha_state()
    async def async_stop_cover(self, **kwargs):
        """Stop the cover."""
        state = pypck.lcn_defs.MotorStateModifier.STOP
        if not await self.device_connection.control_motors_outputs(state):
            return
        self._is_closing = False
        self._is_opening = False
        self.async_write_ha_state()
    def input_received(self, input_obj):
        """Set cover states when LCN input object (command) is received."""
        # Only react to output-status messages for our two motor ports.
        if (
            not isinstance(input_obj, pypck.inputs.ModStatusOutput)
            or input_obj.get_output_id() not in self.output_ids
        ):
            return
        if input_obj.get_percent() > 0:  # motor is on
            if input_obj.get_output_id() == self.output_ids[0]:
                self._is_opening = True
                self._is_closing = False
            else:  # self.output_ids[1]
                self._is_opening = False
                self._is_closing = True
            # NOTE(review): marks the cover closed while it is still
            # closing — confirm this early update is intentional.
            self._is_closed = self._is_closing
        else:  # motor is off
            # cover is assumed to be closed if we were in closing state before
            self._is_closed = self._is_closing
            self._is_closing = False
            self._is_opening = False
        self.async_write_ha_state()
class LcnRelayCover(LcnEntity, CoverEntity):
    """Representation of a LCN cover connected to relays."""
    def __init__(self, config, device_connection):
        """Initialize the LCN cover."""
        super().__init__(config, device_connection)
        self.motor = pypck.lcn_defs.MotorPort[config[CONF_MOTOR]]
        # Each motor occupies a relay pair: even index = on/off,
        # odd index = up/down direction.
        self.motor_port_onoff = self.motor.value * 2
        self.motor_port_updown = self.motor_port_onoff + 1
        # Local state mirror; real hardware state is assumed.
        self._is_closed = False
        self._is_closing = False
        self._is_opening = False
    async def async_added_to_hass(self):
        """Run when entity about to be added to hass."""
        await super().async_added_to_hass()
        await self.device_connection.activate_status_request_handler(self.motor)
    @property
    def is_closed(self):
        """Return if the cover is closed."""
        return self._is_closed
    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        return self._is_opening
    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        return self._is_closing
    @property
    def assumed_state(self):
        """Return True if unable to access real state of the entity."""
        return True
    async def async_close_cover(self, **kwargs):
        """Close the cover."""
        # Command all four motors at once, touching only ours.
        states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
        states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.DOWN
        if not await self.device_connection.control_motors_relays(states):
            return
        self._is_opening = False
        self._is_closing = True
        self.async_write_ha_state()
    async def async_open_cover(self, **kwargs):
        """Open the cover."""
        states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
        states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.UP
        if not await self.device_connection.control_motors_relays(states):
            return
        self._is_closed = False
        self._is_opening = True
        self._is_closing = False
        self.async_write_ha_state()
    async def async_stop_cover(self, **kwargs):
        """Stop the cover."""
        states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
        states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.STOP
        if not await self.device_connection.control_motors_relays(states):
            return
        self._is_closing = False
        self._is_opening = False
        self.async_write_ha_state()
    def input_received(self, input_obj):
        """Set cover states when LCN input object (command) is received."""
        if not isinstance(input_obj, pypck.inputs.ModStatusRelays):
            return
        states = input_obj.states  # list of boolean values (relay on/off)
        if states[self.motor_port_onoff]:  # motor is on
            self._is_opening = not states[self.motor_port_updown]  # set direction
            self._is_closing = states[self.motor_port_updown]  # set direction
        else:  # motor is off
            self._is_opening = False
            self._is_closing = False
            # Direction relay left in 'down' position implies closed.
            self._is_closed = states[self.motor_port_updown]
        self.async_write_ha_state()
|
|
# -*- encoding: utf-8 -*-
import PointDefine_pb2
import ctypes
import json
from flask import abort, make_response
import dicttoxml
import string
class LibConfig(ctypes.Structure):
    """Configuration struct passed to the native Timo library.

    Field names and order must mirror the C definition exactly.
    """

    _fields_ = [
        ('kMatchRadius', ctypes.c_int),
        ('kMatchAngle', ctypes.c_int),
        ('kMinTimeInterval', ctypes.c_int),
        ('kMaxTimeInterval', ctypes.c_int),
        ('libpath', ctypes.c_char * 256),
    ]

    def getdict(self):
        """Return all struct fields as a plain dict."""
        return {name: getattr(self, name) for name, _ in self._fields_}
class CarGpsData(ctypes.Structure):
    """Single GPS sample as consumed by the native Timo library."""

    _fields_ = [
        ('iGpsTime', ctypes.c_int),
        ('iAzimuth', ctypes.c_int),
        ('fGpsSpeed', ctypes.c_float),
        ('dLongitude', ctypes.c_double),
        ('dLatitude', ctypes.c_double),
    ]

    def getdict(self):
        """Return all struct fields as a plain dict."""
        return {name: getattr(self, name) for name, _ in self._fields_}
class CarGpsHeader(ctypes.Structure):
    """GPS list header: a sample count plus a pointer to the samples."""

    _fields_ = [
        ('iGPSCount', ctypes.c_int),
        ('pstGPSData', ctypes.POINTER(CarGpsData)),
    ]
class TrafficInfo(ctypes.Structure):
    """Per-link traffic record returned by the native Timo library."""

    _fields_ = [
        ('iTileID', ctypes.c_int),
        ('iLinkID', ctypes.c_int),
        ('iLinkDir', ctypes.c_int),
        ('iLinkDegree', ctypes.c_int),
        ('iTime', ctypes.c_int),
        ('IsConnected', ctypes.c_int),
        ('fLinkLength', ctypes.c_float),
        ('fPathLength', ctypes.c_float),
        ('fAverageSpeed', ctypes.c_float),
        ('fMaxSpeed', ctypes.c_float),
    ]

    def getdict(self):
        """Return all struct fields as a plain dict."""
        return {name: getattr(self, name) for name, _ in self._fields_}
class TrafficInfoHeader(ctypes.Structure):
    """Traffic result header: a record count plus a pointer to the records."""

    _fields_ = [
        ('iTrafficInfoCount', ctypes.c_int),
        ('pstTrafficInfoData', ctypes.POINTER(TrafficInfo)),
    ]
class TimoLibrary(object):
    """ctypes wrapper around the native Timo map-matching/traffic library.

    Loads the shared library named by ``config['TIMO_LIB_PATH']``, binds its
    C entry points, and keeps an opaque handle used for every later call.

    Fixes: ``__exit__`` now implements the context-manager protocol (the old
    one-argument signature crashed any ``with`` block on exit), ``== None``
    became ``is None``, bare ``except:`` became ``except Exception:``, the
    Python-2-only ``string.atoi``/``string.atof`` were replaced with
    ``int``/``float``, and the triplicated process/clean-cache code was
    factored into private helpers.
    """
    def __init__(self, config):
        timo_library = ctypes.cdll.LoadLibrary(config['TIMO_LIB_PATH'])
        if timo_library is None:
            raise Exception('TIMO_LIB_PATH library is not found.')
        # Bind the C API: handle = InitTimo3(&LibConfig)
        self.inittimo = timo_library.InitTimo3
        self.inittimo.restype = ctypes.c_void_p
        self.inittimo.argtypes = [ctypes.POINTER(LibConfig)]
        # ProcessTimo(handle, &CarGpsHeader, &TrafficInfoHeader)
        self.ProcessTimo = timo_library.ProcessTimo
        self.ProcessTimo.argtypes = [ctypes.c_void_p, ctypes.POINTER(CarGpsHeader), ctypes.POINTER(TrafficInfoHeader)]
        self.ProcessTimo.restype = None
        # CleanTimoCache(handle, &TrafficInfoHeader): frees native result memory
        self.CleanTimoCache = timo_library.CleanTimoCache
        self.CleanTimoCache.argtypes = [ctypes.c_void_p, ctypes.POINTER(TrafficInfoHeader)]
        self.CleanTimoCache.restype = None
        self.CloseTimo = timo_library.CloseTimo
        self.CloseTimo.argtypes = [ctypes.c_void_p]
        self.CloseTimo.restype = None
        self._timolib = ctypes.c_void_p()
        # initialize the native config struct from the app config
        libconfig = LibConfig()
        libconfig.kMatchRadius = config['TIMO_LIB_MATCH_RADIUS']
        libconfig.kMatchAngle = config['TIMO_LIB_MATCH_ANGLE']
        libconfig.kMinTimeInterval = config['TIMO_LIB_MIN_TIMEGAP']
        libconfig.kMaxTimeInterval = config['TIMO_LIB_MAX_TIMEGAP']
        libconfig.libpath = config['TIMO_LIB_DATA_PATH']
        self._timolib = self.inittimo(ctypes.byref(libconfig))
        if self._timolib is None:
            raise Exception('initial failed.')
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # BUG FIX: the protocol passes three exception arguments; the old
        # one-argument signature raised TypeError on every `with` exit.
        self.close()
        return False
    def close(self):
        """Release the native library handle."""
        self.CloseTimo(ctypes.c_void_p(self._timolib))
    def _process(self, inputheader):
        """Run ProcessTimo on a filled CarGpsHeader; abort(500) on failure."""
        trafficinfolist = TrafficInfoHeader()
        trafficinfolist.iTrafficInfoCount = 0
        trafficinfolist.pstTrafficInfoData = None
        try:
            if self._timolib is not None:
                self.ProcessTimo(ctypes.c_void_p(self._timolib), ctypes.byref(inputheader), ctypes.byref(trafficinfolist))
        except Exception:
            abort(make_response("Process Error", 500))
        return trafficinfolist
    def _clean_cache(self, trafficinfolist):
        """Ask the native library to free a result list; abort(500) on failure."""
        try:
            if self._timolib is not None:
                self.CleanTimoCache(ctypes.c_void_p(self._timolib), ctypes.byref(trafficinfolist))
        except Exception:
            abort(make_response("Clean Error", 500))
    def gettrafficinfolist(self, proto):
        """Parse a protobuf point list and return the raw TrafficInfoHeader.

        Caller is responsible for invoking _clean_cache on the result.
        """
        inputheader = CarGpsHeader()
        pointlist_proto = PointDefine_pb2.pointlist()
        try:
            pointlist_proto.ParseFromString(proto)
        except Exception:
            abort(make_response("Parse Error", 400))
        # initialize gps header
        inputheader.iGPSCount = len(pointlist_proto.iGpsTime)
        if inputheader.iGPSCount == 0:
            abort(make_response("Input Error", 400))
        inputheader.pstGPSData = (inputheader.iGPSCount * CarGpsData)()
        for i in range(inputheader.iGPSCount):
            inputheader.pstGPSData[i].iGpsTime = pointlist_proto.iGpsTime[i]
            inputheader.pstGPSData[i].iAzimuth = pointlist_proto.iAzimuth[i]
            inputheader.pstGPSData[i].fGpsSpeed = pointlist_proto.fGpsSpeed[i]
            inputheader.pstGPSData[i].dLongitude = pointlist_proto.dLongitude[i]
            inputheader.pstGPSData[i].dLatitude = pointlist_proto.dLatitude[i]
        return self._process(inputheader)
    def decodingdata(self, proto):
        """Map-match *proto* and return the result serialized as protobuf."""
        trafficinfolist = self.gettrafficinfolist(proto)
        trafficinfoheader_proto = PointDefine_pb2.trafficinfoheader()
        for i in range(trafficinfolist.iTrafficInfoCount):
            info = trafficinfolist.pstTrafficInfoData[i]
            trafficinfoheader_proto.iTileID.append(info.iTileID)
            trafficinfoheader_proto.iLinkID.append(info.iLinkID)
            trafficinfoheader_proto.iLinkDir.append(info.iLinkDir)
            trafficinfoheader_proto.iLinkDegree.append(info.iLinkDegree)
            trafficinfoheader_proto.iTime.append(info.iTime)
            trafficinfoheader_proto.IsConnected.append(info.IsConnected)
            trafficinfoheader_proto.fLinkLength.append(info.fLinkLength)
            trafficinfoheader_proto.fPathLength.append(info.fPathLength)
            trafficinfoheader_proto.fAverageSpeed.append(info.fAverageSpeed)
            trafficinfoheader_proto.fMaxSpeed.append(info.fMaxSpeed)
        self._clean_cache(trafficinfolist)
        return trafficinfoheader_proto.SerializeToString()
    def decoding2json(self, proto):
        """Accept a JSON list of GPS points and return matched links as JSON."""
        try:
            newdata = json.loads(proto)
        except Exception:
            abort(make_response("Decoding Error", 400))
        inputheader = CarGpsHeader()
        inputheader.iGPSCount = len(newdata)
        if inputheader.iGPSCount == 0:
            abort(make_response("Input Error", 400))
        inputheader.pstGPSData = (inputheader.iGPSCount * CarGpsData)()
        try:
            for i in range(inputheader.iGPSCount):
                # int()/float() replace the Python-2-only string.atoi/atof.
                inputheader.pstGPSData[i].iGpsTime = int(newdata[i]['iGpsTime'])
                inputheader.pstGPSData[i].iAzimuth = int(newdata[i]['iAzimuth'])
                inputheader.pstGPSData[i].fGpsSpeed = float(newdata[i]['fGpsSpeed'])
                inputheader.pstGPSData[i].dLongitude = float(newdata[i]['dLongitude'])
                inputheader.pstGPSData[i].dLatitude = float(newdata[i]['dLatitude'])
        except Exception:
            abort(make_response("Input Error", 500))
        trafficinfolist = self._process(inputheader)
        jsonlist = [trafficinfolist.pstTrafficInfoData[i].getdict()
                    for i in range(trafficinfolist.iTrafficInfoCount)]
        self._clean_cache(trafficinfolist)
        return json.dumps(jsonlist)
    def decoding2xml(self, proto):
        """Map-match the protobuf *proto* and return the result as XML."""
        trafficinfolist = self.gettrafficinfolist(proto)
        jsonlist = [trafficinfolist.pstTrafficInfoData[i].getdict()
                    for i in range(trafficinfolist.iTrafficInfoCount)]
        self._clean_cache(trafficinfolist)
        return dicttoxml.dicttoxml(jsonlist)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Unique operator"""
from tvm import te, tir
from ..te import hybrid
from .scan import cumsum
from .sort import sort, argsort
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
    """Emit low-level IR computing the adjacent difference of a 1-D buffer.

    Parameters
    ----------
    data : Buffer
        Input 1-D Buffer.
    output : Buffer
        Destination buffer, same shape as ``data``. Receives
        output[0] = 0 and output[i] = binop(data[i], data[i-1]) for
        0 < i < len(data).
    binop : function, optional
        Binary op combining two TIR expressions into a new one; defaults
        to ``tvm.tir.Sub``.
    """
    builder = tir.ir_builder.create()
    src = builder.buffer_ptr(data)
    dst = builder.buffer_ptr(output)
    with builder.for_range(0, data.shape[0], kind="parallel") as idx:
        with builder.if_scope(idx == 0):
            # No predecessor for the first element.
            dst[0] = 0
        with builder.else_scope():
            dst[idx] = tir.Cast(output.dtype, binop(src[idx], src[idx - 1]))
    return builder.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
    """Function calculate adjacent difference in an 1-D array.

    Parameters
    ----------
    data : tvm.te.Tensor
        Input 1-D tensor.

    out_dtype : str
        The output tensor data type.

    binop: function, optional
        A binary associative op to use for calculating difference. The function takes two
        TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
        compute the adjacent difference.

    Returns
    -------
    output : tvm.te.Tensor
        1-D tensor storing the adjacent difference of the input tensor. The adjacent difference
        is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
        where i > 0 and i < len(data).
    """
    # Wrap the IR generator into a TE extern op with a single in/out buffer.
    return te.extern(
        [data.shape],
        [data],
        lambda ins, outs: _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop),
        dtype=[out_dtype],
        name="_calc_adjacent_diff",
        tag="_calc_adjacent_diff_cpu",
    )
@hybrid.script
def _calc_num_unique(inc_scan):
    """Helper function to get the number of unique elements from the inc_scan tensor.

    The last value of the inclusive scan is the zero-based index of the last
    unique element, so adding one yields the total number of unique elements.
    """
    output = output_tensor((1,), "int32")
    output[0] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
    return output
def _calc_unique_ir(
    data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
    """Low level IR to calculate unique elements, inverse indices, and counts (optional) of
    unique elements of 1-D array.

    Parameters
    ----------
    data : Buffer
        Input 1-D Buffer.

    argsorted_indices : Buffer
        A buffer that stores the argsorted indices of the input data.

    inc_scan : Buffer
        A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
        of the sorted data.

    index_converter (optional) : Buffer
        An optional index converter that transforms the unique element index
        such that new_idx = index_converter[old_idx]. Pass None when no
        remapping is needed.

    unique_elements : Buffer
        A buffer that stores the unique elements.

    inverse_indices : Buffer
        A buffer that stores the index of each input data element in the unique element array.

    counts (optional) : Buffer
        A buffer that stores the count of each unique element. Pass None when
        counts are not requested.
    """
    ib = tir.ir_builder.create()
    data_ptr = ib.buffer_ptr(data)
    argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
    inc_scan_ptr = ib.buffer_ptr(inc_scan)
    unique_elements_ptr = ib.buffer_ptr(unique_elements)
    inverse_indices_ptr = ib.buffer_ptr(inverse_indices)

    # index_converter and counts are optional: only load pointers for real buffers.
    index_converter_ptr = None
    if isinstance(index_converter, tir.Buffer):
        index_converter_ptr = ib.buffer_ptr(index_converter)
    if isinstance(counts, tir.Buffer):
        counts_ptr = ib.buffer_ptr(counts)
        # use indices_ptr as a tmp buffer to store tids with inc_scan[tid] != inc_scan[tid-1]
        # (safe because inverse_indices is fully overwritten by the final loop below)
        unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)

    data_length = data.shape[0]

    # if need to return counts
    if isinstance(counts, tir.Buffer):
        # Number of unique elements is the last inclusive-scan value plus one.
        num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
        num_elements = data.shape[0]
        # Sentinel boundary marking the end of the last run of equal elements.
        unique_seq_indices_ptr[num_unique - 1] = num_elements
        with ib.new_scope():
            # Record the start position of every run of equal sorted elements.
            with ib.for_range(0, data_length, kind="parallel") as i:
                with ib.if_scope(i > 0):
                    with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
                        unique_seq_indices_ptr[inc_scan_ptr[i] - 1] = i
        with ib.new_scope():
            # Count of each unique element = distance between consecutive boundaries.
            with ib.for_range(0, num_unique, kind="parallel") as i:
                unique_idx = i if not index_converter_ptr else index_converter_ptr[i]
                with ib.if_scope(i == 0):
                    counts_ptr[unique_idx] = unique_seq_indices_ptr[i]
                with ib.else_scope():
                    counts_ptr[unique_idx] = (
                        unique_seq_indices_ptr[i] - unique_seq_indices_ptr[i - 1]
                    )
    # calculate unique elements and inverse indices
    with ib.new_scope():
        with ib.for_range(0, data_length, kind="parallel") as i:
            data_idx = argsorted_indices_ptr[i]
            unique_idx = (
                inc_scan_ptr[i] if not index_converter_ptr else index_converter_ptr[inc_scan_ptr[i]]
            )
            inverse_indices_ptr[data_idx] = unique_idx
            # Only the first element of each run of equal values writes the unique value.
            with ib.if_scope(i == 0):
                unique_elements_ptr[unique_idx] = data_ptr[data_idx]
            with ib.else_scope():
                with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
                    unique_elements_ptr[unique_idx] = data_ptr[data_idx]
    return ib.get()
@hybrid.script
def _calc_first_occurence(argsorted_indices, inc_scan):
    """Hybrid script to calculate the first occurence of each unique element in the input data.

    Parameters
    ----------
    argsorted_indices : tvm.te.Tensor
        A tensor that stores the argsorted indices of the input data.

    inc_scan : tvm.te.Tensor
        A tensor that stores the inclusive scan of the binary tir.NE adjacent difference
        of the sorted data.

    Returns
    -------
    first_occurence : tvm.te.Tensor
        A tensor that stores the first occurence of each unique element in the input data.
        Slots past the number of unique elements keep the padding value len(data).
    """
    first_occurence = output_tensor(argsorted_indices.shape, "int32")
    # Initialize with the input length as the padding value.
    for i in parallel(argsorted_indices.shape[0]):
        first_occurence[i] = argsorted_indices.shape[0]
    # The first element of each run of equal sorted values records its original index.
    for i in parallel(argsorted_indices.shape[0]):
        if i == 0 or inc_scan[i] != inc_scan[i - 1]:
            first_occurence[inc_scan[i]] = argsorted_indices[i]
    return first_occurence
def unique(data, is_sorted=True, return_counts=False):
    """
    Find the unique elements of a 1-D tensor. Please note `output` and `counts` are all padded to
    have the same length of `data` and element with index >= num_unique[0] has undefined value.

    Parameters
    ----------
    data : tvm.te.Tensor
        A 1-D tensor of integers.

    is_sorted : bool
        Whether to sort the unique elements in ascending order before returning as output.

    return_counts : bool
        Whether to return the count of each unique element.

    Returns
    -------
    unique : tvm.te.Tensor
        A 1-D tensor containing the unique elements of the input data tensor. The same size as
        the input data. If there are less unique elements than input data, the end of the tensor
        is padded with zeros.

    indices : tvm.te.Tensor
        A 1-D tensor. The same size as output. For each entry in output, it contains
        the index of its first occurence in the input data. The end of the tensor is padded
        with the length of the input data.

    inverse_indices : tvm.te.Tensor
        A 1-D tensor. For each entry in data, it contains the index of that data element in
        the unique array. (Note that inverse_indices is very similar to indices if output is not
        sorted.)

    num_unique : tvm.te.Tensor
        A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.

    counts (optional) : tvm.te.Tensor
        A 1-D tensor containing the count of each unique element in the output.

    Examples
    --------
    .. code-block:: python

        [output, indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
        output          =  [4, 5, 1, 2, 3, _, _, _]
        indices         =  [0, 1, 2, 3, 4, _, _, _]
        inverse_indices =  [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique      =  [5]

        [output, indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
        output          =  [4, 5, 1, 2, 3, _, _, _]
        indices         =  [0, 1, 2, 3, 4, _, _, _]
        inverse_indices =  [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique      =  [5]
        counts          =  [2, 2, 1, 1, 2, _, _, _]

        [output, indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
        output          =  [1, 2, 3, 4, 5, _, _, _]
        indices         =  [2, 3, 4, 0, 1, _, _, _]
        inverse_indices =  [3, 4, 0, 1, 2, 2, 3, 4]
        num_unique      =  [5]
    """
    sorted_data = sort(data)
    sort_order = argsort(data, dtype="int32")
    # 1 where the sorted value differs from its predecessor, 0 where it repeats.
    change_flags = _calc_adjacent_diff(sorted_data, "int32", tir.NE)
    # Inclusive scan of the flags: the unique-element slot for each sorted position.
    inc_scan = cumsum(change_flags, dtype="int32", exclusive=0)
    # total number of unique elements
    num_unique_elements = _calc_num_unique(inc_scan)

    # Output buffers: unique values, inverse indices, and optionally counts.
    num_outputs = 3 if return_counts else 2
    out_data_shape = [data.shape] * num_outputs
    out_dtypes = [data.dtype] + ["int32"] * (num_outputs - 1)

    first_occurence = _calc_first_occurence(sort_order, inc_scan)
    if is_sorted:
        # Unique elements come out already sorted; no slot remapping needed.
        in_data = [data, sort_order, inc_scan]
        converter_args = (None,)
        indices = first_occurence
    else:
        # Remap unique-element slots so they appear in order of first occurence:
        # sort the unique elements by their first occurence and invert the permutation.
        argsorted_first_occurence = argsort(first_occurence, dtype="int32")
        index_converter = argsort(argsorted_first_occurence, dtype="int32")
        in_data = [data, sort_order, inc_scan, index_converter]
        converter_args = ()
        # First occurence is in order of sorted unique output, if we sort the
        # first_occurence array we get the correct result
        indices = sort(first_occurence)
    counts_args = () if return_counts else (None,)

    def _unique_compute(ins, outs):
        # converter_args supplies an explicit None when the converter is not part
        # of `ins`; counts_args does the same for the optional counts output.
        return _calc_unique_ir(*ins, *converter_args, *outs, *counts_args)

    outs = te.extern(
        out_data_shape,
        in_data,
        _unique_compute,
        dtype=out_dtypes,
        name="_calc_unique",
        tag="_calc_unique_cpu",
    )
    if return_counts:
        return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
    return [outs[0], indices, outs[1], num_unique_elements]
|
|
"""Compat module to handle files security on Windows and Linux"""
from __future__ import absolute_import
import errno
import os # pylint: disable=os-module-forbidden
import stat
import sys
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
try:
import ntsecuritycon
import pywintypes
import win32api
import win32con
import win32file
import win32security
import winerror
except ImportError:
POSIX_MODE = True
else:
POSIX_MODE = False
# Windows umask implementation, since Windows does not have a concept of umask by default.
# We choose 022 as initial value since it is the default one on most Linux distributions, and
# it is a decent choice to not have write permissions for group owner and everybody by default.
# We use a class here to avoid needing to define a global variable, and the potential mistakes
# that could happen with this kind of pattern.
class _WindowsUmask:
    """Store the current umask to apply on Windows"""
    def __init__(self) -> None:
        # 0o022 mirrors the default umask on most Linux distributions: no write
        # access for group and others on newly created files.
        self.mask = 0o022


# Module-level singleton holding the emulated umask for the Windows code paths.
_WINDOWS_UMASK = _WindowsUmask()
def chmod(file_path: str, mode: int) -> None:
    """
    Apply a POSIX mode on given file_path:

    - for Linux, the POSIX mode will be directly applied using chmod,
    - for Windows, the POSIX mode will be translated into a Windows DACL that make sense for
      Certbot context, and applied to the file using kernel calls.

    The definition of the Windows DACL that correspond to a POSIX mode, in the context of Certbot,
    is explained at https://github.com/certbot/certbot/issues/6356 and is implemented by the
    method `_generate_windows_flags()`.

    :param str file_path: Path of the file
    :param int mode: POSIX mode to apply
    """
    if not POSIX_MODE:
        _apply_win_mode(file_path, mode)
        return
    os.chmod(file_path, mode)
def umask(mask: int) -> int:
    """
    Set the current numeric umask and return the previous umask. On Linux, the built-in umask
    method is used. On Windows, our Certbot-side implementation is used.

    :param int mask: The user file-creation mode mask to apply.
    :rtype: int
    :return: The previous umask value.
    """
    if not POSIX_MODE:
        # Swap the emulated Windows umask, returning the old value like os.umask does.
        previous = _WINDOWS_UMASK.mask
        _WINDOWS_UMASK.mask = mask
        return previous
    return os.umask(mask)
# One could ask why there is no copy_ownership() function, or even a reimplementation
# of os.chown() that would modify the ownership of file without touching the mode itself.
# This is because on Windows, it would require recalculating the existing DACL against
# the new owner, since the DACL is composed of ACEs that targets a specific user, not dynamically
# the current owner of a file. This action would be necessary to keep consistency between
# the POSIX mode applied to the file and the current owner of this file.
# Since copying and editing arbitrary DACL is very difficult, and since we actually know
# the mode to apply at the time the owner of a file should change, it is easier to just
# change the owner, then reapply the known mode, as copy_ownership_and_apply_mode() does.
def copy_ownership_and_apply_mode(src: str, dst: str, mode: int,
                                  copy_user: bool, copy_group: bool) -> None:
    """
    Copy ownership (user and optionally group on Linux) from the source to the
    destination, then apply given mode in compatible way for Linux and Windows.
    This replaces the os.chown command.

    :param str src: Path of the source file
    :param str dst: Path of the destination file
    :param int mode: Permission mode to apply on the destination file
    :param bool copy_user: Copy user if `True`
    :param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
    """
    if POSIX_MODE:
        src_stats = os.stat(src)
        # -1 tells os.chown to leave the corresponding id untouched.
        uid = src_stats.st_uid if copy_user else -1
        gid = src_stats.st_gid if copy_group else -1
        # On Windows, os.chown does not exist; this branch is only reached when
        # POSIX_MODE is true, so the call is safe despite what static checkers say.
        os.chown(dst, uid, gid)
    else:
        if copy_user:
            # There is no group handling in Windows
            _copy_win_ownership(src, dst)
    chmod(dst, mode)
# Quite similar to copy_ownership_and_apply_mode, but this time the DACL is copied from
# the source file on Windows. The DACL stays consistent with the dynamic rights of the
# equivalent POSIX mode, because ownership and mode are copied altogether on the destination
# file, so no recomputing of the DACL against the new owner is needed, as it would be
# for a copy_ownership alone method.
def copy_ownership_and_mode(src: str, dst: str,
                            copy_user: bool = True, copy_group: bool = True) -> None:
    """
    Copy ownership (user and optionally group on Linux) and mode/DACL
    from the source to the destination.

    :param str src: Path of the source file
    :param str dst: Path of the destination file
    :param bool copy_user: Copy user if `True`
    :param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
    """
    if not POSIX_MODE:
        if copy_user:
            # There is no group handling in Windows
            _copy_win_ownership(src, dst)
        _copy_win_mode(src, dst)
        return
    # On Linux, we just delegate to chown and chmod.
    src_stats = os.stat(src)
    os.chown(dst,
             src_stats.st_uid if copy_user else -1,
             src_stats.st_gid if copy_group else -1)
    chmod(dst, src_stats.st_mode)
def check_mode(file_path: str, mode: int) -> bool:
    """
    Check if the given mode matches the permissions of the given file.
    On Linux, will make a direct comparison, on Windows, mode will be compared against
    the security model.

    :param str file_path: Path of the file
    :param int mode: POSIX mode to test
    :rtype: bool
    :return: True if the POSIX mode matches the file permissions
    """
    if not POSIX_MODE:
        return _check_win_mode(file_path, mode)
    return stat.S_IMODE(os.stat(file_path).st_mode) == mode
def check_owner(file_path: str) -> bool:
    """
    Check if given file is owned by current user.

    :param str file_path: File path to check
    :rtype: bool
    :return: True if given file is owned by current user, False otherwise.
    """
    if not POSIX_MODE:
        # Compare the owner SID of the file with the SID of the current user.
        descriptor = win32security.GetFileSecurity(
            file_path, win32security.OWNER_SECURITY_INFORMATION)
        return descriptor.GetSecurityDescriptorOwner() == _get_current_user()
    return os.stat(file_path).st_uid == os.getuid()
def check_permissions(file_path: str, mode: int) -> bool:
    """
    Check if given file has the given mode and is owned by current user.

    :param str file_path: File path to check
    :param int mode: POSIX mode to check
    :rtype: bool
    :return: True if file has correct mode and owner, False otherwise.
    """
    if not check_owner(file_path):
        return False
    return check_mode(file_path, mode)
def open(file_path: str, flags: int, mode: int = 0o777) -> int: # pylint: disable=redefined-builtin
    """
    Wrapper of original os.open function, that will ensure on Windows that given mode
    is correctly applied.

    :param str file_path: The file path to open
    :param int flags: Flags to apply on file while opened
    :param int mode: POSIX mode to apply on file when opened,
        Python defaults will be applied if ``None``

    :returns: the file descriptor to the opened file
    :rtype: int

    :raise: OSError(errno.EEXIST) if the file already exists and os.O_CREAT & os.O_EXCL are set,
        OSError(errno.EACCES) on Windows if the file already exists and is a directory, and
        os.O_CREAT is set.
    """
    if POSIX_MODE:
        # On Linux, invoke os.open directly.
        return os.open(file_path, flags, mode)

    # Windows: handle creation of the file atomically with proper permissions.
    if flags & os.O_CREAT:
        # If os.O_EXCL is set, we will use the "CREATE_NEW", that will raise an exception if
        # file exists, matching the API contract of this bit flag. Otherwise, we use
        # "CREATE_ALWAYS" that will always create the file whether it exists or not.
        disposition = win32con.CREATE_NEW if flags & os.O_EXCL else win32con.CREATE_ALWAYS
        attributes = win32security.SECURITY_ATTRIBUTES()
        security = attributes.SECURITY_DESCRIPTOR
        user = _get_current_user()
        # The DACL is derived from the POSIX mode, honoring the emulated umask.
        dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
        # We set second parameter to 0 (`False`) to say that this security descriptor is
        # NOT constructed from a default mechanism, but is explicitly set by the user.
        # See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptorowner # pylint: disable=line-too-long
        security.SetSecurityDescriptorOwner(user, 0)
        # We set first parameter to 1 (`True`) to say that this security descriptor contains
        # a DACL. Otherwise second and third parameters are ignored.
        # We set third parameter to 0 (`False`) to say that this security descriptor is
        # NOT constructed from a default mechanism, but is explicitly set by the user.
        # See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptordacl # pylint: disable=line-too-long
        security.SetSecurityDescriptorDacl(1, dacl, 0)
        handle = None
        try:
            # NOTE(review): `FILE_SHARE_READ & FILE_SHARE_WRITE` evaluates to 0,
            # i.e. no sharing at all while this handle is open. If concurrent
            # readers/writers were intended, `|` would be needed instead of `&`.
            # Left as-is — confirm the exclusive-access behavior is deliberate.
            handle = win32file.CreateFile(file_path, win32file.GENERIC_READ,
                                          win32file.FILE_SHARE_READ & win32file.FILE_SHARE_WRITE,
                                          attributes, disposition, 0, None)
        except pywintypes.error as err:
            # Handle native windows errors into python errors to be consistent with the API
            # of os.open in the situation of a file already existing or locked.
            if err.winerror == winerror.ERROR_FILE_EXISTS:
                raise OSError(errno.EEXIST, err.strerror)
            if err.winerror == winerror.ERROR_SHARING_VIOLATION:
                raise OSError(errno.EACCES, err.strerror)
            raise err
        finally:
            # The handle was only needed to create the file; close it immediately.
            if handle:
                handle.Close()
        # At this point, the file that did not exist has been created with proper permissions,
        # so os.O_CREAT and os.O_EXCL are not needed anymore. We remove them from the flags to
        # avoid a FileExists exception before calling os.open.
        return os.open(file_path, flags ^ os.O_CREAT ^ os.O_EXCL)

    # Windows: general case, we call os.open, let exceptions be thrown, then chmod if all is fine.
    handle = os.open(file_path, flags)
    chmod(file_path, mode)
    return handle
def makedirs(file_path: str, mode: int = 0o777) -> None:
    """
    Rewrite of original os.makedirs function, that will ensure on Windows that given mode
    is correctly applied.

    :param str file_path: The file path to open
    :param int mode: POSIX mode to apply on leaf directory when created, Python defaults
        will be applied if ``None``
    """
    # Temporarily adjust the process umask so the requested mode is applied verbatim.
    current_umask = umask(0)
    try:
        # Since Python 3.7, os.makedirs does not set the given mode to the intermediate
        # directories that could be created in the process. To keep things safe and consistent
        # on all Python versions, we set the umask accordingly to have all directories
        # (intermediate and leaf) created with the given mode.
        # NOTE: `^` binds tighter than `|`, so this is current_umask | (0o777 ^ mode):
        # mask out exactly the permission bits that `mode` does not grant.
        umask(current_umask | 0o777 ^ mode)
        if POSIX_MODE:
            return os.makedirs(file_path, mode)
        orig_mkdir_fn = os.mkdir
        try:
            # As we know that os.mkdir is called internally by os.makedirs, we will swap the
            # function in os module for the time of makedirs execution on Windows.
            os.mkdir = mkdir  # type: ignore
            return os.makedirs(file_path, mode)
        finally:
            # Always restore the genuine os.mkdir, even when makedirs raised.
            os.mkdir = orig_mkdir_fn
    finally:
        # Restore the umask captured on entry.
        umask(current_umask)
def mkdir(file_path: str, mode: int = 0o777) -> None:
    """
    Rewrite of original os.mkdir function, that will ensure on Windows that given mode
    is correctly applied.

    :param str file_path: The file path to open
    :param int mode: POSIX mode to apply on directory when created, Python defaults
        will be applied if ``None``
    """
    if POSIX_MODE:
        return os.mkdir(file_path, mode)

    # Windows: create the directory with an explicit owner and a DACL derived
    # from the POSIX mode, honoring the emulated umask.
    attributes = win32security.SECURITY_ATTRIBUTES()
    descriptor = attributes.SECURITY_DESCRIPTOR
    owner = _get_current_user()
    # `False`: the owner is explicitly provided, not derived from a default mechanism.
    descriptor.SetSecurityDescriptorOwner(owner, False)
    descriptor.SetSecurityDescriptorDacl(
        1, _generate_dacl(owner, mode, _WINDOWS_UMASK.mask), 0)
    try:
        win32file.CreateDirectory(file_path, attributes)
    except pywintypes.error as err:
        # Translate the native Windows error into the OSError os.mkdir raises
        # for an already existing directory.
        if err.winerror == winerror.ERROR_ALREADY_EXISTS:
            raise OSError(errno.EEXIST, err.strerror, file_path, err.winerror)
        raise
    return None
def replace(src: str, dst: str) -> None:
    """
    Rename a file to a destination path and handles situations where the destination exists.

    :param str src: The current file path.
    :param str dst: The new file path.
    """
    # os.replace() exists on every supported Python (3.3+) for both Linux and
    # Windows, and overwrites an existing destination, so the historical
    # hasattr()/os.rename() fallback is dead code and has been removed.
    os.replace(src, dst)
def realpath(file_path: str) -> str:
    """
    Find the real path for the given path. This method resolves symlinks, including
    recursive symlinks, and is protected against symlinks that creates an infinite loop.

    :param str file_path: The path to resolve
    :returns: The real path for the given path
    :rtype: str
    """
    initial_path = file_path
    # Since Python 3.8, os.path.realpath also resolves symlinks on Windows.
    if POSIX_MODE or sys.version_info >= (3, 8):
        resolved = os.path.realpath(file_path)
        if os.path.islink(resolved):
            # realpath leaves the link unresolved only when it is part of a loop.
            # See realpath code: https://github.com/python/cpython/blob/master/Lib/posixpath.py
            raise RuntimeError('Error, link {0} is a loop!'.format(initial_path))
        return resolved

    # Manual resolution for Windows on Python < 3.8: follow links one hop at a
    # time, remembering each target to detect cycles.
    seen: List[str] = []
    while os.path.islink(file_path):
        parent_dir = os.path.dirname(file_path)
        file_path = os.readlink(file_path)
        if not os.path.isabs(file_path):
            file_path = os.path.join(parent_dir, file_path)
        if file_path in seen:
            raise RuntimeError('Error, link {0} is a loop!'.format(initial_path))
        seen.append(file_path)
    return os.path.abspath(file_path)
def readlink(link_path: str) -> str:
    """
    Return a string representing the path to which the symbolic link points.

    :param str link_path: The symlink path to resolve
    :return: The path the symlink points to
    :returns: str
    :raise: ValueError if a long path (260> characters) is encountered on Windows
    """
    target = os.readlink(link_path)
    if POSIX_MODE or not target.startswith('\\\\?\\'):
        return target

    # At this point, we know we are on Windows and that the path returned uses
    # the extended form which is done for all paths in Python 3.8+
    # Max length of a normal path is 260 characters on Windows, including the non printable
    # termination character "<NUL>". The termination character is not included in Python
    # strings, giving a max length of 259 characters, + 4 characters for the extended form
    # prefix, to an effective max length 263 characters on a string representing a normal path.
    if len(target) >= 264:
        raise ValueError("Long paths are not supported by Certbot on Windows.")
    # Strip the "\\?\" extended-path prefix.
    return target[4:]
# On Windows is_executable run from an unprivileged shell may claim that a path is
# executable when it is executable only if run from a privileged shell. This result
# is due to the fact that GetEffectiveRightsFromAcl calculate effective rights
# without taking into consideration if the target user has currently required the
# elevated privileges or not. However this is not a problem since certbot always
# requires to be run under a privileged shell, so the user will always benefit
# from the highest (privileged one) set of permissions on a given file.
def is_executable(path: str) -> bool:
    """
    Is path an executable file?

    :param str path: path to test
    :return: True if path is an executable file
    :rtype: bool
    """
    if not POSIX_MODE:
        return _win_is_executable(path)
    return os.path.isfile(path) and os.access(path, os.X_OK)
def has_world_permissions(path: str) -> bool:
    """
    Check if everybody/world has any right (read/write/execute) on a file given its path.

    :param str path: path to test
    :return: True if everybody/world has any right to the file
    :rtype: bool
    """
    if POSIX_MODE:
        world_bits = stat.S_IMODE(os.stat(path).st_mode) & stat.S_IRWXO
        return world_bits != 0

    # Windows: query the effective rights of the well-known "Everyone" SID.
    security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
    dacl = security.GetSecurityDescriptorDacl()
    everyone = win32security.ConvertStringSidToSid('S-1-1-0')
    rights = dacl.GetEffectiveRightsFromAcl({
        'TrusteeForm': win32security.TRUSTEE_IS_SID,
        'TrusteeType': win32security.TRUSTEE_IS_USER,
        'Identifier': everyone,
    })
    return bool(rights)
def compute_private_key_mode(old_key: str, base_mode: int) -> int:
    """
    Calculate the POSIX mode to apply to a private key given the previous private key.

    :param str old_key: path to the previous private key
    :param int base_mode: the minimum modes to apply to a private key
    :return: the POSIX mode to apply
    :rtype: int
    """
    if not POSIX_MODE:
        # On Windows, the mode returned by os.stat is not reliable,
        # so we do not keep any permission from the previous private key.
        return base_mode
    # On Linux, carry over group read/write/execute and world read bits
    # from the previous key on top of the required base mode.
    preserved_bits = stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
    inherited = stat.S_IMODE(os.stat(old_key).st_mode) & preserved_bits
    return base_mode | inherited
def has_same_ownership(path1: str, path2: str) -> bool:
    """
    Return True if the ownership of two files given their respective path is the same.
    On Windows, ownership is checked against owner only, since files do not have a group owner.

    :param str path1: path to the first file
    :param str path2: path to the second file
    :return: True if both files have the same ownership, False otherwise
    :rtype: bool
    """
    if not POSIX_MODE:
        # Compare the owner SIDs of both files.
        owners = []
        for current_path in (path1, path2):
            security = win32security.GetFileSecurity(
                current_path, win32security.OWNER_SECURITY_INFORMATION)
            owners.append(security.GetSecurityDescriptorOwner())
        return owners[0] == owners[1]

    stats1 = os.stat(path1)
    stats2 = os.stat(path2)
    return (stats1.st_uid, stats1.st_gid) == (stats2.st_uid, stats2.st_gid)
def has_min_permissions(path: str, min_mode: int) -> bool:
    """
    Check if a file given its path has at least the permissions defined by the given minimal mode.
    On Windows, group permissions are ignored since files do not have a group owner.

    :param str path: path to the file to check
    :param int min_mode: the minimal permissions expected
    :return: True if the file matches the minimal permissions expectations, False otherwise
    :rtype: bool
    """
    if POSIX_MODE:
        st_mode = os.stat(path).st_mode
        # OR-ing min_mode adds nothing iff every bit of min_mode is already set.
        return st_mode == st_mode | min_mode

    # Resolve symlinks, to get a consistent result with os.stat on Linux,
    # that follows symlinks by default.
    path = realpath(path)

    # Get owner sid of the file
    security = win32security.GetFileSecurity(
        path, win32security.OWNER_SECURITY_INFORMATION | win32security.DACL_SECURITY_INFORMATION)
    user = security.GetSecurityDescriptorOwner()
    dacl = security.GetSecurityDescriptorDacl()
    # Reference DACL describing exactly the minimal expected rights.
    min_dacl = _generate_dacl(user, min_mode)
    for index in range(min_dacl.GetAceCount()):
        min_ace = min_dacl.GetAce(index)
        # On a given ACE, index 0 is the ACE type, 1 is the permission mask, and 2 is the SID.
        # See: http://timgolden.me.uk/pywin32-docs/PyACL__GetAce_meth.html
        mask = min_ace[1]
        user = min_ace[2]
        effective_mask = dacl.GetEffectiveRightsFromAcl({
            'TrusteeForm': win32security.TRUSTEE_IS_SID,
            'TrusteeType': win32security.TRUSTEE_IS_USER,
            'Identifier': user,
        })
        # Every right required by the reference ACE must be effectively granted.
        if effective_mask != effective_mask | mask:
            return False
    return True
def _win_is_executable(path: str) -> bool:
    # Windows backend of is_executable(): check the current user's effective
    # rights on the file against FILE_GENERIC_EXECUTE.
    if not os.path.isfile(path):
        return False
    security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
    dacl = security.GetSecurityDescriptorDacl()
    effective = dacl.GetEffectiveRightsFromAcl({
        'TrusteeForm': win32security.TRUSTEE_IS_SID,
        'TrusteeType': win32security.TRUSTEE_IS_USER,
        'Identifier': _get_current_user(),
    })
    required = ntsecuritycon.FILE_GENERIC_EXECUTE
    return (effective & required) == required
def _apply_win_mode(file_path: str, mode: int) -> None:
    """
    This function converts the given POSIX mode into a Windows ACL list, and applies it to the
    file given its path. If the given path is a symbolic link, it will resolved to apply the
    mode on the targeted file.
    """
    file_path = realpath(file_path)
    # Fetch the owner SID: the generated DACL is expressed relative to the owner.
    descriptor = win32security.GetFileSecurity(
        file_path, win32security.OWNER_SECURITY_INFORMATION)
    owner = descriptor.GetSecurityDescriptorOwner()
    # Build a fresh DACL and install it, overwriting any existing one
    # (inherited permissions included).
    descriptor.SetSecurityDescriptorDacl(1, _generate_dacl(owner, mode), 0)
    win32security.SetFileSecurity(
        file_path, win32security.DACL_SECURITY_INFORMATION, descriptor)
def _generate_dacl(user_sid: Any, mode: int, mask: Optional[int] = None) -> Any:
    # Build a brand new DACL (without inherited ACEs) equivalent to the POSIX mode.
    if mask:
        # Apply the umask: drop the permission bits covered by the mask.
        mode = mode & (0o777 - mask)
    analysis = _analyze_mode(mode)

    # Get standard accounts from "well-known" sid
    # See the list here:
    # https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
    system_sid = win32security.ConvertStringSidToSid('S-1-5-18')
    admins_sid = win32security.ConvertStringSidToSid('S-1-5-32-544')
    world_sid = win32security.ConvertStringSidToSid('S-1-1-0')

    dacl = win32security.ACL()

    # If user is already system or admins, any ACE defined here would be superseded by
    # the full control ACE that will be added after.
    if user_sid not in (system_sid, admins_sid):
        owner_flags = _generate_windows_flags(analysis['user'])
        if owner_flags:
            dacl.AddAccessAllowedAce(win32security.ACL_REVISION, owner_flags, user_sid)

    # Handle everybody rights
    world_flags = _generate_windows_flags(analysis['all'])
    if world_flags:
        dacl.AddAccessAllowedAce(win32security.ACL_REVISION, world_flags, world_sid)

    # SYSTEM and Administrators always receive full control.
    full_control = _generate_windows_flags({'read': True, 'write': True, 'execute': True})
    dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_control, system_sid)
    dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_control, admins_sid)
    return dacl
def _analyze_mode(mode: int) -> Dict[str, Dict[str, int]]:
return {
'user': {
'read': mode & stat.S_IRUSR,
'write': mode & stat.S_IWUSR,
'execute': mode & stat.S_IXUSR,
},
'all': {
'read': mode & stat.S_IROTH,
'write': mode & stat.S_IWOTH,
'execute': mode & stat.S_IXOTH,
},
}
def _copy_win_ownership(src: str, dst: str) -> None:
    # Transfer the owner SID of src (symlinks resolved first) onto dst.
    resolved_src = realpath(src)
    src_security = win32security.GetFileSecurity(
        resolved_src, win32security.OWNER_SECURITY_INFORMATION)
    owner = src_security.GetSecurityDescriptorOwner()
    dst_security = win32security.GetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION)
    # Second parameter indicates, if `False`, that the owner of the file is not provided by some
    # default mechanism, but is explicitly set instead. This is obviously what we are doing here.
    dst_security.SetSecurityDescriptorOwner(owner, False)
    win32security.SetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION, dst_security)
def _copy_win_mode(src: str, dst: str) -> None:
    # Copy the DACL of src (symlinks resolved first) onto dst.
    resolved_src = realpath(src)
    src_security = win32security.GetFileSecurity(
        resolved_src, win32security.DACL_SECURITY_INFORMATION)
    src_dacl = src_security.GetSecurityDescriptorDacl()
    dst_security = win32security.GetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION)
    dst_security.SetSecurityDescriptorDacl(1, src_dacl, 0)
    win32security.SetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION, dst_security)
def _generate_windows_flags(rights_desc: Dict[str, int]) -> int:
    """Translate one POSIX rwx triplet (truthy masks) into a Windows access mask."""
    # Some notes about how each POSIX right is interpreted.
    #
    # For the rights read and execute, we have a pretty bijective relation between
    # POSIX flags and their generic counterparts on Windows, so we use them directly
    # (respectively ntsecuritycon.FILE_GENERIC_READ and ntsecuritycon.FILE_GENERIC_EXECUTE).
    #
    # But ntsecuritycon.FILE_GENERIC_WRITE does not correspond to what one could expect from a
    # write access on Linux: for Windows, FILE_GENERIC_WRITE does not include delete, move or
    # rename. This is something that requires ntsecuritycon.FILE_ALL_ACCESS.
    # So to reproduce the write right as POSIX, we will apply ntsecuritycon.FILE_ALL_ACCESS
    # subtracted of the rights corresponding to POSIX read and POSIX execute.
    #
    # Finally, having read + write + execute gives a ntsecuritycon.FILE_ALL_ACCESS,
    # so a "Full Control" on the file.
    #
    # A complete list of the rights defined on NTFS can be found here:
    # https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2003/cc783530(v=ws.10)#permissions-for-files-and-folders
    flag = 0
    if rights_desc['read']:
        flag = flag | ntsecuritycon.FILE_GENERIC_READ
    if rights_desc['write']:
        # NOTE(review): XOR acts as bit subtraction here only if
        # FILE_GENERIC_READ/EXECUTE are strict subsets of FILE_ALL_ACCESS -
        # presumed true for these ntsecuritycon constants.
        flag = flag | (ntsecuritycon.FILE_ALL_ACCESS
                       ^ ntsecuritycon.FILE_GENERIC_READ
                       ^ ntsecuritycon.FILE_GENERIC_EXECUTE)
    if rights_desc['execute']:
        flag = flag | ntsecuritycon.FILE_GENERIC_EXECUTE
    return flag
def _check_win_mode(file_path: str, mode: int) -> bool:
    """
    Return True if the file's current DACL matches the one that `mode`
    would generate for the file's owner.
    """
    # Operate on the link target, not the symlink itself.
    target = realpath(file_path)
    # Fetch owner and DACL in a single security descriptor read.
    descriptor = win32security.GetFileSecurity(
        target,
        win32security.OWNER_SECURITY_INFORMATION
        | win32security.DACL_SECURITY_INFORMATION)
    current_dacl = descriptor.GetSecurityDescriptorDacl()
    owner = descriptor.GetSecurityDescriptorOwner()
    if not current_dacl:
        # A missing DACL grants full control to everyone: that is not a
        # deterministic permissions set, so it can never match.
        return False
    expected_dacl = _generate_dacl(owner, mode)
    return _compare_dacls(current_dacl, expected_dacl)
def _compare_dacls(dacl1: Any, dacl2: Any) -> bool:
"""
This method compare the two given DACLs to check if they are identical.
Identical means here that they contains the same set of ACEs in the same order.
"""
return ([dacl1.GetAce(index) for index in range(dacl1.GetAceCount())] ==
[dacl2.GetAce(index) for index in range(dacl2.GetAceCount())])
def _get_current_user() -> Any:
    """
    Return the pySID of the account running the current process.
    """
    # The DOMAIN\user string is crafted by hand instead of calling e.g.
    # win32api.GetUserNameEx, because that function returns nonsense values
    # when Certbot is run under NT AUTHORITY\SYSTEM. Instructions to open a
    # shell as the LocalSystem account can be found here:
    # https://blogs.technet.microsoft.com/ben_parker/2010/10/27/how-do-i-run-powershell-execommand-prompt-as-the-localsystem-account-on-windows-7/
    account_name = r"{0}\{1}".format(win32api.GetDomainName(), win32api.GetUserName())
    # LookupAccountName() takes the system name as its first parameter; passing
    # None makes Windows search the machine local accounts first, then the
    # primary domain accounts (if the machine joined a domain), then the
    # trusted domains accounts - the preferred lookup order when no specific
    # mechanism is required.
    # See https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-lookupaccountnamea
    return win32security.LookupAccountName(None, account_name)[0]
|
|
# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from mistral_lib import actions as actions_base
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
WF = """
---
version: '2.0'
wf:
input:
- workflow_input: '__WORKFLOW_INPUT__'
- action_output_length: 0
- action_output_dict: false
- action_error: false
tasks:
task1:
action: my_action
input:
input: '__ACTION_INPUT__'
output_length: <% $.action_output_length %>
output_dict: <% $.action_output_dict %>
error: <% $.action_error %>
publish:
p_var: '__TASK_PUBLISHED__'
"""
class MyAction(actions_base.Action):
    """Test action producing a payload of configurable size, shape and outcome.

    :param input: opaque value stored on the action (part of the action
        interface, hence the builtin-shadowing name).
    :param output_length: number of 'A' characters (or dict entries) produced.
    :param output_dict: if True, produce a dict {0: 'A', ...} instead of a str.
    :param error: if True, return the payload as an error Result.
    """

    def __init__(self, input, output_length, output_dict=False, error=False):
        self.input = input
        self.output_length = output_length
        self.output_dict = output_dict
        self.error = error

    def run(self, context):
        """Build the payload and wrap it in a data or error Result."""
        if self.output_dict:
            result = {i: 'A' for i in range(self.output_length)}
        else:
            # 'A' * n is the idiomatic (and linear-time) way to build the
            # string, vs. joining a generator of single characters.
            result = 'A' * self.output_length

        if self.error:
            return actions_base.Result(error=result)

        return actions_base.Result(data=result)

    def test(self):
        raise NotImplementedError
def generate_workflow(tokens):
    """Return the WF template with every placeholder in `tokens` replaced
    by a 1 KB string, so the corresponding field exceeds a 0 KB size limit.
    """
    # 'A' * n replaces the quadratic join-of-single-chars idiom.
    long_string = 'A' * 1024

    new_wf = WF

    for token in tokens:
        new_wf = new_wf.replace(token, long_string)

    return new_wf
class ExecutionFieldsSizeLimitTest(base.EngineTestCase):
    """Tests for the 'execution_field_size_limit_kb' engine option.

    setUp() pins the limit to 0 KB so that any 1 KB payload produced by
    generate_workflow()/MyAction exceeds it; individual tests then check
    that the engine rejects the oversized field (or accepts it when the
    limit is disabled with -1).
    """

    def setUp(self):
        """Resets the size limit config between tests"""
        super(ExecutionFieldsSizeLimitTest, self).setUp()

        cfg.CONF.set_default(
            'execution_field_size_limit_kb',
            0,
            group='engine'
        )

        self.register_action_class('my_action', MyAction)

    def tearDown(self):
        """Restores the size limit config to default"""
        super(ExecutionFieldsSizeLimitTest, self).tearDown()

        cfg.CONF.set_default(
            'execution_field_size_limit_kb',
            1024,
            group='engine'
        )

    def test_default_limit(self):
        """With the limit disabled (-1) arbitrarily large fields are accepted."""
        cfg.CONF.set_default(
            'execution_field_size_limit_kb',
            -1,
            group='engine'
        )

        # NOTE: the first token previously read '__ACTION_INPUT_' (one
        # underscore short), which left a stray '_' after substitution.
        new_wf = generate_workflow(
            ['__ACTION_INPUT__', '__WORKFLOW_INPUT__', '__TASK_PUBLISHED__']
        )

        wf_service.create_workflows(new_wf)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

    def test_workflow_input_default_value_limit(self):
        """An oversized default value for a workflow input is rejected."""
        new_wf = generate_workflow(['__WORKFLOW_INPUT__'])

        wf_service.create_workflows(new_wf)

        # Start workflow.
        e = self.assertRaises(
            exc.SizeLimitExceededException,
            self.engine.start_workflow,
            'wf'
        )

        self.assertEqual(
            'Field size limit exceeded'
            ' [class=TaskExecution, field=input, size=1KB, limit=0KB]',
            str(e)
        )

    def test_workflow_input_limit(self):
        """An oversized value passed as workflow input is rejected."""
        wf_service.create_workflows(WF)

        # Start workflow.
        e = self.assertRaises(
            exc.SizeLimitExceededException,
            self.engine.start_workflow,
            'wf',
            wf_input={'workflow_input': 'A' * 1024}
        )

        self.assertEqual(
            'Field size limit exceeded'
            ' [class=TaskExecution, field=input, size=1KB, limit=0KB]',
            str(e)
        )

    def test_action_input_limit(self):
        """An oversized action input fails the workflow with a size error."""
        new_wf = generate_workflow(['__ACTION_INPUT__'])

        wf_service.create_workflows(new_wf)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIn(
            "Field size limit exceeded"
            " [class=TaskExecution, field=input, size=1KB, limit=0KB]",
            wf_ex.state_info
        )

    def test_action_output_limit(self):
        """An oversized action output fails the workflow with a size error."""
        wf_service.create_workflows(WF)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={'action_output_length': 1024}
        )

        self.await_workflow_error(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertIn(
            'Field size limit exceeded'
            ' [class=TaskExecution, field=output, size=1KB, limit=0KB]',
            wf_ex.state_info
        )
        self.assertEqual(states.ERROR, wf_ex.state)

    def test_task_published_limit(self):
        """An oversized published variable fails the task with a size error."""
        new_wf = generate_workflow(['__TASK_PUBLISHED__'])

        wf_service.create_workflows(new_wf)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

        self.assertIn(
            'Failed to handle action completion [error=Field size',
            wf_ex.state_info
        )
        self.assertIn('wf=wf, task=task1', wf_ex.state_info)

        task_ex = self._assert_single_item(task_execs, name='task1')

        self.assertIn(
            'Field size limit exceeded'
            ' [class=TaskExecution, field=published, size=1KB, limit=0KB]',
            task_ex.state_info
        )

    def test_workflow_params_limit(self):
        """Oversized workflow parameters (e.g. env) are rejected."""
        wf_service.create_workflows(WF)

        # Start workflow.
        long_string = 'A' * 1024

        e = self.assertRaises(
            exc.SizeLimitExceededException,
            self.engine.start_workflow,
            'wf',
            env={'param': long_string}
        )

        self.assertIn(
            'Field size limit exceeded'
            ' [class=TaskExecution, field=params, size=1KB, limit=0KB]',
            str(e)
        )

    def test_task_execution_state_info_trimmed(self):
        """state_info must be trimmed to fit the 65535-char DB column."""
        # No limit on output, input and other JSON fields.
        cfg.CONF.set_default(
            'execution_field_size_limit_kb',
            -1,
            group='engine'
        )

        wf_service.create_workflows(WF)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={
                'action_output_length': 80000,
                'action_output_dict': True,
                'action_error': True
            }
        )

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = self._assert_single_item(
                wf_ex.task_executions,
                state=states.ERROR
            )

            # "state_info" must be trimmed so that it's not greater than 65535.
            self.assertLess(len(task_ex.state_info), 65536)
            self.assertGreater(len(task_ex.state_info), 65490)
            self.assertLess(len(wf_ex.state_info), 65536)
            self.assertGreater(len(wf_ex.state_info), 65490)

    def test_fail_workflow_no_limit(self):
        """With the limit disabled, a large error output is kept intact."""
        cfg.CONF.set_default(
            'execution_field_size_limit_kb',
            -1,
            group='engine'
        )

        wf_service.create_workflows(WF)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={
                'action_output_length': 10000,
                'action_output_dict': True,
                'action_error': True
            }
        )

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertGreater(len(wf_ex.output['result']), 10000)
|
|
# Copyright 2014 Nervana Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python code to wrap convolution kernels
"""
import numpy as np
import pycuda.driver as drv
from pycuda.compiler import SourceModule
from pycuda.tools import context_dependent_memoize
from neon.backends import kernel_specs
from neon.backends.cuda_templates import _common_round, _ew_types
from math import ceil
from operator import mul
import sys
if sys.version_info >= (3, 0):
from functools import reduce
class KernelGroup(object):
    """Base class for a family of related conv kernels (fprop/bprop/update).

    Holds the compute dtype and the owning backend `lib`, and provides the
    helpers subclasses use to partition the K/C dimension into tile-sized
    launches and to manage the optional batch-sum (bsum) output.
    """
    def __init__(self, lib, dtype):
        # lib: backend object supplying stream and scratch-buffer management.
        self.lib = lib
        self.dtype = dtype
        # dtype string without the byte-order prefix, e.g. 'f4'.
        self.dtype_str = dtype.str[1:]
        # Vector width used by the kernels; narrower dtypes pack more lanes.
        self.vec_size = 4 if dtype.itemsize == 4 else 8
        # Kernel-name prefix encoding the dtype: h=f16, s=f32, f=i16, b=i8.
        if dtype.type is np.float16:
            self.clss = "hconv"
        elif dtype.type is np.float32:
            self.clss = "sconv"
        elif dtype.type is np.int16:
            self.clss = "fconv"
        elif dtype.type is np.int8:
            self.clss = "bconv"
        else:
            raise TypeError("dtype not supported.")
    def __str__(self):
        raise TypeError("please implement __str__ to describe kernel params for logging.")
    def bind_params(self, *args):
        # Subclasses fill the placeholder slots of the prepared launch args.
        raise TypeError("bind_params not implemented.")
    def execute(self, repeat=1, unbind=True):
        raise TypeError("execute not implemented.")
    def init_bsum(self, bsum, flags):
        """Configure batch-sum output; return (bsum_gpudata, updated flags).
        Flag bit 4 tells the kernels to accumulate a batch sum; bsum_zero
        holds memset args used to clear the buffer before each launch.
        """
        flags |= self.flags
        if bsum:
            bsum_gpudata = bsum.gpudata
            self.bsum_zero = [bsum_gpudata, 0, bsum.size, self.lib.stream]
            flags |= 4
        else:
            bsum_gpudata = 0
            self.bsum_zero = 0
            flags &= ~4
        return bsum_gpudata, flags
    def k_partitions(self, K, tiles):
        """Greedily split K into [tile_K, grid_K, offset] launch partitions.
        `tiles` is ordered largest-first; the rounding term uses the smallest
        tile (tiles[-1]), so a larger tile is only selected when the remaining
        k (nearly) fills it — presumably to avoid overshooting K. TODO confirm.
        """
        k = K
        partitions = []
        for tile_K in tiles:
            grid_K = (k + tiles[-1] - 1) // tile_K
            if grid_K > 0:
                partitions.append([tile_K, grid_K, K-k])
                k -= grid_K * tile_K
            if k <= 0:
                break
        return partitions
    def xprop_kernels(self, op, tile_dim, tile_N, grid_N, K, tiles, PQM, RST, args):
        """Build the prepared-launch parameter lists for fprop/bprop kernels.
        Slots [3:11] of each params list are placeholders that bind_params
        later fills with stream, pointers, alpha/beta and flags.
        """
        self.kernels = []
        for tile_K, grid_K, offset_K in self.k_partitions(K, tiles):
            kernel_name = "%s_%s_%s%d_N%d" % (self.clss, op, tile_dim, tile_K, tile_N)
            block = (kernel_specs.kernels[kernel_name]["threads"], 1, 1)
            if RST > 1:
                grid = (PQM, grid_K, grid_N)
            else:
                grid = (grid_K, grid_N, PQM)
            params = [
                kernel_name, grid, block, None,
                None, None, None, None, None, None, None, offset_K]
            params.extend(args)
            self.kernels.append(params)
class FpropDirect(KernelGroup):
    """Forward convolution using precompiled SASS kernels.

    Dim naming (presumed from usage — confirm against the backend's conv
    layer): N=batch, C=input channels, K=output channels, D/H/W=input volume,
    T/R/S=filter volume, M/P/Q=output volume, with per-axis pad/stride.
    """
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w,
                 relu, bsum):
        super(FpropDirect, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        assert K % self.vec_size == 0, "K dim must be multiple of %d" % self.vec_size
        # Use the widest batch tile the batch size can fill.
        tile_N = 128 if N > 64 else 64
        grid_N = _ceil_div(N, tile_N)
        tile_K = (128, 64, 32) if tile_N == 128 else (128, 64)
        # Magic numbers for fast integer division inside the kernels.
        magic_PQ = _magic64(P*Q)
        magic_Q = _magic64(Q)
        magic_RS = _magic32(R*S*T+32, R*S)
        magic_S = _magic32(R*S+32, S)
        self.xprop_kernels(
            "fprop", "K", tile_N, grid_N, K, tile_K, P*Q*M, R*S*T,
            _flatten([N, K, D, H, W, W*N, H*W*N, D*H*W*N,
                      C, K*R*S*T, R*S*T, R*S, magic_RS, S, magic_S,
                      pad_d, pad_h, pad_w, str_d, str_h, str_w,
                      Q, P*Q, Q*N, P*Q*N, M*P*Q*N, magic_Q, magic_PQ]))
        # Shared memory request: presumably two buffers of RST 4-byte entries.
        self.shared = R*S*T * 4 * 2
        # Bit 2 -> fused relu, bit 4 -> batch-sum accumulation.
        self.flags = (relu and 2) + (bsum and 4)
    def bind_params(self, I, F, O, alpha, beta, bsum, flags=0):
        """Fill stream/pointer/scalar slots of every partition's launch args."""
        assert I.dtype == F.dtype == O.dtype
        bsum_gpudata, flags = self.init_bsum(bsum, flags)
        for kernel in self.kernels:
            kernel[3:11] = (self.lib.stream, bsum_gpudata, O.gpudata, I.gpudata, F.gpudata,
                            alpha, beta, flags)
    def execute(self, repeat=1, unbind=True):
        """Launch all partitions `repeat` times; optionally clear bound slots."""
        for r in range(repeat):
            if self.bsum_zero:
                drv.memset_d32_async(*self.bsum_zero)
            for kernel_params in self.kernels:
                kernel = kernel_specs.get_kernel(kernel_params[0])
                kernel.prepared_async_call(*kernel_params[1:], shared_size=self.shared)
        if unbind:
            self.bsum_zero = None
            for kernel_params in self.kernels:
                kernel_params[3:11] = (None,) * 8
    def __str__(self):
        return "FpropDirect " + str([k[0] for k in self.kernels])
class FpropCuda(KernelGroup):
    """Forward convolution using a runtime-compiled CUDA-C kernel (one launch)."""
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w,
                 bsum):
        super(FpropCuda, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        assert K % self.vec_size == 0, "K dim must be multiple of %d" % self.vec_size
        # Magic numbers for fast integer division inside the kernel.
        magic_PQ = _magic64(P*Q)
        magic_Q = _magic64(Q)
        magic_S = _magic32(R*S+32, S)
        HWN = H * W * N
        RST = R * S * T
        KRST = K * RST
        PQ = P * Q
        PQN = PQ * N
        from neon.backends.kernels.cuda.convolution import _get_conv_kernel
        self.kernel = _get_conv_kernel(dtype=self.dtype.str[1:], filter_size=R*S,
                                       bsum=bsum, operation="fprop")
        # -(-x // 32) is ceil(x / 32): one block per 32x32 output tile.
        grid = (PQ * (-(-N // 32)), (-(-K // 32)), 1)
        block = (8, 8, 1)
        static_kernel_args = _flatten([C, D, H, W, N, T, R, S, K, M, P, Q,
                                       str_w, str_h, pad_w, pad_h,
                                       HWN // 4, KRST // 4, PQN // 4,
                                       PQ, 0, 0,
                                       magic_PQ, magic_Q, magic_S])
        # Slots [2:9] (stream, alpha, beta, I, F, O, bsum) are filled by bind_params.
        self.launch_args = [grid, block] + [None] * 7 + static_kernel_args
        self.shared = RST * 4 * 2
        # Bit 4 -> batch-sum accumulation.
        self.flags = (bsum and 4)
    def bind_params(self, I, F, O, alpha, beta, bsum, flags=0):
        """Bind device pointers and scalars into the prepared launch args."""
        assert I.dtype == F.dtype == O.dtype
        bsum_gpudata, flags = self.init_bsum(bsum, flags)
        self.launch_args[2:9] = (self.lib.stream, alpha, beta,
                                 I.gpudata, F.gpudata, O.gpudata, bsum_gpudata)
    def execute(self, repeat=1, unbind=True):
        """Launch the kernel `repeat` times; optionally clear bound slots."""
        for r in range(repeat):
            if self.bsum_zero:
                drv.memset_d32_async(*self.bsum_zero)
            self.kernel.prepared_async_call(*self.launch_args, shared_size=self.shared)
        if unbind:
            self.bsum_zero = None
            self.launch_args[2:9] = (None,) * 7
    def __str__(self):
        return "FpropCuda"
class BpropCuda(KernelGroup):
    """Backward-data convolution using a runtime-compiled CUDA-C kernel.

    Filters are first dim-shuffled from CTRSK to KTRSC layout into a scratch
    buffer, then the bprop kernel consumes the shuffled copy.
    """
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w,
                 bsum):
        super(BpropCuda, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        assert K % self.vec_size == 0, "K dim must be multiple of %d" % self.vec_size
        # Magic numbers for fast integer division inside the kernels.
        magic_HW = _magic64(H*W)
        magic_W = _magic64(W)
        magic_RS = _magic32(R*S*T+32, R*S)
        magic_S = _magic32(R*S+32, S)
        HW = H * W
        HWN = HW * N
        RST = R * S * T
        CRST = C * RST
        PQ = P * Q
        PQN = PQ * N
        self.bsum = bsum
        from neon.backends.kernels.cuda.convolution import _get_conv_kernel
        self.kernel = _get_conv_kernel(dtype=self.dtype.str[1:], filter_size=R*S,
                                       bsum=bsum, operation="bprop")
        # -(-x // 32) is ceil(x / 32): one block per 32x32 output tile.
        grid = (HW * (-(-N // 32)), -(-C // 32), 1)
        block = (8, 8, 1)
        static_kernel_args = _flatten([K, M, P, Q, N, T, R, S, C, D, H, W,
                                       str_w, str_h, pad_w, pad_h,
                                       PQN // 4, CRST // 4, HWN // 4,
                                       HW, 0, 0,
                                       magic_HW, magic_W, magic_S])
        # Slots [2:9] are filled by bind_params.
        self.launch_args = [grid, block] + [None] * 7 + static_kernel_args
        self.shared = R*S*T * 4 * 2
        self.flags = (bsum and 4)
        # generate the kernel args for dim shuffling CTRSK => KTRSC
        shuffle_grid = (_ceil_div(K, 32), _ceil_div(C, 32), R*S*T)
        self.shuffle_size = C*T*R*S*K*dtype.itemsize
        self.shuffle_args = [shuffle_grid, (32, 8, 1), None, None, None]
        self.shuffle_args.extend(_flatten([
            R*S*T*K, R*S*K, S*K, K,
            R*S*T*C, R*S*C, S*C, C,
            R*S, T, R, S, magic_RS, magic_S]))
        lib.set_scratch_size(self.shuffle_size)
    def bind_params(self, I, F, O, alpha, beta, bsum, flags=0):
        """Bind pointers; filters go through a scratch buffer for shuffling."""
        assert I.dtype == F.dtype == O.dtype
        if self.bsum:
            assert bsum is not None, "must use initialized bsum config"
        bsum_gpudata, flags = self.init_bsum(bsum, flags)
        filter_temp = self.lib.scratch_buffer(self.shuffle_size)
        self.shuffle_args[2:5] = (self.lib.stream, filter_temp, F.gpudata)
        self.launch_args[2:9] = (self.lib.stream, alpha, beta,
                                 I.gpudata, filter_temp, O.gpudata, bsum_gpudata)
    def execute(self, repeat=1, unbind=True):
        """Shuffle filters, then launch the bprop kernel, `repeat` times."""
        shuffle_kernel = _get_shuffle_kernel(self.dtype.str[1:])
        for r in range(repeat):
            if self.bsum_zero:
                drv.memset_d32_async(*self.bsum_zero)
            shuffle_kernel.prepared_async_call(*self.shuffle_args)
            self.kernel.prepared_async_call(*self.launch_args, shared_size=self.shared)
        if unbind:
            self.bsum_zero = None
            self.shuffle_args[2:5] = (None,) * 3
            self.launch_args[2:9] = (None,) * 7
    def __str__(self):
        return "BpropCuda"
class UpdateCuda(KernelGroup):
    """Weight-update convolution using a runtime-compiled CUDA-C kernel.

    The update kernel always accumulates in fp32.  When the output tensor O
    is not fp32, results are written to an fp32 scratch buffer and converted
    into O afterwards via _fp_convert().  In deterministic mode the output
    grid is collapsed to a single P/Q block so atomics accumulate in a fixed
    order.
    """
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w):
        super(UpdateCuda, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        HWN = H * W * N
        RS = R * S
        RST = RS * T
        KRST = K * RST
        CRSTK = KRST * C
        PQ = P * Q
        PQN = PQ * N
        magic_S = _magic32(R*S+32, S)
        if lib.deterministic:
            grid_P = 1
            grid_Q = 1
            self.determ = CRSTK
        else:
            grid_P = P
            grid_Q = Q
            self.determ = 0
        pq_blocks = grid_P * grid_Q
        # Magic numbers for fast integer division inside the kernel.
        magic_PQ = _magic64(pq_blocks)
        magic_Q = _magic64(grid_Q)
        from neon.backends.kernels.cuda.convolution import _get_conv_kernel
        self.kernel = _get_conv_kernel(dtype=self.dtype.str[1:], filter_size=R*S,
                                       bsum=False, operation="update")
        # -(-x // 32) is ceil(x / 32).
        grid = (pq_blocks * (-(-K // 32)), (-(-(C*RS) // 32)), 1)
        block = (8, 32, 1)
        static_kernel_args = _flatten([C, D, H, W, N, T, R, S, K, M, P, Q,
                                       str_w, str_h, pad_w, pad_h,
                                       HWN // 4, KRST // 4, PQN // 4,
                                       PQ, grid_P, grid_Q,
                                       magic_PQ, magic_Q, magic_S])
        # Slots [2:9] are filled by bind_params.
        self.launch_args = [grid, block] + [None] * 7 + static_kernel_args
        lib.set_scratch_size((self.determ or C*T*R*S*K)*4)
    def update_grid(self, kernel_name, base_blocks, P, Q, SM_count):
        # NOTE(review): identical to UpdateDirect.update_grid and appears
        # unused in this class (__init__ builds its grid directly); candidate
        # to hoist into KernelGroup or remove.
        threads = kernel_specs.kernels[kernel_name]["threads"]
        occupancy = kernel_specs.kernels[kernel_name]["occupancy"]
        # warps per scheduler for one block
        occ_per_block = threads / (32.0 * 4.0 * SM_count)
        grid = []
        for p in range(1, P+1):
            for q in range(1, Q+1):
                occup = p*q*base_blocks * occ_per_block
                groups = occup / occupancy
                slots = ceil(groups)
                # This is a heuristic that keeps the balance of work accross the SMs
                # while also maximizing the work that each block does
                heuristic = min(abs(x - slots) for x in range(4, 8)) + (slots - groups) / 100.0
                grid.append((p, q, heuristic))
        grid.sort(key=lambda x: x[-1])
        return (grid[0][0], grid[0][1], threads)
    def bind_params(self, I, E, O, alpha):
        """Bind inputs/deltas and route the kernel output to O or fp32 scratch."""
        assert I.dtype == E.dtype
        if O.dtype.type is not np.float32:
            update_temp = self.lib.scratch_buffer((self.determ or O.size)*4)
            self.convert_args = [update_temp, "f4", O, False]
        else:
            update_temp = O.gpudata
            self.convert_args = False
        self.zero_args = [update_temp, 0, O.size, self.lib.stream]
        beta = 0.0
        bsum_gpudata = 0
        # BUG FIX: the kernel must write into update_temp (the fp32 scratch
        # buffer on the non-fp32 path, O.gpudata otherwise).  Passing O.gpudata
        # unconditionally left the scratch buffer zeroed-but-unwritten, so
        # _fp_convert would overwrite O with zeros.
        self.launch_args[2:9] = (self.lib.stream, alpha, beta,
                                 I.gpudata, E.gpudata, update_temp, bsum_gpudata)
    def execute(self, repeat=1, unbind=True):
        """Zero the accumulator, launch, then convert to O's dtype if needed."""
        for r in range(repeat):
            drv.memset_d32_async(*self.zero_args)
            self.kernel.prepared_async_call(*self.launch_args)
            if self.convert_args:
                _fp_convert(*self.convert_args)
        if unbind:
            self.zero_args = self.convert_args = None
            self.launch_args[2:9] = (None,) * 7
    def __str__(self):
        return "UpdateCuda"
class BpropDirect(KernelGroup):
    """Backward-data convolution using precompiled SASS kernels.

    Filters are dim-shuffled CTRSK => KTRSC into a scratch buffer before the
    bprop kernels run.
    """
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w,
                 relu, bsum):
        super(BpropDirect, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        assert C % self.vec_size == 0, "C dim must be multiple of %d" % self.vec_size
        # Use the widest batch tile the batch size can fill.
        tile_N = 128 if N > 64 else 64
        grid_N = _ceil_div(N, tile_N)
        tile_C = (128, 64, 32) if tile_N == 128 else (128, 64)
        # Magic numbers for fast integer division inside the kernels.
        magic_HW = _magic64(H*W)
        magic_W = _magic64(W)
        magic_RS = _magic32(R*S*T+32, R*S)
        magic_S = _magic32(R*S+32, S)
        # Magic division by the strides (nmax bounded by dim + filter extent).
        magic_str_w = _magic32(W + S, str_w)
        magic_str_h = _magic32(H + R, str_h)
        magic_str_d = _magic32(D + T, str_d)
        self.xprop_kernels(
            "bprop", "C", tile_N, grid_N, C, tile_C, D*H*W, R*S*T,
            _flatten([N, C, M, P, Q, Q*N, P*Q*N, M*P*Q*N,
                      K, C*R*S*T, R*S*T, R*S, magic_RS, S, magic_S,
                      pad_d, pad_h, pad_w, str_d, str_h, str_w,
                      W, H*W, W*N, H*W*N, D*H*W*N, magic_W, magic_HW,
                      R, T, magic_str_w, magic_str_h, magic_str_d]))
        # Shared memory request: presumably two buffers of RST 4-byte entries.
        self.shared = R*S*T * 4 * 2
        # Bit 2 -> fused relu, bit 4 -> batch-sum accumulation.
        self.flags = (relu and 2) + (bsum and 4)
        # generate the kernel args for dim shuffling CTRSK => KTRSC
        shuffle_grid = (_ceil_div(K, 32), _ceil_div(C, 32), R*S*T)
        self.shuffle_size = C*T*R*S*K*dtype.itemsize
        self.shuffle_args = [shuffle_grid, (32, 8, 1), None, None, None]
        self.shuffle_args.extend(_flatten([
            R*S*T*K, R*S*K, S*K, K,
            R*S*T*C, R*S*C, S*C, C,
            R*S, T, R, S, magic_RS, magic_S]))
        lib.set_scratch_size(self.shuffle_size)
    def bind_params(self, I, F, O, alpha, beta, bsum, flags=0):
        """Bind pointers; filters go through a scratch buffer for shuffling."""
        assert I.dtype == F.dtype == O.dtype
        bsum_gpudata, flags = self.init_bsum(bsum, flags)
        filter_temp = self.lib.scratch_buffer(self.shuffle_size)
        self.shuffle_args[2:5] = (self.lib.stream, filter_temp, F.gpudata)
        for kernel in self.kernels:
            kernel[3:11] = (self.lib.stream, bsum_gpudata, O.gpudata, I.gpudata, filter_temp,
                            alpha, beta, flags)
    def execute(self, repeat=1, unbind=True):
        """Shuffle filters, then launch all partitions, `repeat` times."""
        shuffle_kernel = _get_shuffle_kernel(self.dtype_str)
        for r in range(repeat):
            if self.bsum_zero:
                drv.memset_d32_async(*self.bsum_zero)
            shuffle_kernel.prepared_async_call(*self.shuffle_args)
            for kernel_params in self.kernels:
                kernel = kernel_specs.get_kernel(kernel_params[0])
                kernel.prepared_async_call(*kernel_params[1:], shared_size=self.shared)
        if unbind:
            self.bsum_zero = None
            self.shuffle_args[2:5] = (None,) * 3
            for kernel_params in self.kernels:
                kernel_params[3:11] = (None,) * 8
    def __str__(self):
        return "BpropDirect " + str([k[0] for k in self.kernels])
class BpropDirectSmallC(KernelGroup):
    """Backward-data convolution specialized for very small C (e.g. deconv
    into the first layer).  Uses a single C1_N64 kernel that accumulates with
    atomic adds, so the output is zeroed (or beta pre-applied) beforehand.
    """
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w):
        super(BpropDirectSmallC, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        # Magic numbers for fast integer division inside the kernel.
        magic_PQ = _magic64(P*Q)
        magic_Q = _magic64(Q)
        magic_RST = _magic32(C*R*S*T, R*S*T)
        magic_RS = _magic32(R*S*T+32, R*S)
        magic_S = _magic32(R*S+32, S)
        # special kernel for deconv into first layer
        kernel_name = "%s_bprop_C1_N64" % self.clss
        grid = (P*Q*M, _ceil_div(C*R*S*T, 32), _ceil_div(N, 64))
        block = (32, 1, 1)
        # Slots [3:8] are placeholders filled by bind_params.
        self.kernel = [kernel_name, grid, block, None, None, None, None, None]
        self.kernel.extend(_flatten([
            N, K, D, H, W, W*N, H*W*N, D*H*W*N,
            C, C*R*S*T, R*S*T, magic_RST, R*S, magic_RS, S, magic_S,
            pad_d, pad_h, pad_w, str_d, str_h, str_w,
            Q, P*Q, Q*N, P*Q*N, M*P*Q*N, magic_Q, magic_PQ,
            C*R*S*T*8*dtype.itemsize, M*P*Q*N*8*dtype.itemsize]))
        # generate the kernel args for transpose CRST,K => K,CRST
        shuffle_grid = (_ceil_div(K, 32), _ceil_div(C*R*S*T, 32), 1)
        self.shuffle_size = K*T*R*S*C*dtype.itemsize
        self.shuffle_args = [shuffle_grid, (32, 8, 1), None, None, None, C*R*S*T, K]
        # Byte count of the output volume to memset before accumulation.
        self.zero = C*D*H*W*N * dtype.itemsize
        lib.set_scratch_size(self.shuffle_size)
    def bind_params(self, I, F, O, alpha, beta, bsum, flags=0):
        """Bind pointers; beta is pre-applied to O since the kernel only adds."""
        assert I.dtype == F.dtype == O.dtype
        if beta and beta != 1.0:
            O[:] = O * beta  # pre-apply beta
        self.beta = beta
        self.zero_args = [O.gpudata, 0, self.zero, self.lib.stream]
        filter_temp = self.lib.scratch_buffer(self.shuffle_size)
        self.shuffle_args[2:5] = (self.lib.stream, filter_temp, F.gpudata)
        self.kernel[3:8] = (self.lib.stream, O.gpudata, I.gpudata, filter_temp, alpha)
    def execute(self, repeat=1, unbind=True):
        """Zero O (unless beta), transpose filters, launch, `repeat` times."""
        shuffle_kernel = _get_transpose_kernel(self.dtype_str)
        kernel = kernel_specs.get_kernel(self.kernel[0])
        for r in range(repeat):
            # let atomic adds accumulate on top
            if not self.beta:
                drv.memset_d8_async(*self.zero_args)
            shuffle_kernel.prepared_async_call(*self.shuffle_args)
            kernel.prepared_async_call(*self.kernel[1:])
        if unbind:
            self.zero_args = None
            self.shuffle_args[2:5] = (None,) * 3
            self.kernel[3:8] = (None,) * 5
    def __str__(self):
        return "BpropDirectSmallC " + str(self.kernel[0])
class UpdateDirect(KernelGroup):
    """Weight-update convolution using precompiled SASS kernels.

    In deterministic mode each K partition writes its own fp32 copy of the
    gradient (grid_P = grid_Q = 1 and a per-block scratch region), which is
    later reduced and converted by _fp_convert().
    """
    def __init__(self, lib, dtype,
                 N, C, K,
                 D, H, W,
                 T, R, S,
                 M, P, Q,
                 pad_d, pad_h, pad_w,
                 str_d, str_h, str_w):
        super(UpdateDirect, self).__init__(lib, dtype)
        assert N % 32 == 0, "N dim must be multiple of 32"
        # Magic numbers for fast integer division inside the kernels.
        magic_RST = _magic32(C*R*S*T, R*S*T)
        magic_RS = _magic32(R*S*T+32, R*S)
        magic_S = _magic32(R*S+32, S)
        grid_C = _ceil_div(C*R*S*T, 128)
        sm_count = _get_sm_count()
        # in float32 for big feature_map layers the smaller tile is actually faster
        # so restrict tile selection to just that.
        if dtype.type is np.float32 and P*Q > 56*56:
            K_tiles = (64,)
        else:
            K_tiles = (128, 64)
        if lib.deterministic:
            determ = "D"
            # Deterministic kernels come in a single tile size; scratch holds
            # one full fp32 gradient copy per output block (scaled below).
            if K <= 64:
                K_tiles = (64,)
            else:
                K_tiles = K_tiles[0:1]
            self.determ = C*T*R*S*K
        else:
            determ = ""
            self.determ = 0
        self.kernels = []
        for tile_K, grid_K, offset_K in self.k_partitions(K, K_tiles):
            kernel_name = "%s_updat%s_C128_K%d" % (self.clss, determ, tile_K)
            base_blocks = M*grid_C*grid_K
            grid_P, grid_Q, threads = self.update_grid(kernel_name, base_blocks, P, Q, sm_count)
            # print grid_P, grid_Q
            grid_PQ = grid_P * grid_Q
            magic_PQu = _magic64(grid_PQ)
            magic_Qu = _magic64(grid_Q)
            block = (threads, 1, 1)
            if R*S*T > 1:
                grid = (M*grid_PQ, grid_C, grid_K)
            else:
                grid = (grid_C, grid_K, M*grid_PQ)
            # NOTE(review): scaling inside the loop presumes a single
            # partition in deterministic mode (K_tiles has length 1) — confirm.
            self.determ *= M*grid_PQ
            self.determ_shape = (M*grid_PQ, C*T*R*S*K)
            # Slots [3:8] are placeholders filled by bind_params.
            kernel = [kernel_name, grid, block, None, None, None, None, None]
            kernel.extend(_flatten([
                offset_K, N, K, D, H, W, W*N, H*W*N, D*H*W*N,
                C, C*R*S*T, R*S*T, magic_RST, R*S, magic_RS, S, magic_S,
                pad_d, pad_h, pad_w, str_d, str_h, str_w,
                P, Q, P*Q, Q*N, P*Q*N, M*P*Q*N, magic_Qu, magic_PQu,
                grid_P, grid_Q, grid_PQ, C*R*S*T*K]))
            self.kernels.append(kernel)
        lib.set_scratch_size((self.determ or C*T*R*S*K)*4)
    def update_grid(self, kernel_name, base_blocks, P, Q, SM_count):
        """Pick (grid_P, grid_Q, threads) balancing SM occupancy vs work per block."""
        threads = kernel_specs.kernels[kernel_name]["threads"]
        occupancy = kernel_specs.kernels[kernel_name]["occupancy"]
        # warps per scheduler for one block
        occ_per_block = threads / (32.0 * 4.0 * SM_count)
        grid = []
        for p in range(1, P+1):
            for q in range(1, Q+1):
                occup = p*q*base_blocks * occ_per_block
                groups = occup / occupancy
                slots = ceil(groups)
                # This is a heuristic that keeps the balance of work accross the SMs
                # while also maximizing the work that each block does
                heuristic = min(abs(x - slots) for x in range(4, 8)) + (slots - groups) / 100.0
                grid.append((p, q, heuristic))
        grid.sort(key=lambda x: x[-1])
        return (grid[0][0], grid[0][1], threads)
    def bind_params(self, I, E, O, alpha):
        """Bind inputs/deltas; non-fp32 or deterministic output goes via scratch."""
        assert I.dtype == E.dtype
        if O.dtype.type is not np.float32 or self.determ:
            update_temp = self.lib.scratch_buffer((self.determ or O.size)*4)
            self.convert_args = [update_temp, "f4", O, False]
            if self.determ:
                # Reduce the per-block copies over axis 0 during conversion.
                self.convert_args[3] = self.determ_shape
        else:
            update_temp = O.gpudata
            self.convert_args = False
        self.zero_args = [update_temp, 0, O.size, self.lib.stream]
        for kernel in self.kernels:
            kernel[3:8] = (self.lib.stream, update_temp, I.gpudata, E.gpudata, alpha)
    def execute(self, repeat=1, unbind=True):
        """Zero the accumulator (non-determ), launch all partitions, convert."""
        for r in range(repeat):
            if not self.determ:
                drv.memset_d32_async(*self.zero_args)
            for kernel_params in self.kernels:
                kernel = kernel_specs.get_kernel(kernel_params[0])
                kernel.prepared_async_call(*kernel_params[1:])
            if self.convert_args:
                _fp_convert(*self.convert_args)
        if unbind:
            self.zero_args = self.convert_args = None
            for kernel_params in self.kernels:
                kernel_params[3:8] = (None,) * 5
    def __str__(self):
        return "UpdateDirect " + str([k[0] for k in self.kernels])
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 32 bits
# Shamelessly pulled directly from:
# http://www.hackersdelight.org/hdcodetxt/magicgu.py.txt
def _magic32(nmax, d):
nc = ((nmax + 1) // d) * d - 1
nbits = len(bin(nmax)) - 2
for p in range(0, 2 * nbits + 1):
if 2 ** p > nc * (d - 1 - (2 ** p - 1) % d):
m = (2 ** p + d - 1 - (2 ** p - 1) % d) // d
return (m, p)
raise ValueError("Can't find magic number for division")
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 64 bits and the shift
# lops off the lower 32 bits
def _magic64(d):
    """Return (magic, shift) for 64-bit magic division by d, where the
    shift additionally lops off the lower 32 bits of the product.
    """
    # d == 3 is the one divisor whose magic only lands entirely in the high
    # bits when nmax is 0xffffffff; using 0xffffffff for every divisor would
    # produce 33-bit magic numbers for some of them.
    nmax = 0xffffffff if d == 3 else 0x7fffffff
    magic, shift = _magic32(nmax, d)
    return (magic, shift if magic == 1 else shift - 32)
# flatten a nested list of lists or values
def _flatten(lst):
return sum(([x] if not isinstance(x, (list, tuple))
else _flatten(x) for x in lst), [])
def _ceil_div(x, y):
return -(-x // y)
def _closest_divisor(val, div, maxdiv=8):
return -sorted([(abs(i-div), -i) for i in range(1, maxdiv) if val % i == 0])[0][1]
@context_dependent_memoize
def _get_sm_count():
    """Return the streaming-multiprocessor count of the current CUDA device
    (memoized per context)."""
    attributes = drv.Context.get_device().get_attributes()
    return attributes[drv.device_attribute.MULTIPROCESSOR_COUNT]
def _fp_convert(src_data, src_type, dest_tensor, reduce_shape):
    """Convert raw scratch results of dtype `src_type` into `dest_tensor`.

    With `reduce_shape` given as (rows, cols) — the deterministic-update path —
    the scratch buffer is first summed over axis 0 by the reduce kernel (which
    also converts to the destination dtype).  Otherwise a compound elementwise
    kernel performs a straight cast/copy.
    """
    if reduce_shape:
        kernel = _get_reduce_kernel(dest_tensor.dtype.str[1:])
        blocks = _ceil_div(reduce_shape[1], 32)
        kernel.prepared_async_call((blocks, 1, 1), (32, 1, 1),
                                   dest_tensor.backend.stream,
                                   dest_tensor.gpudata,
                                   src_data,
                                   reduce_shape[1],
                                   reduce_shape[0]*reduce_shape[1])
    else:
        # Imported lazily to avoid a circular import at module load time
        # (presumed — confirm against neon.backends package layout).
        from neon.backends.nervanagpu import GPUTensor
        from neon.backends.float_ew import _get_compound_kernel, _get_fast_ew_dims
        # quick wrapper to convert raw fp32 scratch data to a destination tensor
        shape, strides = _get_fast_ew_dims(dest_tensor.size)
        kernel_args = [0,
                       dest_tensor.gpudata, strides[0], strides[1],
                       src_data, strides[0], strides[1],
                       shape[1]]
        kernel = _get_compound_kernel((
            (GPUTensor, 0, dest_tensor.dtype.str[1:], 0, False),
            (GPUTensor, 1, src_type, 0, False),
            ('assign', 0, False, 32)),
            dest_tensor.backend.compute_capability)
        kernel.prepared_async_call((shape[0], 1, 1),
                                   (32, 1, 1),
                                   dest_tensor.backend.stream,
                                   *kernel_args)
# fast axis=0 reduction kernel used for deterministic update
@context_dependent_memoize
def _get_reduce_kernel(dtype):
    """Compile (memoized per context) the axis-0 sum kernel which also
    converts the fp32 partial sums to `dtype` ('f2', 'f4' or 'x2') on output.
    """
    _reduce_kernel = r"""
%(common)s
__global__ void reduce(%(type)s* out, const float* in, int CRSTK, int PQCRSTK)
{
    int offset = blockIdx.x * 32 + threadIdx.x;
    if (offset < CRSTK)
    {
        float sum = 0.0f;
        for (int i = offset; i < PQCRSTK; i += CRSTK)
        {
            sum += __ldg(in + i);
        }
        out[offset] = %(cvt_out)s(sum);
    }
}
"""
    template_vals = {
        "common": _common_round["nearest"].get(dtype, ""),
        "type": _ew_types[dtype]["type"],
    }
    # Pick the fp32 -> dtype conversion intrinsic spliced into the template.
    if dtype == "f2":
        template_vals["cvt_out"] = "fp32_to_fp16"
    elif dtype == "f4":
        template_vals["cvt_out"] = ""
    elif dtype == "x2":
        template_vals["cvt_out"] = "fp32_to_int16"
    else:
        raise TypeError("Missing reduction type")
    code = _reduce_kernel % template_vals
    module = SourceModule(code)
    kernel = module.get_function("reduce")
    kernel.prepare("PPII")
    return kernel
@context_dependent_memoize
def _get_transpose_kernel(dtype):
    """Build (and memoize per context) a tiled 2d transpose kernel.

    Loads a 32x32 tile through shared memory (33-wide rows, presumably to
    avoid shared-memory bank conflicts -- TODO confirm) with a 32x8 thread
    block; each thread covers 4 rows of the tile via the j loop.
    Returns a prepared kernel with signature "PPII" (out, in, rows, cols).
    """
    _transpose_kernel = r"""
__global__ void transpose(%(type)s* out, const %(type)s* in, int rows, int cols)
{
    __shared__ %(type)s tile[32][33];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int gx = bx * 32 + tx;
    int gy = by * 32 + ty;
    for (int j = 0; j < 32; j += 8)
    {
        int gy8 = gy + j;
        if (gy8 < rows && gx < cols)
            tile[ty + j][tx] = in[gy8*cols + gx];
    }
    __syncthreads();
    gx = by * 32 + tx;
    gy = bx * 32 + ty;
    for (int j = 0; j < 32; j += 8)
    {
        int gy8 = gy + j;
        if (gy8 < cols && gx < rows)
            out[gy8*rows + gx] = tile[tx][ty + j];
    }
}
"""
    code = _transpose_kernel % _ew_types[dtype]
    module = SourceModule(code)
    kernel = module.get_function("transpose")
    kernel.prepare("PPII")
    return kernel
@context_dependent_memoize
def _get_shuffle_kernel(dtype):
    """Build (and memoize per context) a filter dim-shuffle kernel.

    The kernel swaps the C and K dimensions through a shared-memory tile
    while reversing the T, R and S coordinates (the "Mirror RST" step),
    i.e. in[c, t, r, s, k] -> out[k, T-1-t, R-1-r, S-1-s, c].
    t/r/s are unpacked from the flat blockIdx.z index via magic-number
    division (magic_RS/shift_RS, magic_S/shift_S supplied by the caller).
    Returns a prepared kernel with signature "PP" + 16 ints.
    """
    _shuffle_kernel = r"""
__global__ void dimShuffle(
    %(type)s* out, const %(type)s* in,
    int TRSK, int RSK, int SK, int K,
    int TRSC, int RSC, int SC, int C,
    int RS, int T, int R, int S,
    int magic_RS, int shift_RS,
    int magic_S, int shift_S)
{
    __shared__ %(type)s tile[32][33];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bk = blockIdx.x;
    int bc = blockIdx.y;
    int trs = blockIdx.z;
    int k = bk * 32 + tx;
    int c = bc * 32 + ty;
    int t = magic_RS * trs; t >>= shift_RS;
    int rs = trs - t*RS;
    int r = magic_S * rs; r >>= shift_S;
    int s = rs - r*S;
    for (int j = 0; j < 32; j += 8)
    {
        int cj = c + j;
        if (cj < C && k < K)
            tile[ty + j][tx] = in[ cj*TRSK + t*RSK + r*SK + s*K + k ];
    }
    __syncthreads();
    k = bk * 32 + ty;
    c = bc * 32 + tx;
    // Mirror RST
    s = S - s - 1;
    r = R - r - 1;
    t = T - t - 1;
    for (int i = 0; i < 32; i += 8)
    {
        int ki = k + i;
        if (ki < K && c < C)
            out[ ki*TRSC + t*RSC + r*SC + s*C + c ] = tile[tx][ty + i];
    }
}
"""
    code = _shuffle_kernel % _ew_types[dtype]
    module = SourceModule(code)
    kernel = module.get_function("dimShuffle")
    kernel.prepare("PPIIIIIIIIIIIIIIII")
    return kernel
@context_dependent_memoize
def _get_copy_transpose_kernel(dtype, shape, axes=None):
    """Generate a CUDA kernel copying a tensor while permuting its axes.

    dtype: numpy-style dtype string; only dtype[1:] (the type code) is used.
    shape: source shape tuple.
    axes: destination axis permutation.
    Returns a prepared kernel with .grid, .block and .args attached; the
    stride parameter values are appended externally by the caller (only the
    dim/magic/shift/div values are pre-filled in .args).
    NOTE(review): relies on module-level ``reduce``/``mul`` and ``_magic64``
    (python2-era imports defined elsewhere in this module) -- confirm when
    porting.
    """
    src = range(len(shape))
    dst = list(axes)
    # innermost (contiguous) dims of source and destination layouts
    src_dim = src[-1]
    dst_dim = dst[-1]
    # If the inner dim is the same for both, no need for shared memory tile
    # Then map the outer source dim to the threadIdx.y values
    if src_dim == dst_dim:
        dst_dim = src[0]
        shared_tile = False
    else:
        shared_tile = True
    src_offset = []
    dst_offset = []
    params = []
    values = []
    magic = ""
    # add dims for bounds checking
    for dim in (src_dim, dst_dim):
        params.append("int dim_%s" % dim)
        values.append(shape[dim])
    # collapse src and dst shape by 32
    grid_shape = list(shape)
    grid_shape[src_dim] = _ceil_div(shape[src_dim], 32)
    grid_shape[dst_dim] = _ceil_div(shape[dst_dim], 32)
    # get a src list without dst dim
    src2 = [s for s in src if s != dst_dim]
    # get the name of the first compound index
    blkx_name = compound_idx = "".join(str(x) for x in src2)
    # generate the magic number math to extract all indeces
    # (each pass splits the leading dim off the compound blockIdx.x index)
    while len(src2) > 1:
        idx1 = src2[0]
        del src2[0]
        idx2 = "".join(str(i) for i in src2)
        div = reduce(mul, (grid_shape[i] for i in src2), 1)
        params.extend(p % idx2 for p in ("int magic_%s", "int shift_%s", "int div_%s"))
        values.extend(_magic64(div))
        values.append(div)
        magic += r"""
    int idx_{1} = div64(idx_{0}, magic_{2}, shift_{2});
    int idx_{2} = idx_{0} - idx_{1}*div_{2};
""".format(compound_idx, idx1, idx2)
        compound_idx = idx2
    # Add params for src strides and generate src offset
    # The param values will be added externally
    for s in src:
        params.append("int src_str_%d" % s)
        src_offset.append("src_str_%d*idx_%d" % (s, s))
    # Add params for dst strides and generate dst offset
    for d in dst:
        params.append("int dst_str_%d" % d)
        dst_offset.append("dst_str_%d*idx_%d" % (d, d))
    # integer division via magic-number multiply (see _magic64 above)
    div64 = r"""
__device__ __forceinline__ int div64(int value, int magic, int shift)
{
    // if the divisor is a power of 2 the magic will be 1 and it's just a simple right shift
    // Otherwise multiply by magic and right shift just the high bits
    int result;
    asm("{\n\t"
        ".reg .pred p;\n\t"
        ".reg .u64 res64;\n\t"
        ".reg .u32 lo32, hi32;\n\t"
        "setp.ne.s32 p, %2, 1;\n\t"
        "mul.wide.u32 res64, %1, %2;\n\t"
        "mov.b64 {lo32, hi32}, res64;\n\t"
        "selp.u32 hi32, hi32, %1, p;\n\t"
        "shr.u32 %0, hi32, %3;\n\t"
        "}" : "=r"(result) : "r"(value), "r"(magic), "r"(shift));
    return result;
}
"""
    if shared_tile:
        # inner dims differ: stage a 32x32 tile through shared memory so
        # both the load and the store stay coalesced on their inner dim
        copy_transpose = r"""
%(common)s
__global__ void copy_transpose(%(type)s* out, const %(type)s* in, %(params)s)
{
    __shared__ %(type)s tile[32][33];
    int tid_x = threadIdx.x;
    int tid_y = threadIdx.y;
    int idx_%(blk)s = blockIdx.x;
    int idx_%(dst)s = blockIdx.y;
    %(magic)s
    idx_%(src)s = (idx_%(src)s << 5) + tid_x;
    idx_%(dst)s = (idx_%(dst)s << 5) + tid_y;
    const %(type)s* in00 = in + %(src_offset)s;
    const %(type)s* in08 = in00 + src_str_%(dst)s*8;
    const %(type)s* in16 = in08 + src_str_%(dst)s*8;
    const %(type)s* in24 = in16 + src_str_%(dst)s*8;
    bool b%(src)s = idx_%(src)s < dim_%(src)s;
    if (idx_%(dst)s +  0 < dim_%(dst)s && b%(src)s) tile[tid_y +  0][tid_x] = *in00;
    if (idx_%(dst)s +  8 < dim_%(dst)s && b%(src)s) tile[tid_y +  8][tid_x] = *in08;
    if (idx_%(dst)s + 16 < dim_%(dst)s && b%(src)s) tile[tid_y + 16][tid_x] = *in16;
    if (idx_%(dst)s + 24 < dim_%(dst)s && b%(src)s) tile[tid_y + 24][tid_x] = *in24;
    __syncthreads();
    %(type)s val00 = tile[tid_x][tid_y +  0];
    %(type)s val08 = tile[tid_x][tid_y +  8];
    %(type)s val16 = tile[tid_x][tid_y + 16];
    %(type)s val24 = tile[tid_x][tid_y + 24];
    idx_%(src)s += tid_y - tid_x;
    idx_%(dst)s += tid_x - tid_y;
    bool b%(dst)s = idx_%(dst)s < dim_%(dst)s;
    %(type)s* out00 = out + %(dst_offset)s;
    %(type)s* out08 = out00 + dst_str_%(src)s*8;
    %(type)s* out16 = out08 + dst_str_%(src)s*8;
    %(type)s* out24 = out16 + dst_str_%(src)s*8;
    if (idx_%(src)s +  0 < dim_%(src)s && b%(dst)s) *out00 = val00;
    if (idx_%(src)s +  8 < dim_%(src)s && b%(dst)s) *out08 = val08;
    if (idx_%(src)s + 16 < dim_%(src)s && b%(dst)s) *out16 = val16;
    if (idx_%(src)s + 24 < dim_%(src)s && b%(dst)s) *out24 = val24;
}
"""
    else:
        # same inner dim on both sides: plain strided copy, no tile needed
        copy_transpose = r"""
%(common)s
__global__ void copy_transpose(%(type)s* out, const %(type)s* in, %(params)s)
{
    int tid_x = threadIdx.x;
    int tid_y = threadIdx.y;
    int idx_%(blk)s = blockIdx.x;
    int idx_%(dst)s = blockIdx.y;
    %(magic)s
    idx_%(src)s = (idx_%(src)s << 5) + tid_x;
    idx_%(dst)s = (idx_%(dst)s << 5) + tid_y;
    bool b%(src)s = idx_%(src)s < dim_%(src)s;
    bool b%(dst)s_00 = idx_%(dst)s +  0 < dim_%(dst)s && b%(src)s;
    bool b%(dst)s_08 = idx_%(dst)s +  8 < dim_%(dst)s && b%(src)s;
    bool b%(dst)s_16 = idx_%(dst)s + 16 < dim_%(dst)s && b%(src)s;
    bool b%(dst)s_24 = idx_%(dst)s + 24 < dim_%(dst)s && b%(src)s;
    %(type)s val00 = 0;
    %(type)s val08 = 0;
    %(type)s val16 = 0;
    %(type)s val24 = 0;
    const %(type)s* in00 = in + %(src_offset)s;
    const %(type)s* in08 = in00 + src_str_%(dst)s*8;
    const %(type)s* in16 = in08 + src_str_%(dst)s*8;
    const %(type)s* in24 = in16 + src_str_%(dst)s*8;
    if (b%(dst)s_00) val00 = *in00;
    if (b%(dst)s_08) val08 = *in08;
    if (b%(dst)s_16) val16 = *in16;
    if (b%(dst)s_24) val24 = *in24;
    %(type)s* out00 = out + %(dst_offset)s;
    %(type)s* out08 = out00 + dst_str_%(dst)s*8;
    %(type)s* out16 = out08 + dst_str_%(dst)s*8;
    %(type)s* out24 = out16 + dst_str_%(dst)s*8;
    if (b%(dst)s_00) *out00 = val00;
    if (b%(dst)s_08) *out08 = val08;
    if (b%(dst)s_16) *out16 = val16;
    if (b%(dst)s_24) *out24 = val24;
}
"""
    code = copy_transpose % dict(
        common=div64,
        type=_ew_types[dtype[1:]]["type"],
        params=", ".join(params),
        blk=blkx_name,
        src=src_dim,
        dst=dst_dim,
        magic=magic,
        src_offset=" + ".join(src_offset),
        dst_offset=" + ".join(dst_offset)
    )
    # print code
    module = SourceModule(code)
    kernel = module.get_function("copy_transpose")
    kernel.prepare("PP" + "I"*len(params))
    # grid x covers the compound (all-but-dst) index, y the dst tile index
    grid_x = grid_shape[src_dim]
    grid_y = grid_shape[dst_dim]
    for s in src:
        if s not in (src_dim, dst_dim):
            grid_x *= grid_shape[s]
    kernel.grid = (grid_x, grid_y, 1)
    kernel.block = (32, 8, 1)
    kernel.args = tuple(values)
    return kernel
|
|
# coding: utf-8
"""This module contains decorators for parallel vectorization of functions.
The decorators vectorize the function over the first argument and return a
list of the returned results of the functions.
The main decorator is vectorize_parallel that supports parallelization via the
multiprocessing module or via MPI.
Example for multiprocessing:
>>> from parallel_decorators import vectorize_parallel
>>> @vectorize_parallel(method='processes', num_procs=2)
... def power(x, y):
... return x**y
>>> result = power(range(5), 3)
[0, 1, 8, 27, 64]
Example for MPI:
>>> from parallel_decorators import vectorize_parallel, is_master
>>> @vectorize_parallel(method='MPI')
... def power(x, y):
... return x**y
>>> # computation in parallel here
>>> result = power(range(5), 3)
[0, 1, 8, 27, 64]
>>> if is_master():
... # use results on master core
... print(result)
Then start script with
$ mpiexec -np <num> python script.py
"""
from functools import wraps
import traceback
# some MPI helper functions
def is_master():
    """Return True on the MPI master rank (rank 0), or when mpi4py is absent."""
    try:
        from mpi4py import MPI
    except ImportError:
        # no MPI available: treat the single process as the master
        return True
    return MPI.COMM_WORLD.Get_rank() == 0
def is_mpi():
    """Return True when running under MPI with more than one rank."""
    try:
        from mpi4py import MPI
    except ImportError:
        return False
    return MPI.COMM_WORLD.Get_size() > 1
def mpi_size():
    """Return the number of MPI ranks, or 1 when mpi4py is unavailable."""
    try:
        from mpi4py import MPI
    except ImportError:
        return 1
    return MPI.COMM_WORLD.Get_size()
def mpi_rank():
    """Return the rank of this process, or 0 when mpi4py is unavailable."""
    try:
        from mpi4py import MPI
    except ImportError:
        return 0
    return MPI.COMM_WORLD.Get_rank()
def mpi_barrier():
    """Synchronize all MPI ranks; a no-op when mpi4py is unavailable."""
    try:
        from mpi4py import MPI
    except ImportError:
        return
    MPI.COMM_WORLD.Barrier()
# more helper functions
def is_iterable(xs):
    """Return True when ``xs`` supports iteration."""
    # attempt to obtain an iterator; non-iterables raise TypeError
    try:
        iter(xs)
    except TypeError:
        return False
    else:
        return True
def staticvariables(**variables):
    """Decorator attaching each keyword argument as an attribute of the function."""
    def decorate(function):
        for name, value in variables.items():
            setattr(function, name, value)
        return function
    return decorate
# vectorization functions down here
def vectorize(f):
    """Serial vectorization decorator.

    Wraps ``f`` so that an iterable first argument yields the list of
    per-element results; a non-iterable first argument is passed through
    to a single call of ``f``.
    """
    @wraps(f)
    def newfun(xs, *args, **kwargs):
        try:
            elements = iter(xs)
        except TypeError:
            # scalar first argument: plain call
            return f(xs, *args, **kwargs)
        return [f(x, *args, **kwargs) for x in elements]
    return newfun
def vectorize_queue(num_procs=2, use_progressbar=False, label=None):
    """decorator for parallel vectorization of functions using processes
    Function wrapper that vectorizes f over the first argument
    if the first argument is an iterable.
    This function wrapper uses the multiprocessing module to implement
    parallelism for the vectorization.
    -- num_procs: number of worker processes
    -- use_progressbar: show a progress bar (requires the progressbar module)
    -- label: progress bar label; defaults to the wrapped function's name
    Usage:
    >>> @vectorize_queue(4)
    ... def power(x, y):
    ...     return x**y
    >>> power(4, 3)
    64
    >>> power(range(5), 3)
    [0, 1, 8, 27, 64]
    """
    show_progressbar = False
    if use_progressbar:
        try:
            from progressbar import Bar, AdaptiveETA, Percentage, ProgressBar,\
                FormatLabel
            show_progressbar = True
        except ImportError:
            print("Progressbar requested, but module progressbar not found."
                  " Disabling progressbar.")
    # store the flag as a function attribute so the inner wrapper reads the
    # value determined at decoration time
    @staticvariables(show_progressbar=show_progressbar)
    def decorator(f):
        """the decorator function we return"""
        @staticvariables(show_progressbar=decorator.show_progressbar)
        @wraps(f)
        def newfun(xs, *args, **kwargs):
            """the function that replaces the wrapped function"""
            if not is_iterable(xs):
                # no iteration, simply call function
                return f(xs, *args, **kwargs)
            show_progressbar = newfun.show_progressbar
            from multiprocessing import Process, Queue
            # NOTE(review): len(xs) below requires a sized iterable, and the
            # worker closure relies on fork-style process start (it is not
            # picklable) -- confirm before using on spawn-based platforms.
            if show_progressbar:
                if label is None:
                    bar_label = f.__name__
                else:
                    bar_label = label
                widgets = [FormatLabel(bar_label), ' ', Percentage(),
                           Bar(), AdaptiveETA()]
                pbar = ProgressBar(widgets=widgets, maxval=len(xs))
                pbar.start()
            task_queue = Queue()
            done_queue = Queue()
            # fill tasks, first argument in tuple for ordering
            for i, x in enumerate(xs):
                task_queue.put((i, x))
            # define worker function
            # this definition is inherited by all processes
            # -> important that f, args and kwargs are known by childs
            def worker(in_queue, out_queue):
                # each worker pulls (index, item) tuples until 'STOP'
                for i, x in iter(in_queue.get, 'STOP'):
                    try:
                        res = f(x, *args, **kwargs)
                        out_queue.put((i, res, None))
                    except Exception as e:
                        print("Caught exception in parallel vectorized "
                              "function:")
                        # print out traceback
                        traceback.print_exc()
                        print()
                        # report the failure; index keeps ordering intact
                        out_queue.put((i, None, e))
            # start workers
            for i in range(num_procs):
                Process(target=worker, args=(task_queue, done_queue)).start()
            # get results, ordering done by first argument
            result = [None] * len(xs)
            errors = []
            for i in range(len(xs)):
                j, res, e = done_queue.get()
                result[j] = res
                # caught exception?
                if e is not None:
                    errors.append(e)
                if show_progressbar:
                    # i counts completions, not task indices
                    pbar.update(i)
            # stop workers
            for i in range(num_procs):
                task_queue.put('STOP')
            if show_progressbar:
                pbar.finish()
            # error ocurred?
            if len(errors) > 0:
                print("Caught at least one error during execution:")
                # re-raise the first caught error on the caller's side
                raise errors[0]
            return result
        return newfun
    return decorator
def vectorize_mpi(use_progressbar=False, label=None, scheduling='auto'):
    """Decorator for parallel vectorization of functions using MPI
    Function wrapper that vectorizes f over the first argument
    if the first argument is an iterable.
    This function wrapper uses the mpi4py module to implement
    parallelism for the vectorization.
    -- scheduling: 'static' (round-robin), 'dynamic' (master/slave task
    distribution), or 'auto' (static for fewer than 4 ranks).
    Usage:
    >>> @vectorize_mpi()
    ... def power(x, y):
    ...     return x**y
    >>> # computation in parallel here
    >>> result = power(range(5), 3)
    [0, 1, 8, 27, 64]
    >>> if is_master():
    ...     # use results on master core
    ...     print(result)
    then start script with
    $ mpiexec -np <num> python script.py
    """
    show_progressbar = False
    if use_progressbar:
        try:
            from progressbar import Bar, AdaptiveETA, Percentage, ProgressBar,\
                FormatLabel
            show_progressbar = True
        except ImportError:
            print("Progressbar requested, but module progressbar not found."
                  " Disabling progressbar.")
    @staticvariables(show_progressbar=show_progressbar)
    def decorator(f):
        @wraps(f)
        @staticvariables(show_progressbar=decorator.show_progressbar)
        def newfun(xs, *args, **kwargs):
            """the function that replaces the wrapped function"""
            show_progressbar = newfun.show_progressbar
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            size = comm.Get_size()
            rank = comm.Get_rank()
            if rank != 0:
                # only the master rank draws the progress bar
                show_progressbar = False
            pbar = None
            # first broadcast array from root
            xs = comm.bcast(xs, root=0)
            if not is_iterable(xs):
                # no iteration, simply call function
                return f(xs, *args, **kwargs)
            # only one process
            if size == 1:
                return vectorize(f)(xs, *args, **kwargs)
            result = [None] * len(xs)
            error = None
            comm.Barrier()
            # simple task distribution for less than 4 tasks or if indicated by
            # parameter scheduling
            # otherwise use slave-master model
            # (the trailing "and not dynamic" is redundant given the first
            # two tests, but kept for clarity of intent)
            if (scheduling == 'static' or (scheduling == 'auto' and size < 4))\
                    and not (scheduling == 'dynamic'):
                # compute results
                # round-robin: rank r handles every size-th element
                for i, x in enumerate(xs):
                    if rank == i % size:
                        result[i] = f(x, *args, **kwargs)
                # communicate results
                # for the easy load balancing
                for i in range(len(xs)):
                    if i % size == 0:
                        # already there
                        continue
                    if rank == i % size:
                        # process that sends
                        comm.send(result[i], dest=0, tag=0)
                    elif rank == 0:
                        # root receives
                        result[i] = comm.recv(source=(i % size), tag=0)
            else:
                if rank == 0:
                    # create progressbar
                    if show_progressbar:
                        if label is None:
                            bar_label = f.__name__
                        else:
                            bar_label = label
                        widgets = [FormatLabel(bar_label), ' ', Percentage(),
                                   Bar(), AdaptiveETA()]
                        pbar = ProgressBar(widgets=widgets, maxval=len(xs))
                        pbar.start()
                    # master process -> handles distribution of tasks
                    all_sent = False
                    current = 0
                    # ranks[i] remembers which slave computes element i
                    ranks = [None] * len(xs)
                    reqs_sent = []
                    reqs_rcvd = []
                    completed_reqs = []
                    # send first batch of tasks
                    # NOTE(review): if len(xs) < size - 1 some slaves never
                    # receive an initial task and only get the final (None,
                    # None) message -- confirm that path is exercised safely.
                    for i in range(1, size):
                        ranks[current] = i
                        reqs_sent.append(comm.isend((current, None),
                                                    dest=i, tag=0))
                        reqs_rcvd.append(comm.irecv(source=i, tag=current))
                        if current < len(xs):
                            current += 1
                        else:
                            break
                    for r in reqs_sent:
                        r.wait()
                    while True:
                        new_reqs = []
                        for i, r in enumerate(reqs_rcvd):
                            # check for completed requests
                            completed, data = r.test()
                            if completed:
                                # re-testing an already-finished request
                                # yields data None; skip those
                                if data is None:
                                    continue
                                completed_reqs.append(i)
                                if data[2] is not None:
                                    error = data[2]
                                result[data[0]] = data[1]
                                # check if all tasks have been distributed
                                if current >= len(xs):
                                    all_sent = True
                                    continue
                                # reuse the slave that just finished
                                ranks[current] = ranks[data[0]]
                                # send new taks and get result (asynchronously)
                                comm.send((current, None),
                                          dest=ranks[data[0]], tag=0)
                                new_reqs.append(comm.irecv(
                                    source=ranks[data[0]], tag=current))
                                current += 1
                        for r in new_reqs:
                            reqs_rcvd.append(r)
                        if all_sent and len(completed_reqs) == len(xs):
                            # send None to all processes to exit loop
                            req_finished = []
                            for r in range(1, size):
                                req_finished.append(comm.isend(
                                    (None, None), dest=r, tag=0))
                            for req in req_finished:
                                req.wait()
                            break
                        # check if error occurred and propagate to slaves
                        if error is not None:
                            for r in range(1, size):
                                comm.send((None, error), dest=r, tag=0)
                            break
                        # update progressbar
                        if show_progressbar:
                            pbar.update(len(completed_reqs))
                else:
                    # slave processes -> do the computation
                    current, e = comm.recv(source=0, tag=0)
                    while True:
                        # compute result for index current
                        try:
                            res = f(xs[current], *args, **kwargs)
                            comm.send((current, res, None),
                                      dest=0, tag=current)
                        except Exception as e:
                            print("Caught exception in parallel vectorized "
                                  "function:")
                            # print out traceback
                            traceback.print_exc()
                            print()
                            comm.send((current, None, e), dest=0, tag=current)
                        # receive next task
                        current, e = comm.recv(source=0, tag=0)
                        # exit loop if None is sent
                        if current is None:
                            if e is not None:
                                error = e
                            break
            comm.Barrier()
            if show_progressbar and pbar is not None:
                pbar.finish()
            # check for error
            if error is not None:
                # master re-raises; other ranks return None silently
                if rank == 0:
                    raise error
                return
            # distribute data to all cores
            for i in range(len(xs)):
                if rank == 0:
                    # root sends to all other processes
                    for j in range(1, size):
                        comm.send(result[i], dest=j, tag=1)
                else:
                    # each process receives from root
                    result[i] = comm.recv(source=0, tag=1)
            comm.Barrier()
            return result
        return newfun
    return decorator
def vectorize_parallel(method='processes', num_procs=2, use_progressbar=False,
                       label=None, scheduling='auto'):
    """Decorator for parallel vectorization of functions
    -- method: 'processes' for shared-memory parallelization, 'MPI' for
    distributed-memory parallelization, or 'adaptive' to pick MPI whenever
    an MPI run with more than one rank is active (caution: mpi4py must be
    installed for using mpi!). Any other value falls back to serial
    vectorization.
    -- num_procs: number of worker processes for method == 'processes'
    -- use_progressbar: print a progress bar; requires progressbar module
    -- label: for use_progressbar==True, this sets the label of the
    progress bar. Defaults to the name of the decorated function.
    -- scheduling: scheduling method to use for method == 'MPI'; can be
    'auto', 'static', or 'dynamic'
    Example for multiprocessing:
    >>> from parallel_decorators import vectorize_parallel
    >>> @vectorize_parallel(method='processes', num_procs=2)
    ... def power(x, y):
    ...     return x**y
    >>> result = power(range(5), 3)
    [0, 1, 8, 27, 64]
    Example for MPI:
    >>> from parallel_decorators import vectorize_parallel, is_master
    >>> @vectorize_parallel(method='MPI')
    ... def power(x, y):
    ...     return x**y
    >>> # computation in parallel here
    >>> result = power(range(5), 3)
    [0, 1, 8, 27, 64]
    >>> if is_master():
    ...     # use results on master core
    ...     print(result)
    Then start script with
    $ mpiexec -np <num> python script.py
    """
    if method == 'adaptive':
        method = 'MPI' if is_mpi() else 'processes'
    if method == 'processes':
        return vectorize_queue(num_procs, use_progressbar, label)
    if method == 'MPI':
        return vectorize_mpi(use_progressbar, label, scheduling)
    # unknown method: serial fallback
    return vectorize
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Auth driver for ldap. Includes FakeLdapDriver.
It should be easy to create a replacement for this driver supporting
other backends by creating another class that exposes the same
public methods.
"""
import functools
import sys
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
# LDAP connection/schema options registered on FLAGS below.
# ldap_schema_version=1 switches LdapDriver to the legacy novaProject schema.
ldap_opts = [
    cfg.IntOpt('ldap_schema_version',
               default=2,
               help='Current version of the LDAP schema'),
    cfg.StrOpt('ldap_url',
               default='ldap://localhost',
               help='Point this at your ldap server'),
    cfg.StrOpt('ldap_password',
               default='changeme',
               help='LDAP password'),
    cfg.StrOpt('ldap_user_dn',
               default='cn=Manager,dc=example,dc=com',
               help='DN of admin user'),
    cfg.StrOpt('ldap_user_id_attribute',
               default='uid',
               help='Attribute to use as id'),
    cfg.StrOpt('ldap_user_name_attribute',
               default='cn',
               help='Attribute to use as name'),
    cfg.StrOpt('ldap_user_unit',
               default='Users',
               help='OID for Users'),
    cfg.StrOpt('ldap_user_subtree',
               default='ou=Users,dc=example,dc=com',
               help='OU for Users'),
    cfg.BoolOpt('ldap_user_modify_only',
                default=False,
                help='Modify user attributes instead of creating/deleting'),
    cfg.StrOpt('ldap_project_subtree',
               default='ou=Groups,dc=example,dc=com',
               help='OU for Projects'),
    cfg.StrOpt('role_project_subtree',
               default='ou=Groups,dc=example,dc=com',
               help='OU for Roles'),
    # NOTE(vish): mapping with these flags is necessary because we're going
    # to tie in to an existing ldap schema
    cfg.StrOpt('ldap_cloudadmin',
               default='cn=cloudadmins,ou=Groups,dc=example,dc=com',
               help='cn for Cloud Admins'),
    cfg.StrOpt('ldap_itsec',
               default='cn=itsec,ou=Groups,dc=example,dc=com',
               help='cn for ItSec'),
    cfg.StrOpt('ldap_sysadmin',
               default='cn=sysadmins,ou=Groups,dc=example,dc=com',
               help='cn for Sysadmins'),
    cfg.StrOpt('ldap_netadmin',
               default='cn=netadmins,ou=Groups,dc=example,dc=com',
               help='cn for NetAdmins'),
    cfg.StrOpt('ldap_developer',
               default='cn=developers,ou=Groups,dc=example,dc=com',
               help='cn for Developers'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(ldap_opts)
LOG = logging.getLogger(__name__)
# Use the real memcache client when memcached servers are configured;
# otherwise fall back to nova's in-process cache with the same interface.
if FLAGS.memcached_servers:
    import memcache
else:
    from nova.common import memorycache as memcache
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
# in which we may want to change the interface a bit more.
def _clean(attr):
"""Clean attr for insertion into ldap"""
if attr is None:
return None
if isinstance(attr, unicode):
return str(attr)
return attr
def sanitize(fn):
    """Decorator to sanitize all args through _clean before calling fn."""
    @functools.wraps(fn)
    def _wrapped(self, *args, **kwargs):
        args = [_clean(x) for x in args]
        # BUGFIX: iterate items(), not the dict itself -- iterating a dict
        # yields only its keys, so unpacking (k, v) raised ValueError (or
        # silently mangled two-character string keys).
        kwargs = dict((k, _clean(v)) for (k, v) in kwargs.items())
        return fn(self, *args, **kwargs)
    # python2-era attribute kept for backward compatibility
    _wrapped.func_name = fn.func_name
    return _wrapped
class LDAPWrapper(object):
    """Thin wrapper around an ldap connection that reconnects on SERVER_DOWN.

    Connections are opened lazily on first use; each wrapped operation
    retries exactly once after re-binding if the server reports it is down.
    """
    def __init__(self, ldap, url, user, password):
        # ldap: the imported `ldap` module (injected for testability)
        self.ldap = ldap
        self.url = url
        self.user = user
        self.password = password
        self.conn = None
    # class-body decorator: f maps a live connection to the bound method to
    # call (e.g. lambda conn: conn.search_s)
    def __wrap_reconnect(f):
        def inner(self, *args, **kwargs):
            if self.conn is None:
                # lazy first connect; no retry on this path
                self.connect()
                return f(self.conn)(*args, **kwargs)
            else:
                try:
                    return f(self.conn)(*args, **kwargs)
                except self.ldap.SERVER_DOWN:
                    # stale connection: re-bind once and retry
                    self.connect()
                    return f(self.conn)(*args, **kwargs)
        return inner
    def connect(self):
        """(Re)initialize the connection and bind as the admin user."""
        try:
            self.conn = self.ldap.initialize(self.url)
            self.conn.simple_bind_s(self.user, self.password)
        except self.ldap.SERVER_DOWN:
            # leave conn unset so the next call attempts a fresh connect
            self.conn = None
            raise
    search_s = __wrap_reconnect(lambda conn: conn.search_s)
    add_s = __wrap_reconnect(lambda conn: conn.add_s)
    delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
    modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
class LdapDriver(object):
    """Ldap Auth driver
    Defines enter and exit and therefore supports the with/as syntax.
    """
    # schema v2 defaults; overridden in __init__ when ldap_schema_version == 1
    project_pattern = '(owner=*)'
    isadmin_attribute = 'isNovaAdmin'
    project_attribute = 'owner'
    project_objectclass = 'groupOfNames'
    # class-level shared connection and memcache client (set in __init__)
    conn = None
    mc = None
    def __init__(self):
        """Imports the LDAP module and sets up shared class-level state."""
        self.ldap = __import__('ldap')
        if FLAGS.ldap_schema_version == 1:
            # legacy schema: mutate the class-level attributes so every
            # driver instance sees the v1 names
            LdapDriver.project_pattern = '(objectclass=novaProject)'
            LdapDriver.isadmin_attribute = 'isAdmin'
            LdapDriver.project_attribute = 'projectManager'
            LdapDriver.project_objectclass = 'novaProject'
        # per-request cache; populated only inside the `with` context
        self.__cache = None
        if LdapDriver.conn is None:
            # shared, lazily-created connection for all driver instances
            LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
                                          FLAGS.ldap_user_dn,
                                          FLAGS.ldap_password)
        if LdapDriver.mc is None:
            LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
    def __enter__(self):
        # TODO(yorik-sar): Should be per-request cache, not per-driver-request
        # Entering the context enables the local cache used by __local_cache.
        self.__cache = {}
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Drop the local cache; returning False propagates any exception.
        self.__cache = None
        return False
    def __local_cache(key_fmt):  # pylint: disable=E0213
        """Wrap function to cache it's result in self.__cache.

        Works only with functions with one fixed argument.
        key_fmt must contain exactly one %s placeholder for that argument.
        NOTE(review): outside a `with` block self.__cache is None, so the
        lookup raises TypeError rather than falling through to fn -- the
        decorated methods appear to assume context-manager usage; confirm.
        """
        def do_wrap(fn):
            @functools.wraps(fn)
            def inner(self, arg, **kwargs):
                cache_key = key_fmt % (arg,)
                try:
                    res = self.__cache[cache_key]
                    LOG.debug('Local cache hit for %s by key %s' %
                              (fn.__name__, cache_key))
                    return res
                except KeyError:
                    # miss: compute, memoize, and return
                    res = fn(self, arg, **kwargs)
                    self.__cache[cache_key] = res
                    return res
            return inner
        return do_wrap
    @sanitize
    @__local_cache('uid_user-%s')
    def get_user(self, uid):
        """Retrieve user by id; raises LDAPUserNotFound when absent."""
        attr = self.__get_ldap_user(uid)
        if attr is None:
            raise exception.LDAPUserNotFound(user_id=uid)
        return self.__to_user(attr)
    @sanitize
    def get_user_from_access_key(self, access):
        """Retrieve user by access key, using memcache for the dn lookup."""
        cache_key = 'uak_dn_%s' % (access,)
        user_dn = self.mc.get(cache_key)
        if user_dn:
            # cached dn: fetch just that single entry
            user = self.__to_user(
                self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
            if user:
                if user['access'] == access:
                    return user
                else:
                    # stale mapping -- the dn no longer matches this key
                    self.mc.set(cache_key, None)
        # fall back to a subtree search
        # NOTE(review): `access` is interpolated into the filter unescaped;
        # an attacker-controlled key could inject LDAP filter syntax --
        # consider ldap.filter.escape_filter_chars.
        query = '(accessKey=%s)' % access
        dn = FLAGS.ldap_user_subtree
        user_obj = self.__find_object(dn, query)
        user = self.__to_user(user_obj)
        if user:
            # remember the dn for next time
            self.mc.set(cache_key, user_obj['dn'][0])
        return user
    @sanitize
    @__local_cache('pid_project-%s')
    def get_project(self, pid):
        """Retrieve project by id (None when the lookup finds nothing)."""
        dn = self.__project_to_dn(pid, search=False)
        attr = self.__find_object(dn, LdapDriver.project_pattern,
                                  scope=self.ldap.SCOPE_BASE)
        return self.__to_project(attr)
@sanitize
def get_users(self):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
'(objectclass=novaUser)')
users = []
for attr in attrs:
user = self.__to_user(attr)
if user is not None:
users.append(user)
return users
    @sanitize
    def get_projects(self, uid=None):
        """Retrieve list of projects, optionally only those uid belongs to."""
        pattern = LdapDriver.project_pattern
        if uid:
            # restrict to projects where the user's dn is a member
            pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
        attrs = self.__find_objects(FLAGS.ldap_project_subtree,
                                    pattern)
        return [self.__to_project(attr) for attr in attrs]
    @sanitize
    def create_user(self, name, access_key, secret_key, is_admin):
        """Create a user.

        With ldap_user_modify_only set, an existing ldap entry is patched
        in place (attributes replaced or added as needed); otherwise a new
        entry is added. Raises LDAPUserExists / LDAPUserNotFound.
        """
        if self.__user_exists(name):
            raise exception.LDAPUserExists(user=name)
        if FLAGS.ldap_user_modify_only:
            if self.__ldap_user_exists(name):
                # Retrieve user by name
                user = self.__get_ldap_user(name)
                # Entry could be malformed, test for missing attrs.
                # Malformed entries are useless, replace attributes found.
                attr = []
                if 'secretKey' in user.keys():
                    attr.append((self.ldap.MOD_REPLACE, 'secretKey',
                                 [secret_key]))
                else:
                    attr.append((self.ldap.MOD_ADD, 'secretKey',
                                 [secret_key]))
                if 'accessKey' in user.keys():
                    attr.append((self.ldap.MOD_REPLACE, 'accessKey',
                                 [access_key]))
                else:
                    attr.append((self.ldap.MOD_ADD, 'accessKey',
                                 [access_key]))
                if LdapDriver.isadmin_attribute in user.keys():
                    attr.append((self.ldap.MOD_REPLACE,
                                 LdapDriver.isadmin_attribute,
                                 [str(is_admin).upper()]))
                else:
                    attr.append((self.ldap.MOD_ADD,
                                 LdapDriver.isadmin_attribute,
                                 [str(is_admin).upper()]))
                self.conn.modify_s(self.__uid_to_dn(name), attr)
                return self.get_user(name)
            else:
                raise exception.LDAPUserNotFound(user_id=name)
        else:
            # build a fresh novaUser entry
            attr = [
                ('objectclass', ['person',
                                 'organizationalPerson',
                                 'inetOrgPerson',
                                 'novaUser']),
                ('ou', [FLAGS.ldap_user_unit]),
                (FLAGS.ldap_user_id_attribute, [name]),
                ('sn', [name]),
                (FLAGS.ldap_user_name_attribute, [name]),
                ('secretKey', [secret_key]),
                ('accessKey', [access_key]),
                (LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
            ]
            self.conn.add_s(self.__uid_to_dn(name), attr)
            return self.__to_user(dict(attr))
    @sanitize
    def create_project(self, name, manager_uid,
                       description=None, member_uids=None):
        """Create a project group owned by manager_uid.

        Raises ProjectExists / LDAPUserNotFound. The manager is always made
        a member since groupOfNames requires at least one member.
        """
        if self.__project_exists(name):
            raise exception.ProjectExists(project=name)
        if not self.__user_exists(manager_uid):
            raise exception.LDAPUserNotFound(user_id=manager_uid)
        manager_dn = self.__uid_to_dn(manager_uid)
        # description is a required attribute
        if description is None:
            description = name
        members = []
        if member_uids is not None:
            for member_uid in member_uids:
                # validate every member before touching ldap
                if not self.__user_exists(member_uid):
                    raise exception.LDAPUserNotFound(user_id=member_uid)
                members.append(self.__uid_to_dn(member_uid))
        # always add the manager as a member because members is required
        if not manager_dn in members:
            members.append(manager_dn)
        attr = [
            ('objectclass', [LdapDriver.project_objectclass]),
            ('cn', [name]),
            ('description', [description]),
            (LdapDriver.project_attribute, [manager_dn]),
            ('member', members)]
        dn = self.__project_to_dn(name, search=False)
        self.conn.add_s(dn, attr)
        return self.__to_project(dict(attr))
@sanitize
def modify_project(self, project_id, manager_uid=None, description=None):
"""Modify an existing project"""
if not manager_uid and not description:
return
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
if not self.is_in_project(manager_uid, project_id):
self.add_to_project(manager_uid, project_id)
    @sanitize
    def add_to_project(self, uid, project_id):
        """Add user to project (delegates to the group membership helper)."""
        dn = self.__project_to_dn(project_id)
        return self.__add_to_group(uid, dn)
    @sanitize
    def remove_from_project(self, uid, project_id):
        """Remove user from project (delegates to the group helper)."""
        dn = self.__project_to_dn(project_id)
        return self.__remove_from_group(uid, dn)
    @sanitize
    def is_in_project(self, uid, project_id):
        """Check if user is a member of the project group."""
        dn = self.__project_to_dn(project_id)
        return self.__is_in_group(uid, dn)
    @sanitize
    def has_role(self, uid, role, project_id=None):
        """Check if user has role
        If project is specified, it checks for local role, otherwise it
        checks for global role
        """
        role_dn = self.__role_to_dn(role, project_id)
        return self.__is_in_group(uid, role_dn)
    @sanitize
    def add_role(self, uid, role, project_id=None):
        """Add role for user (or user and project)"""
        role_dn = self.__role_to_dn(role, project_id)
        if not self.__group_exists(role_dn):
            # create the role if it doesn't exist
            description = '%s role for %s' % (role, project_id)
            # NOTE(review): this branch returns None while the other branch
            # returns __add_to_group's result -- callers should not rely on
            # the return value.
            self.__create_group(role_dn, role, uid, description)
        else:
            return self.__add_to_group(uid, role_dn)
    @sanitize
    def remove_role(self, uid, role, project_id=None):
        """Remove role for user (or user and project)"""
        role_dn = self.__role_to_dn(role, project_id)
        return self.__remove_from_group(uid, role_dn)
@sanitize
def get_user_roles(self, uid, project_id=None):
"""Retrieve list of roles for user (or user and project)"""
if project_id is None:
# NOTE(vish): This is unneccesarily slow, but since we can't
# guarantee that the global roles are located
# together in the ldap tree, we're doing this version.
roles = []
for role in FLAGS.allowed_roles:
role_dn = self.__role_to_dn(role)
if self.__is_in_group(uid, role_dn):
roles.append(role)
return roles
else:
project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
return [role['cn'][0] for role in roles]
@sanitize
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
self.__remove_from_all(uid)
if FLAGS.ldap_user_modify_only:
# Delete attributes
attr = []
# Retrieve user by name
user = self.__get_ldap_user(uid)
if 'secretKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'secretKey',
user['secretKey']))
if 'accessKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'accessKey',
user['accessKey']))
if LdapDriver.isadmin_attribute in user.keys():
attr.append((self.ldap.MOD_DELETE,
LdapDriver.isadmin_attribute,
user[LdapDriver.isadmin_attribute]))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
else:
# Delete entry
self.conn.delete_s(self.__uid_to_dn(uid))
@sanitize
def delete_project(self, project_id):
"""Delete a project"""
project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@sanitize
def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
"""Modify an existing user"""
if not access_key and not secret_key and admin is None:
return
attr = []
if access_key:
attr.append((self.ldap.MOD_REPLACE, 'accessKey', access_key))
if secret_key:
attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
if admin is not None:
attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
str(admin).upper()))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
def __user_exists(self, uid):
"""Check if user exists"""
try:
return self.get_user(uid) is not None
except exception.LDAPUserNotFound:
return False
def __ldap_user_exists(self, uid):
"""Check if the user exists in ldap"""
return self.__get_ldap_user(uid) is not None
def __project_exists(self, project_id):
"""Check if project exists"""
return self.get_project(project_id) is not None
@__local_cache('uid_attrs-%s')
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
dn = FLAGS.ldap_user_subtree
query = ('(&(%s=%s)(objectclass=novaUser))' %
(FLAGS.ldap_user_id_attribute, uid))
return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
objects = self.__find_objects(dn, query, scope)
if len(objects) == 0:
return None
return objects[0]
def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
if scope is None:
# One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# Just return the DNs
return [dn for dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
if scope is None:
# One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
if query is None:
query = "(objectClass=*)"
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# Just return the attributes
# FIXME(yorik-sar): Whole driver should be refactored to
# prevent this hack
res1 = []
for dn, attrs in res:
attrs['dn'] = [dn]
res1.append(attrs)
return res1
def __find_role_dns(self, tree):
"""Find dns of role objects in given tree"""
query = ('(&(objectclass=groupOfNames)(!%s))' %
LdapDriver.project_pattern)
return self.__find_dns(tree, query)
def __find_group_dns_with_member(self, tree, uid):
"""Find dns of group objects in a given tree that contain member"""
query = ('(&(objectclass=groupOfNames)(member=%s))' %
self.__uid_to_dn(uid))
dns = self.__find_dns(tree, query)
return dns
def __group_exists(self, dn):
"""Check if group exists"""
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS["ldap_%s" % role]
else:
project_dn = self.__project_to_dn(project_id)
return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
raise exception.LDAPGroupExists(group=name)
members = []
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
members.append(dn)
attr = [
('objectclass', ['groupOfNames']),
('cn', [name]),
('description', [description]),
('member', members)]
self.conn.add_s(group_dn, attr)
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid),
self.ldap.SCOPE_BASE)
return res is not None
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
raise exception.LDAPGroupNotFound(group_id=group_dn)
if self.__is_in_group(uid, group_dn):
raise exception.LDAPMembershipExists(uid=uid, group_dn=group_dn)
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
raise exception.LDAPGroupNotFound(group_id=group_dn)
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
if not self.__is_in_group(uid, group_dn):
raise exception.LDAPGroupMembershipNotFound(user_id=uid,
group_id=group_dn)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
self.__safe_remove_from_group(uid, sub_dn)
    def __safe_remove_from_group(self, uid, group_dn):
        """Remove user from group, deleting group if user is last member"""
        # FIXME(vish): what if deleted user is a project manager?
        attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
        try:
            self.conn.modify_s(group_dn, attr)
        except self.ldap.OBJECT_CLASS_VIOLATION:
            # groupOfNames requires at least one member, so removing the last
            # one violates the schema; treat that as "delete the whole group".
            LOG.debug(_("Attempted to remove the last member of a group. "
                        "Deleting the group at %s instead."), group_dn)
            self.__delete_group(group_dn)
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
self.__safe_remove_from_group(uid, role_dn)
project_dns = self.__find_group_dns_with_member(
FLAGS.ldap_project_subtree, uid)
for project_dn in project_dns:
self.__safe_remove_from_group(uid, project_dn)
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
raise exception.LDAPGroupNotFound(group_id=group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):
"""Delete all roles for project"""
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr is None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
@__local_cache('uid_dn-%s')
def __uid_to_dn(self, uid, search=True):
"""Convert uid to dn"""
# By default return a generated DN
userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))
if search:
query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
user = self.__find_dns(FLAGS.ldap_user_subtree, query)
if len(user) > 0:
userdn = user[0]
return userdn
@__local_cache('pid_dn-%s')
def __project_to_dn(self, pid, search=True):
"""Convert pid to dn"""
# By default return a generated DN
projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
if search:
query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
project = self.__find_dns(FLAGS.ldap_project_subtree, query)
if len(project) > 0:
projectdn = project[0]
return projectdn
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
if attr is None:
return None
if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() and
LdapDriver.isadmin_attribute in attr.keys()):
return {
'id': attr[FLAGS.ldap_user_id_attribute][0],
'name': attr[FLAGS.ldap_user_name_attribute][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
else:
return None
@__local_cache('dn_uid-%s')
def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
query = '(objectclass=novaUser)'
user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
return user[FLAGS.ldap_user_id_attribute][0]
class FakeLdapDriver(LdapDriver):
    """Fake Ldap Auth driver"""
    def __init__(self):
        # Replace the real python-ldap module with nova's in-memory fake
        # before the parent constructor resolves/uses 'ldap'; any later
        # `import ldap` anywhere in the process also gets the fake.
        import nova.auth.fakeldap
        sys.modules['ldap'] = nova.auth.fakeldap
        super(FakeLdapDriver, self).__init__()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lookup table Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
class LookupInterface(object):
  """Base interface for lookup tables that persist across steps."""

  def __init__(self, key_dtype, value_dtype, name):
    """Construct a lookup table interface.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
      name: A name for the operation (optional).
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    self._name = name

  @property
  def key_dtype(self):
    """The table key dtype."""
    return self._key_dtype

  @property
  def value_dtype(self):
    """The table value dtype."""
    return self._value_dtype

  @property
  def name(self):
    """The name of the table."""
    return self._name

  @property
  def init(self):
    """The table initialization op."""
    raise NotImplementedError

  def size(self, name=None):
    """Compute the number of elements in this table."""
    raise NotImplementedError

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values."""
    raise NotImplementedError

  def _check_table_dtypes(self, key_dtype, value_dtype):
    """Raise TypeError unless the given dtypes match the table's dtypes.

    Args:
      key_dtype: The key data type to check.
      value_dtype: The value data type to check.

    Raises:
      TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table
        data types.
    """
    checks = (("key", self.key_dtype, key_dtype),
              ("value", self.value_dtype, value_dtype))
    for kind, expected, got in checks:
      if expected != got:
        raise TypeError("Invalid %s dtype, expected %s but got %s." %
                        (kind, expected, got))
class InitializableLookupTableBase(LookupInterface):
  """Base class for lookup tables that require explicit initialization.

  Once initialized, the table contents persist across different steps.
  """

  def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    A table initializer object (subclass of `TableInitializerBase`) is
    required. It provides the table key and value types, as well as the op
    that initializes the table; the caller is responsible for executing
    that initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    super(InitializableLookupTableBase, self).__init__(
        initializer.key_dtype, initializer.value_dtype,
        table_ref.op.name.split("/")[-1])
    self._table_ref = table_ref
    self._default_value = ops.convert_to_tensor(default_value,
                                                dtype=self._value_dtype)
    # The default value must be a scalar.
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    self._init = initializer.initialize(self)

  @property
  def table_ref(self):
    """Get the underlying table reference."""
    return self._table_ref

  @property
  def default_value(self):
    """The value returned for keys absent from the table."""
    return self._default_value

  @property
  def init(self):
    """The table initialization op."""
    return self._init

  def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    if name is None:
      name = "%s_Size" % self._name
    # pylint: disable=protected-access
    return gen_data_flow_ops._lookup_table_size(self._table_ref, name=name)
    # pylint: enable=protected-access

  def lookup(self, keys, name=None):
    """Looks up `keys` in the table and returns the corresponding values.

    Keys absent from the table map to `default_value`.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: A name for the operation (optional).

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` or `default_value` doesn't match the table data
        types.
    """
    if name is None:
      name = "%s_lookup_table_find" % self._name
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    sparse = isinstance(keys, ops.SparseTensor)
    key_tensor = keys.values if sparse else keys
    # pylint: disable=protected-access
    values = gen_data_flow_ops._lookup_table_find(self._table_ref,
                                                  key_tensor,
                                                  self._default_value,
                                                  name=name)
    # pylint: enable=protected-access
    if sparse:
      # Re-wrap the looked-up values with the original sparse structure.
      return ops.SparseTensor(keys.indices, values, keys.shape)
    return values
class HashTable(InitializableLookupTableBase):
  """A generic hash table implementation.

  Example usage:

  ```python
  table = tf.contrib.lookup.HashTable(
      tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
  out = table.lookup(input_tensor)
  table.init.run()
  print out.eval()
  ```
  """

  def __init__(self, initializer, default_value, shared_name=None, name=None):
    """Creates a non-initialized `HashTable` object.

    The key and value types of the table are given by the initializer.
    The table must be initialized before first use; after initialization
    it is immutable.

    Args:
      initializer: The table initializer to use.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).

    Returns:
      A `HashTable` object.
    """
    with ops.op_scope([initializer], name, "hash_table"):
      # pylint: disable=protected-access
      table_ref = gen_data_flow_ops._hash_table(
          shared_name=shared_name,
          key_dtype=initializer.key_dtype,
          value_dtype=initializer.value_dtype,
          name=name)
      # pylint: enable=protected-access
      super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
  """Base class for lookup table initializers.

  Subclasses must implement initialize() to emit the op that fills a table.
  """
  def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.
    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
  @property
  def key_dtype(self):
    """The expected table key dtype."""
    return self._key_dtype
  @property
  def value_dtype(self):
    """The expected table value dtype."""
    return self._value_dtype
  def initialize(self, table):
    """Returns the table initialization op."""
    raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
  """Table initializer built from explicit `keys` and `values` tensors."""

  def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
    """Constructs a table initializer object based on keys and values tensors.

    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      key_dtype: The `keys` data type. Used when `keys` is a python array.
      value_dtype: The `values` data type. Used when `values` is a python
        array.
      name: A name for the operation (optional).
    """
    with ops.op_scope([keys, values], name, "key_value_init") as scope:
      self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
      self._values = ops.convert_to_tensor(values,
                                           dtype=value_dtype,
                                           name="values")
      self._name = scope
    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
                                                    self._values.dtype)

  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    # pylint: disable=protected-access
    table._check_table_dtypes(self._keys.dtype, self._values.dtype)
    with ops.op_scope([table], self._name) as scope:
      init_op = gen_data_flow_ops._initialize_table(table.table_ref,
                                                    self._keys,
                                                    self._values,
                                                    name=scope)
    # pylint: enable=protected-access
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
def string_to_index(tensor, mapping, default_value=-1, name=None):
  """Maps `tensor` of strings into `int64` indices based on `mapping`.

  The lookup table is initialized from the string `mapping` tensor: each
  element is a key whose value is its index within the tensor. Any input
  entry without a corresponding entry in `mapping` (out-of-vocabulary) is
  assigned `default_value`.

  Elements in `mapping` cannot be duplicated, otherwise initialization
  throws a FailedPreconditionError.

  The underlying table must be initialized by calling
  `tf.initialize_all_tables.run()` once.

  For example:

  ```python
  mapping_strings = tf.constant(["emerson", "lake", "palmer"])
  feats = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = tf.contrib.lookup.string_to_index(
      feats, mapping=mapping_strings, default_value=-1)
  ...
  tf.initialize_all_tables().run()
  ids.eval()  ==> [0, 1, -1, 2]
  ```

  Args:
    tensor: A 1-D input `Tensor` with the strings to map to indices.
    mapping: A 1-D string `Tensor` that specifies the mapping of strings to
      indices.
    default_value: The `int64` value to use for out-of-vocabulary strings.
      Defaults to -1.
    name: A name for this op (optional).

  Returns:
    The mapped indices. It has the same shape and tensor type (dense or
    sparse) as `tensor`.
  """
  with ops.op_scope([tensor], name, "string_to_index") as scope:
    shared_name = ""
    keys = ops.convert_to_tensor(mapping, dtypes.string)
    vocab_size = array_ops.size(keys)
    # Values are simply the positions 0..vocab_size-1 of each key.
    values = math_ops.cast(math_ops.range(vocab_size), dtypes.int64)
    init = KeyValueTensorInitializer(keys,
                                     values,
                                     dtypes.string,
                                     dtypes.int64,
                                     name="table_init")
    table = HashTable(init,
                      default_value,
                      shared_name=shared_name,
                      name="hash_table")
    return table.lookup(tensor, name=scope)
def index_to_string(tensor, mapping, default_value="UNK", name=None):
  """Maps `tensor` of indices into string values based on `mapping`.

  The lookup table is initialized from the string `mapping` tensor: each
  element is a value whose key is its index within the tensor. Any input
  index without a corresponding entry in `mapping` (out-of-vocabulary) is
  assigned `default_value`.

  The underlying table must be initialized by calling
  `tf.initialize_all_tables.run()` once.

  For example:

  ```python
  mapping_string = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  values = tf.contrib.lookup.index_to_string(
      indices, mapping=mapping_string, default_value="UNKNOWN")
  ...
  tf.initialize_all_tables().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    tensor: A `int64` `Tensor` with the indices to map to strings.
    mapping: A 1-D string `Tensor` that specifies the strings to map from
      indices.
    default_value: The string value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The strings values associated to the indices. The resultant dense
    feature value tensor has the same shape as the corresponding `indices`.
  """
  with ops.op_scope([tensor], name, "index_to_string") as scope:
    shared_name = ""
    values = ops.convert_to_tensor(mapping, dtypes.string)
    vocab_size = array_ops.size(values)
    # Keys are simply the positions 0..vocab_size-1 of each value.
    keys = math_ops.cast(math_ops.range(vocab_size), dtypes.int64)
    init = KeyValueTensorInitializer(keys,
                                     values,
                                     dtypes.int64,
                                     dtypes.string,
                                     name="table_init")
    table = HashTable(init,
                      default_value,
                      shared_name=shared_name,
                      name="hash_table")
    return table.lookup(tensor, name=scope)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Jorge A. Gomes (jorgegomes83 at hotmail dot com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import math
import pygame as pg
from OpenGL.GL import GL_LINE_STRIP, GL_TRIANGLE_STRIP
from typing import Optional, Callable
from easygl.arrays import VertexArrayData, DType, attribute, vertex, VertexArray
from easygl.shaders import ShaderProgramData, ShaderProgram
from easygl.textures import TexDescriptor, TextureData, MipMap, Wrap, Filter
from easygl.structures import FrozenMat4, Vec2, Vec4
from easygl.display import BlendMode, GLWindow, Projection
import geometry as ge
__all__ = [
'init',
'bake_vertices',
'stripe',
]
_initialized = False
def hypot(a):
    # type: (tuple) -> float
    """Return the squared magnitude of 2D point *a* (note: no square root,
    despite the name)."""
    x, y = a[0], a[1]
    return x * x + y * y
def length(a):
    # type: (tuple) -> float
    """Return the Euclidean length of 2D point *a*."""
    return math.sqrt(hypot(a))
def normalize(a):
    # type: (tuple) -> tuple
    """Return *a* scaled to unit length, or (0.0, 0.0) for the zero vector."""
    mag = length(a)
    if mag == 0:
        return 0.0, 0.0
    return a[0] / mag, a[1] / mag
def _get_startpoints(start, end, thickness, alignment=0.5):
    # type: (tuple, tuple, float, float) -> tuple
    """Return the two corner points flanking *start*, perpendicular to the
    segment start->end.  *alignment* in [0, 1] is the fraction of
    *thickness* placed on the left side."""
    alignment = max(0., min(alignment, 1.))
    sx, sy = start
    ex, ey = end
    # Unit direction of the segment.
    nx, ny = normalize((ex - sx, ey - sy))
    # Left and right perpendiculars.
    plx, ply = -ny, nx
    prx, pry = ny, -nx
    left = thickness * alignment
    right = thickness * (1. - alignment)
    point_a = (sx + plx * left, sy + ply * left)
    point_b = (sx + prx * right, sy + pry * right)
    return point_a, point_b
def _get_endpoints(start, end, thickness, alignment=0.5):
    # type: (tuple, tuple, float, float) -> tuple
    """Return the two corner points flanking *end*, perpendicular to the
    segment start->end.  *alignment* in [0, 1] is the fraction of
    *thickness* placed on the left side."""
    alignment = max(0., min(alignment, 1.))
    sx, sy = start
    ex, ey = end
    # Unit direction of the segment.
    nx, ny = normalize((ex - sx, ey - sy))
    # Left and right perpendiculars.
    plx, ply = -ny, nx
    prx, pry = ny, -nx
    left = thickness * alignment
    right = thickness * (1. - alignment)
    point_a = (ex + plx * left, ey + ply * left)
    point_b = (ex + prx * right, ey + pry * right)
    return point_a, point_b
def stripe(window, view, projection, points, color_a, color_b=None, tex=None, vcoord=0., blend=BlendMode.alpha,
           update=True):
    # type: (GLWindow, Mat4, Mat4, Union[tuple, list], Vec4, Optional[Vec4], Optional[TextDescriptor], float, BlendMode, bool) -> None
    """Placeholder renderer: does nothing until init() rebinds the
    module-level `stripe` name to the real implementation."""
    pass
# region - - -- ----==<[ BAKER ]>==---- -- - -
def bake_vertices(points, thickness):
    """Precompute triangle-strip corner points for a poly-line of the given
    thickness.  Returns a flat tuple of 2D points, two per input point."""
    baked = []
    start = points[0]
    a, b = _get_startpoints(start, points[1], thickness)
    baked.extend((a, b))
    for end in points[1:]:
        c, d = _get_endpoints(start, end, thickness)
        start = end
        baked.extend((c, d))
    return tuple(baked)
# endregion
def init():
    # type: () -> None
    """Create the GL resources used by stripe rendering (vertex buffer,
    fallback texture, shader program) and replace the module-level `stripe`
    stub with the real render function.  Requires a live OpenGL context.
    """
    # NOTE(review): _initialized and bake_vertices are declared global but
    # never reassigned in this function - looks like leftovers; confirm.
    global _initialized, stripe, stripe_array_data, bake_vertices, texdata, line_shader_data, line_shader
    # region - - -- ----==<[ ARRAY DATA ]>==---- -- - -
    # Fixed-capacity vertex buffer (1024 vertices), zero-filled here and
    # overwritten with real point data on each stripe() call.
    stripe_array_data = VertexArrayData()
    with stripe_array_data.definition():
        attribute('position', DType.float_v2)
        # attribute('texcoord', DType.float_v2)
    with stripe_array_data.new_primitive('stripe', 1024):
        for i in range(1024):
            vertex(position=(0., 0.))
    # endregion
    # region - - -- ----==<[ TEXTURES ]>==---- -- - -
    # Solid white 4x1 surface: fallback texture so that the same shader can
    # also draw untextured (solid color) stripes.
    s = pg.Surface((4, 1))
    s.fill((255, 255, 255))
    texdata = TextureData()
    texdata.create_from_surface('line_tex', s, False, False, MipMap.linear_linear, Wrap.repeat, Filter.linear)
    # endregion
    # region - - -- ----==<[ SHADERS ]>==---- -- - -
    # Vertex shader: blends start_color/end_color along the strip and derives
    # the texture U coordinate from the vertex parity (left/right strip edge).
    # NOTE(review): gl_Position uses vec4(position, 1.f, 1.f), i.e. z fixed
    # at 1 - presumably fine for the 2D projections used here; confirm.
    line_vshader_code = """
    #version 330 core
    in vec2 position;
    uniform mat4 view;
    uniform mat4 projection;
    uniform vec4 start_color;
    uniform vec4 end_color;
    uniform float point_count;
    uniform float vcoord;
    out vec4 color;
    out vec2 coord;
    void main() {
        gl_Position = projection * view * vec4(position, 1.f, 1.f);
        color = mix(start_color, end_color, gl_VertexID / point_count);
        coord = vec2(mod(gl_VertexID, 2.f), vcoord);
    }
    """
    # NOTE(review): the `solidcolor` condition below looks inverted - the
    # texture is sampled only when solidcolor is true, while the stripe()
    # function loads solidcolor=0 exactly when a caller-supplied texture is
    # bound, so a user texture never affects the output. Confirm intent.
    line_fshader_code = """
    #version 330 core
    in vec4 color;
    in vec2 coord;
    uniform sampler2D tex;
    uniform bool solidcolor;
    void main() {
        vec4 basecolor = color;
        if (solidcolor)
            basecolor *= texture(tex, coord);
        gl_FragColor = basecolor;
    }
    """
    line_shader_data = ShaderProgramData("")
    line_shader_data.compile_vertex_shader('line', shader_code=line_vshader_code)
    line_shader_data.compile_fragment_shader('line', shader_code=line_fshader_code)
    line_shader_data.link('line', vertex='line', fragment='line')
    line_shader = line_shader_data.build('line')
    # endregion
    # region - - -- ----==<[ VAOS ]>==---- -- - -
    line_vertex_array = VertexArray(stripe_array_data, 'stripe', line_shader)
    # endregion
    # region - - -- ----==<[ RENDER FUNCTIONS ]>==---- -- - -
    # Rebinds the module-level `stripe` name (declared global above).
    def stripe(window, view, projection, points, color_a, color_b=None, tex=None, vcoord=0., blend=BlendMode.alpha, update=True):
        # type: (GLWindow, Mat4, Mat4, Union[tuple, list], Vec4, Optional[Vec4], Optional[TextDescriptor], float, BlendMode, bool) -> None
        """Render *points* as a colored/textured GL triangle strip."""
        # Fewer than 4 points (2 baked vertex pairs) is silently ignored.
        if len(points) < 4:
            return
        current = window.blend_mode
        if update:
            # Re-upload the point data, flipping y for top-down projections.
            data = Vec2(points[0]).pack() # type: bytes
            if window.projection is Projection.ortho_down:
                h = window.height
                for (x, y) in points[1:]:
                    data += Vec2.pack_values(x, h - y)
            else:
                for (x, y) in points[1:]:
                    data += Vec2.pack_values(x, y)
            line_vertex_array.update_data(0, data)
        if not isinstance(color_b , Vec4):
            # Fall back to a single-color stripe.
            color_b = color_a
        window.blend_mode = blend
        # Clamp to the 1024-vertex capacity of the prebuilt buffer.
        count = max(2, min(len(points), 1024))
        with line_vertex_array.render(GL_TRIANGLE_STRIP, count) as shader: # type: ShaderProgram
            shader.load_matrix4f('view', 1, False, tuple(view))
            shader.load_matrix4f('projection', 1, False, tuple(projection))
            shader.load4f('start_color', *color_a)
            shader.load4f('end_color', *color_b)
            shader.load1f('point_count', count)
            shader.load1f('vcoord', vcoord)
            if isinstance(tex, TexDescriptor):
                shader.load_sampler2d('tex', tex.id, 0)
                shader.load1i('solidcolor', 0)
            else:
                # No texture given: bind the white fallback texture.
                shader.load_sampler2d('tex', texdata['line_tex'].id, 0)
                shader.load1i('solidcolor', 1)
        # Restore the blend mode that was active on entry.
        window.blend_mode = current
    # endregion
|
|
"""
Differential Equations in Action
Lesson 1 - Houston We have a problem
"""
# Import
import math # import math.cos(), math.sin(), math.pi
import numpy # import distance
import matplotlib.pyplot as plt # import plot
"""
-----------------------------------------------------------------------
- 1 - Exercices
-----------------------------------------------------------------------
"""
# Exo 1 - Define x, sin(x) and cos(x)
def sin_cos(num_steps):
    """Sample x over [0, 2*pi] at num_steps evenly spaced points.

    Returns (x, sin_x, cos_x) as numpy arrays.  Fixes the original
    assignment swap: sin_x now really holds sin(x) and cos_x holds cos(x).
    Requires num_steps >= 2 (the spacing divides by num_steps - 1).
    """
    x = numpy.zeros(num_steps)
    sin_x = numpy.zeros(num_steps)
    cos_x = numpy.zeros(num_steps)
    for i in range(num_steps):
        x[i] = 2. * math.pi * i / (num_steps - 1)
        sin_x[i] = math.sin(x[i])
        cos_x[i] = math.cos(x[i])
    return x, sin_x, cos_x
# Exo 2 - Forward Euler method
def forward_euler(time_steps, num_steps, g):
    """Integrate 1-D free fall under constant gravity *g* with the forward
    Euler scheme, starting from x=0, v=0 at t=0.

    Returns arrays (t, x, v), each of length num_steps + 1;
    *time_steps* is the step size in seconds.
    """
    t = numpy.zeros(num_steps + 1)
    x = numpy.zeros(num_steps + 1)
    v = numpy.zeros(num_steps + 1)
    for step in range(num_steps):
        nxt = step + 1
        t[nxt] = nxt * time_steps
        x[nxt] = x[step] + time_steps * v[step]
        v[nxt] = v[step] - time_steps * g
    return t, x, v
# Exo 3 - Spaceship acceleration with Moon and Earth at a t-time
def acceleration(moon_position, spaceship_position):
    """Gravitational acceleration of the spaceship due to the Earth (at the
    origin) and the Moon, both treated as point masses (a = G*M/r^3 * r_vec).

    NOTE: this definition is shadowed by the Problem-3 `acceleration`
    defined further down in the file; rename one of them if both are needed.
    """
    X_ES = - spaceship_position                 # vector ship -> Earth
    X_MS = moon_position - spaceship_position   # vector ship -> Moon
    F_ES = ( gravitational_constant * earth_mass / ( numpy.linalg.norm(X_ES)**3) ) * X_ES
    # Bug fix: the Moon's pull must use the Moon's mass, not the Earth's.
    F_MS = ( gravitational_constant * moon_mass / ( numpy.linalg.norm(X_MS)**3) ) * X_MS
    return F_ES + F_MS
"""
-----------------------------------------------------------------------
- 2 - Problems
-----------------------------------------------------------------------
"""
# PROBLEM 1
# Modelise one revolution of the moon around the earth, assuming that
# the orbit is circular.
def orbit(num_steps):
    """Positions of one full circular Moon orbit sampled at num_steps + 1
    points (first and last coincide).  Uses the module-level moon_distance.
    Returns an (num_steps + 1, 2) array of x/y positions in metres."""
    x = numpy.zeros([num_steps + 1, 2])
    for i in range(num_steps + 1):
        angle = 2. * math.pi * i / num_steps
        x[i, 0] = moon_distance * math.cos(angle)
        x[i, 1] = moon_distance * math.sin(angle)
    return x
# PROBLEM 2
# Free fall at initial speed with initial angles.
def trajectory(time_steps, num_steps, g, initial_speed, inital_angles):
    """Projectile motion under constant gravity *g* via forward Euler,
    launched from the origin at *initial_speed* and angle *inital_angles*
    (radians).  Returns (x, v) arrays of shape (num_steps + 1, 2)."""
    # Renamed from `acceleration` to avoid shadowing the module function.
    gravity = numpy.array([0., -g])
    x = numpy.zeros([num_steps + 1, 2])  # m
    v = numpy.zeros([num_steps + 1, 2])  # m / s
    # Initial position and velocity.
    x[0, :] = [0., 0.]
    v[0, :] = [initial_speed * math.cos(inital_angles),
               initial_speed * math.sin(inital_angles)]
    for step in range(num_steps):
        # Forward Euler Method
        v[step + 1, :] = v[step] + time_steps * gravity
        x[step + 1, :] = x[step] + time_steps * v[step]
    return x, v
# PROBLEM 3
# Spaceship Acceleration
def acceleration(spaceship_position):
    """Acceleration of the spaceship due to Earth's gravity alone
    (Earth as a point mass at the origin): a = -G*M/r^3 * position.
    NOTE: this shadows the earlier two-argument `acceleration`."""
    r_cubed = numpy.linalg.norm(spaceship_position) ** 3
    a = numpy.zeros(2)  # m / s**2
    a[0] = - gravitational_constant * earth_mass * spaceship_position[0] / r_cubed
    a[1] = - gravitational_constant * earth_mass * spaceship_position[1] / r_cubed
    return a
# Trajectory of a spacecraft with the given initial position and velocity.
def ship_trajectory(time_steps, num_steps, x_init, v_init):
x = numpy.zeros([num_steps + 1, 2]) # m
v = numpy.zeros([num_steps + 1, 2]) # m / s
# init position and speed
x[0, :] = x_init
v[0, :] = v_init
for step in range(num_steps):
# Forward Euler Method
v[step + 1,:] = v[step] + time_steps*acceleration(x[step,:])
x[step + 1,:] = x[step] + time_steps*v[step]
return x, v
"""
-----------------------------------------------------------------------
- 3 - Plot
-----------------------------------------------------------------------
"""
# Exo 1 - plot sin(x) and cos(x)
def plot_sin_cos():
    """Render the precomputed sine and cosine samples (module-level x, sin_x, cos_x)."""
    for curve in (sin_x, cos_x):
        plt.plot(x, curve)
    plt.show()
# Exo 2 - plot Forward Euler Method
def plot_euler():
    """Two stacked panels of the Euler integration: height x(t) on top, velocity v(t) below."""
    height_panel = plt.subplot(211)
    plt.plot(t, x)
    velocity_panel = plt.subplot(212)
    plt.plot(t, v)
    height_panel.set_ylabel('Height in m')
    velocity_panel.set_ylabel('Velocity in m/s')
    velocity_panel.set_xlabel('Time in s')
    plt.show()
# Pb1 - plot moon orbit
def plot_orbit():
    """Equal-aspect plot of the Moon's orbit (module-level x holds the positions)."""
    plt.axis('equal')
    plt.plot(x[:, 0], x[:, 1])
    ax = plt.gca()
    ax.set_xlabel('Longitudinal position in m')
    ax.set_ylabel('Lateral position in m')
    plt.show()
# Pb2 - plot Earth free fall
def plot_earth_free_fall(time_steps, num_steps, earth_gravitation, initial_speed, inital_angles):
    """Overlay one free-fall trajectory per launch angle in ``inital_angles``."""
    for launch_angle in inital_angles:
        # Only the positions are plotted; the velocities are discarded.
        path, _ = trajectory(time_steps, num_steps, earth_gravitation, initial_speed, launch_angle)
        plt.plot(path[:, 0], path[:, 1])
    ax = plt.gca()
    ax.set_xlabel('x position in m')
    ax.set_ylabel('y position in m')
    plt.show()
# Pb3 - plot spaceship orbit
def plot_ship_trajectory():
    """Equal-aspect plot of the spacecraft path (module-level x), with a marker at Earth's origin."""
    plt.plot(x[:, 0], x[:, 1])
    plt.scatter(0, 0)
    plt.axis('equal')
    ax = plt.gca()
    ax.set_xlabel('Longitudinal position in m')
    ax.set_ylabel('Lateral position in m')
    plt.show()
"""
-----------------------------------------------------------------------
- Main
-----------------------------------------------------------------------
"""
# STEPS
num_steps = 50 # Max iteration number
time_steps = 0.1 # s
# EARTH and MOON DATA
# Mass
earth_mass = 5.97e24 # kg
moon_mass = 7.35e22 # kg
# Distance
moon_distance = 384e6 # m (mean Earth-Moon distance)
# Gravitation
gravitational_constant = 6.67e-11 # N m2 / kg2
earth_gravitation = 9.81 # m / s2 (surface gravity)
"""
-------------------- Exercices --------------------
"""
# Exo 1 - Cosinus and sinus
x, sin_x, cos_x = sin_cos(num_steps)
#plot_sin_cos() # uncomment for plotting
# Exo 2 - Forward Euler Method
t, x, v = forward_euler(time_steps, num_steps, earth_gravitation)
#plot_euler() # uncomment for plotting
"""
-------------------- Problems --------------------
"""
# pb1 - Moon orbit
x = orbit(num_steps)
#plot_orbit() # uncomment for plotting
# pb2 - Earth free fall
# Initial value
initial_speed = 20. # m / s
initial_angles = math.pi /180 * numpy.linspace(20., 70., 6) # six launch angles, 20..70 degrees, in radians
#plot_earth_free_fall(time_steps, num_steps, earth_gravitation, initial_speed, initial_angles) # uncomment for ploting
# pb3 - spaceship orbit
time_steps = 0.1 # s
num_steps = 130000 # long run: 13000 s of simulated flight
x_init = [15e6, 1e6] # initial position, m
v_init = [2e3, 4e3] # initial velocity, m / s
x, v = ship_trajectory(time_steps, num_steps, x_init, v_init)
plot_ship_trajectory() # comment out to skip plotting
|
|
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf numpy mathematical methods."""
import itertools
from absl.testing import parameterized
import numpy as np
from six.moves import range
import tensorflow.compat.v2 as tf
from trax.tf_numpy.numpy_impl import array_ops
from trax.tf_numpy.numpy_impl import arrays
from trax.tf_numpy.numpy_impl import math_ops
class MathTest(tf.test.TestCase, parameterized.TestCase):
  """Compares trax.tf_numpy math_ops results against reference NumPy."""
  def setUp(self):
    """Prepares the input converters and dtype set shared by all tests."""
    super().setUp()
    # Each transform produces an accepted input flavor (plain Python,
    # tf.Tensor, np.ndarray, tf-numpy ndarray) at several dtypes.
    self.array_transforms = [
        lambda x: x, # Identity,
        tf.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        array_ops.array,
        lambda x: array_ops.array(x, dtype=np.float32),
        lambda x: array_ops.array(x, dtype=np.float64),
    ]
    self.types = [np.int32, np.int64, np.float32, np.float64]
  def _testBinaryOp(self, math_fun, np_fun, name, operands=None,
                    extra_operands=None,
                    check_promotion=True,
                    check_promotion_result_type=True):
    """Checks a binary tf-numpy op against its NumPy reference.

    Args:
      math_fun: the tf-numpy binary op under test.
      np_fun: the reference NumPy implementation.
      name: op name, used only in assertion messages.
      operands: optional list of (lhs, rhs) pairs; a default set of
        scalar/vector/matrix combinations is used when None.
      extra_operands: additional (lhs, rhs) pairs run after `operands`.
      check_promotion: when False, mixed-dtype pairs are skipped.
      check_promotion_result_type: when False, result dtypes are not
        compared (NumPy's promotion rules can differ).
    """
    def run_test(a, b):
      for fn in self.array_transforms:
        arg1 = fn(a)
        arg2 = fn(b)
        self.match(
            math_fun(arg1, arg2),
            np_fun(arg1, arg2),
            msg='{}({}, {})'.format(name, arg1, arg2))
      # Tests type promotion
      for type_a in self.types:
        for type_b in self.types:
          if not check_promotion and type_a != type_b:
            continue
          arg1 = array_ops.array(a, dtype=type_a)
          arg2 = array_ops.array(b, dtype=type_b)
          self.match(
              math_fun(arg1, arg2),
              np_fun(arg1, arg2),
              msg='{}({}, {})'.format(name, arg1, arg2),
              check_dtype=check_promotion_result_type)
    if operands is None:
      operands = [(5, 2),
                  (5, [2, 3]),
                  (5, [[2, 3], [6, 7]]),
                  ([1, 2, 3], 7),
                  ([1, 2, 3], [5, 6, 7])]
    for operand1, operand2 in operands:
      run_test(operand1, operand2)
    if extra_operands is not None:
      for operand1, operand2 in extra_operands:
        run_test(operand1, operand2)
  def testDot(self):
    """Covers vector-matrix and batched (3-d x 3-d) dot products."""
    extra_operands = [
        ([1, 2], [[5, 6, 7], [8, 9, 10]]),
        (np.arange(2 * 3 * 5).reshape([2, 3, 5]).tolist(),
         np.arange(5 * 7 * 11).reshape([7, 5, 11]).tolist())]
    return self._testBinaryOp(
        math_ops.dot, np.dot, 'dot', extra_operands=extra_operands)
  def testMinimum(self):
    # The numpy version has strange result type when promotion happens,
    # so set check_promotion_result_type to False.
    return self._testBinaryOp(
        math_ops.minimum,
        np.minimum,
        'minimum',
        check_promotion_result_type=False)
  def testMaximum(self):
    # The numpy version has strange result type when promotion happens,
    # so set check_promotion_result_type to False.
    return self._testBinaryOp(
        math_ops.maximum,
        np.maximum,
        'maximum',
        check_promotion_result_type=False)
  def testMatmul(self):
    operands = [([[1, 2]], [[3, 4, 5], [6, 7, 8]])]
    return self._testBinaryOp(
        math_ops.matmul, np.matmul, 'matmul', operands=operands)
  def testMatmulError(self):
    """matmul must reject a scalar (rank-0) argument on either side."""
    with self.assertRaisesRegex(ValueError, r''):
      math_ops.matmul(
          array_ops.ones([], np.int32), array_ops.ones([2, 3], np.int32))
    with self.assertRaisesRegex(ValueError, r''):
      math_ops.matmul(
          array_ops.ones([2, 3], np.int32), array_ops.ones([], np.int32))
  def _testUnaryOp(self, math_fun, np_fun, name):
    """Compares a unary tf-numpy op with its NumPy reference on fixed data."""
    def run_test(a):
      for fn in self.array_transforms:
        arg1 = fn(a)
        self.match(math_fun(arg1), np_fun(arg1),
                   msg='{}({})'.format(name, arg1))
    run_test(5)
    run_test([2, 3])
    run_test([[2, -3], [-6, 7]])
  def testLog(self):
    self._testUnaryOp(math_ops.log, np.log, 'log')
  def testExp(self):
    self._testUnaryOp(math_ops.exp, np.exp, 'exp')
  def testTanh(self):
    self._testUnaryOp(math_ops.tanh, np.tanh, 'tanh')
  def testSqrt(self):
    self._testUnaryOp(math_ops.sqrt, np.sqrt, 'sqrt')
  def match(self, actual, expected, msg='', check_dtype=True):
    """Asserts `actual` is a tf-numpy ndarray equal to `expected`.

    Compares dtype (unless check_dtype is False), shape, and values
    (to np.testing.assert_almost_equal's default precision).
    """
    self.assertIsInstance(actual, arrays.ndarray)
    if check_dtype:
      self.assertEqual(
          actual.dtype, expected.dtype,
          'Dtype mismatch.\nActual: {}\nExpected: {}\n{}'.format(
              actual.dtype, expected.dtype, msg))
    self.assertEqual(
        actual.shape, expected.shape,
        'Shape mismatch.\nActual: {}\nExpected: {}\n{}'.format(
            actual.shape, expected.shape, msg))
    np.testing.assert_almost_equal(actual.tolist(), expected.tolist())
  def testArgsort(self):
    self._testUnaryOp(math_ops.argsort, np.argsort, 'argsort')
    # Test stability
    r = np.arange(100)
    a = np.zeros(100)
    np.testing.assert_equal(math_ops.argsort(a, kind='stable'), r)
  def testArgMaxArgMin(self):
    """Checks argmax/argmin over all ranks up to 3 and every valid axis."""
    data = [
        0,
        5,
        [1],
        [1, 2, 3],
        [[1, 2, 3]],
        [[4, 6], [7, 8]],
        [[[4, 6], [9, 10]], [[7, 8], [12, 34]]],
    ]
    for fn, d in itertools.product(self.array_transforms, data):
      arr = fn(d)
      self.match(math_ops.argmax(arr), np.argmax(arr))
      self.match(math_ops.argmin(arr), np.argmin(arr))
      if hasattr(arr, 'shape'):
        ndims = len(arr.shape)
      else:
        ndims = array_ops.array(arr, copy=False).ndim
      if ndims == 0:
        # Numpy flattens the scalar ndarray and treats it as a 1-d array of
        # size 1.
        ndims = 1
      for axis in range(-ndims, ndims):
        self.match(math_ops.argmax(arr, axis=axis), np.argmax(arr, axis=axis))
        self.match(math_ops.argmin(arr, axis=axis), np.argmin(arr, axis=axis))
  @parameterized.parameters([False, True])
  def testIsCloseEqualNan(self, equal_nan):
    """isclose must follow NumPy's NaN handling for both equal_nan settings."""
    a = np.asarray([1, 1, np.nan, 1, np.nan], np.float32)
    b = np.asarray([1, 2, 1, np.nan, np.nan], np.float32)
    self.match(
        math_ops.isclose(a, b, equal_nan=equal_nan),
        np.isclose(a, b, equal_nan=equal_nan))
  def testAverageWrongShape(self):
    """average must reject weights whose shape is incompatible with the data."""
    with self.assertRaisesWithPredicateMatch(
        tf.errors.InvalidArgumentError, r''):
      math_ops.average(np.ones([2, 3]), weights=np.ones([2, 4]))
    with self.assertRaisesWithPredicateMatch(
        tf.errors.InvalidArgumentError, r''):
      math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([2, 4]))
    with self.assertRaisesWithPredicateMatch(
        tf.errors.InvalidArgumentError, r''):
      math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([]))
    with self.assertRaisesWithPredicateMatch(
        tf.errors.InvalidArgumentError, r''):
      math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([5]))
  def testClip(self):
    """Checks clip values (not dtypes) for scalar, vector and matrix bounds."""
    def run_test(arr, *args, **kwargs):
      check_dtype = kwargs.pop('check_dtype', True)
      for fn in self.array_transforms:
        arr = fn(arr)
        self.match(
            math_ops.clip(arr, *args, **kwargs),
            np.clip(arr, *args, **kwargs),
            check_dtype=check_dtype)
    # NumPy exhibits weird typing behavior when a/a_min/a_max are scalars v/s
    # lists, e.g.,
    #
    # np.clip(np.array(0, dtype=np.int32), -5, 5).dtype == np.int64
    # np.clip(np.array([0], dtype=np.int32), -5, 5).dtype == np.int32
    # np.clip(np.array([0], dtype=np.int32), [-5], [5]).dtype == np.int64
    #
    # So we skip matching type. In tf-numpy the type of the output array is
    # always the same as the input array.
    run_test(0, -1, 5, check_dtype=False)
    run_test(-1, -1, 5, check_dtype=False)
    run_test(5, -1, 5, check_dtype=False)
    run_test(-10, -1, 5, check_dtype=False)
    run_test(10, -1, 5, check_dtype=False)
    run_test(10, None, 5, check_dtype=False)
    run_test(10, -1, None, check_dtype=False)
    run_test([0, 20, -5, 4], -1, 5, check_dtype=False)
    run_test([0, 20, -5, 4], None, 5, check_dtype=False)
    run_test([0, 20, -5, 4], -1, None, check_dtype=False)
    run_test([0.5, 20.2, -5.7, 4.4], -1.5, 5.1, check_dtype=False)
    run_test([0, 20, -5, 4], [-5, 0, -5, 0], [0, 5, 0, 5], check_dtype=False)
    run_test([[1, 2, 3], [4, 5, 6]], [2, 0, 2], 5, check_dtype=False)
    run_test([[1, 2, 3], [4, 5, 6]], 0, [5, 3, 1], check_dtype=False)
  def testPtp(self):
    """Peak-to-peak (max - min) over the whole array and along each axis."""
    def run_test(arr, *args, **kwargs):
      for fn in self.array_transforms:
        arg = fn(arr)
        self.match(
            math_ops.ptp(arg, *args, **kwargs), np.ptp(arg, *args, **kwargs))
    run_test([1, 2, 3])
    run_test([1., 2., 3.])
    run_test([[1, 2], [3, 4]], axis=1)
    run_test([[1, 2], [3, 4]], axis=0)
    run_test([[1, 2], [3, 4]], axis=-1)
    run_test([[1, 2], [3, 4]], axis=-2)
  def testLinSpace(self):
    """linspace over every pairwise combination of input flavors."""
    array_transforms = [
        lambda x: x, # Identity,
        tf.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        array_ops.array,
        lambda x: array_ops.array(x, dtype=np.float32),
        lambda x: array_ops.array(x, dtype=np.float64)
    ]
    def run_test(start, stop, **kwargs):
      for fn1 in array_transforms:
        for fn2 in array_transforms:
          arg1 = fn1(start)
          arg2 = fn2(stop)
          self.match(
              math_ops.linspace(arg1, arg2, **kwargs),
              np.linspace(arg1, arg2, **kwargs),
              msg='linspace({}, {})'.format(arg1, arg2))
    run_test(0, 1)
    run_test(0, 1, num=10)
    run_test(0, 1, endpoint=False)
    run_test(0, -1)
    run_test(0, -1, num=10)
    run_test(0, -1, endpoint=False)
  def testLogSpace(self):
    """logspace over every pairwise combination of input flavors."""
    array_transforms = [
        lambda x: x, # Identity,
        tf.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        array_ops.array,
        lambda x: array_ops.array(x, dtype=np.float32),
        lambda x: array_ops.array(x, dtype=np.float64)
    ]
    def run_test(start, stop, **kwargs):
      for fn1 in array_transforms:
        for fn2 in array_transforms:
          arg1 = fn1(start)
          arg2 = fn2(stop)
          self.match(
              math_ops.logspace(arg1, arg2, **kwargs),
              np.logspace(arg1, arg2, **kwargs),
              msg='logspace({}, {})'.format(arg1, arg2))
    run_test(0, 5)
    run_test(0, 5, num=10)
    run_test(0, 5, endpoint=False)
    run_test(0, 5, base=2.0)
    run_test(0, -5)
    run_test(0, -5, num=10)
    run_test(0, -5, endpoint=False)
    run_test(0, -5, base=2.0)
if __name__ == '__main__':
  # tf-numpy requires eager mode; enable it before running the test suite.
  tf.compat.v1.enable_eager_execution()
  tf.test.main()
|
|
# coding: utf-8
# ## Introduction
#
# I obtained this Fort Lauderdale Police Department data from the City of Fort Lauderdale via the [Fort Lauderdale Civic Hackathon](http://ftlcivichackathon.com/).
#
# See my blog post about [my participation in the hackathon]({filename}../hackathons/code-for-ftl-hackathon.rst).
#
# This is my first Pelican blog post using a Jupyter notebook made possible by [this plug-in](https://github.com/danielfrg/pelican-ipynb).
# ### Use this notebook.
#
# This notebook and Docker, and Docker Compose files are available in [this GitHub repo](https://github.com/dm-wyncode/docker-mongo-flpd-hackathon-data).
#
# [Download most current version of this notebook from my blog repository](https://github.com/dm-wyncode/zipped-code/blob/master/content/posts/python-mongodb/set_creation_speed_test.ipynb).
# *Import some tools from Python standard library.*
# In[53]:
import logging
import timeit
# *Use [PyMongo](https://api.mongodb.com/python/current/) Python distribution containing tools for working with MongoDB.*
# In[54]:
from pymongo import MongoClient
from bson.son import SON
# *Import some Jupyter notebook related objects to display HTML in the notebook.*
# In[55]:
from IPython.core.display import HTML, display
from ipy_table import render, make_table, set_column_style
# *Use Python logging module to log info in this notebook.*
# In[56]:
# Module-level logger; DEBUG so every message in this notebook is shown.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# seems IPython adds a stream handler. Might as well use it.
stream_handler = logger.handlers.pop(0) if logger.handlers else logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
assert logger.handlers
logger.debug("The logger is working.")
# *Define text formatting constants.*
# In[57]:
(
    CR,
    SPACE,
    COLON_SPACE,
    COMMA,
) = (
    '\n',
    ' ',
    ': ',
    ',',
)
# *Instantiate a MongoDB client (default host/port).*
# In[58]:
client = MongoClient()
# *Define a database of interest.*
#
# 'app' is the name of the MongoDB database set in [this line of the Dockerfile](https://github.com/dm-wyncode/docker-mongo-flpd-hackathon-data/blob/5905ebd5e0ba5edf1c92a6b81b521a5fd75db462/mongo-seed/Dockerfile#L6) and [this line of the Dockerfile](https://github.com/dm-wyncode/docker-mongo-flpd-hackathon-data/blob/5905ebd5e0ba5edf1c92a6b81b521a5fd75db462/mongo-seed/Dockerfile#L13) when the database was seeded.
# In[59]:
db = client.app
# *Define constants.*
# In[60]:
collection_names = (
    CITATIONS,
    ACCIDENTS,
) = (
    'citations',
    'accidents',
)
# *Define collections via Python built-in function [getattr](http://www.diveintopython.net/power_of_introspection/getattr.html).*
# In[61]:
# NOTE(review): the name `collections` shadows the stdlib `collections` module
# for the rest of this script.
collections = citations, accidents = [
    getattr(db, database_name)
    for database_name
    in collection_names
]
# ## How many records are in each collection?
#
# *I recognize this code may be hard to read for those unfamiliar with [list comprehensions](https://en.wikipedia.org/wiki/List_comprehension). It's a one-off comprehension so I am not going to take extra time to refactor it. It runs once and serves its logging purpose.*
# In[62]:
# Log "<collection>: <count> records" for each collection, one per line.
logger.info(
    CR.join(SPACE.join((COLON_SPACE.join((
        collection_name,
        format(collection.count(), COMMA),
    )), 'records'))
            for collection_name, collection
            in zip(collection_names, collections))
)
# *Define the field of interest that is in the citations collection.*
# In[63]:
CITATION_CHARGE = 'Citation Charge'
# ## Set up the comparison.
#
# MongoDB is still new to me so I consulted [this StackOverflow post](http://stackoverflow.com/questions/28013318/mongo-count-occurrences-of-each-value-for-a-set-of-documents) to learn how to count occurrences of a value in a set of documents.
# ### Define variables.
# *Define what I am calling 'dollar_keys' for a pipeline.*
# In[64]:
dollar_keys = (
    'group',
    CITATION_CHARGE,
    'sum',
    'sort',
)
# *Add '$' in front of dollar_keys constants and define them. They will be used in a pipeline created later.*
# In[65]:
dollar_keys_ = (
    _GROUP,
    _CITATION_CHARGE,
    _SUM,
    _SORT,
) = [
    '${}'.format(dollar_key) for dollar_key in dollar_keys
]
logger.info(dollar_keys_)
# *Define some constants for the pipeline that do not have '$' prefix.*
# In[66]:
(
    _ID,
    COUNT,
) = (
    '_id',
    'count',
)
# *Define the pipeline for the MongoDb query: group by charge, count, then sort by count (desc) and _id.*
# *Recall that SON was imported from bson.son package. [See the bson docs](https://api.mongodb.com/python/current/api/bson/).*
# In[67]:
pipeline = [
    {_GROUP: {_ID: _CITATION_CHARGE, COUNT: {_SUM: 1}}},
    {_SORT: SON([(COUNT, -1), (_ID, -1)])},
]
logger.info(pipeline)
# In[68]:
# Materialize (count, lowercased charge) pairs for the display table below.
aggregated_data = list(
    (citation[COUNT], citation[_ID].lower())
    for citation
    in citations.aggregate(pipeline)
)
# *Define a new pipeline without counts. Only Citation Charges are retrieved and sorted (ascending by _id).*
# In[69]:
pipeline = [
    {_GROUP: {_ID: _CITATION_CHARGE, COUNT: {_SUM: 1}}},
    {_SORT: SON([(_ID, 1)])}
]
#
# ### Define functions to use in [timeit](https://docs.python.org/2/library/timeit.html).
#
# * `citations` is the MongoDB collection.
# * Calling `find` with `citations.find()` returns all the records.
# * The {} is a list comprehension literal in Python that creates a [set](https://docs.python.org/2/library/stdtypes.html#set).
# * `sorted` is a [built-in function in Python](https://docs.python.org/2/library/functions.html#sorted) that returns a sorted list from certain iterables.
# In[70]:
def test_sorted_set():
    """Collect the distinct citation charges client-side and sort them in Python."""
    distinct_charges = set()
    for document in citations.find():
        distinct_charges.add(document[CITATION_CHARGE])
    return sorted(distinct_charges)
# *Use MongoDb `aggregate`.*
# In[71]:
def test_aggregation():
    """Let MongoDB group and sort the citation charges server-side."""
    return [document['_id'] for document in citations.aggregate(pipeline)]
# ### Test that the functions return the same data.
# In[72]:
# Bug fix: `test_find` is never defined anywhere in this notebook (calling it
# raised NameError); the function being compared against the aggregation is
# `test_sorted_set`.
assert test_sorted_set() == test_aggregation(), "The sets of sorted citations differ."
# An AssertionError is thrown if the assertion fails.
logger.info("The set of citations obtained via 2 different ways have produced equal sets.")
# *Define some constants.*
# In[73]:
SECONDS = 'seconds'
LOOPS = 10 # iterations per timeit trial
# NOTE(review): hard-coded record count used only in log messages; presumably
# matches citations.count() at the time of writing — verify before reuse.
RECORD_COUNT = '92,912'
# ### Time MongoDB `aggregate` repeated 3 times with 10 loops and log it.
# In[74]:
logger.info(
    CR.join([SPACE.join((
        str(trial_time),
        SECONDS,
        "for MongoDB to aggregate data on {} records.".format(RECORD_COUNT)
    ))
             for trial_time in
             timeit.repeat(
                 "test_aggregation()",
                 setup="from __main__ import test_aggregation",
                 number=LOOPS
             )])
)
# ### Time Python built-in functions `sorted` and `set` repeated 3 times with 10 loops and log it.
# In[75]:
logger.info(
    CR.join([SPACE.join((
        str(trial_time),
        SECONDS,
        "for Python to aggregate data on {} records using a sorted set.".format(RECORD_COUNT)
    ))
             for trial_time in
             timeit.repeat(
                 "test_sorted_set()",
                 setup="from __main__ import test_sorted_set",
                 number=LOOPS
             )])
)
# ### It is safe to say that MongoDB is optimized for sorting sets of large data and is approximately 10 times faster than the Python built-in functions here.
#
# Learning to use capabilities of a database like MongoDB has its speed benefits.
# ## In-notebook display of the citations and their counts.
# *Replace empty citations with "None specified." for display in an HTML table in this notebook.*
# In[76]:
# Replace empty citation strings with a readable placeholder for display.
citation_counts = [
    (
        count,
        citation or "None specified",
    ) for count, citation in aggregated_data
]
# ### NB: A Pelican plug-in I am using to create posts with notebooks is adding extra ';' characters likely secondary to HTML escaping during rendering.
#
# It is still easier on the eyes than reading a list of tuples!
#
# The following code is particular to [Jupyter](http://jupyter.org/) notebooks for displaying HTML.
# In[77]:
# add headers
citation_counts_ = [('Count', CITATION_CHARGE, ), ] + citation_counts
# create table with make_table
make_table(citation_counts_)
# apply some styles to the table after it is created
set_column_style(0, width='100', bold=True, color='hsla(225, 80%, 94%, 1)')
set_column_style(1, width='100')
# render() returns the ipy_table; display its HTML representation inline.
display(HTML(render()._repr_html_()))
# ## There are other blog posts about this dataset.
# * [Docker Compose container version of the data]({filename}../hackathons/dockerized-flpd-data.rst)
# * [Experimentation with CSV files]({filename}../hackathons/flpd_data_01.rst).
# ## resources
#
# * [_Talk Python to Me_ podcast about PyMongo](https://talkpython.fm/episodes/show/2/python-and-mongodb)
# * [PyMongo tutorial](https://api.mongodb.com/python/current/tutorial.html)
|
|
##
# @file weighted_average_wirelength_unitest.py
# @author Yibo Lin
# @date Mar 2019
#
import os
import sys
import time
import numpy as np
import unittest
#import pickle
import gzip
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from dreamplace.ops.weighted_average_wirelength import weighted_average_wirelength
sys.path.pop()
import pdb
import torch
from torch.autograd import Function, Variable
def unsorted_segment_sum(pin_x, pin2net_map, num_nets):
    """Sum per-pin values into per-net buckets.

    Improvement: the O(#pins) Python loop is replaced by np.add.at, an
    unbuffered in-place accumulation that handles repeated indices
    correctly and runs at C speed. Result values are identical.

    Args:
        pin_x: 1D array of per-pin values.
        pin2net_map: per-pin net index (same length as pin_x).
        num_nets: number of nets (output length).

    Returns:
        1D array of length num_nets with the same dtype as pin_x.
    """
    result = np.zeros(num_nets, dtype=pin_x.dtype)
    np.add.at(result, pin2net_map, pin_x)
    return result
def build_wirelength(pin_x, pin_y, pin2net_map, net2pin_map, gamma,
                     ignore_net_degree, net_weights):
    """Golden (numpy) model of the weighted-average wirelength.

    Per net and per axis the smooth WL is
        sum(x*exp(x/gamma))/sum(exp(x/gamma))
      - sum(x*exp(-x/gamma))/sum(exp(-x/gamma)).

    Fixes/improvements over the previous version: the dead no-op
    self-assignments (`sum_exp_pin_x = sum_exp_pin_x`, ...) are removed,
    and the segment sums are vectorized with np.add.at in a local helper
    instead of a per-pin Python loop. Results are unchanged.

    Args:
        pin_x, pin_y: 1D arrays of pin coordinates.
        pin2net_map: per-pin net index.
        net2pin_map: sequence of per-net pin-index arrays.
        gamma: smoothing parameter of the log-sum-exp approximation.
        ignore_net_degree: nets with at least this many pins contribute 0.
        net_weights: per-net multiplicative weights.

    Returns:
        Scalar total wirelength (weighted sum over nets).
    """
    num_nets = len(net2pin_map)

    def _segment_sum(values):
        # Per-net accumulation; np.add.at adds unbuffered, so repeated
        # net indices accumulate correctly.
        totals = np.zeros(num_nets, dtype=values.dtype)
        np.add.at(totals, pin2net_map, values)
        return totals

    scaled_x = pin_x / gamma
    scaled_y = pin_y / gamma
    exp_x, nexp_x = np.exp(scaled_x), np.exp(-scaled_x)
    exp_y, nexp_y = np.exp(scaled_y), np.exp(-scaled_y)
    wl = (_segment_sum(pin_x * exp_x) / _segment_sum(exp_x)
          - _segment_sum(pin_x * nexp_x) / _segment_sum(nexp_x)
          + _segment_sum(pin_y * exp_y) / _segment_sum(exp_y)
          - _segment_sum(pin_y * nexp_y) / _segment_sum(nexp_y))
    # High-degree nets are excluded from the objective.
    for i in range(num_nets):
        if len(net2pin_map[i]) >= ignore_net_degree:
            wl[i] = 0
    wl *= net_weights
    return np.sum(wl)
class WeightedAverageWirelengthOpTest(unittest.TestCase):
    """Validates the weighted-average wirelength op across its backends.

    One small random netlist (5 pins, 2 nets) is evaluated with the CPU
    'net-by-net', 'atomic', and 'merged' algorithms, and with their CUDA
    counterparts when a GPU is present. Forward values must match the
    numpy golden model (build_wirelength); every gradient must match the
    CPU net-by-net gradient.
    """
    def test_weighted_average_wirelength_random(self):
        """End-to-end forward/backward check on a fixed 5-pin, 2-net case."""
        dtype = torch.float32
        pin_pos = np.array(
            [[0.0, 0.0], [1.0, 2.0], [1.5, 0.2], [0.5, 3.1], [0.6, 1.1]],
            dtype=np.float32)
        net2pin_map = np.array([np.array([0, 4]), np.array([1, 2, 3])])
        # Invert net2pin_map into a per-pin net index.
        pin2net_map = np.zeros(len(pin_pos), dtype=np.int32)
        for net_id, pins in enumerate(net2pin_map):
            for pin in pins:
                pin2net_map[pin] = net_id
        net_weights = np.array([1, 2], dtype=np.float32)
        pin_x = pin_pos[:, 0]
        pin_y = pin_pos[:, 1]
        gamma = 0.5
        ignore_net_degree = 4
        pin_mask = np.zeros(len(pin2net_map), dtype=np.uint8)
        # net mask
        net_mask = np.ones(len(net2pin_map), dtype=np.uint8)
        for i in range(len(net2pin_map)):
            if len(net2pin_map[i]) >= ignore_net_degree:
                net_mask[i] = 0
        # construct flat_net2pin_map and flat_net2pin_start_map
        # flat netpin map, length of #pins
        flat_net2pin_map = np.zeros(len(pin_pos), dtype=np.int32)
        # starting index in netpin map for each net, length of #nets+1, the last entry is #pins
        flat_net2pin_start_map = np.zeros(len(net2pin_map) + 1, dtype=np.int32)
        count = 0
        for i in range(len(net2pin_map)):
            flat_net2pin_map[count:count +
                             len(net2pin_map[i])] = net2pin_map[i]
            flat_net2pin_start_map[i] = count
            count += len(net2pin_map[i])
        flat_net2pin_start_map[len(net2pin_map)] = len(pin_pos)
        print("flat_net2pin_map = ", flat_net2pin_map)
        print("flat_net2pin_start_map = ", flat_net2pin_start_map)
        # Reference value from the numpy golden model.
        golden_value = np.array([
            build_wirelength(pin_x, pin_y, pin2net_map, net2pin_map, gamma,
                             ignore_net_degree, net_weights)
        ])
        print("golden_value = ", golden_value)
        print(np.transpose(pin_pos))
        # The op consumes a flat [x0..xn, y0..yn] coordinate vector.
        pin_pos_var = Variable(torch.tensor(np.transpose(pin_pos),
                                            dtype=dtype).reshape([-1]),
                               requires_grad=True)
        #pin_pos_var = torch.nn.Parameter(torch.from_numpy(np.transpose(pin_pos)).reshape([-1]))
        print(pin_pos_var)
        # clone is very important, because the custom op cannot deep copy the data
        # test cpu net-by-net
        custom = weighted_average_wirelength.WeightedAverageWirelength(
            flat_netpin=torch.from_numpy(flat_net2pin_map),
            netpin_start=torch.from_numpy(flat_net2pin_start_map),
            pin2net_map=torch.from_numpy(pin2net_map),
            net_weights=torch.from_numpy(net_weights),
            net_mask=torch.from_numpy(net_mask),
            pin_mask=torch.from_numpy(pin_mask),
            gamma=torch.tensor(gamma, dtype=dtype),
            algorithm='net-by-net')
        result = custom.forward(pin_pos_var)
        print("custom = ", result)
        result.backward()
        # This gradient is the reference all other backends are compared to.
        grad = pin_pos_var.grad.clone()
        print("custom_grad = ", grad)
        np.testing.assert_allclose(result.data.numpy(),
                                   golden_value,
                                   atol=1e-6)
        # test cpu atomic
        pin_pos_var.grad.zero_()
        custom = weighted_average_wirelength.WeightedAverageWirelength(
            flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)),
            netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)),
            pin2net_map=torch.from_numpy(pin2net_map),
            net_weights=torch.from_numpy(net_weights),
            net_mask=torch.from_numpy(net_mask),
            pin_mask=torch.from_numpy(pin_mask),
            gamma=torch.tensor(gamma, dtype=dtype),
            algorithm='atomic')
        result = custom.forward(pin_pos_var)
        print("custom_cpu_result atomic = ", result.data)
        result.backward()
        grad_atomic = pin_pos_var.grad.clone()
        print("custom_grad_cpu atomic = ", grad_atomic.data)
        np.testing.assert_allclose(result.data.numpy(),
                                   golden_value,
                                   atol=1e-6)
        np.testing.assert_allclose(grad_atomic.data.numpy(),
                                   grad.data.numpy(),
                                   rtol=1e-6,
                                   atol=1e-6)
        # test cpu merged
        pin_pos_var.grad.zero_()
        custom = weighted_average_wirelength.WeightedAverageWirelength(
            flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)),
            netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)),
            pin2net_map=torch.from_numpy(pin2net_map),
            net_weights=torch.from_numpy(net_weights),
            net_mask=torch.from_numpy(net_mask),
            pin_mask=torch.from_numpy(pin_mask),
            gamma=torch.tensor(gamma, dtype=dtype),
            algorithm='merged')
        result = custom.forward(pin_pos_var)
        print("custom_cpu_result merged = ", result.data)
        result.backward()
        grad_merged = pin_pos_var.grad.clone()
        print("custom_grad_cpu merged = ", grad_merged.data)
        np.testing.assert_allclose(result.data.numpy(),
                                   golden_value,
                                   atol=1e-6)
        np.testing.assert_allclose(grad_merged.data.numpy(),
                                   grad.data.numpy(),
                                   rtol=1e-6,
                                   atol=1e-6)
        # test gpu
        if torch.cuda.device_count():
            pin_pos_var.grad.zero_()
            custom_cuda = weighted_average_wirelength.WeightedAverageWirelength(
                flat_netpin=Variable(
                    torch.from_numpy(flat_net2pin_map)).cuda(),
                netpin_start=Variable(
                    torch.from_numpy(flat_net2pin_start_map)).cuda(),
                pin2net_map=torch.from_numpy(pin2net_map).cuda(),
                net_weights=torch.from_numpy(net_weights).cuda(),
                net_mask=torch.from_numpy(net_mask).cuda(),
                pin_mask=torch.from_numpy(pin_mask).cuda(),
                gamma=torch.tensor(gamma, dtype=dtype).cuda(),
                algorithm='net-by-net')
            result_cuda = custom_cuda.forward(pin_pos_var.cuda())
            print("custom_cuda_result = ", result_cuda.data.cpu())
            result_cuda.backward()
            grad_cuda = pin_pos_var.grad.clone()
            print("custom_grad_cuda = ", grad_cuda.data.cpu())
            np.testing.assert_allclose(result_cuda.data.cpu().numpy(),
                                       golden_value,
                                       atol=1e-6)
            np.testing.assert_allclose(grad_cuda.data.cpu().numpy(),
                                       grad.data.numpy(),
                                       rtol=1e-6,
                                       atol=1e-6)
        # test gpu atomic
        if torch.cuda.device_count():
            pin_pos_var.grad.zero_()
            custom_cuda = weighted_average_wirelength.WeightedAverageWirelength(
                flat_netpin=Variable(
                    torch.from_numpy(flat_net2pin_map)).cuda(),
                netpin_start=Variable(
                    torch.from_numpy(flat_net2pin_start_map)).cuda(),
                pin2net_map=torch.from_numpy(pin2net_map).cuda(),
                net_weights=torch.from_numpy(net_weights).cuda(),
                net_mask=torch.from_numpy(net_mask).cuda(),
                pin_mask=torch.from_numpy(pin_mask).cuda(),
                gamma=torch.tensor(gamma, dtype=dtype).cuda(),
                algorithm='atomic')
            result_cuda = custom_cuda.forward(pin_pos_var.cuda())
            print("custom_cuda_result atomic = ", result_cuda.data.cpu())
            result_cuda.backward()
            grad_cuda = pin_pos_var.grad.clone()
            print("custom_grad_cuda atomic = ", grad_cuda.data.cpu())
            np.testing.assert_allclose(result_cuda.data.cpu().numpy(),
                                       golden_value,
                                       atol=1e-6)
            np.testing.assert_allclose(grad_cuda.data.cpu().numpy(),
                                       grad.data.numpy(),
                                       rtol=1e-6,
                                       atol=1e-6)
        # test gpu merged
        if torch.cuda.device_count():
            pin_pos_var.grad.zero_()
            custom_cuda = weighted_average_wirelength.WeightedAverageWirelength(
                flat_netpin=Variable(
                    torch.from_numpy(flat_net2pin_map)).cuda(),
                netpin_start=Variable(
                    torch.from_numpy(flat_net2pin_start_map)).cuda(),
                pin2net_map=torch.from_numpy(pin2net_map).cuda(),
                net_weights=torch.from_numpy(net_weights).cuda(),
                net_mask=torch.from_numpy(net_mask).cuda(),
                pin_mask=torch.from_numpy(pin_mask).cuda(),
                gamma=torch.tensor(gamma, dtype=dtype).cuda(),
                algorithm='merged')
            result_cuda = custom_cuda.forward(pin_pos_var.cuda())
            print("custom_cuda_result merged = ", result_cuda.data.cpu())
            result_cuda.backward()
            grad_cuda = pin_pos_var.grad.clone()
            print("custom_grad_cuda merged = ", grad_cuda.data.cpu())
            np.testing.assert_allclose(result_cuda.data.cpu().numpy(),
                                       golden_value,
                                       atol=1e-6)
            np.testing.assert_allclose(grad_cuda.data.cpu().numpy(),
                                       grad.data.numpy(),
                                       rtol=1e-6,
                                       atol=1e-6)
def eval_runtime(design):
    """Benchmark forward+backward of the wirelength operator variants.

    ``design`` is the path to a gzipped pickle (e.g. adaptec1_wirelength.pklz)
    holding the flat net-to-pin maps, masks and gamma of one placement
    benchmark.  Prints the average per-iteration runtime of the CPU
    net-by-net/atomic implementations and the CUDA
    net-by-net/atomic/sparse implementations.
    """
    # e.g,. adaptec1_wirelength.pklz
    with gzip.open(design, "rb") as f:
        flat_net2pin_map, flat_net2pin_start_map, pin2net_map, net_mask, pin_mask, gamma = pickle.load(
            f)
    dtype = torch.float64
    # Empty tensor means "no per-net weights" for the operator.
    net_weights = torch.Tensor()
    # Random pin positions in [0, 1000); requires_grad so backward() can
    # be timed together with forward().
    pin_pos_var = Variable(torch.empty(len(pin2net_map) * 2,
                                       dtype=dtype).uniform_(0, 1000),
                           requires_grad=True)
    # One operator instance per (device, algorithm) combination under test.
    custom_net_by_net_cpu = weighted_average_wirelength.WeightedAverageWirelength(
        flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)),
        netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)),
        pin2net_map=torch.from_numpy(pin2net_map),
        net_weights=net_weights,
        net_mask=torch.from_numpy(net_mask),
        pin_mask=torch.from_numpy(pin_mask),
        gamma=torch.tensor(gamma, dtype=dtype),
        algorithm='net-by-net')
    custom_atomic_cpu = weighted_average_wirelength.WeightedAverageWirelength(
        flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)),
        netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)),
        pin2net_map=torch.from_numpy(pin2net_map),
        net_weights=net_weights,
        net_mask=torch.from_numpy(net_mask),
        pin_mask=torch.from_numpy(pin_mask),
        gamma=torch.tensor(gamma, dtype=dtype),
        algorithm='atomic')
    custom_net_by_net = weighted_average_wirelength.WeightedAverageWirelength(
        flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)).cuda(),
        netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)).cuda(),
        pin2net_map=torch.from_numpy(pin2net_map).cuda(),
        net_weights=net_weights.cuda(),
        net_mask=torch.from_numpy(net_mask).cuda(),
        pin_mask=torch.from_numpy(pin_mask).cuda(),
        gamma=torch.tensor(gamma, dtype=dtype).cuda(),
        algorithm='net-by-net')
    custom_atomic = weighted_average_wirelength.WeightedAverageWirelength(
        flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)).cuda(),
        netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)).cuda(),
        pin2net_map=torch.from_numpy(pin2net_map).cuda(),
        net_weights=net_weights.cuda(),
        net_mask=torch.from_numpy(net_mask).cuda(),
        pin_mask=torch.from_numpy(pin_mask).cuda(),
        gamma=torch.tensor(gamma, dtype=dtype).cuda(),
        algorithm='atomic')
    custom_sparse = weighted_average_wirelength.WeightedAverageWirelength(
        flat_netpin=Variable(torch.from_numpy(flat_net2pin_map)).cuda(),
        netpin_start=Variable(torch.from_numpy(flat_net2pin_start_map)).cuda(),
        pin2net_map=torch.from_numpy(pin2net_map).cuda(),
        net_weights=net_weights.cuda(),
        net_mask=torch.from_numpy(net_mask).cuda(),
        pin_mask=torch.from_numpy(pin_mask).cuda(),
        gamma=torch.tensor(gamma, dtype=dtype).cuda(),
        algorithm='sparse')
    # Drain any pending GPU work before the timers start; CUDA launches
    # are asynchronous, so timings bracket explicit synchronize() calls.
    torch.cuda.synchronize()
    iters = 100
    tt = time.time()
    for i in range(iters):
        result = custom_net_by_net_cpu.forward(pin_pos_var)
        result.backward()
    torch.cuda.synchronize()
    print("custom_net_by_net cpu takes %.3f ms" %
          ((time.time() - tt) / iters * 1000))
    tt = time.time()
    for i in range(iters):
        result = custom_atomic_cpu.forward(pin_pos_var)
        result.backward()
    torch.cuda.synchronize()
    print("custom_atomic cpu takes %.3f ms" %
          ((time.time() - tt) / iters * 1000))
    # Move pin positions to the GPU for the CUDA variants.
    pin_pos_var = pin_pos_var.cuda()
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(iters):
        result = custom_net_by_net.forward(pin_pos_var)
        result.backward()
    torch.cuda.synchronize()
    print("custom_net_by_net takes %.3f ms" %
          ((time.time() - tt) / iters * 1000))
    tt = time.time()
    for i in range(iters):
        result = custom_atomic.forward(pin_pos_var)
        result.backward()
    torch.cuda.synchronize()
    print("custom_atomic takes %.3f ms" % ((time.time() - tt) / iters * 1000))
    tt = time.time()
    for i in range(iters):
        result = custom_sparse.forward(pin_pos_var)
        result.backward()
    torch.cuda.synchronize()
    print("custom_sparse takes %.3f ms" % ((time.time() - tt) / iters * 1000))
if __name__ == '__main__':
    # With no CLI argument, run the unit tests; otherwise treat the first
    # argument as a pickled design file and benchmark the operators on it.
    if len(sys.argv) < 2:
        unittest.main()
    else:
        design = sys.argv[1]
        eval_runtime(design)
|
|
import csv
import json
import re
from datetime import datetime, timedelta
from django.http import Http404, JsonResponse, StreamingHttpResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from django.views.generic import DetailView, ListView
from django.views.generic.edit import ModelFormMixin, ProcessFormView
from django_prbac.utils import has_privilege
from memoized import memoized
from requests import RequestException
from corehq import privileges, toggles
from corehq.apps.domain.decorators import login_or_api_key
from corehq.apps.domain.views.settings import BaseProjectSettingsView
from corehq.apps.hqwebapp.views import CRUDPaginatedViewMixin
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
from corehq.motech.const import PASSWORD_PLACEHOLDER
from corehq.motech.forms import ConnectionSettingsForm, UnrecognizedHost
from corehq.motech.models import ConnectionSettings, RequestLog
from corehq.util.urlvalidate.urlvalidate import PossibleSSRFAttempt
from no_exceptions.exceptions import Http400
class Http409(Http400):
    """HTTP 409 Conflict; raised when deleting a resource that is still
    referenced elsewhere (see ConnectionSettingsListView.get_deleted_item_data)."""
    status = 409
    meaning = 'CONFLICT'
    message = "Resource is in use."
@method_decorator(require_permission(Permissions.edit_motech), name='dispatch')
class MotechLogListView(BaseProjectSettingsView, ListView):
    """Paginated list of remote API request logs for the current domain."""
    urlname = 'motech_log_list_view'
    page_title = _("Remote API Logs")
    template_name = 'motech/logs.html'
    context_object_name = 'logs'
    paginate_by = 100

    def get_queryset(self):
        # Load only the columns shown in the list view.
        wanted = (
            'timestamp',
            'payload_id',
            'request_method',
            'request_url',
            'response_status',
        )
        logs = _get_request_log_queryset(self.request, self.domain)
        return logs.order_by('-timestamp').only(*wanted)

    def get_context_data(self, **kwargs):
        context = super(MotechLogListView, self).get_context_data(**kwargs)
        params = self.request.GET
        context["filter_from_date"] = params.get("filter_from_date",
                                                 _a_week_ago())
        # The remaining filters default to "no filter".
        for name in ("filter_to_date", "filter_payload", "filter_url",
                     "filter_status"):
            context[name] = params.get(name, "")
        return context

    @property
    def object_list(self):
        return self.get_queryset()
@method_decorator(require_permission(Permissions.edit_motech), name='dispatch')
class MotechLogDetailView(BaseProjectSettingsView, DetailView):
    """Detail page for one RequestLog entry."""
    urlname = 'motech_log_detail_view'
    page_title = _("Remote API Logs")
    template_name = 'motech/log_detail.html'
    context_object_name = 'log'

    def get_queryset(self):
        # Restrict lookups to the current domain's logs.
        return RequestLog.objects.filter(domain=self.domain)

    @property
    def object(self):
        return self.get_object()

    @property
    @memoized
    def page_url(self):
        return reverse(self.urlname, args=[self.domain, self.kwargs['pk']])
@login_or_api_key
@require_permission(Permissions.edit_motech)
def motech_log_export_view(request, domain):
    """
    Download ``RequestLog``s as CSV. Uses ``StreamingHttpResponse`` to
    support large file sizes.
    """

    def as_utc_timestamp(timestamp):
        """
        Formats a datetime as a string that Postgres recognizes as a UTC
        timestamp.
        """
        return timestamp.isoformat(sep=' ', timespec='milliseconds') + '+00'

    def json_or_none(dict_):
        return json.dumps(dict_) if dict_ else None

    def _blank_to_none(cast):
        # None and '' both export as empty cells; otherwise apply ``cast``.
        def transform(value):
            if value is None or value == '':
                return None
            return cast(value)
        return transform

    int_or_none = _blank_to_none(int)
    string_or_none = _blank_to_none(str)

    fields = [
        # attribute, column label, transform
        ('timestamp', _('Timestamp'), as_utc_timestamp),
        ('payload_id', _('Payload ID'), string_or_none),
        ('request_method', _('Request Method'), lambda x: x),
        ('request_url', _('Request URL'), lambda x: x),
        ('request_body', _('Request Body'), string_or_none),
        ('request_error', _('Error'), string_or_none),
        ('response_status', _('Status Code'), int_or_none),
        ('response_headers', _('Response Headers'), json_or_none),
        ('response_body', _('Response Body'), string_or_none),
    ]

    def stream():
        attrs = [attr for attr, _label, _fn in fields]
        queryset = (_get_request_log_queryset(request, domain)
                    .order_by('timestamp')
                    .only(*attrs))
        # csv.writer writes into PseudoBuffer, whose write() hands the
        # formatted row straight back so it can be yielded.
        writer = csv.writer(PseudoBuffer(), dialect='excel')
        yield writer.writerow([label for _attr, label, _fn in fields])
        for log in queryset:
            yield writer.writerow(
                [fn(getattr(log, attr)) for attr, _label, fn in fields])

    response = StreamingHttpResponse(stream(), content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="remote_api_logs.csv"'
    return response
class ConnectionSettingsListView(BaseProjectSettingsView, CRUDPaginatedViewMixin):
    """Paginated CRUD list view of the domain's ConnectionSettings."""
    urlname = 'connection_settings_list_view'
    page_title = _('Connection Settings')
    template_name = 'motech/connection_settings.html'
    @method_decorator(require_permission(Permissions.edit_motech))
    def dispatch(self, request, *args, **kwargs):
        # Feature-gated: requires the incremental-exports toggle or the
        # data-forwarding privilege.
        if (
            toggles.INCREMENTAL_EXPORTS.enabled_for_request(request)
            or has_privilege(request, privileges.DATA_FORWARDING)
        ):
            return super().dispatch(request, *args, **kwargs)
        raise Http404()
    @property
    def total(self):
        # Row count used by CRUDPaginatedViewMixin's pagination.
        return self.base_query.count()
    @property
    def base_query(self):
        return ConnectionSettings.objects.filter(domain=self.domain)
    @property
    def column_names(self):
        return [
            _("Name"),
            _("URL"),
            _("Notify Addresses"),
            _("Used By"),
        ]
    @property
    def page_context(self):
        return self.pagination_context
    @property
    def paginated_list(self):
        # Yield one item per row on the current page, in the shape the
        # CRUD pagination mixin expects.
        start, end = self.skip, self.skip + self.limit
        for connection_settings in self.base_query.all()[start:end]:
            yield {
                "itemData": self._get_item_data(connection_settings),
                "template": "connection-settings-template",
            }
    def _get_item_data(self, connection_settings):
        # Template data for a single row; editUrl only for saved rows.
        data = {
            'id': connection_settings.id,
            'name': connection_settings.name,
            'url': connection_settings.url,
            'notifyAddresses': ', '.join(connection_settings.notify_addresses),
            'usedBy': ', '.join(connection_settings.used_by),
        }
        if connection_settings.id is not None:
            data['editUrl'] = reverse(
                ConnectionSettingsDetailView.urlname,
                kwargs={'domain': self.domain, 'pk': connection_settings.id}
            )
        return data
    def get_deleted_item_data(self, item_id):
        connection_settings = ConnectionSettings.objects.get(
            pk=item_id,
            domain=self.domain,
        )
        # Refuse to delete settings still referenced elsewhere (HTTP 409).
        if connection_settings.used_by:
            raise Http409
        connection_settings.delete()
        return {
            'itemData': self._get_item_data(connection_settings),
            'template': 'connection-settings-deleted-template',
        }
    def post(self, *args, **kwargs):
        return self.paginate_crud_response
class ConnectionSettingsDetailView(BaseProjectSettingsView, ModelFormMixin, ProcessFormView):
    """Create or edit a single ConnectionSettings instance."""
    urlname = 'connection_settings_detail_view'
    page_title = _('Connection Settings')
    template_name = 'motech/connection_settings_detail.html'
    model = ConnectionSettings
    form_class = ConnectionSettingsForm

    @method_decorator(require_permission(Permissions.edit_motech))
    def dispatch(self, request, *args, **kwargs):
        # Same feature gate as the list view.
        allowed = (toggles.INCREMENTAL_EXPORTS.enabled_for_request(request)
                   or has_privilege(request, privileges.DATA_FORWARDING))
        if not allowed:
            raise Http404()
        return super().dispatch(request, *args, **kwargs)

    def get_queryset(self):
        return super().get_queryset().filter(domain=self.domain)

    def _load_object(self):
        # 'pk' present in the URL means update; absent means create.
        self.object = (self.get_object()
                       if self.pk_url_kwarg in self.kwargs else None)

    def get(self, request, *args, **kwargs):
        self._load_object()
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self._load_object()
        return super().post(request, *args, **kwargs)

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['domain'] = self.domain
        return kwargs

    def get_success_url(self):
        return reverse(ConnectionSettingsListView.urlname,
                       kwargs={'domain': self.domain})

    def form_valid(self, form):
        form.save()
        return super().form_valid(form)
@require_POST
@require_permission(Permissions.edit_motech)
def test_connection_settings(request, domain):
    """Verify a ConnectionSettings form by sending a GET request to its
    base URL, and report the outcome as JSON for the settings page."""
    # Same feature gate as the connection-settings views.
    if not (
        toggles.INCREMENTAL_EXPORTS.enabled_for_request(request)
        or has_privilege(request, privileges.DATA_FORWARDING)
    ):
        raise Http404
    if request.POST.get('plaintext_password') == PASSWORD_PLACEHOLDER:
        # The user is editing an existing instance, and the form is
        # showing the password placeholder. (We don't tell the user what
        # the API password is.)
        return JsonResponse({
            "success": False,
            "response": _("Please enter API password again."),
        })
    form = ConnectionSettingsForm(domain=domain, data=request.POST)
    if form.is_valid():
        if isinstance(form.cleaned_data['url'], UnrecognizedHost):
            return JsonResponse({"success": False, "response": "Unknown URL"})
        # commit=False: test the connection without persisting it.
        conn = form.save(commit=False)
        requests = conn.get_requests()
        try:
            # Send a GET request to the base URL. That should be enough
            # to test the URL and authentication.
            response = requests.get(endpoint=None)
            if 200 <= response.status_code < 300:
                return JsonResponse({
                    "success": True,
                    "status": response.status_code,
                    "response": response.text,
                })
            else:
                return JsonResponse({
                    "success": False,
                    "status": response.status_code,
                    "response": response.text,
                })
        except RequestException as err:
            return JsonResponse({"success": False, "response": str(err)})
        except PossibleSSRFAttempt:
            return JsonResponse({"success": False, "response": "Invalid URL"})
    else:
        # Surface URL validation errors directly; anything else needs the
        # full form round-trip.
        if set(form.errors.keys()) == {'url'}:
            return JsonResponse({
                "success": False,
                "response": form.errors['url'],
            })
        else:
            return JsonResponse({
                "success": False,
                "response": "Try saving the connection first"
            })
class PseudoBuffer:
    """File-like object whose ``write`` returns the value instead of
    buffering it, letting ``csv.writer`` rows be yielded one at a time
    from a streaming response (see motech_log_export_view)."""
    def write(self, value):
        return value
def _get_request_log_queryset(request, domain):
    """Return the domain's RequestLogs filtered by the request's GET params.

    Supported filters: from/to date (from defaults to a week ago), payload
    ID, URL prefix (case-insensitive), and response status — an exact code
    ("404"), a range ("4xx"), or "none" for requests with no response.
    """
    filter_from_date = request.GET.get("filter_from_date", _a_week_ago())
    filter_to_date = request.GET.get("filter_to_date")
    filter_payload = request.GET.get("filter_payload")
    filter_url = request.GET.get("filter_url")
    filter_status = request.GET.get("filter_status")
    queryset = (RequestLog.objects
                .filter(domain=domain)
                .filter(timestamp__gte=filter_from_date))
    if filter_to_date:
        queryset = queryset.filter(timestamp__lte=filter_to_date)
    if filter_payload:
        queryset = queryset.filter(payload_id=filter_payload)
    if filter_url:
        queryset = queryset.filter(request_url__istartswith=filter_url)
    if filter_status:
        if re.match(r'^\d{3}$', filter_status):
            queryset = queryset.filter(response_status=filter_status)
        elif re.match(r'^\dxx$', filter_status.lower()):
            # Filtering response status code by "2xx", "4xx", etc. will
            # return all responses in that range
            status_min = int(filter_status[0]) * 100
            status_max = status_min + 99
            # Inclusive upper bound: the previous ``__lt=status_max``
            # wrongly excluded the x99 status (e.g. 499 from "4xx").
            queryset = (queryset.filter(response_status__gte=status_min)
                        .filter(response_status__lte=status_max))
        elif filter_status.lower() == "none":
            queryset = queryset.filter(response_status=None)
    return queryset
def _a_week_ago() -> str:
last_week = datetime.today() - timedelta(days=7)
return last_week.strftime('%Y-%m-%d')
|
|
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from models import BooleanMessage
from models import ConflictException
from models import StringMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForms
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionByTypeForm
from models import SessionBySpeakerForm
from models import SessionForms
from utils import getUserId
from settings import WEB_CLIENT_ID
# OAuth scope and client IDs accepted by the API (see @endpoints.api below).
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Default field values applied when a conference is created without them.
DEFAULTS = {"city": "Default City",
            "maxAttendees": 0,
            "seatsAvailable": 0,
            "topics": ["Default", "Topic"]}
# Maps query-form field names to Conference property names.
FIELDS = {'CITY': 'city',
          'TOPIC': 'topics',
          'MONTH': 'month',
          'MAX_ATTENDEES': 'maxAttendees'}
# Maps query-form operator names to datastore filter operators.
OPERATORS = {'EQ': '=',
             'GT': '>',
             'GTEQ': '>=',
             'LT': '<',
             'LTEQ': '<=',
             'NE': '!='}
# Request container for endpoints addressing a conference by websafe key.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)
# Request container for endpoints addressing a session by websafe key.
SESSION_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    SessionKey=messages.StringField(1),
)
# Memcache keys for the sold-out announcement and the featured speaker.
MEMCACHE_ANNOUNCEMENTS_KEY = 'announcements'
MEMCACHE_FEATUREDSPEAKER_KEY = 'featuredSpeaker'
@endpoints.api(name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Helper functions - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
# Get conferences with 5 seats or less available
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no almost sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference.

        Runs as a cross-group transaction because it mutates both the
        user's Profile and the Conference entity atomically.
        """
        retval = None
        prof = self._getProfileFromUser()
        # Get conference, check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        # Register
        if reg:
            # Check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            # Check if seats available
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            # Register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        # Unregister
        else:
            # Check if user already registered
            if wsck in prof.conferenceKeysToAttend:
                # Unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                # Not registered: report failure rather than raising.
                retval = False
        # Write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# Convert Date to date string, just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# Convert t-shirt string to Enum, just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize,
getattr(prof, field.name)))
elif field.name == 'sessionKeysWishlist':
setattr(pf, field.name, str(getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
    def _copySessionToForm(self, session):
        """Copy relevant fields from Session to SessionForm."""
        sf = SessionForm()
        for field in sf.all_fields():
            # Convert date and startTime to strings, just copy others
            if field.name in ('date', 'startTime'):
                setattr(sf, field.name, str(getattr(session, field.name)))
            else:
                setattr(sf, field.name, getattr(session, field.name))
        sf.check_initialized()
        return sf
def _createConferenceObject(self, request):
"""Create or update Conference object,
returning ConferenceForm/request."""
# Preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# Check that a conference name was provided
if not request.name:
raise endpoints.BadRequestException("Conference 'name'"
"field required")
# Copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name)
for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# Add default values for missing
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# Convert dates from strings to Date objects and
# set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10],
"%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10],
"%Y-%m-%d").date()
# Set seatsAvailable to be same as maxAttendees on creation
# both for data model & outbound Message
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
setattr(request, "seatsAvailable", data["maxAttendees"])
# Make Profile Key from user ID
p_key = ndb.Key(Profile, user_id)
# Allocate new Conference ID with Profile key as parent
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
# Make Conference key from ID
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# Create Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email')
return request
def _createSessionObject(self, request):
# Check that user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# Check that conference and session names are provided
if not request.name or not request.conference:
raise endpoints.BadRequestException("Session 'name' and"
"'conference' fields required")
# Copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name)
for field in request.all_fields()}
# Convert dates from strings to Date objects
if data['date']:
data['date'] = datetime.strptime(data['date'], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'],
"%H:%M").time()
# Make Key and get parent Conference
user_id = getUserId(user)
c_key = ndb.Key(Conference, request.conference)
conference = Conference.query(Conference.name == request.conference)
conference = conference.get()
# Check that conference exists
if not conference:
raise endpoints.BadRequestException("Conference does not exist.")
# Check that user created this conference
if conference.organizerUserId != user_id:
raise endpoints.BadRequestException("Only conference organizer"
"can create sessions.")
# Allocate new Session ID with Conference key as parent
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
# Make Session key from Session ID
s_key = ndb.Key(Session, s_id, parent=c_key)
data['key'] = s_key
# create Conference & return (modified) ConferenceForm
Session(**data).put()
# Add task to check if this speaker should be featured
taskqueue.add(params={'speaker': request.speaker,
'conference': request.conference},
url='/tasks/check_featured_speaker')
return request
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# Get user Profile
prof = self._getProfileFromUser()
# Process user-modifyable fields if save_request
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
# Write profile to datastore
prof.put()
return self._copyProfileToForm(prof)
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name)
for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid"
"field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous
# filters and disallow the filter if inequality was performed
# on a different field before. track the field on which the
# inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is"
"allowed on only one"
"field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
def _getProfileFromUser(self):
"""Return user Profile from datastore or create new one."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"],
filtr["operator"],
filtr["value"])
q = q.filter(formatted_query)
return q
@staticmethod
def _updateFeaturedSpeaker(speaker, conference):
"""Update featured speaker in cache."""
# If speaker of created session already has a session at this
# conference, make them the featured speaker
sessions = Session.query(ndb.AND(Session.speaker == speaker,
Session.conference == conference))
if sessions.count() > 1:
memcache.set(MEMCACHE_FEATUREDSPEAKER_KEY, speaker)
return speaker
# - - - Endpoints - - - - - - - - - - - - - - - - - - -
@endpoints.method(SESSION_GET_REQUEST, SessionForm,
path='conference/{SessionKey}/addSessionToWishlist',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add session to user's wishlist."""
# Get Session from Key and check that it exists
s_key = request.SessionKey
session = ndb.Key(urlsafe=s_key).get()
if not session:
raise endpoints.NotFoundException(
'No session found with key: %s' % s_key)
# Append Key to Profile's session wishlist
prof = self._getProfileFromUser()
prof.sessionKeysWishlist.append(s_key)
prof.put()
return self._copySessionToForm(session)
    @endpoints.method(ConferenceForm, ConferenceForm,
                      path='conference',
                      http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference."""
        # Validation and datastore work happen in the helper.
        return self._createConferenceObject(request)
    # open only to the organizer of the conference
    @endpoints.method(SessionForm, SessionForm,
                      path='session',
                      http_method='POST', name='createSession')
    def createSession(self, request):
        """Create new session."""
        # Organizer-only authorization is enforced in the helper.
        return self._createSessionObject(request)
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if not announcement:
announcement = ""
return StringMessage(data=announcement)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
prof = conf.key.parent().get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# Make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# Make profile key
p_key = ndb.Key(Profile, getUserId(user))
# Create ancestor query for this user
conferences = Conference.query(ancestor=p_key)
# Get the user profile and display name
prof = self._getProfileFromUser()
displayName = getattr(prof, 'displayName')
return ConferenceForms(items=[self._copyConferenceToForm(conf,
displayName)
for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='POST', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Return sessions by conference."""
# Check that conference name was provided
if not request.websafeConferenceKey:
raise endpoints.BadRequestException("Must specify conference!")
# Get conference and check that it exists
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
# Get all Sessions for this Conference and check that there are > 0
sessions = Session.query(Session.conference == conf.name)
if sessions.count() == 0:
raise endpoints.NotFoundException('No sessions for '
'this conference.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(SessionBySpeakerForm, SessionForms,
path='{speaker}/sessions',
http_method='POST',
name='getConferenceSessionsBySpeaker')
def getConferenceSessionsBySpeaker(self, request):
"""Return sessions by speaker."""
# Check that speaker was provided
if not request.speaker:
raise endpoints.BadRequestException("Must specify speaker!")
# Get all Sessions with this speaker and check that there are > 0
sessions = Session.query().filter(Session.speaker == request.speaker)
if sessions.count() == 0:
raise endpoints.NotFoundException('No sessions with this speaker.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(SessionByTypeForm, SessionForms,
path='conference/{websafeConferenceKey}/{typeOfSession}'
'/sessions',
http_method='POST', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Return sessions by session type."""
# Make sure Conference and typeOfSession were specified
if not request.websafeConferenceKey or not request.typeOfSession:
raise endpoints.BadRequestException("Must specify conference "
"and session type!")
# Get conference and check that it exists
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
# Get all Sessions of typeOfSession and check that there are > 0
sessions = Session.query(Session.conference == conf.name)\
.filter(Session.typeOfSession == request.typeOfSession)
if sessions.count() == 0:
raise endpoints.NotFoundException('No sessions of this type.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser()
conferenceKeys = prof.conferenceKeysToAttend
c_keys = [ndb.Key(urlsafe=c) for c in conferenceKeys]
conferences = ndb.get_multi(c_keys)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
for conf in conferences])
@endpoints.method(message_types.VoidMessage, StringMessage,
path='featuredSpeaker/get',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return featured speaker from memcache."""
featuredSpeaker = memcache.get(MEMCACHE_FEATUREDSPEAKER_KEY)
if not featuredSpeaker:
featuredSpeaker = ""
return StringMessage(data=featuredSpeaker)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile',
http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(message_types.VoidMessage, SessionForms,
path='session/wishlist',
http_method='POST', name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""Return all sessions in user's wishlist."""
prof = self._getProfileFromUser()
sessionKeys = prof.sessionKeysWishlist
s_keys = [ndb.Key(urlsafe=s) for s in sessionKeys]
sessions = ndb.get_multi(s_keys)
if len(sessions) == 0:
raise endpoints.NotFoundException('No sessions in wishlist.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/morningsessions',
http_method='POST',
name='getMorningSessionsByConference')
def getMorningSessionsByConference(self, request):
"""Query for morning sessions at a given conference."""
check_time = datetime.strptime('12:00', "%H:%M").time()
sessions = Session.query(Session.startTime < check_time)
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(message_types.VoidMessage, SessionForms,
path='sessions/non_workshops_before_seven',
http_method='GET', name='getNonWorkshopsBeforeSeven')
def getNonWorkshopsBeforeSeven(self, request):
"""Query for non-workshop sessions before 7:00 P.M."""
check_time = datetime.strptime('19:00', "%H:%M").time()
sessions = Session.query(Session.typeOfSession != 'Workshop')
sessions = [session for session in sessions
if session.startTime < check_time]
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/open',
http_method='POST', name='getOpenConferences')
def getOpenConferences(self, request):
"""Query for conferences with available seats remaining."""
conferences = Conference.query(Conference.seatsAvailable > 0)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
for conf in conferences])
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST', name='queryConferences')
def queryConferences(self, request):
"""Query for all conferences."""
conferences = self._getQuery(request)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile',
http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Save user profile."""
return self._doProfile(request)
# registers API
# Expose the ConferenceApi service as the Endpoints WSGI application.
api = endpoints.api_server([ConferenceApi])
|
|
from threading import Thread
import pytest
from .common import * # NOQA
from rancher import ApiError
# Kubernetes versions for initial provisioning and for upgrade tests;
# an empty string means "use the Rancher default".
K8S_VERSION = os.environ.get('RANCHER_K8S_VERSION', "")
K8S_VERSION_UPGRADE = os.environ.get('RANCHER_K8S_VERSION_UPGRADE', "")
# Pod security policy template applied to CIS-hardened clusters.
POD_SECURITY_POLICY_TEMPLATE = \
    os.environ.get('RANCHER_POD_SECURITY_POLICY_TEMPLATE',
                   "restricted")
# Cloud provider credentials (DigitalOcean / Azure) read from the env.
DO_ACCESSKEY = os.environ.get('DO_ACCESSKEY', "None")
AZURE_SUBSCRIPTION_ID = os.environ.get("AZURE_SUBSCRIPTION_ID")
AZURE_CLIENT_ID = os.environ.get("AZURE_CLIENT_ID")
AZURE_CLIENT_SECRET = os.environ.get("AZURE_CLIENT_SECRET")
AZURE_TENANT_ID = os.environ.get("AZURE_TENANT_ID")
# Number of worker nodes created by the stress test.
worker_count = int(os.environ.get('RANCHER_STRESS_TEST_WORKER_COUNT', 1))
# Prefix used when naming provisioned hosts.
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "testcustom")
# Docker install script baked into node templates.
engine_install_url = "https://releases.rancher.com/install-docker/20.10.sh"
# Baseline RKE cluster config: canal networking (vxlan backend), nginx
# ingress, metrics-server monitoring, recurring etcd snapshots disabled
# in favour of 12h backups, PSP off.
rke_config = {
    "addonJobTimeout": 30,
    "authentication":
    {"strategy": "x509",
     "type": "authnConfig"},
    "ignoreDockerVersion": True,
    "ingress":
    {"provider": "nginx",
     "type": "ingressConfig"},
    "monitoring":
    {"provider": "metrics-server",
     "type": "monitoringConfig"},
    "network":
    {"plugin": "canal",
     "type": "networkConfig",
     "options": {"flannel_backend_type": "vxlan"}},
    "services": {
        "etcd": {
            "extraArgs":
            {"heartbeat-interval": 500,
             "election-timeout": 5000},
            "snapshot": False,
            "backupConfig":
            {"intervalHours": 12, "retention": 6, "type": "backupConfig"},
            "creation": "12h",
            "retention": "72h",
            "type": "etcdService"},
        "kubeApi": {
            "alwaysPullImages": False,
            "podSecurityPolicy": False,
            "serviceNodePortRange": "30000-32767",
            "type": "kubeAPIService"}},
    "sshAgentAuth": False}
# Windows-cluster variant of rke_config: flannel networking with an
# explicit vxlan backend port/VNI (required for Windows nodes).
rke_config_windows = {
    "addonJobTimeout": 30,
    "authentication":
    {"strategy": "x509",
     "type": "authnConfig"},
    "ignoreDockerVersion": True,
    "ingress":
    {"provider": "nginx",
     "type": "ingressConfig"},
    "monitoring":
    {"provider": "metrics-server",
     "type": "monitoringConfig"},
    "network": {
        "mtu": 0,
        "plugin": "flannel",
        "type": "networkConfig",
        "options": {
            "flannel_backend_type": "vxlan",
            "flannel_backend_port": "4789",
            "flannel_backend_vni": "4096"
        }
    },
    "services": {
        "etcd": {
            "extraArgs":
            {"heartbeat-interval": 500,
             "election-timeout": 5000},
            "snapshot": False,
            "backupConfig":
            {"intervalHours": 12, "retention": 6, "type": "backupConfig"},
            "creation": "12h",
            "retention": "72h",
            "type": "etcdService"},
        "kubeApi": {
            "alwaysPullImages": False,
            "podSecurityPolicy": False,
            "serviceNodePortRange": "30000-32767",
            "type": "kubeAPIService"}},
    "sshAgentAuth": False}
# Windows-cluster variant using flannel's host-gw backend instead of vxlan.
rke_config_windows_host_gw = {
    "addonJobTimeout": 30,
    "authentication":
    {"strategy": "x509",
     "type": "authnConfig"},
    "ignoreDockerVersion": True,
    "ingress":
    {"provider": "nginx",
     "type": "ingressConfig"},
    "monitoring":
    {"provider": "metrics-server",
     "type": "monitoringConfig"},
    "network": {
        "mtu": 0,
        "plugin": "flannel",
        "type": "networkConfig",
        "options": {
            "flannel_backend_type": "host-gw"
        }
    },
    "services": {
        "etcd": {
            "extraArgs":
            {"heartbeat-interval": 500,
             "election-timeout": 5000},
            "snapshot": False,
            "backupConfig":
            {"intervalHours": 12, "retention": 6, "type": "backupConfig"},
            "creation": "12h",
            "retention": "72h",
            "type": "etcdService"},
        "kubeApi": {
            "alwaysPullImages": False,
            "podSecurityPolicy": False,
            "serviceNodePortRange": "30000-32767",
            "type": "kubeAPIService"}},
    "sshAgentAuth": False}
# CIS 1.4 hardened RKE config: dedicated etcd uid/gid, PSP + audit log +
# event rate limit + secrets encryption enabled on the API server, an
# explicit admission-plugin list and restricted TLS cipher suites.
rke_config_cis_1_4 = {
    "addonJobTimeout": 30,
    "authentication":
    {"strategy": "x509",
     "type": "authnConfig"},
    "ignoreDockerVersion": True,
    "ingress":
    {"provider": "nginx",
     "type": "ingressConfig"},
    "monitoring":
    {"provider": "metrics-server",
     "type": "monitoringConfig"},
    "network":
    {"plugin": "canal",
     "type": "networkConfig",
     "options": {"flannel_backend_type": "vxlan"}},
    "services": {
        "etcd": {
            "extraArgs":
            {"heartbeat-interval": 500,
             "election-timeout": 5000},
            "snapshot": False,
            "backupConfig":
            {"intervalHours": 12, "retention": 6, "type": "backupConfig"},
            "creation": "12h",
            "retention": "72h",
            "type": "etcdService",
            "gid": 1001,
            "uid": 1001},
        "kubeApi": {
            "alwaysPullImages": True,
            "auditLog":
            {"enabled": True},
            "eventRateLimit":
            {"enabled": True},
            "extraArgs":
            {"anonymous-auth": False,
             "enable-admission-plugins": "ServiceAccount,"
                                         "NamespaceLifecycle,"
                                         "LimitRanger,"
                                         "PersistentVolumeLabel,"
                                         "DefaultStorageClass,"
                                         "ResourceQuota,"
                                         "DefaultTolerationSeconds,"
                                         "AlwaysPullImages,"
                                         "DenyEscalatingExec,"
                                         "NodeRestriction,"
                                         "PodSecurityPolicy,"
                                         "MutatingAdmissionWebhook,"
                                         "ValidatingAdmissionWebhook,"
                                         "Priority,"
                                         "TaintNodesByCondition,"
                                         "PersistentVolumeClaimResize,"
                                         "EventRateLimit",
             "profiling": False,
             "service-account-lookup": True,
             "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_"
                                  "128_GCM_SHA256,"
                                  "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,"
                                  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,"
                                  "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,"
                                  "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,"
                                  "TLS_ECDHE_ECDSA_WITH_AES_"
                                  "256_GCM_SHA384,"
                                  "TLS_RSA_WITH_AES_256_GCM_SHA384,"
                                  "TLS_RSA_WITH_AES_128_GCM_SHA256"},
            "extraBinds": ["/opt/kubernetes:/opt/kubernetes"],
            "podSecurityPolicy": True,
            "secretsEncryptionConfig":
            {"enabled": True},
            "serviceNodePortRange": "30000-32767",
            "type": "kubeAPIService"},
        "kubeController": {
            "extraArgs": {
                "address": "127.0.0.1",
                "feature-gates": "RotateKubeletServerCertificate=true",
                "profiling": "false",
                "terminated-pod-gc-threshold": "1000"
            },
        },
        "kubelet": {
            "extraArgs": {
                "protect-kernel-defaults": True,
                "feature-gates": "RotateKubeletServerCertificate=true"
            },
            "generateServingCertificate": True
        },
        "scheduler": {
            "extraArgs": {
                "address": "127.0.0.1",
                "profiling": False
            }
        }},
    "sshAgentAuth": False}
# CIS 1.5 hardened RKE config. Leaner than the 1.4 profile: many fields
# are left empty / default so Rancher fills in its own hardened values;
# etcd runs under a dedicated uid/gid and the API server enables PSP,
# secrets encryption, audit logging and event rate limiting.
rke_config_cis_1_5 = {
    "addonJobTimeout": 30,
    "ignoreDockerVersion": True,
    "services": {
        "etcd": {
            "gid": 52034,
            "uid": 52034,
            "type": "etcdService"},
        "kubeApi": {
            "podSecurityPolicy": True,
            "secretsEncryptionConfig":
            {"enabled": True},
            "auditLog":
            {"enabled": True},
            "eventRateLimit":
            {"enabled": True},
            "type": "kubeAPIService"},
        "kubeController": {
            "extraArgs": {
                "feature-gates": "RotateKubeletServerCertificate=true",
            },
        },
        "scheduler": {
            "image": "",
            "extraArgs": {},
            "extraBinds": [],
            "extraEnv": []
        },
        "kubelet": {
            "generateServingCertificate": True,
            "extraArgs": {
                "feature-gates": "RotateKubeletServerCertificate=true",
                "protect-kernel-defaults": True,
                "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_"
                                     "128_GCM_SHA256,"
                                     "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,"
                                     "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,"
                                     "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,"
                                     "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,"
                                     "TLS_ECDHE_ECDSA_WITH_AES_"
                                     "256_GCM_SHA384,"
                                     "TLS_RSA_WITH_AES_256_GCM_SHA384,"
                                     "TLS_RSA_WITH_AES_128_GCM_SHA256"
            },
            "extraBinds": [],
            "extraEnv": [],
            "clusterDomain": "",
            "infraContainerImage": "",
            "clusterDnsServer": "",
            "failSwapOn": False
        },
    },
    "network":
    {"plugin": "",
     "options": {},
     "mtu": 0,
     "nodeSelector": {}},
    "authentication": {
        "strategy": "",
        "sans": [],
        "webhook": None,
    },
    "sshAgentAuth": False,
    "windowsPreferredCluster": False
}
# Pin an explicit Kubernetes version on the configs when one was requested.
if K8S_VERSION != "":
    rke_config["kubernetesVersion"] = K8S_VERSION
    rke_config_cis_1_4["kubernetesVersion"] = K8S_VERSION
    rke_config_cis_1_5["kubernetesVersion"] = K8S_VERSION
# NOTE(review): dict.copy() is shallow -- each variant below shares its
# nested dicts with the base config. Only a new top-level "cloudProvider"
# key is added, so this is currently safe; do not mutate nested values.
rke_config_windows_host_gw_aws_provider = rke_config_windows_host_gw.copy()
rke_config_windows_host_gw_aws_provider["cloudProvider"] = {"name": "aws",
                                                            "type": "cloudProvider",
                                                            "awsCloudProvider":
                                                            {"type": "awsCloudProvider"}}
rke_config_aws_provider = rke_config.copy()
rke_config_aws_provider["cloudProvider"] = {"name": "aws",
                                            "type": "cloudProvider",
                                            "awsCloudProvider":
                                            {"type": "awsCloudProvider"}}
# Same as above but without an explicit awsCloudProvider sub-config.
rke_config_aws_provider_2 = rke_config.copy()
rke_config_aws_provider_2["cloudProvider"] = {"name": "aws",
                                              "type": "cloudProvider"}
rke_config_azure_provider = rke_config.copy()
rke_config_azure_provider["cloudProvider"] = {
    "name": "azure",
    "azureCloudProvider": {
        "aadClientId": AZURE_CLIENT_ID,
        "aadClientSecret": AZURE_CLIENT_SECRET,
        "subscriptionId": AZURE_SUBSCRIPTION_ID,
        "tenantId": AZURE_TENANT_ID}}
# Skip markers: stress tests and edit-cluster tests run only when their
# respective environment knobs are set.
if_stress_enabled = pytest.mark.skipif(
    not os.environ.get('RANCHER_STRESS_TEST_WORKER_COUNT'),
    reason='Stress test not enabled')
if_test_edit_cluster = pytest.mark.skipif(
    CLUSTER_NAME == "",
    reason='Edit cluster tests not enabled')
def test_cis_complaint():
    """Provision a CIS-hardened custom cluster (2 cp / 3 etcd / 3 worker)
    with a pod security policy template and verify it comes up."""
    # rke_config_cis
    node_roles = [
        ["controlplane"], ["controlplane"],
        ["etcd"], ["etcd"], ["etcd"],
        ["worker"], ["worker"], ["worker"]
    ]
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles), random_test_name(HOST_NAME))
    # get_cis_rke_config presumably selects the 1.4 vs 1.5 CIS config
    # based on CIS_SCAN_PROFILE -- defined elsewhere; TODO confirm.
    rke_config_cis = get_cis_rke_config()
    client = get_admin_client()
    cluster = client.create_cluster(
        name=evaluate_clustername(),
        driver="rancherKubernetesEngine",
        rancherKubernetesEngineConfig=rke_config_cis,
        defaultPodSecurityPolicyTemplateId=POD_SECURITY_POLICY_TEMPLATE)
    assert cluster.state == "provisioning"
    # Applies CIS host prerequisites on each node, then registers them.
    configure_cis_requirements(aws_nodes,
                               CIS_SCAN_PROFILE,
                               node_roles,
                               client,
                               cluster
                               )
    cluster_cleanup(client, cluster, aws_nodes)
def test_rke_az_host_1(node_template_az):
    """Azure: one node pool carrying all roles (cp/etcd/worker)."""
    validate_rke_dm_host_1(node_template_az, rke_config)
def test_rke_az_host_2(node_template_az):
    """Azure: separate cp/etcd pools plus three workers."""
    validate_rke_dm_host_2(node_template_az, rke_config)
def test_rke_az_host_3(node_template_az):
    """Azure: HA layout -- 2 cp / 3 etcd / 3 worker pools."""
    validate_rke_dm_host_3(node_template_az, rke_config)
def test_rke_az_host_4(node_template_az):
    """Azure: node-pool scale-up/scale-down scenario."""
    validate_rke_dm_host_4(node_template_az, rke_config)
def test_rke_az_host_with_provider_1(node_template_az):
    """Azure with in-tree azure cloud provider: all-role single node."""
    validate_rke_dm_host_1(node_template_az, rke_config_azure_provider)
def test_rke_az_host_with_provider_2(node_template_az):
    """Azure with in-tree azure cloud provider: split role pools."""
    validate_rke_dm_host_2(node_template_az, rke_config_azure_provider)
def test_rke_do_host_1(node_template_do):
    """DigitalOcean: one node pool carrying all roles."""
    validate_rke_dm_host_1(node_template_do, rke_config)
def test_rke_do_host_2(node_template_do):
    """DigitalOcean: separate cp/etcd pools plus three workers."""
    validate_rke_dm_host_2(node_template_do, rke_config)
def test_rke_do_host_3(node_template_do):
    """DigitalOcean: HA layout -- 2 cp / 3 etcd / 3 worker pools."""
    validate_rke_dm_host_3(node_template_do, rke_config)
def test_rke_do_host_4(node_template_do):
    """DigitalOcean: node-pool scale-up/scale-down scenario."""
    validate_rke_dm_host_4(node_template_do, rke_config)
def test_rke_linode_host_1(node_template_linode):
    """Linode: one node pool carrying all roles."""
    validate_rke_dm_host_1(node_template_linode, rke_config)
def test_rke_linode_host_2(node_template_linode):
    """Linode: separate cp/etcd pools plus three workers."""
    validate_rke_dm_host_2(node_template_linode, rke_config)
def test_rke_linode_host_3(node_template_linode):
    """Linode: HA layout -- 2 cp / 3 etcd / 3 worker pools."""
    validate_rke_dm_host_3(node_template_linode, rke_config)
def test_rke_ec2_host_1(node_template_ec2):
    """EC2: one node pool carrying all roles."""
    validate_rke_dm_host_1(node_template_ec2, rke_config)
def test_rke_ec2_host_2(node_template_ec2):
    """EC2: separate cp/etcd pools plus three workers."""
    validate_rke_dm_host_2(node_template_ec2, rke_config)
def test_rke_ec2_host_3(node_template_ec2):
    """EC2: HA layout -- 2 cp / 3 etcd / 3 worker pools."""
    validate_rke_dm_host_3(node_template_ec2, rke_config)
def test_rke_ec2_host_with_aws_provider_1(node_template_ec2_with_provider):
    """EC2 with in-tree aws cloud provider: all-role single node."""
    validate_rke_dm_host_1(node_template_ec2_with_provider,
                           rke_config_aws_provider)
def test_rke_ec2_host_with_aws_provider_2(node_template_ec2_with_provider):
    """EC2 with in-tree aws cloud provider: split role pools."""
    validate_rke_dm_host_2(node_template_ec2_with_provider,
                           rke_config_aws_provider)
def test_rke_ec2_host_with_aws_provider_3(node_template_ec2_with_provider):
    """EC2 with aws provider config lacking the awsCloudProvider block."""
    validate_rke_dm_host_1(node_template_ec2_with_provider,
                           rke_config_aws_provider_2)
def test_rke_ec2_host_4(node_template_ec2):
    """EC2: node-pool scale-up/scale-down scenario."""
    validate_rke_dm_host_4(node_template_ec2, rke_config)
def test_rke_custom_host_1():
    """Custom cluster: a single node carrying every role."""
    node_roles = [["worker", "controlplane", "etcd"]]
    cluster, aws_nodes = create_and_validate_custom_host(node_roles)
    cluster_cleanup(get_user_client(), cluster, aws_nodes)
def test_rke_custom_host_2():
    """Custom cluster: 1 cp, 1 etcd and 3 worker nodes."""
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    cluster, aws_nodes = create_and_validate_custom_host(node_roles)
    cluster_cleanup(get_user_client(), cluster, aws_nodes)
def test_rke_custom_host_3():
    """Custom cluster: HA layout with 2 cp / 3 etcd / 3 workers."""
    node_roles = [
        ["controlplane"], ["controlplane"],
        ["etcd"], ["etcd"], ["etcd"],
        ["worker"], ["worker"], ["worker"]
    ]
    cluster, aws_nodes = create_and_validate_custom_host(node_roles)
    cluster_cleanup(get_user_client(), cluster, aws_nodes)
def test_rke_custom_host_4():
    """Custom cluster: register role groups concurrently from threads,
    each thread staggering its hosts by a fixed delay."""
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            8, random_test_name(HOST_NAME))
    # Role groups mapping each role to the concrete EC2 nodes it gets.
    node_roles = [
        {"roles": ["controlplane"],
         "nodes":[aws_nodes[0], aws_nodes[1]]},
        {"roles": ["etcd"],
         "nodes": [aws_nodes[2], aws_nodes[3], aws_nodes[4]]},
        {"roles": ["worker"],
         "nodes": [aws_nodes[5], aws_nodes[6], aws_nodes[7]]}
    ]
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    # Seconds each worker thread sleeps between host registrations.
    delay = 120
    host_threads = []
    # Start one registration thread per role group, 30s apart.
    for node_role in node_roles:
        host_thread = Thread(target=register_host_after_delay,
                             args=(client, cluster, node_role, delay))
        host_threads.append(host_thread)
        host_thread.start()
        time.sleep(30)
    for host_thread in host_threads:
        host_thread.join()
    cluster = validate_cluster(client, cluster,
                               check_intermediate_state=False,
                               k8s_version=K8S_VERSION)
    cluster_cleanup(client, cluster, aws_nodes)
@if_stress_enabled
def test_rke_custom_host_stress():
    """Stress test: custom cluster with `worker_count` worker nodes.

    Creates worker_count + 4 EC2 nodes, assigns one controlplane node,
    three etcd nodes and worker_count workers, registers each node and
    waits for the cluster to become active.
    """
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        worker_count + 4, random_test_name("teststress"))
    node_roles = [["controlplane"], ["etcd"], ["etcd"], ["etcd"]]
    worker_role = ["worker"]
    # BUG FIX: the original loop variable shadowed the builtin `int`.
    for _ in range(worker_count):
        node_roles.append(worker_role)
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    # Pair each node with its role; zip preserves the original ordering
    # (both sequences have worker_count + 4 entries).
    for node_role, aws_node in zip(node_roles, aws_nodes):
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_role,
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster,
                               check_intermediate_state=False)
    cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_etcd_plane_changes():
    """Grow the etcd plane from 1 to 3 nodes one at a time, then delete
    the original etcd node, validating the cluster after each change."""
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            7, random_test_name(HOST_NAME))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    # Register the first five nodes with their assigned roles.
    for i in range(0, 5):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    etcd_nodes = get_role_nodes(cluster, "etcd")
    assert len(etcd_nodes) == 1
    # Add 1 more etcd node
    aws_node = aws_nodes[5]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["etcd"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 6)
    validate_cluster(client, cluster, intermediate_state="updating")
    # Add 1 more etcd node
    aws_node = aws_nodes[6]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["etcd"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 7)
    validate_cluster(client, cluster, intermediate_state="updating")
    # Delete the first etcd node
    client.delete(etcd_nodes[0])
    validate_cluster(client, cluster, intermediate_state="updating")
    cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_etcd_plane_changes_1():
    """Grow the etcd plane from 1 to 3 nodes by adding both new etcd
    nodes before waiting, then validate the updated cluster."""
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            7, random_test_name(HOST_NAME))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    # Register the first five nodes with their assigned roles.
    for i in range(0, 5):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster,
                                             node_roles[i], aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    etcd_nodes = get_role_nodes(cluster, "etcd")
    assert len(etcd_nodes) == 1
    # Add 2 more etcd node
    aws_node = aws_nodes[5]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["etcd"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    aws_node = aws_nodes[6]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["etcd"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 7)
    validate_cluster(client, cluster, intermediate_state="updating")
    cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_control_plane_changes():
    """Add a second controlplane node, then remove the original one,
    validating the cluster after each change."""
    # BUG FIX: the original code assigned `aws_nodes` twice through a
    # stray line continuation (`aws_nodes = \` repeated); the duplicate
    # assignment and a redundant `i = 0` have been removed.
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            6, random_test_name(HOST_NAME))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    # Register the first five nodes with their assigned roles.
    for i in range(0, 5):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster,
                                             node_roles[i], aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    control_nodes = get_role_nodes(cluster, "control")
    assert len(control_nodes) == 1
    # Add 1 more control node
    aws_node = aws_nodes[5]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["controlplane"],
                                                      aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 6)
    validate_cluster(client, cluster, intermediate_state="updating")
    # Delete the first control node
    client.delete(control_nodes[0])
    validate_cluster(client, cluster, intermediate_state="updating")
    cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_worker_plane_changes():
    """Add a second worker node, then remove the original worker,
    validating the cluster after each change."""
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            4, random_test_name(HOST_NAME))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"]]
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    # Register the first three nodes with their assigned roles.
    for i in range(0, 3):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    worker_nodes = get_role_nodes(cluster, "worker")
    assert len(worker_nodes) == 1
    # Add 1 more worker node
    aws_node = aws_nodes[3]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["worker"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 4)
    validate_cluster(client, cluster, check_intermediate_state=False)
    # Delete the first worker node
    client.delete(worker_nodes[0])
    validate_cluster(client, cluster, check_intermediate_state=False)
    cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_control_node_power_down():
    """Verify the cluster stays usable when a controlplane node is
    powered off: add a second cp node, stop the first, then confirm a
    new worker can still join."""
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            5, random_test_name(HOST_NAME))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"]]
    client = get_user_client()
    cluster = client.create_cluster(name=evaluate_clustername(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    # Register the first three nodes with their assigned roles.
    for i in range(0, 3):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    control_nodes = get_role_nodes(cluster, "control")
    assert len(control_nodes) == 1
    # Add 1 more control node
    aws_node = aws_nodes[3]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["controlplane"],
                                                      aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 4)
    validate_cluster(client, cluster, check_intermediate_state=False)
    # Power Down the first control node
    aws_control_node = aws_nodes[0]
    AmazonWebServices().stop_node(aws_control_node, wait_for_stopped=True)
    control_node = control_nodes[0]
    wait_for_node_status(client, control_node, "unavailable")
    # Cluster should validate with the stopped node marked inactive.
    validate_cluster(
        client, cluster,
        check_intermediate_state=False,
        nodes_not_in_active_state=[control_node.requestedHostname])
    # Add 1 more worker node
    aws_node = aws_nodes[4]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["worker"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 4)
    validate_cluster(client, cluster, check_intermediate_state=False)
    cluster_cleanup(client, cluster, aws_nodes)
@if_test_edit_cluster
def test_edit_cluster_k8s_version():
    """Upgrade the existing named cluster to K8S_VERSION_UPGRADE and
    wait for the rolling update to finish."""
    client = get_user_client()
    clusters = client.list_cluster(name=evaluate_clustername()).data
    assert len(clusters) == 1
    cluster = clusters[0]
    # Copy the live RKE config and bump only the Kubernetes version.
    rke_config = cluster.rancherKubernetesEngineConfig
    rke_updated_config = rke_config.copy()
    rke_updated_config["kubernetesVersion"] = K8S_VERSION_UPGRADE
    cluster = client.update(cluster,
                            name=cluster.name,
                            rancherKubernetesEngineConfig=rke_updated_config)
    cluster = validate_cluster(client, cluster, intermediate_state="updating",
                               k8s_version=K8S_VERSION_UPGRADE)
def test_delete_cluster():
    """Delete the cluster identified by CLUSTER_NAME."""
    client = get_user_client()
    cluster = get_cluster_by_name(client, CLUSTER_NAME)
    delete_cluster(client, cluster)
def validate_rke_dm_host_1(node_template,
                           rancherKubernetesEngineConfig=rke_config,
                           attemptDelete=True):
    """Provision a cluster from one node pool that carries every role.

    When attemptDelete is False the created cluster and node pools are
    returned for further use instead of being cleaned up.
    """
    client = get_user_client()
    pool_spec = {"hostnamePrefix": random_node_name(),
                 "nodeTemplateId": node_template.id,
                 "controlPlane": True,
                 "etcd": True,
                 "worker": True,
                 "quantity": 1,
                 "clusterId": None}
    cluster, node_pools = create_and_validate_cluster(
        client, [pool_spec], rancherKubernetesEngineConfig)
    if not attemptDelete:
        return cluster, node_pools
    cluster_cleanup(client, cluster)
def validate_rke_dm_host_2(node_template,
                           rancherKubernetesEngineConfig=rke_config,
                           attemptDelete=True, clusterName=None):
    """Provision a cluster from dedicated controlplane and etcd pools
    plus a three-node worker pool, then optionally clean it up."""
    client = get_user_client()
    # (role flag, pool size) for each node pool, in creation order.
    pool_specs = [("controlPlane", 1), ("etcd", 1), ("worker", 3)]
    nodes = []
    for role, quantity in pool_specs:
        name = random_node_name()
        nodes.append({"hostnamePrefix": name,
                      "nodeTemplateId": node_template.id,
                      "requestedHostname": name,
                      role: True,
                      "quantity": quantity})
    cluster, node_pools = create_and_validate_cluster(
        client, nodes, rancherKubernetesEngineConfig, clusterName)
    if attemptDelete:
        cluster_cleanup(client, cluster)
def validate_rke_dm_host_3(node_template,
                           rancherKubernetesEngineConfig=rke_config):
    """Provision an HA cluster: 2 controlplane, 3 etcd and 3 worker
    nodes in separate pools, then clean it up."""
    client = get_user_client()
    # (role flag, pool size) for each node pool, in creation order.
    pool_specs = [("controlPlane", 2), ("etcd", 3), ("worker", 3)]
    nodes = []
    for role, quantity in pool_specs:
        name = random_node_name()
        nodes.append({"hostnamePrefix": name,
                      "nodeTemplateId": node_template.id,
                      "requestedHostname": name,
                      role: True,
                      "quantity": quantity})
    cluster, node_pools = create_and_validate_cluster(
        client, nodes, rancherKubernetesEngineConfig)
    cluster_cleanup(client, cluster)
def validate_rke_dm_host_4(node_template,
                           rancherKubernetesEngineConfig=rke_config):
    """Scale a single-pool cluster from 1 to 3 nodes, delete one node,
    and verify the pool replaces it to keep the quantity at 3."""
    client = get_user_client()
    # Create cluster and add a node pool to this cluster
    nodes = []
    node_name = random_node_name()
    node = {"hostnamePrefix": node_name,
            "nodeTemplateId": node_template.id,
            "requestedHostname": node_name,
            "controlPlane": True,
            "etcd": True,
            "worker": True,
            "quantity": 1}
    nodes.append(node)
    cluster, node_pools = create_and_validate_cluster(
        client, nodes, rancherKubernetesEngineConfig)
    assert len(cluster.nodes()) == 1
    node1 = cluster.nodes().data[0]
    assert len(node_pools) == 1
    node_pool = node_pools[0]
    # Increase the scale of the node pool to 3
    node_pool = client.update(node_pool, nodeTemplateId=node_template.id,
                              quantity=3)
    cluster = validate_cluster(client, cluster, intermediate_state="updating")
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) == 3
    # Delete node1
    node1 = client.delete(node1)
    wait_for_node_to_be_deleted(client, node1)
    # The pool keeps quantity=3, so a replacement node is provisioned.
    cluster = validate_cluster(client, cluster, intermediate_state="updating")
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) == 3
    cluster_cleanup(client, cluster)
def create_and_validate_cluster(client, nodes,
                                rancherKubernetesEngineConfig=rke_config,
                                clusterName=None):
    """Create a node-driver cluster plus one node pool per spec in `nodes`.

    Node-pool creation is retried for up to ~10 seconds because the
    cluster owner's global role bindings may not have propagated yet,
    making the first create_node_pool calls fail with ApiError.
    Returns a (cluster, node_pools) tuple.
    """
    cluster = client.create_cluster(
        name=clusterName
        if clusterName is not None else evaluate_clustername(),
        rancherKubernetesEngineConfig=rancherKubernetesEngineConfig)
    node_pools = []
    for node in nodes:
        node["clusterId"] = cluster.id
        success = False
        start = time.time()
        while not success:
            if time.time() - start > 10:
                raise AssertionError(
                    "Timed out waiting for cluster owner global Roles")
            try:
                time.sleep(1)
                node_pool = client.create_node_pool(**node)
                success = True
            except ApiError:
                success = False
        node_pool = client.wait_success(node_pool)
        node_pools.append(node_pool)
    cluster = validate_cluster(client, cluster)
    return cluster, node_pools
def random_node_name():
    """Return a randomized node-name prefix derived from HOST_NAME."""
    # Fall back to the generic "testauto" prefix when HOST_NAME is
    # unset or still carries its default value.
    if not HOST_NAME or HOST_NAME == "testcustom":
        prefix = "testauto"
    else:
        prefix = HOST_NAME
    return "{}-{}".format(prefix, random_int(100000, 999999))
def evaluate_clustername():
    """Return the configured CLUSTER_NAME, or a random one if unset."""
    return CLUSTER_NAME if CLUSTER_NAME != "" else random_name()
@pytest.fixture(scope='session')
def node_template_az():
    """Session-scoped fixture: an Azure node template backed by a cloud
    credential built from the AZURE_* environment variables."""
    client = get_user_client()
    # FIX: this dict was previously misnamed `ec2_cloud_credential_config`
    # even though it holds Azure credentials.
    azure_cloud_credential_config = {
        "clientId": AZURE_CLIENT_ID,
        "clientSecret": AZURE_CLIENT_SECRET,
        "subscriptionId": AZURE_SUBSCRIPTION_ID
    }
    azure_cloud_credential = client.create_cloud_credential(
        azurecredentialConfig=azure_cloud_credential_config
    )
    # Machine settings plus the ports Rancher/Kubernetes need opened.
    azConfig = {
        "availabilitySet": "docker-machine",
        "customData": "",
        "dns": "",
        "dockerPort": "2376",
        "environment": "AzurePublicCloud",
        "image": "canonical:UbuntuServer:16.04.0-LTS:latest",
        "location": "westus",
        "noPublicIp": False,
        "openPort": [
            "6443/tcp",
            "2379/tcp",
            "2380/tcp",
            "8472/udp",
            "4789/udp",
            "10256/tcp",
            "10250/tcp",
            "10251/tcp",
            "10252/tcp",
            "80/tcp",
            "443/tcp",
            "9999/tcp",
            "8888/tcp",
            "30456/tcp",
            "30457/tcp",
            "30458/tcp",
            "30459/tcp",
            "9001/tcp"
        ],
        "privateIpAddress": "",
        "resourceGroup": "docker-machine",
        "size": "Standard_A2",
        "sshUser": "docker-user",
        "staticPublicIp": False,
        "storageType": "Standard_LRS",
        "subnet": "docker-machine",
        "subnetPrefix": "192.168.0.0/16",
        "usePrivateIp": False,
        "vnet": "docker-machine-vnet"
    }
    node_template = client.create_node_template(
        azureConfig=azConfig,
        name=random_name(),
        driver="azure",
        cloudCredentialId=azure_cloud_credential.id,
        useInternalIpAddress=True)
    node_template = client.wait_success(node_template)
    return node_template
@pytest.fixture(scope='session')
def node_template_do():
    """Session-scoped DigitalOcean node template fixture."""
    client = get_user_client()
    do_cloud_credential = client.create_cloud_credential(
        digitaloceancredentialConfig={"accessToken": DO_ACCESSKEY}
    )
    do_config = {"region": "nyc3",
                 "size": "2gb",
                 "image": "ubuntu-18-04-x64"}
    node_template = client.create_node_template(
        digitaloceanConfig=do_config,
        name=random_name(),
        driver="digitalocean",
        cloudCredentialId=do_cloud_credential.id,
        engineInstallURL=engine_install_url,
        useInternalIpAddress=True)
    return client.wait_success(node_template)
@pytest.fixture(scope='session')
def node_template_linode():
    """Session-scoped Linode node template fixture."""
    client = get_user_client()
    linode_cloud_credential_config = {"token": LINODE_ACCESSKEY}
    linode_cloud_credential = client.create_cloud_credential(
        linodecredentialConfig=linode_cloud_credential_config
    )
    node_template = client.create_node_template(
        linodeConfig={"authorizedUsers": "",
                      "createPrivateIp": False,
                      "dockerPort": "2376",
                      "image": "linode/ubuntu18.04",
                      "instanceType": "g6-standard-2",
                      "label": "",
                      "region": "us-west",
                      "sshPort": "22",
                      "sshUser": "",
                      "stackscript": "",
                      "stackscriptData": "",
                      "swapSize": "512",
                      "tags": "",
                      "uaPrefix": "Rancher"},
        name=random_name(),
        driver="linode",
        cloudCredentialId=linode_cloud_credential.id,
        engineInstallURL=engine_install_url,
        useInternalIpAddress=True)
    node_template = client.wait_success(node_template)
    return node_template
@pytest.fixture(scope='session')
def node_template_ec2():
    """Session-scoped EC2 node template fixture (no IAM profile)."""
    client = get_user_client()
    ec2_cloud_credential_config = {"accessKey": AWS_ACCESS_KEY_ID,
                                   "secretKey": AWS_SECRET_ACCESS_KEY}
    ec2_cloud_credential = client.create_cloud_credential(
        amazonec2credentialConfig=ec2_cloud_credential_config
    )
    # Instance settings sourced from the AWS_* environment constants.
    amazonec2Config = {
        "instanceType": "t3.medium",
        "region": AWS_REGION,
        "rootSize": "16",
        "securityGroup": [AWS_SG],
        "sshUser": "ubuntu",
        "subnetId": AWS_SUBNET,
        "usePrivateAddress": False,
        "volumeType": "gp2",
        "vpcId": AWS_VPC,
        "zone": AWS_ZONE
    }
    node_template = client.create_node_template(
        amazonec2Config=amazonec2Config,
        name=random_name(),
        useInternalIpAddress=True,
        driver="amazonec2",
        engineInstallURL=engine_install_url,
        cloudCredentialId=ec2_cloud_credential.id
    )
    node_template = client.wait_success(node_template)
    return node_template
@pytest.fixture(scope='session')
def node_template_ec2_with_provider():
    """Session-scoped fixture: like node_template_ec2, but the template also
    carries an IAM instance profile (AWS_IAM_PROFILE)."""
    client = get_user_client()
    credential = client.create_cloud_credential(
        amazonec2credentialConfig={
            "accessKey": AWS_ACCESS_KEY_ID,
            "secretKey": AWS_SECRET_ACCESS_KEY,
        })
    # Same shape as node_template_ec2's config plus "iamInstanceProfile".
    ec2_config = {
        "instanceType": "t3a.medium",
        "region": AWS_REGION,
        "rootSize": "16",
        "securityGroup": [AWS_SG],
        "sshUser": "ubuntu",
        "subnetId": AWS_SUBNET,
        "usePrivateAddress": False,
        "volumeType": "gp2",
        "vpcId": AWS_VPC,
        "zone": AWS_ZONE,
        "iamInstanceProfile": AWS_IAM_PROFILE,
    }
    template = client.create_node_template(
        amazonec2Config=ec2_config,
        name=random_name(),
        useInternalIpAddress=True,
        driver="amazonec2",
        engineInstallURL=engine_install_url,
        cloudCredentialId=credential.id)
    return client.wait_success(template)
def register_host_after_delay(client, cluster, node_role, delay):
    """Run the cluster-registration docker command on every node in
    node_role["nodes"], sleeping `delay` seconds after each one."""
    for node in node_role["nodes"]:
        run_cmd = get_custom_host_registration_cmd(
            client, cluster, node_role["roles"], node)
        node.execute_command(run_cmd)
        time.sleep(delay)
def create_and_validate_custom_host(node_roles, random_cluster_name=False,
                                    validate=True, version=K8S_VERSION):
    """Provision len(node_roles) AWS nodes, build a custom cluster from them,
    optionally validate it, and return (cluster, nodes)."""
    client = get_user_client()
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        len(node_roles), random_test_name(HOST_NAME))
    cluster, nodes = create_custom_host_from_nodes(
        aws_nodes, node_roles, random_cluster_name, version=version)
    if validate:
        cluster = validate_cluster(client, cluster,
                                   check_intermediate_state=False,
                                   k8s_version=version)
    return cluster, nodes
def create_custom_host_from_nodes(nodes, node_roles,
                                  random_cluster_name=False, windows=False,
                                  windows_flannel_backend='vxlan',
                                  version=K8S_VERSION):
    """Create an RKE cluster and register the given pre-provisioned nodes
    into it with their per-node roles; return (cluster, nodes)."""
    client = get_user_client()
    if random_cluster_name:
        cluster_name = random_name()
    else:
        cluster_name = evaluate_clustername()
    # Pick the RKE config template matching the platform/backend.
    if not windows:
        config = rke_config
    elif windows_flannel_backend == "host-gw":
        config = rke_config_windows_host_gw_aws_provider
    else:
        config = rke_config_windows
    if version != "":
        config["kubernetesVersion"] = version
    cluster = client.create_cluster(name=cluster_name,
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=config,
                                    windowsPreferedCluster=windows)
    assert cluster.state == "provisioning"
    for idx, aws_node in enumerate(nodes):
        docker_run_cmd = get_custom_host_registration_cmd(
            client, cluster, node_roles[idx], aws_node)
        print("Docker run command: " + docker_run_cmd)
        aws_node.roles.extend(node_roles[idx])
        print(aws_node.execute_command(docker_run_cmd))
    cluster = validate_cluster_state(client, cluster,
                                     check_intermediate_state=False)
    return cluster, nodes
def get_cis_rke_config(profile=CIS_SCAN_PROFILE):
    """Return the RKE config for a CIS scan profile, or None (with a printed
    list of supported profiles) when the profile is unknown."""
    supported = {
        'rke-cis-1.4': rke_config_cis_1_4,
        'rke-cis-1.5': rke_config_cis_1_5,
    }
    selected = supported.get(profile)
    if selected is None:
        print('Invalid RKE CIS profile. Supported profiles: ')
        for name in supported:
            print("{0}".format(name))
    else:
        print('Valid RKE CIS Profile loaded: {0}'.format(profile))
    return selected
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
# Module-level logger for the conductor service.
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
                   'power_state', 'access_ip_v4', 'access_ip_v6',
                   'launched_at', 'terminated_at', 'host', 'node',
                   'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
                   'instance_type_id', 'root_device_name', 'launched_on',
                   'progress', 'vm_mode', 'default_ephemeral_device',
                   'default_swap_device', 'root_device_name',
                   'system_metadata', 'updated_at'
                   ]
# Fields that we want to convert back into a datetime object.
# (instance_update() parses string values for these with timeutils.)
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
    """Mission: Conduct things.
    The methods in the base API for nova-conductor are various proxy operations
    performed on behalf of the nova-compute service running on compute nodes.
    Compute nodes are not allowed to directly access the database, so this set
    of methods allows them to get specific work done without locally accessing
    the database.
    The nova-conductor service also exposes an API in the 'compute_task'
    namespace. See the ComputeTaskManager class for details.
    """
    RPC_API_VERSION = '1.58'
    def __init__(self, *args, **kwargs):
        super(ConductorManager, self).__init__(service_name='conductor',
                                               *args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        # network/compute API handles are created lazily via the properties
        # below to avoid an import-time circular dependency.
        self._network_api = None
        self._compute_api = None
        self.compute_task_mgr = ComputeTaskManager()
        self.quotas = quota.QUOTAS
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def create_rpc_dispatcher(self, *args, **kwargs):
        # Expose the compute_task namespace alongside this manager's own API.
        kwargs['additional_apis'] = [self.compute_task_mgr]
        return super(ConductorManager, self).create_rpc_dispatcher(*args,
                                                                   **kwargs)
    @property
    def network_api(self):
        # NOTE(danms): We need to instantiate our network_api on first use
        # to avoid the circular dependency that exists between our init
        # and network_api's
        if self._network_api is None:
            self._network_api = network.API()
        return self._network_api
    @property
    def compute_api(self):
        # Lazily constructed for the same reason as network_api above.
        if self._compute_api is None:
            self._compute_api = compute_api.API()
        return self._compute_api
    def ping(self, context, arg):
        # NOTE(russellb) This method can be removed in 2.0 of this API. It is
        # now a part of the base rpc API.
        return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
    # ------------------------------------------------------------------
    # DB proxy methods.  Each one forwards to self.db and converts the
    # result with jsonutils.to_primitive() so it can cross the RPC layer.
    # NOTE(review): this tree is Python 2 era (iteritems/basestring below).
    # ------------------------------------------------------------------
    @rpc_common.client_exceptions(KeyError, ValueError,
                                  exception.InvalidUUID,
                                  exception.InstanceNotFound,
                                  exception.UnexpectedTaskStateError)
    def instance_update(self, context, instance_uuid,
                        updates, service=None):
        """Validate `updates` against the module-level whitelist, apply them
        to the instance, and emit an update notification."""
        for key, value in updates.iteritems():
            if key not in allowed_updates:
                LOG.error(_("Instance update attempted for "
                            "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, basestring):
                # Datetime fields arrive as strings over RPC; parse them back.
                updates[key] = timeutils.parse_strtime(value)
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context, instance_uuid, updates)
        notifications.send_update(context, old_ref, instance_ref, service)
        return jsonutils.to_primitive(instance_ref)
    @rpc_common.client_exceptions(exception.InstanceNotFound)
    def instance_get(self, context, instance_id):
        return jsonutils.to_primitive(
            self.db.instance_get(context, instance_id))
    @rpc_common.client_exceptions(exception.InstanceNotFound)
    def instance_get_by_uuid(self, context, instance_uuid,
                             columns_to_join=None):
        return jsonutils.to_primitive(
            self.db.instance_get_by_uuid(context, instance_uuid,
                                         columns_to_join))
    # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
    def instance_get_all(self, context):
        return jsonutils.to_primitive(self.db.instance_get_all(context))
    def instance_get_all_by_host(self, context, host, node=None,
                                 columns_to_join=None):
        """List instances on `host`; when `node` is given, scope to that node
        (note: columns_to_join is not passed on the by-node path)."""
        if node is not None:
            result = self.db.instance_get_all_by_host_and_node(
                context.elevated(), host, node)
        else:
            result = self.db.instance_get_all_by_host(context.elevated(), host,
                                                      columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    @rpc_common.client_exceptions(exception.MigrationNotFound)
    def migration_get(self, context, migration_id):
        migration_ref = self.db.migration_get(context.elevated(),
                                              migration_id)
        return jsonutils.to_primitive(migration_ref)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def migration_get_unconfirmed_by_dest_compute(self, context,
                                                  confirm_window,
                                                  dest_compute):
        migrations = self.db.migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute)
        return jsonutils.to_primitive(migrations)
    def migration_get_in_progress_by_host_and_node(self, context,
                                                   host, node):
        migrations = self.db.migration_get_in_progress_by_host_and_node(
            context, host, node)
        return jsonutils.to_primitive(migrations)
    # NOTE(comstud): This method can be removed in v2.0 of the RPC API.
    def migration_create(self, context, instance, values):
        # Source compute/node are derived from the instance record.
        values.update({'instance_uuid': instance['uuid'],
                       'source_compute': instance['host'],
                       'source_node': instance['node']})
        migration_ref = self.db.migration_create(context.elevated(), values)
        return jsonutils.to_primitive(migration_ref)
    @rpc_common.client_exceptions(exception.MigrationNotFound)
    def migration_update(self, context, migration, status):
        migration_ref = self.db.migration_update(context.elevated(),
                                                 migration['id'],
                                                 {'status': status})
        return jsonutils.to_primitive(migration_ref)
    @rpc_common.client_exceptions(exception.AggregateHostExists)
    def aggregate_host_add(self, context, aggregate, host):
        host_ref = self.db.aggregate_host_add(context.elevated(),
                                              aggregate['id'], host)
        return jsonutils.to_primitive(host_ref)
    @rpc_common.client_exceptions(exception.AggregateHostNotFound)
    def aggregate_host_delete(self, context, aggregate, host):
        self.db.aggregate_host_delete(context.elevated(),
                                      aggregate['id'], host)
    @rpc_common.client_exceptions(exception.AggregateNotFound)
    def aggregate_get(self, context, aggregate_id):
        aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
        return jsonutils.to_primitive(aggregate)
    def aggregate_get_by_host(self, context, host, key=None):
        aggregates = self.db.aggregate_get_by_host(context.elevated(),
                                                   host, key)
        return jsonutils.to_primitive(aggregates)
    def aggregate_metadata_add(self, context, aggregate, metadata,
                               set_delete=False):
        new_metadata = self.db.aggregate_metadata_add(context.elevated(),
                                                      aggregate['id'],
                                                      metadata, set_delete)
        return jsonutils.to_primitive(new_metadata)
    @rpc_common.client_exceptions(exception.AggregateMetadataNotFound)
    def aggregate_metadata_delete(self, context, aggregate, key):
        self.db.aggregate_metadata_delete(context.elevated(),
                                          aggregate['id'], key)
    def aggregate_metadata_get_by_host(self, context, host,
                                       key='availability_zone'):
        result = self.db.aggregate_metadata_get_by_host(context, host, key)
        return jsonutils.to_primitive(result)
    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in=None, bw_out=None,
                        last_ctr_in=None, last_ctr_out=None,
                        last_refreshed=None,
                        update_cells=True):
        """Write bandwidth usage only when at least one counter argument is
        supplied; always return the (possibly unchanged) usage row."""
        if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
            self.db.bw_usage_update(context, uuid, mac, start_period,
                                    bw_in, bw_out, last_ctr_in, last_ctr_out,
                                    last_refreshed,
                                    update_cells=update_cells)
        usage = self.db.bw_usage_get(context, uuid, start_period, mac)
        return jsonutils.to_primitive(usage)
    # NOTE(russellb) This method can be removed in 2.0 of this API. It is
    # deprecated in favor of the method in the base API.
    def get_backdoor_port(self, context):
        return self.backdoor_port
    def security_group_get_by_instance(self, context, instance):
        group = self.db.security_group_get_by_instance(context,
                                                       instance['uuid'])
        return jsonutils.to_primitive(group)
    def security_group_rule_get_by_security_group(self, context, secgroup):
        rules = self.db.security_group_rule_get_by_security_group(
            context, secgroup['id'])
        # max_depth bounds recursion when primitivizing nested rule refs.
        return jsonutils.to_primitive(rules, max_depth=4)
    def provider_fw_rule_get_all(self, context):
        rules = self.db.provider_fw_rule_get_all(context)
        return jsonutils.to_primitive(rules)
    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        info = self.db.agent_build_get_by_triple(context, hypervisor, os,
                                                 architecture)
        return jsonutils.to_primitive(info)
    def block_device_mapping_update_or_create(self, context, values,
                                              create=None):
        """Create/update a BDM row: create when create=True, update when
        create=False, and let the DB layer decide when create is None.
        Also mirrors the change to the top-level cell."""
        if create is None:
            bdm = self.db.block_device_mapping_update_or_create(context,
                                                                values)
        elif create is True:
            bdm = self.db.block_device_mapping_create(context, values)
        else:
            bdm = self.db.block_device_mapping_update(context,
                                                      values['id'],
                                                      values)
        # NOTE:comstud): 'bdm' is always in the new format, so we
        # account for this in cells/messaging.py
        self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm,
                                                      create=create)
    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy=True):
        bdms = self.db.block_device_mapping_get_all_by_instance(
            context, instance['uuid'])
        if legacy:
            bdms = block_device.legacy_mapping(bdms)
        return jsonutils.to_primitive(bdms)
    def block_device_mapping_destroy(self, context, bdms=None,
                                     instance=None, volume_id=None,
                                     device_name=None):
        """Destroy BDMs either by an explicit list of rows, or by
        (instance, volume_id) / (instance, device_name); raises Invalid for
        any other argument combination.  Each destroy is mirrored to the
        top-level cell."""
        if bdms is not None:
            for bdm in bdms:
                self.db.block_device_mapping_destroy(context, bdm['id'])
                # NOTE(comstud): bdm['id'] will be different in API cell,
                # so we must try to destroy by device_name or volume_id.
                # We need an instance_uuid in order to do this properly,
                # too.
                # I hope to clean a lot of this up in the object
                # implementation.
                instance_uuid = (bdm['instance_uuid'] or
                                 (instance and instance['uuid']))
                if not instance_uuid:
                    continue
                # Better to be safe than sorry. device_name is not
                # NULLable, however it could be an empty string.
                if bdm['device_name']:
                    self.cells_rpcapi.bdm_destroy_at_top(
                        context, instance_uuid,
                        device_name=bdm['device_name'])
                elif bdm['volume_id']:
                    self.cells_rpcapi.bdm_destroy_at_top(
                        context, instance_uuid,
                        volume_id=bdm['volume_id'])
        elif instance is not None and volume_id is not None:
            self.db.block_device_mapping_destroy_by_instance_and_volume(
                context, instance['uuid'], volume_id)
            self.cells_rpcapi.bdm_destroy_at_top(
                context, instance['uuid'], volume_id=volume_id)
        elif instance is not None and device_name is not None:
            self.db.block_device_mapping_destroy_by_instance_and_device(
                context, instance['uuid'], device_name)
            self.cells_rpcapi.bdm_destroy_at_top(
                context, instance['uuid'], device_name=device_name)
        else:
            # NOTE(danms): This shouldn't happen
            raise exception.Invalid(_("Invalid block_device_mapping_destroy"
                                      " invocation"))
    def instance_get_all_by_filters(self, context, filters, sort_key,
                                    sort_dir, columns_to_join=None):
        result = self.db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir,
            columns_to_join=columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
    def instance_get_all_hung_in_rebooting(self, context, timeout):
        result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
        return jsonutils.to_primitive(result)
    def instance_get_active_by_window(self, context, begin, end=None,
                                      project_id=None, host=None):
        # Unused, but cannot remove until major RPC version bump
        result = self.db.instance_get_active_by_window(context, begin, end,
                                                       project_id, host)
        return jsonutils.to_primitive(result)
    def instance_get_active_by_window_joined(self, context, begin, end=None,
                                             project_id=None, host=None):
        result = self.db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host)
        return jsonutils.to_primitive(result)
    def instance_destroy(self, context, instance):
        self.db.instance_destroy(context, instance['uuid'])
    def instance_info_cache_delete(self, context, instance):
        self.db.instance_info_cache_delete(context, instance['uuid'])
    def instance_info_cache_update(self, context, instance, values):
        self.db.instance_info_cache_update(context, instance['uuid'],
                                           values)
    def instance_type_get(self, context, instance_type_id):
        # "instance type" is the legacy name for flavor.
        result = self.db.flavor_get(context, instance_type_id)
        return jsonutils.to_primitive(result)
    def instance_fault_create(self, context, values):
        result = self.db.instance_fault_create(context, values)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
    def vol_get_usage_by_time(self, context, start_time):
        result = self.db.vol_get_usage_by_time(context, start_time)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): The last_refreshed argument is unused by this method
    # and can be removed in v2.0 of the RPC API.
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        """Record volume read/write counters for an instance and emit a
        'volume.usage' notification with the updated row."""
        vol_usage = self.db.vol_usage_update(context, vol_id,
                                             rd_req, rd_bytes,
                                             wr_req, wr_bytes,
                                             instance['uuid'],
                                             instance['project_id'],
                                             instance['user_id'],
                                             instance['availability_zone'],
                                             update_totals)
        # We have just updated the database, so send the notification now
        self.notifier.info(context, 'volume.usage',
                           compute_utils.usage_volume_info(vol_usage))
    @rpc_common.client_exceptions(exception.ComputeHostNotFound,
                                  exception.HostBinaryNotFound)
    def service_get_all_by(self, context, topic=None, host=None, binary=None):
        """Look up services by whatever combination of topic/host/binary was
        supplied; with no filters, return all services."""
        if not any((topic, host, binary)):
            result = self.db.service_get_all(context)
        elif all((topic, host)):
            if topic == 'compute':
                result = self.db.service_get_by_compute_host(context, host)
                # FIXME(comstud) Potentially remove this on bump to v2.0
                result = [result]
            else:
                result = self.db.service_get_by_host_and_topic(context,
                                                               host, topic)
        elif all((host, binary)):
            result = self.db.service_get_by_args(context, host, binary)
        elif topic:
            result = self.db.service_get_all_by_topic(context, topic)
        elif host:
            result = self.db.service_get_all_by_host(context, host)
        return jsonutils.to_primitive(result)
    def action_event_start(self, context, values):
        evt = self.db.action_event_start(context, values)
        return jsonutils.to_primitive(evt)
    def action_event_finish(self, context, values):
        evt = self.db.action_event_finish(context, values)
        return jsonutils.to_primitive(evt)
    def service_create(self, context, values):
        svc = self.db.service_create(context, values)
        return jsonutils.to_primitive(svc)
    @rpc_common.client_exceptions(exception.ServiceNotFound)
    def service_destroy(self, context, service_id):
        self.db.service_destroy(context, service_id)
    def compute_node_create(self, context, values):
        result = self.db.compute_node_create(context, values)
        return jsonutils.to_primitive(result)
    def compute_node_update(self, context, node, values, prune_stats=False):
        result = self.db.compute_node_update(context, node['id'], values,
                                             prune_stats)
        return jsonutils.to_primitive(result)
    def compute_node_delete(self, context, node):
        result = self.db.compute_node_delete(context, node['id'])
        return jsonutils.to_primitive(result)
    @rpc_common.client_exceptions(exception.ServiceNotFound)
    def service_update(self, context, service, values):
        svc = self.db.service_update(context, service['id'], values)
        return jsonutils.to_primitive(svc)
    def task_log_get(self, context, task_name, begin, end, host, state=None):
        result = self.db.task_log_get(context, task_name, begin, end, host,
                                      state)
        return jsonutils.to_primitive(result)
    def task_log_begin_task(self, context, task_name, begin, end, host,
                            task_items=None, message=None):
        result = self.db.task_log_begin_task(context.elevated(), task_name,
                                             begin, end, host, task_items,
                                             message)
        return jsonutils.to_primitive(result)
    def task_log_end_task(self, context, task_name, begin, end, host,
                          errors, message=None):
        result = self.db.task_log_end_task(context.elevated(), task_name,
                                           begin, end, host, errors, message)
        return jsonutils.to_primitive(result)
    def notify_usage_exists(self, context, instance, current_period=False,
                            ignore_missing_network_data=True,
                            system_metadata=None, extra_usage_info=None):
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period,
                                          ignore_missing_network_data,
                                          system_metadata, extra_usage_info)
    def security_groups_trigger_handler(self, context, event, args):
        self.security_group_api.trigger_handler(event, context, *args)
    def security_groups_trigger_members_refresh(self, context, group_ids):
        self.security_group_api.trigger_members_refresh(context, group_ids)
    def network_migrate_instance_start(self, context, instance, migration):
        self.network_api.migrate_instance_start(context, instance, migration)
    def network_migrate_instance_finish(self, context, instance, migration):
        self.network_api.migrate_instance_finish(context, instance, migration)
    def quota_commit(self, context, reservations, project_id=None,
                     user_id=None):
        quota.QUOTAS.commit(context, reservations, project_id=project_id,
                            user_id=user_id)
    def quota_rollback(self, context, reservations, project_id=None,
                       user_id=None):
        quota.QUOTAS.rollback(context, reservations, project_id=project_id,
                              user_id=user_id)
    def get_ec2_ids(self, context, instance):
        """Build the EC2-style id mapping (instance-id, ami-id, and optional
        kernel-id/ramdisk-id) for an instance."""
        ec2_ids = {}
        ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
        ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
                                                         instance['image_ref'])
        for image_type in ['kernel', 'ramdisk']:
            if '%s_id' % image_type in instance:
                image_id = instance['%s_id' % image_type]
                ec2_image_type = ec2utils.image_type(image_type)
                ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
                                                      ec2_image_type)
                ec2_ids['%s-id' % image_type] = ec2_id
        return ec2_ids
    # NOTE(danms): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def compute_stop(self, context, instance, do_cast=True):
        # NOTE(mriedem): Clients using an interface before 1.43 will be sending
        # dicts so we need to handle that here since compute/api::stop()
        # requires an object.
        if isinstance(instance, dict):
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance)
        self.compute_api.stop(context, instance, do_cast)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def compute_confirm_resize(self, context, instance, migration_ref):
        # Tolerate old-world dicts for both instance and migration.
        if isinstance(instance, dict):
            attrs = ['metadata', 'system_metadata', 'info_cache',
                     'security_groups']
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=attrs)
        if isinstance(migration_ref, dict):
            migration_ref = migration_obj.Migration._from_db_object(
                context.elevated(), migration_ref)
        self.compute_api.confirm_resize(context, instance,
                                        migration=migration_ref)
    def compute_unrescue(self, context, instance):
        self.compute_api.unrescue(context, instance)
    def _object_dispatch(self, target, method, context, args, kwargs):
        """Dispatch a call to an object method.
        This ensures that object methods get called and any exception
        that is raised gets wrapped in a ClientException for forwarding
        back to the caller (without spamming the conductor logs).
        """
        try:
            # NOTE(danms): Keep the getattr inside the try block since
            # a missing method is really a client problem
            return getattr(target, method)(context, *args, **kwargs)
        except Exception:
            raise rpc_common.ClientException()
    def object_class_action(self, context, objname, objmethod,
                            objver, args, kwargs):
        """Perform a classmethod action on an object."""
        objclass = nova_object.NovaObject.obj_class_from_name(objname,
                                                              objver)
        return self._object_dispatch(objclass, objmethod, context,
                                     args, kwargs)
    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object."""
        oldobj = objinst.obj_clone()
        result = self._object_dispatch(objinst, objmethod, context,
                                       args, kwargs)
        updates = dict()
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        for field in objinst.fields:
            if not objinst.obj_attr_is_set(field):
                # Avoid demand-loading anything
                continue
            if (not oldobj.obj_attr_is_set(field) or
                    oldobj[field] != objinst[field]):
                updates[field] = objinst._attr_to_primitive(field)
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result
    # NOTE(danms): This method is now deprecated and can be removed in
    # v2.0 of the RPC API
    def compute_reboot(self, context, instance, reboot_type):
        self.compute_api.reboot(context, instance, reboot_type)
    def object_backport(self, context, objinst, target_version):
        return objinst.obj_to_primitive(target_version=target_version)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
RPC_API_NAMESPACE = 'compute_task'
RPC_API_VERSION = '1.6'
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.image_service = glance.get_default_image_service()
self.quotas = quota.QUOTAS
@rpc_common.client_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None):
if instance and not isinstance(instance, instance_obj.Instance):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, ConductorManager(),
'cold_migrate', instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
try:
hosts = self.scheduler_rpcapi.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance['vm_state']
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
LOG.warning(_("No valid host found for cold migrate"))
return
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
# TODO(timello): originally, instance_type in request_spec
# on compute.api.resize does not have 'extra_specs', so we
# remove it for now to keep tests backward compatibility.
request_spec['instance_type'].pop('extra_specs')
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
def _set_vm_state_and_notify(self, context, method, updates, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError) as ex:
with excutils.save_and_reraise_exception():
#TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
except Exception as ex:
with excutils.save_and_reraise_exception():
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ERROR},
ex, request_spec, self.db)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties,
legacy_bdm_in_spec=legacy_bdm)
def _get_image(self, context, image_id):
if not image_id:
return None
return self.image_service.show(context, image_id)
def _delete_image(self, context, image_id):
(image_service, image_id) = glance.get_remote_image_service(context,
image_id)
return image_service.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# dict(host='', nodename='', limits='')
hosts = self.scheduler_rpcapi.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
    """Bring a shelved instance back into service.

    SHELVED instances are simply powered back on by their compute host;
    SHELVED_OFFLOADED instances must be rescheduled onto a host and
    rebuilt from the shelved snapshot image. On success the shelve
    bookkeeping keys are removed from the instance's system metadata.
    """
    sys_meta = instance.system_metadata
    if instance.vm_state == vm_states.SHELVED:
        instance.task_state = task_states.POWERING_ON
        instance.save(expected_task_state=task_states.UNSHELVING)
        self.compute_rpcapi.start_instance(context, instance)
        # Snapshot is no longer needed once the instance is running again.
        snapshot_id = sys_meta.get('shelved_image_id')
        if snapshot_id:
            self._delete_image(context, snapshot_id)
    elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
        try:
            with compute_utils.EventReporter(context, self.db,
                    'get_image_info', instance.uuid):
                # NOTE(review): assumes 'shelved_image_id' is present in
                # system_metadata for offloaded instances -- a KeyError
                # here would not be handled below.
                image = self._get_image(context,
                        sys_meta['shelved_image_id'])
        except exception.ImageNotFound:
            with excutils.save_and_reraise_exception():
                # FIX: previously logged the unrelated "vm_state not
                # SHELVED or SHELVED_OFFLOADED" message here.
                LOG.error(_('Unshelve attempted but the image %s '
                            'cannot be found.'),
                          sys_meta.get('shelved_image_id'),
                          instance=instance)
                instance.vm_state = vm_states.ERROR
                instance.save()
        filter_properties = {}
        hosts = self._schedule_instances(context, image,
                                         filter_properties, instance)
        host = hosts.pop(0)['host']
        self.compute_rpcapi.unshelve_instance(context, instance, host,
                                              image)
    else:
        LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
                    'SHELVED_OFFLOADED'), instance=instance)
        instance.vm_state = vm_states.ERROR
        instance.save()
        return
    # Clean up the shelve bookkeeping now that the instance is back.
    for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
        if key in sys_meta:
            del sys_meta[key]
    instance.system_metadata = sys_meta
    instance.save()
|
|
'''
Created on Jan 1, 2014
@author: Chris
TODO:
Sanitize all GetValue inputs
(to check that there's actual data there).
'''
import wx
from abc import ABCMeta
from abc import abstractmethod
from gooey.gui import styling
# Sentinel for "no value entered" used throughout this module.
EMPTY = ''


class BuildException(RuntimeError):
    # Raised when a component is used before Build() has initialized it.
    pass
class AbstractGuiComponent(object):
    '''
    Template pattern-y abstract class for the gui.
    Children must all implement the BuildWidget and getValue
    methods.
    '''
    __metaclass__ = ABCMeta

    def __init__(self):
        self._widget = None
        # FIX: _msg was only ever assigned in Build(); calling Update()
        # (or anything reading _msg) before Build() raised AttributeError.
        self._msg = None
        self.msg = EMPTY

    def Build(self, parent):
        '''Assemble name label, optional help text and the input widget
        into a vertical sizer and return it.'''
        self._widget = self.BuildInputWidget(parent, self._action)
        if self.HasHelpMsg(self._action):
            self._msg = self.CreateHelpMsgWidget(parent, self._action)
        else:
            self._msg = None
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.CreateNameLabelWidget(parent, self._action))
        sizer.AddSpacer(2)
        if self._msg:
            sizer.Add(self._msg, 0, wx.EXPAND)
            sizer.AddSpacer(2)
        else:
            sizer.AddStretchSpacer(1)
        sizer.AddStretchSpacer(1)
        sizer.Add(self._widget, 0, wx.EXPAND)
        return sizer

    @abstractmethod
    def BuildInputWidget(self, parent, action):
        ''' Must construct the main widget type for the Action '''
        pass

    def HasHelpMsg(self, action):
        return action.help is not None

    def CreateHelpMsgWidget(self, parent, action):
        base_text = wx.StaticText(parent, label=action.help)
        if self.HasNargs(action):
            base_text.SetLabelText(base_text.GetLabelText() +
                                   self.CreateNargsMsg(action))
        styling.MakeDarkGrey(base_text)
        return base_text

    def HasNargs(self, action):
        # FIX: was "action.nargs is not 0" -- identity comparison against an
        # int literal only works because CPython caches small ints.
        return action.nargs is not None and action.nargs != 0

    def CreateNargsMsg(self, action):
        if isinstance(action.nargs, int):
            return '\n(Note: exactly {0} arguments are required)'.format(
                action.nargs)
        elif action.nargs == '+':
            return '\n(Note: at least 1 or more arguments are required)'
        return ''

    def CreateNameLabelWidget(self, parent, action):
        label = str(action.dest).title()
        if len(action.option_strings) > 1:
            label += ' (%s)' % action.option_strings[0]
        text = wx.StaticText(parent, label=label)
        styling.MakeBold(text)
        return text

    def AssertInitialization(self, clsname):
        if not self._widget:
            raise BuildException('%s was not correctly initialized' % clsname)

    def __str__(self):
        return str(self._action)

    @abstractmethod
    def GetValue(self):
        ''' Returns the state of the given widget '''
        pass

    def Update(self, size):
        '''
        Manually word wraps the StaticText help objects which would
        otherwise not wrap on resize.

        Content area is based on each grid having two equally sized
        columns, where the content area is defined as 87% of the halved
        window's width. Wrapping is only recomputed when the help_msg
        width leaves the wiggle_room, to avoid "flickering" from
        constantly resizing a StaticText object.

        NOTE(review): the original docstring said +-10% wiggle room; the
        code below uses 5% -- confirm which is intended.
        '''
        if self._msg is None:
            return
        help_msg = self._msg
        width, height = size
        content_area = int((width / 2) * .87)
        wiggle_room = range(int(content_area - content_area * .05),
                            int(content_area + content_area * .05))
        if help_msg.Size[0] not in wiggle_room:
            self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' '))
            self._msg.Wrap(content_area)
class AbstractComponent(object):
    '''
    Template pattern-y abstract class for the gui.
    Children must all implement the BuildWidget and getValue
    methods.

    NOTE(review): this class is a near-duplicate of AbstractGuiComponent
    above (the only code difference is a format placeholder); consider
    consolidating -- left in place since callers may reference either.
    '''
    __metaclass__ = ABCMeta

    def __init__(self):
        self._widget = None
        # FIX: _msg was only ever assigned in Build(); calling Update()
        # before Build() raised AttributeError.
        self._msg = None
        self.msg = EMPTY

    def Build(self, parent):
        '''Assemble name label, optional help text and the input widget
        into a vertical sizer and return it.'''
        self._widget = self.BuildInputWidget(parent, self._action)
        if self.HasHelpMsg(self._action):
            self._msg = self.CreateHelpMsgWidget(parent, self._action)
        else:
            self._msg = None
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.CreateNameLabelWidget(parent, self._action))
        sizer.AddSpacer(2)
        if self._msg:
            sizer.Add(self._msg, 0, wx.EXPAND)
            sizer.AddSpacer(2)
        else:
            sizer.AddStretchSpacer(1)
        sizer.AddStretchSpacer(1)
        sizer.Add(self._widget, 0, wx.EXPAND)
        return sizer

    @abstractmethod
    def BuildInputWidget(self, parent, action):
        ''' Must construct the main widget type for the Action '''
        pass

    def HasHelpMsg(self, action):
        return action.help is not None

    def CreateHelpMsgWidget(self, parent, action):
        base_text = wx.StaticText(parent, label=action.help)
        if self.HasNargs(action):
            base_text.SetLabelText(base_text.GetLabelText() +
                                   self.CreateNargsMsg(action))
        styling.MakeDarkGrey(base_text)
        return base_text

    def HasNargs(self, action):
        # FIX: was "action.nargs is not 0" -- identity comparison against an
        # int literal only works because CPython caches small ints.
        return action.nargs is not None and action.nargs != 0

    def CreateNargsMsg(self, action):
        if isinstance(action.nargs, int):
            return '\n(Note: exactly {} arguments are required)'.format(
                action.nargs)
        elif action.nargs == '+':
            return '\n(Note: at least 1 or more arguments are required)'
        return ''

    def CreateNameLabelWidget(self, parent, action):
        label = str(action.dest).title()
        if len(action.option_strings) > 1:
            label += ' (%s)' % action.option_strings[0]
        text = wx.StaticText(parent, label=label)
        styling.MakeBold(text)
        return text

    def AssertInitialization(self, clsname):
        if not self._widget:
            raise BuildException('%s was not correctly initialized' % clsname)

    def __str__(self):
        return str(self._action)

    @abstractmethod
    def GetValue(self):
        ''' Returns the state of the given widget '''
        pass

    def Update(self, size):
        '''
        Manually word wraps the StaticText help objects which would
        otherwise not wrap on resize.

        Content area is 87% of half the window width; wrapping is only
        recomputed when the help_msg width leaves the wiggle_room, to
        avoid "flickering" from constantly resizing a StaticText.

        NOTE(review): the original docstring said +-10% wiggle room; the
        code below uses 5% -- confirm which is intended.
        '''
        if self._msg is None:
            return
        help_msg = self._msg
        width, height = size
        content_area = int((width / 2) * .87)
        wiggle_room = range(int(content_area - content_area * .05),
                            int(content_area + content_area * .05))
        if help_msg.Size[0] not in wiggle_room:
            self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' '))
            self._msg.Wrap(content_area)
class Positional(AbstractComponent):
    """
    Represents a positional argument in a program
    e.g.
      mypyfile.py param1 <-- this guy
    """

    def __init__(self, action):
        self._action = action
        self._widget = None
        self.contents = None

    def BuildInputWidget(self, parent, action):
        return wx.TextCtrl(parent)

    def GetValue(self):
        '''
        Positionals have no associated option string, so just the
        supplied argument value is returned (ordering is assumed to
        match the declaration order in the client code), or None when
        the field is empty.
        '''
        self.AssertInitialization('Positional')
        value = self._widget.GetValue()
        return None if str(value) == EMPTY else value
class Choice(AbstractComponent):
    """ A dropdown box """
    _DEFAULT_VALUE = 'Select Option'

    def __init__(self, action):
        self._action = action
        self._widget = None
        self.contents = None

    def GetValue(self):
        '''
        Returns "--option_name argument", or None while the dropdown
        still shows the placeholder text.
        '''
        self.AssertInitialization('Choice')
        chosen = self._widget.GetValue()
        if chosen == self._DEFAULT_VALUE:
            return None
        # Prefix with the verbose option string when one is available.
        opts = self._action.option_strings
        prefix = opts[0] if opts else ''
        return '{0} {1}'.format(prefix, chosen)

    def BuildInputWidget(self, parent, action):
        return wx.ComboBox(
            parent=parent,
            id=-1,
            value=self._DEFAULT_VALUE,
            choices=action.choices,
            style=wx.CB_DROPDOWN
        )
class Optional(AbstractComponent):
    def __init__(self, action):
        self._action = action
        self._widget = None
        self.contents = None

    def BuildInputWidget(self, parent, action):
        return wx.TextCtrl(parent)

    def GetValue(self):
        '''
        General options are conceptually key/value pairs, so both the
        option name and its argument are returned, e.g.

          >>> myscript --outfile myfile.txt

        yields "--Option Value". Returns None for an empty field.
        '''
        self.AssertInitialization('Optional')
        value = self._widget.GetValue()
        if not value:
            return None
        return '{0} {1}'.format(self._action.option_strings[0], value)
class Flag(AbstractComponent):
    def __init__(self, action):
        self._action = action
        self._widget = None
        self.contents = None

    def Build(self, parent):
        '''Lay out the checkbox inline with its help text.'''
        self._widget = self.BuildInputWidget(parent, self._action)
        self._msg = (self.CreateHelpMsgWidget(parent, self._action)
                     if self.HasHelpMsg(self._action)
                     else None)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.CreateNameLabelWidget(parent, self._action))
        sizer.AddSpacer(6)
        if self.HasNargs(self._action):
            # FIX: CreateNargsMsg takes only the action (the old call passed
            # parent as well -> TypeError), and it returns a plain string,
            # which a sizer cannot hold -- wrap it in a StaticText.
            sizer.Add(wx.StaticText(
                parent, label=self.CreateNargsMsg(self._action)))
        if self._msg:
            hsizer = self.buildHorizonalMsgSizer(parent)
            sizer.Add(hsizer, 1, wx.EXPAND)
        else:
            sizer.AddStretchSpacer(1)
        sizer.Add(self._widget, 0, wx.EXPAND)
        return sizer

    def BuildInputWidget(self, parent, action):
        return wx.CheckBox(parent, -1, label='')

    def buildHorizonalMsgSizer(self, panel):
        if not self._msg:
            return None
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self._widget, 0)
        sizer.AddSpacer(6)
        sizer.Add(self._msg, 1, wx.EXPAND)
        return sizer

    def GetValue(self):
        '''
        Flag options take no parameter, so only the option string
        itself is returned (e.g. "-v") when checked, else None.
        '''
        # FIX: wx.CheckBox.GetValue() returns a bool; the old code called
        # len() on it, raising TypeError whenever the box was checked.
        if self._widget.GetValue():
            return self._action.option_strings[0]
        return None

    def Update(self, size):
        '''
        Custom wrap calculator to account for the increased size of the
        _msg widget after being inlined with the wx.CheckBox.
        '''
        if self._msg is None:
            return
        help_msg = self._msg
        width, height = size
        content_area = int((width / 3) * .70)
        wiggle_room = range(int(content_area - content_area * .05),
                            int(content_area + content_area * .05))
        if help_msg.Size[0] not in wiggle_room:
            self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' '))
            self._msg.Wrap(content_area)
class Counter(AbstractComponent):
    def __init__(self, action):
        self._action = action
        self._widget = None
        self.contents = None

    def BuildInputWidget(self, parent, action):
        levels = list(map(str, range(1, 7)))
        return wx.ComboBox(
            parent=parent,
            id=-1,
            value='',
            choices=levels,
            style=wx.CB_DROPDOWN
        )

    def GetValue(self):
        '''
        Repeats the option letter(s) once per selected level, so "-v"
        at level 3 becomes "-vvv". Returns None unless a numeric level
        has been chosen.

        (Original NOTE kept: added on plane; exact semantics of counter
        actions were reconstructed from memory.)
        '''
        selected = self._widget.GetValue()
        if not str(selected).isdigit():
            return None
        letters = str(self._action.option_strings[0]).replace('-', '')
        return '-' + letters * int(selected)
class Group(AbstractComponent):
    # Placeholder component for argparse groups: stores the action; the
    # widget and contents are populated later by the framework.
    def __init__(self, action):
        self._action, self._widget, self.contents = action, None, None
# This module is import-only; nothing to execute directly.
if __name__ == '__main__':
    pass
|
|
import torch
import torch.nn as nn
import numpy as np
import pysurvival.utils.optimization as opt
# --------------------------- Activation Functions --------------------------- #
class Swish(nn.Module):
    # swish(x) = x * sigmoid(x)
    def forward(self, x):
        return x * torch.sigmoid(x)


class Gaussian(nn.Module):
    # gaussian(x) = exp(-x^2 / 2)
    def forward(self, x):
        return torch.exp(- x*x/2.)


class Atan(nn.Module):
    def forward(self, x):
        return torch.atan(x)


class InverseSqrt(nn.Module):
    # x / sqrt(1 + alpha * x^2)
    def forward(self, x, alpha=1.):
        return x/torch.sqrt(1.+alpha*x*x)


class Sinc(nn.Module):
    # sin(x)/x with a small epsilon shift to avoid division by zero at 0.
    def forward(self, x, epsilon=1e-9):
        return torch.sin(x+epsilon)/(x+epsilon)


class SinReLU(nn.Module):
    def forward(self, x):
        return torch.sin(x)+torch.relu(x)


class CosReLU(nn.Module):
    def forward(self, x):
        return torch.cos(x)+torch.relu(x)


class LeCunTanh(nn.Module):
    # LeCun's scaled tanh: 1.7159 * tanh(2x/3)
    def forward(self, x):
        return 1.7159*torch.tanh(2./3*x)


class LogLog(nn.Module):
    # 1 - exp(-exp(x)) (complementary log-log)
    def forward(self, x):
        return 1.-torch.exp(-torch.exp(x))


class BipolarSigmoid(nn.Module):
    # (1 - e^-x) / (1 + e^-x), i.e. tanh(x/2)
    def forward(self, x):
        return (1.-torch.exp(-x))/(1.+torch.exp(-x))


class BentIdentity(nn.Module):
    # x + (sqrt(x^2 + 1) - 1)/2
    # NOTE(review): the alpha parameter is accepted but unused -- confirm.
    def forward(self, x, alpha=1.):
        return x + (torch.sqrt(1.+ x*x)- 1.)/2.


class Identity(nn.Module):
    def forward(self, x):
        return x


class Softmax(nn.Module):
    # NOTE(review): normalizes over dim=0 (the batch axis for 2-D input)
    # and uses an unstabilized exp (no max subtraction) -- confirm intended.
    def forward(self, x):
        y = torch.exp(x)
        return y/torch.sum(y, dim=0)
def activation_function(activation, alpha=1., return_text=False):
    """ Returns the activation function object used by the network.

    When return_text is True, only the canonical class name is returned
    and no module is constructed. alpha is forwarded to nn.ELU only.
    Raises NotImplementedError for unknown names.
    """
    key = activation.lower()
    # Ordered (matched, canonical name, lazy constructor) table; the first
    # matching entry wins, mirroring the original if/elif cascade. The
    # constructors are lambdas so nothing is instantiated unless selected.
    table = [
        (key == 'atan', 'Atan', lambda: Atan()),
        (key.startswith('bent'), 'BentIdentity', lambda: BentIdentity()),
        (key.startswith('bipolar'), 'BipolarSigmoid',
         lambda: BipolarSigmoid()),
        (key.startswith('cosrelu'), 'CosReLU', lambda: CosReLU()),
        (key == 'elu', 'ELU', lambda: nn.ELU(alpha=alpha)),
        (key == 'gaussian', 'Gaussian', lambda: Gaussian()),
        (key == 'hardtanh', 'Hardtanh', lambda: nn.Hardtanh()),
        (key == 'identity', 'Identity', lambda: Identity()),
        (key.startswith('inverse'), 'InverseSqrt', lambda: InverseSqrt()),
        (key == 'leakyrelu', 'LeakyReLU', lambda: nn.LeakyReLU()),
        (key.startswith('lecun'), 'LeCunTanh', lambda: LeCunTanh()),
        (key == 'loglog', 'LogLog', lambda: LogLog()),
        (key == 'logsigmoid', 'LogSigmoid', lambda: nn.LogSigmoid()),
        (key == 'relu', 'ReLU', lambda: nn.ReLU()),
        (key == 'selu', 'SELU', lambda: nn.SELU()),
        (key == 'sigmoid', 'Sigmoid', lambda: nn.Sigmoid()),
        (key == 'sinc', 'Sinc', lambda: Sinc()),
        (key.startswith('sinrelu'), 'SinReLU', lambda: SinReLU()),
        (key == 'softmax', 'Softmax', lambda: Softmax()),
        (key == 'softplus', 'Softplus', lambda: nn.Softplus()),
        (key == 'softsign', 'Softsign', lambda: nn.Softsign()),
        (key == 'swish', 'Swish', lambda: Swish()),
        (key == 'tanh', 'Tanh', lambda: nn.Tanh()),
    ]
    for matched, text, build in table:
        if matched:
            return text if return_text else build()
    raise NotImplementedError(
        "{} function isn't implemented".format(activation))
def check_mlp_structure(structure):
    """ Checking that the given MLP structure is valid.

    Accepts a single layer dict or a list of them. Each layer must
    provide an 'activation' (canonicalized in place to the class name,
    e.g. 'relu' -> 'ReLU') and an integer 'num_units'.

    Returns the list of (mutated) layer dicts.
    Raises KeyError when a required key is missing, TypeError when
    num_units is not an int.
    """
    # A single layer dict is promoted to a one-element list.
    if isinstance(structure, dict):
        structure = [structure]
    results = []
    for inner_structure in structure:
        # 'activation' is mandatory and must name a known function.
        activation = inner_structure.get('activation')
        if activation is None:
            error = 'An activation function needs to be provided '
            error += 'using the key "activation"'
            raise KeyError(error)
        # NOTE: mutates the caller's dict in place (original behavior).
        inner_structure['activation'] = activation_function(
            activation, return_text=True)
        # 'num_units' is mandatory and must be an int.
        num_units = inner_structure.get('num_units')
        if num_units is None:
            error = 'The number of hidden units needs to be provided '
            error += 'using the key "num_units"'
            raise KeyError(error)
        if not isinstance(num_units, int):
            # FIX: grammar -- was "a integer".
            error = 'num_units in {} needs to be an integer'.format(
                inner_structure)
            raise TypeError(error)
        inner_structure['num_units'] = num_units
        results.append(inner_structure)
    return results
# ----------------------------- MLP Object ----------------------------- #
class NeuralNet(nn.Module):
    """ Defines a Multilayer Perceptron (MLP) that consists in
        * an input layer,
        * at least one fully connected neural layer (or hidden layer)
        * and an output layer

    Parameters:
    -----------
    * input_size: int
        Dimension of the input tensor
    * output_size: int
        Size of the output layer
    * structure: None or list of dictionnaries
        Provides the structure of the MLP built within the N-MTLR
        If None, then the model becomes the Linear MTLR
        ex: structure = [ {'activation': 'relu', 'num_units': 128},
                          {'activation': 'tanh', 'num_units': 128}, ]
        Here are the possible activation functions:
        * Atan, BentIdentity, BipolarSigmoid, CosReLU, ELU, Gaussian,
          Hardtanh, Identity, InverseSqrt, LeakyReLU, LeCunTanh, LogLog,
          LogSigmoid, ReLU, SELU, Sigmoid, Sinc, SinReLU, Softmax,
          Softplus, Softsign, Swish, Tanh
    * init_method: str
        Defines the type of initializer to use
    * dropout: double (default=None)
        Randomly sets a fraction rate of input units to 0
        at each update during training time, which helps prevent
        overfitting.
    * batch_normalization: bool (default=True)
        Applying Batch Normalization or not
    * bn_and_droupout: bool (default=False)
        Applying Batch Normalization and Dropout at the same time

    Note about Dropout and Batch Normalization:
    ------------------------------------------
    As a rule, Dropout and Batch Normalization (BN) shouldn't be used
    together according to : https://arxiv.org/pdf/1801.05134.pdf
    * Dropout should appear after the activation according to :
      https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf
    * BN should appear after the fully connected layer but before the
      activation according to : https://arxiv.org/pdf/1502.03167.pdf
    """

    def __init__(self, input_size, output_size, structure, init_method,
                 dropout=None, batch_normalization=True,
                 bn_and_droupout=False):
        # Initializing the model
        super(NeuralNet, self).__init__()

        # Initializing the list of layers
        self.layers = []

        # FIX: the original test was
        #   "dropout is not None or 0. < dropout <= 1."
        # which evaluates "0. < None" (TypeError on Python 3) when dropout
        # is None, and skips the range check entirely otherwise. The
        # intended condition is a conjunction:
        use_dropout = dropout is not None and 0. < dropout <= 1.

        if structure is not None and structure != []:
            # Checking if structure is dict
            if isinstance(structure, dict):
                structure = [structure]

            # Building the hidden layers
            for hidden in structure:
                # Extracting the hidden layer parameters
                hidden_size = int(hidden.get('num_units'))
                activation = hidden.get('activation')
                # NOTE(review): alpha may be None when absent; it is passed
                # straight to activation_function -- confirm ELU layers
                # always supply 'alpha' in their structure dict.
                alpha = hidden.get('alpha')

                # Fully connected layer
                fully_conn = nn.Linear(input_size, hidden_size)
                fully_conn.weight = opt.initialization(init_method,
                                                       fully_conn.weight)
                fully_conn.bias = opt.initialization(init_method,
                                                     fully_conn.bias)
                self.layers.append(fully_conn)

                if not bn_and_droupout:
                    # Batch Normalization (before activation, per BN paper)
                    if batch_normalization:
                        self.layers.append(torch.nn.BatchNorm1d(hidden_size))
                    # Activation
                    self.layers.append(activation_function(activation,
                                                           alpha=alpha))
                    # Dropout only when BN is off (mutually exclusive mode)
                    if use_dropout and not batch_normalization:
                        self.layers.append(torch.nn.Dropout(dropout))
                else:
                    # Batch Normalization
                    if batch_normalization:
                        self.layers.append(torch.nn.BatchNorm1d(hidden_size))
                    # Activation
                    self.layers.append(activation_function(activation,
                                                           alpha=alpha))
                    # Dropout (allowed together with BN in this mode)
                    if use_dropout:
                        self.layers.append(torch.nn.Dropout(dropout))

                # Next layer
                input_size = hidden_size

        # Fully connected last layer
        fully_conn = nn.Linear(input_size, output_size)
        fully_conn.weight = opt.initialization(init_method, fully_conn.weight)
        fully_conn.bias = opt.initialization(init_method, fully_conn.bias)
        self.layers.append(fully_conn)

        # Putting the model together
        self.model = nn.Sequential(*self.layers).train()

    def forward(self, x):
        out = self.model(x)
        return out
class ParametricNet(torch.nn.Module):
    """ Underlying Pytorch model powering the Parametric models.

    Holds a weight vector W (num_features x 1), a learnable scale alpha,
    and optionally a learnable beta.
    """

    def __init__(self, num_features, init_method, init_alpha=1.,
                 is_beta_used=True):
        super(ParametricNet, self).__init__()
        # weights
        # NOTE(review): W is whatever opt.initialization returns; unless
        # that is an nn.Parameter, W will NOT be registered with the
        # module/optimizer -- confirm against pysurvival.utils.optimization.
        W = torch.randn(num_features, 1)
        self.W = opt.initialization(init_method, W)
        # Learnable scale, initialized to 1/init_alpha.
        one = torch.FloatTensor(np.array([1]))/init_alpha
        self.alpha = torch.nn.Parameter(one)
        self.is_beta_used = is_beta_used
        if self.is_beta_used:
            # Second learnable parameter, initialized to 1.001/init_alpha.
            one = torch.FloatTensor(np.array([1.001]))/init_alpha
            self.beta = torch.nn.Parameter(one)

    def forward(self, x):
        # score = alpha * exp(x @ W).
        # NOTE(review): beta is created above but not used here --
        # presumably consumed by the wrapping model; confirm.
        score = self.alpha*torch.exp(torch.matmul(x, self.W))
        return score
|
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("Ah+LCAAAAAAABACT7+ZgAAEWhre3D+ZddhBguBBfOPeJkPnZXcfWbtISE38nU71hEtcKnYVKa7iKXh3fvM7LlU+TsVPqyvclFY+/VojOV9sslb/Z/XTqt3ffTi9/zR+/"
    + "efv9vHX5m48W/Pv37Fdx2FVJsPkbrrMwjIJRMAqGPKjYaAQkE0p8/xnWh55/vpGndu6+848zXzxX/LI89MVnnf6Fr06vus+38faG4mz5sIdJU+R5Gd7ckBPbpp6ff23y"
    + "ar2Q2Ak3GBNcfOtmhi/0flppnn05MVK01ir/0vIOxZK3V50fpOyeybc5VTD46b/imumnfhy9O4OfkcFk9YWn6zf/+fHjydn+naXxIaf1przVkU2yztmznKtmb73gV0uG"
    + "nucH/n6rLjJj+LP52uy3h7Oz3bv1WuxCNmrsDvDVP3TZ/J/mf26GBJ/9657NP3P1bGM3A8P+pZL3vKV/3XmU1307dd+MNeYb1zjHbyuN1Xl8MWV35qOXORLJtfY8nw8f"
    + "rYzm7y3SNLtnenmzyZzlro/nLdtp8UmZ+UD3NRmhzzeMy6+3W+md0ltSc791c2Tg9cjdPu633zJ8uJ8R9LBObtfkx1cDdq93kp+Vc8tns/rtJ4wN6Zfe8LDPXie9Uobh"
    + "X9v7FSrdxQ77+RkAAhzLy4wFAAA=")
# FIX: decode the base64 payload once (it was decoded twice). Byte 0 holds
# the number of gzip passes; the remainder is the (repeatedly) gzip-
# compressed playfield.
_raw = base64.b64decode(_g)
g = _raw[1:]
# _raw[0] is an int on Python 3 and a 1-char str on Python 2.
_passes = _raw[0] if isinstance(_raw[0], int) else ord(_raw[0])
for i in range(_passes):
    g = zlib.decompress(g, 16 + zlib.MAX_WBITS)
# bytearray yields a list of ints from both py2 str and py3 bytes
# (replaces map(ord, ...), which breaks on Python 3 bytes).
g = list(bytearray(g))
def gr(x, y):
    """Read playfield cell (x, y); out-of-bounds reads yield 0."""
    if 0 <= x < 2000 and 0 <= y < 516:
        return g[y*2000 + x]
    return 0


def gw(x, y, v):
    """Write v into playfield cell (x, y); out-of-bounds writes are ignored."""
    if 0 <= x < 2000 and 0 <= y < 516:
        g[y*2000 + x] = v
def td(a, b):
    """Befunge division: floor-divide a by b, division by zero yields 0."""
    return 0 if b == 0 else a // b


def tm(a, b):
    """Befunge modulo: a mod b, modulo by zero yields 0."""
    return 0 if b == 0 else a % b
# Befunge data stack: popping or peeking an empty stack yields 0.
s = []


def sp():
    """Pop the stack (0 when empty)."""
    return s.pop() if s else 0


def sa(v):
    """Push v onto the stack."""
    s.append(v)


def sr():
    """Peek at the top of the stack (0 when empty)."""
    return s[-1] if s else 0
# Machine-generated state functions (BefunCompile): each _N executes one
# basic block of the original Befunge program and returns the index of the
# next state. Grid cells (x,0) act as registers; t0 is a scratch global.
def _0():
    # Init: (1,0)=grid width, (2,0)=row count, (0,0)=limit, (3,0)=cursor.
    gw(1,0,2000)
    gw(2,0,500)
    gw(0,0,1000000)
    gw(3,0,2)
    gw(0,3,32)
    gw(1,3,32)
    return 1


def _1():
    # Mark cell for the current value with 88 ('X'); push the doubled value
    # and a flag for whether it is still below the limit (0,0).
    gw(tm(gr(3,0),gr(1,0)),(td(gr(3,0),gr(1,0)))+3,88)
    sa(gr(3,0)+gr(3,0))
    sa((1)if((gr(3,0)+gr(3,0))<gr(0,0))else(0))
    return 2


def _2():
    # Branch on the flag pushed by _1/_23.
    return (23)if(sp()!=0)else(3)


def _3():
    # Drop the exhausted value.
    sp();
    return 4


def _4():
    # Advance the cursor (3,0) and load that cell's mark minus 32 into t0.
    global t0
    sa(gr(3,0)+1)
    sa(gr(3,0)+1)
    gw(3,0,gr(3,0)+1)
    sa(tm(sp(),gr(1,0)))
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(td(sp(),gr(1,0)))
    sa(sp()+3)
    v0=sp()
    t0=gr(sp(),v0)
    t0=t0-32
    return 5


def _5():
    # Branch on the mark read in _4 (t0 == 0 means the cell held a space).
    global t0
    return (6)if((t0)!=0)else(4)


def _6():
    # Continue scanning while the cursor is below the limit.
    return (1)if(gr(0,0)>gr(3,0))else(7)


def _7():
    # Phase 2 init: reset counter (9,0) and cursor (3,0).
    global t0
    gw(9,0,0)
    gw(3,0,2)
    t0=0
    return 8


def _8():
    # Dispatch on whether the cursor's cell is marked with 88 ('X').
    sa(gr(3,0))
    return (9)if(gr(tm(gr(3,0),gr(1,0)),(td(gr(3,0),gr(1,0)))+3)!=88)else(11)


def _9():
    # Advance the cursor; loop until it reaches the limit (0,0).
    global t0
    t0=gr(3,0)+1
    gw(3,0,gr(3,0)+1)
    t0=t0-gr(0,0)
    sp();
    return (8)if((t0)!=0)else(10)


def _10():
    # Final output: " =" followed by the counter in (9,0).
    sys.stdout.write(" =")
    sys.stdout.flush()
    sys.stdout.write(str(gr(9,0))+" ")
    sys.stdout.flush()
    return 24
# NOTE: this file targets Python 2 (see shebang); the bare "/" divisions in
# these states rely on Python 2 integer (floor) division.
def _11():
    # Begin per-value digit processing; (5,0) is a digit counter.
    gw(5,0,1)
    sa(sr());
    sa(1)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()/10);
    sa(sr());
    return 12


def _12():
    # Loop over the remaining digits.
    return (19)if(sp()!=0)else(13)


def _13():
    # Store the digit multiplier in (6,0) and the candidate in (7,0).
    sp();
    gw(6,0,sp())
    sa(sr());
    gw(7,0,sp())
    return 14


def _14():
    # Load the mark for the value on top of the stack into t0.
    global t0
    sa(sr());
    sa(tm(sr(),gr(1,0)))
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(td(sp(),gr(1,0)))
    sa(sp()+3)
    v0=sp()
    t0=gr(sp(),v0)
    t0=t0-32
    return (15)if((t0)!=0)else(18)


def _15():
    # Rotate the last digit to the front using the multiplier in (6,0);
    # decrement the digit counter (5,0).
    global t0
    t0=sr()/10
    sa(sp()%10);
    sa(sp()*gr(6,0))
    sa(t0)
    t0=gr(5,0)-1
    gw(5,0,gr(5,0)-1)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()+sp());
    return (14)if((t0)!=0)else(16)


def _16():
    # Hit: print the value stored in (7,0) and bump the counter (9,0).
    sys.stdout.write(str(gr(7,0))+" ")
    sys.stdout.flush()
    sys.stdout.write(chr(10))
    sys.stdout.flush()
    gw(9,0,gr(9,0)+1)
    sa(0)
    return 17


def _17():
    # Drop and fall through to cleanup.
    sp();
    return 18


def _18():
    # Cleanup before resuming the scan loop.
    sp();
    sa(0)
    return 9


def _19():
    # Reject even values early.
    return (21)if((sr()%2)!=0)else(20)


def _20():
    # Rejected: drop and resume.
    sp();
    return 17


def _21():
    # Reject multiples of 5.
    return (22)if((sr()%5)!=0)else(20)


def _22():
    # Count another digit in (5,0) and keep splitting the value.
    gw(5,0,gr(5,0)+1)
    sa(sp()/10);
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()*10)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sr());
    return 12
def _23():
    # Clear (write 32, a space) the cell for the value on top of the stack,
    # then advance by the current step (3,0) and push the below-limit flag.
    sa(sr());
    sa(32)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(tm(sr(),gr(1,0)))
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(td(sp(),gr(1,0)))
    sa(sp()+3)
    v0=sp()
    v1=sp()
    gw(v1,v0,sp())
    sa(sp()+gr(3,0))
    sa((1)if(sr()<gr(0,0))else(0))
    return 2


# State dispatch table and interpreter loop: run state functions until one
# returns an index >= 24 (the halt state produced by _10).
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23]
c=0
while c<24:
    c=m[c]()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)

# When True, a format error while interpolating an exception message (bad
# kwargs for the template) re-raises instead of being merely logged.
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='make exception message format errors fatal'),
]

CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI-friendly wrapper carrying an HTTP code, title and explanation.

    Used to convert internal exceptions into webob HTTP responses.
    """

    def __init__(self, code=0, title="", explanation=""):
        self.code = code
        self.title = title
        self.explanation = explanation
        # NOTE(review): attributes are set before super().__init__() on
        # purpose -- webob appears to read them while building the
        # response; do not reorder without checking webob's constructor.
        super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
    """Raised when an executed command exits abnormally.

    Captures the command, its exit code and both output streams.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        # NOTE: self.description keeps the caller's value (possibly None);
        # only the formatted message below uses the defaults substituted
        # next.
        self.description = description

        if description is None:
            description = _('Unexpected error while running command.')
        if exit_code is None:
            exit_code = '-'
        # The %(...)s names are resolved from the local variables above.
        message = _('%(description)s\nCommand: %(cmd)s\n'
                    'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % locals()
        IOError.__init__(self, message)
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return dict((k, v) for k, v in original.iteritems() if not "_pass" in k)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.

    (Note: this module is Python 2 code -- see the "except Exception, e"
    syntax below.)
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    def inner(f):
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception, e:
                # Always re-raised after the optional notification below.
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(exception=e)
                        # Scrub any *_pass arguments before notifying.
                        call_dict = safe_utils.getcallargs(f, *args, **kw)
                        cleansed = _cleanse_dict(call_dict)
                        payload.update({'args': cleansed})

                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR

                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__

                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)

        return functools.wraps(f)(wrapped)
    return inner
class NovaException(Exception):
    """Base Nova Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    code = 500
    headers = {}
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        # Make the HTTP-style code available to the format kwargs.
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))

                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message

        super(NovaException, self).__init__(message)

    def format_message(self):
        # NOTE(review): exceptions deserialized from RPC get a '_Remote'
        # class-name suffix; for those args[0] already holds the fully
        # formatted message -- presumably set by the RPC layer; confirm.
        if self.__class__.__name__.endswith('_Remote'):
            return self.args[0]
        else:
            return unicode(self)
class EC2APIError(NovaException):
    """EC2 API error carrying the raw message and an optional EC2 code."""
    message = _("Unknown")

    def __init__(self, message=None, code=None):
        self.msg = message
        self.code = code
        # Stringify (None becomes 'None') exactly like '%s' % message.
        super(EC2APIError, self).__init__('%s' % message)
# Crypto / virtual-interface / glance failures.
class EncryptionFailure(NovaException):
    message = _("Failed to encrypt text: %(reason)s")


class DecryptionFailure(NovaException):
    message = _("Failed to decrypt text: %(reason)s")


class VirtualInterfaceCreateException(NovaException):
    message = _("Virtual Interface creation failed")


class VirtualInterfaceMacAddressException(NovaException):
    # FIX: the two adjacent literals rendered "interface""with" with no
    # separating space.
    message = _("5 attempts to create virtual interface "
                "with unique mac address failed")


class GlanceConnectionFailed(NovaException):
    message = _("Connection to glance host %(host)s:%(port)s failed: "
                "%(reason)s")
# Authorization errors (HTTP 403) and the generic bad-parameter base
# (HTTP 400) that the Invalid* family below derives from.
class NotAuthorized(NovaException):
    message = _("Not authorized.")
    code = 403


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


class ImageNotActive(NovaException):
    message = _("Image %(image_id)s is not active.")


class ImageNotAuthorized(NovaException):
    message = _("Not authorized for image %(image_id)s.")


class Invalid(NovaException):
    message = _("Unacceptable parameters.")
    code = 400
# Invalid-input family: all inherit code 400 from Invalid.
class InvalidBDM(Invalid):
    message = _("Block Device Mapping is Invalid.")


class InvalidBDMSnapshot(InvalidBDM):
    message = _("Block Device Mapping is Invalid: "
                "failed to get snapshot %(id)s.")


class InvalidBDMVolume(InvalidBDM):
    message = _("Block Device Mapping is Invalid: "
                "failed to get volume %(id)s.")


class VolumeUnattached(Invalid):
    message = _("Volume %(volume_id)s is not attached to anything")


class InvalidKeypair(Invalid):
    message = _("Keypair data is invalid")


class InvalidRequest(Invalid):
    message = _("The request is invalid.")


class InvalidInput(Invalid):
    message = _("Invalid input received") + ": %(reason)s"


class InvalidVolume(Invalid):
    message = _("Invalid volume") + ": %(reason)s"


class InvalidMetadata(Invalid):
    message = _("Invalid metadata") + ": %(reason)s"


class InvalidMetadataSize(Invalid):
    message = _("Invalid metadata size") + ": %(reason)s"


class InvalidPortRange(Invalid):
    message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")


class InvalidIpProtocol(Invalid):
    message = _("Invalid IP protocol %(protocol)s.")


class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")


class InvalidCidr(Invalid):
    message = _("Invalid cidr %(cidr)s.")


class InvalidUnicodeParameter(Invalid):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class InvalidAggregateAction(Invalid):
    message = _("Cannot perform action '%(action)s' on aggregate "
                "%(aggregate_id)s. Reason: %(reason)s.")


class InvalidGroup(Invalid):
    message = _("Group not valid. Reason: %(reason)s")


class InvalidSortKey(Invalid):
    message = _("Sort key supplied was not valid.")
# Instance life-cycle state errors: the instance exists but is in a state
# that forbids the requested operation.
class InstanceInvalidState(Invalid):
    """The instance's current state does not permit %(method)s."""
    message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
                "%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
    message = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
    message = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
    message = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
    message = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
    message = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
    message = _("Failed to resume instance: %(reason)s.")
class InstancePowerOnFailure(Invalid):
    message = _("Failed to power on instance: %(reason)s.")
class InstancePowerOffFailure(Invalid):
    message = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
    message = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
    message = _("Failed to terminate instance") + ": %(reason)s"
class InstanceDeployFailure(Invalid):
    message = _("Failed to deploy instance") + ": %(reason)s"
# Transient service availability errors.
class ServiceUnavailable(Invalid):
    """A required service cannot currently serve the request."""
    message = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
    message = _("Insufficient compute resources.")
class ComputeServiceUnavailable(ServiceUnavailable):
    message = _("Compute service of %(host)s is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
    message = _("Unable to migrate instance (%(instance_id)s) "
                "to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
    """The requested hypervisor type is not recognized/supported."""
    # Fixed garbled user-facing text: was "The supplied hypervisor type of
    # is invalid." (stray "of").
    message = _("The supplied hypervisor type is invalid.")
# Hypervisor / device / image validation errors.
class DestinationHypervisorTooOld(Invalid):
    message = _("The instance requires a newer hypervisor version than "
                "has been provided.")
class DestinationDiskExists(Invalid):
    message = _("The supplied disk path (%(path)s) already exists, "
                "it is expected not to exist.")
class InvalidDevicePath(Invalid):
    message = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
    message = _("The supplied device path (%(path)s) is in use.")
    code = 409  # 409 (Conflict) instead of the Invalid default of 400
class DeviceIsBusy(Invalid):
    message = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
    message = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
    message = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
    message = _("VLAN tag is not appropriate for the port group "
                "%(bridge)s. Expected VLAN tag is %(tag)s, "
                "but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
    message = _("vSwitch which contains the port group %(bridge)s is "
                "not associated with the desired physical adapter. "
                "Expected vSwitch is %(expected)s, but the one associated "
                "is %(actual)s.")
class InvalidDiskFormat(Invalid):
    message = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
    message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
    message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
    message = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
    message = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
    message = _("Invalid ID received %(id)s.")
class InvalidPeriodicTaskArg(Invalid):
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
class ConstraintNotMet(NovaException):
    message = _("Constraint not met.")
    code = 412  # 412 (Precondition Failed)
# Missing-resource errors.
class NotFound(NovaException):
    """Base class for resource-lookup failures."""
    message = _("Resource could not be found.")
    code = 404  # presumably the HTTP status the API layer returns
class AgentBuildNotFound(NotFound):
    message = _("No agent-build associated with id %(id)s.")
class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")
class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")
class ISCSITargetNotFoundForVolume(NotFound):
    message = _("No target id found for volume %(volume_id)s.")
class DiskNotFound(NotFound):
    message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
    message = _("Could not find a handler for %(driver_type)s volume.")
# Image / project / network lookup errors.
class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")
class ImageNotFoundEC2(ImageNotFound):
    message = _("Image %(image_id)s could not be found. The nova EC2 API "
                "assigns image ids dynamically when they are listed for the "
                "first time. Have you listed image ids since adding this "
                "image?")
class ProjectNotFound(NotFound):
    message = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
    message = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(NovaException):
    message = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
    message = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
    message = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
    message = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
    message = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
    message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
    message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
    message = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
    message = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
    message = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
    message = _("Either Network uuid %(network_uuid)s is not present or "
                "is not assigned to the project %(project_id)s.")
class DatastoreNotFound(NotFound):
    message = _("Could not find the datastore reference(s) which the VM uses.")
# Port and fixed-IP errors.
class PortInUse(NovaException):
    message = _("Port %(port_id)s is still in use.")
class PortNotUsable(NovaException):
    message = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(NovaException):
    message = _("No free port available for instance %(instance)s.")
class FixedIpNotFound(NotFound):
    """Base class for fixed-IP lookup failures."""
    message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
    message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
    message = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
    message = _("Network host %(host)s has zero fixed ips "
                "in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
    message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
    message = _("Fixed IP address (%(address)s) does not exist in "
                "network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
    message = _("Fixed IP address %(address)s is already in use on instance "
                "%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
    message = _("More than one instance is associated with fixed ip address "
                "'%(address)s'.")
class FixedIpInvalid(Invalid):
    message = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
    message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
    message = _("Zero fixed ips could be found.")
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
    """Base class for already-exists errors (inherits NovaException's
    default message)."""
    pass
# Floating-IP errors.
class FloatingIpExists(Duplicate):
    message = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
    message = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
    message = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
    message = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
    message = _("Floating ip pool not found.")
    # NOTE(review): 'safe' is defined on NovaException (not visible here);
    # presumably it marks the message as safe to expose to API users --
    # confirm against the base class.
    safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
    message = _("Zero floating ips available.")
    safe = True
class FloatingIpAssociated(NovaException):
    message = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
    message = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
    message = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
    message = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
    """Raised when trying to disassociate a floating IP that Nova
    auto-assigned and therefore manages itself."""
    # Fixed typo in the user-facing message: "assigined" -> "assigned".
    message = _("Cannot disassociate auto assigned floating ip")
# Keypair / host / quota lookup errors.
class KeypairNotFound(NotFound):
    message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
    message = _("Certificate %(certificate_id)s not found.")
class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
    message = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")
class QuotaNotFound(NotFound):
    """Base class for quota lookup failures."""
    message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
    message = _("Quota exceeded for resources: %(overs)s")
# Security-group errors.
class SecurityGroupNotFound(NotFound):
    message = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    message = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    message = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
    """The requested security-group default rule does not exist."""
    # Fixed malformed %-substitution: "(%rule_id)s" is not a mapping-key
    # specifier ("%r" would be parsed as its own conversion), so the rule id
    # was never interpolated. Correct form is "%(rule_id)s".
    message = _("Security group default rule (%(rule_id)s) not found.")
class SecurityGroupCannotBeApplied(Invalid):
    message = _("Network requires port_security_enabled and subnet associated"
                " in order to apply security groups.")
class NoUniqueMatch(NovaException):
    message = _("No Unique Match Found.")
    code = 409  # 409 (Conflict)
# Migration / console / flavor / cells errors.
class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")
class ConsolePoolNotFound(NotFound):
    message = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolNotFoundForHostType(NotFound):
    message = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
    message = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
    message = _("Invalid console type %(console_type)s")
class InstanceTypeNotFound(NotFound):
    message = _("Instance type %(instance_type_id)s could not be found.")
class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    message = _("Instance type with name %(instance_type_name)s "
                "could not be found.")
class FlavorNotFound(NotFound):
    message = _("Flavor %(flavor_id)s could not be found.")
class FlavorAccessNotFound(NotFound):
    message = _("Flavor access not found for %(flavor_id)s / "
                "%(project_id)s combination.")
class CellNotFound(NotFound):
    message = _("Cell %(cell_name)s doesn't exist.")
class CellRoutingInconsistency(NovaException):
    message = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
    message = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
    message = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
    message = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
    message = _("No cells available matching scheduling criteria.")
class CellError(NovaException):
    message = _("Exception received during cell processing: %(exc_name)s.")
class InstanceUnknownCell(NotFound):
    message = _("Cell is not known for instance %(instance_uuid)s")
# Scheduler / metadata / file lookup errors.
class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerCostFunctionNotFound(NotFound):
    message = _("Scheduler cost function %(cost_fn_str)s could"
                " not be found.")
class SchedulerWeightFlagNotFound(NotFound):
    message = _("Scheduler weight flag not found: %(flag_name)s")
class InstanceMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no metadata with "
                "key %(metadata_key)s.")
class InstanceSystemMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no system metadata with "
                "key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
    message = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
    message = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
    message = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
    message = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
    message = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
    message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
    message = _("Rotation param is required for backup image_type")
# Already-exists errors (Duplicate subclasses).
class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")
class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")
class InstanceTypeExists(Duplicate):
    message = _("Instance Type with name %(name)s already exists.")
class InstanceTypeIdExists(Duplicate):
    message = _("Instance Type with ID %(flavor_id)s already exists.")
class FlavorAccessExists(Duplicate):
    """A flavor-access entry already exists for this flavor/project pair."""
    # Fixed typo in the user-facing message: "alreay" -> "already".
    message = _("Flavor access already exists for flavor %(flavor_id)s "
                "and project %(project_id)s combination.")
# Storage / migration / resize errors and the quota-error base class.
class InvalidSharedStorage(NovaException):
    message = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
    message = _("%(path)s is not on local storage: %(reason)s")
class MigrationError(NovaException):
    message = _("Migration error") + ": %(reason)s"
class MigrationPreCheckError(MigrationError):
    message = _("Migration pre-check error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
    message = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
    message = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
    message = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
    message = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
    message = _("Resize error: %(reason)s")
class ImageTooLarge(NovaException):
    message = _("Image is larger than instance type allows")
class InstanceTypeMemoryTooSmall(NovaException):
    message = _("Instance type's memory is too small for requested image.")
class InstanceTypeDiskTooSmall(NovaException):
    message = _("Instance type's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
    message = _("Insufficient free memory on compute node to start %(uuid)s.")
class CouldNotFetchMetrics(NovaException):
    message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
class NoValidHost(NovaException):
    message = _("No valid host was found. %(reason)s")
class QuotaError(NovaException):
    """Base class for quota-exceeded errors."""
    message = _("Quota exceeded") + ": code=%(code)s"
    code = 413  # 413 (Request Entity Too Large)
    headers = {'Retry-After': 0}  # presumably merged into the HTTP response
    safe = True
# Concrete quota-exceeded errors and aggregate errors.
class TooManyInstances(QuotaError):
    message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
                " but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
    message = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
    message = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
    message = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
    message = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
    message = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
    message = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
    message = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
    message = _("Maximum number of security groups or rules exceeded")
class AggregateError(NovaException):
    message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                "caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(Duplicate):
    message = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no metadata with "
                "key %(metadata_key)s.")
class AggregateHostExists(Duplicate):
    message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class InstanceTypeCreateFailed(NovaException):
    message = _("Unable to create instance type")
class InstancePasswordSetFailed(NovaException):
    message = _("Failed to set admin password on %(instance)s "
                "because %(reason)s")
    safe = True
class DuplicateVlan(Duplicate):
    message = _("Detected existing vlan with id %(vlan)d")
class CidrConflict(NovaException):
    message = _("There was a conflict when trying to complete your request.")
    code = 409  # 409 (Conflict)
# Instance / node / task / config-drive / user-data errors.
class InstanceNotFound(NotFound):
    message = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
    message = _("Info cache for instance %(instance_uuid)s could not be "
                "found.")
class NodeNotFound(NotFound):
    message = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
    message = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
    message = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
    message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
    message = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
    message = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
    message = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
    message = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
    message = _("Instance %(instance_uuid)s is locked")
class ConfigDriveMountFailed(NovaException):
    message = _("Could not mount vfat config drive. %(operation)s failed. "
                "Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
    message = _("Unknown config drive format %(format)s. Select one of "
                "iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
    message = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
    message = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
    message = _("User data too large. User data must be no larger than "
                "%(maxsize)s bytes once base64 encoded. Your data is "
                "%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
    message = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
    message = _("unexpected task state: expecting %(expected)s but "
                "the actual state is %(actual)s")
class InstanceActionNotFound(NovaException):
    message = _("Action for request_id %(request_id)s on instance"
                " %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
    message = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
    message = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
    message = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
    message = _('Instance recreate is not implemented by this virt driver.')
class ServiceGroupUnavailable(NovaException):
    """The servicegroup driver backing the service API is unreachable."""
    # Fixed broken %-substitution: "%(driver)" lacked a conversion character
    # ("s"), so formatting the message would raise instead of interpolating
    # the driver name.
    message = _("The service from servicegroup driver %(driver)s is "
                "temporarily unavailable.")
# Policy / virt-driver capability errors.
class DBNotAllowed(NovaException):
    message = _('%(binary)s attempted direct database access which is '
                'not allowed by policy')
class UnsupportedVirtType(Invalid):
    message = _("Virtualization type '%(virt)s' is not supported by "
                "this compute driver")
class UnsupportedHardware(Invalid):
    message = _("Requested hardware '%(model)s' is not supported by "
                "the '%(virt)s' virt driver")
class Base64Exception(NovaException):
    message = _("Invalid Base 64 data for file %(path)s")
# NOTE(review): removed trailing non-source text ("Subsets and Splits ..."),
# which was web-page residue accidentally appended to the module and not
# valid Python.