CombinedText stringlengths 4 3.42M |
|---|
# -*- coding: utf-8 -*-
# Copyright 2011 Tomo Krajina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as mod_logging
import math as mod_math
from . import utils as mod_utils
log = mod_logging.getLogger(__name__)
# Generic geo related function and class(es)
# latitude/longitude in GPX files is always in WGS84 datum
# WGS84 defined the Earth semi-major axis with 6378.137 km
EARTH_RADIUS = 6378.137 * 1000  # meters

# One degree of latitude in meters (~111.319 km).
# BUG FIX: the circumference must be computed from the radius in meters
# (EARTH_RADIUS); the old expression used 6378.137 (kilometers) and made
# every distance derived from ONE_DEGREE off by a factor of 1000.
ONE_DEGREE = (2 * mod_math.pi * EARTH_RADIUS) / 360  # ==> 111.319 km
def to_rad(x):
    """Convert an angle from degrees to radians."""
    fraction_of_half_turn = x / 180.
    return fraction_of_half_turn * mod_math.pi
def haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2):
    """
    Great-circle (haversine) distance between two points, in meters.
    Implemented from http://www.movable-type.co.uk/scripts/latlong.html
    """
    lat_delta = to_rad(latitude_1 - latitude_2)
    lon_delta = to_rad(longitude_1 - longitude_2)
    lat_1_rad = to_rad(latitude_1)
    lat_2_rad = to_rad(latitude_2)

    sin_half_lat = mod_math.sin(lat_delta / 2)
    sin_half_lon = mod_math.sin(lon_delta / 2)
    a = sin_half_lat * sin_half_lat + \
        sin_half_lon * sin_half_lon * mod_math.cos(lat_1_rad) * mod_math.cos(lat_2_rad)
    angular_distance = 2 * mod_math.atan2(mod_math.sqrt(a), mod_math.sqrt(1 - a))
    return EARTH_RADIUS * angular_distance
def length(locations=None, _3d=None):
    """
    Total length of a sequence of locations, in meters.

    locations -- sequence of objects providing distance_2d()/distance_3d().
    _3d -- when true, use the elevation-aware 3d distance between points.

    Segments whose distance is None (missing data) are skipped; returns 0
    for an empty or None sequence.
    """
    if not locations:
        return 0
    total = 0
    for previous, current in zip(locations, locations[1:]):
        d = current.distance_3d(previous) if _3d else current.distance_2d(previous)
        # The original sentinel `if d != 0 and not d: pass` skipped exactly
        # the None case -- make that explicit:
        if d is not None:
            total += d
    return total
def length_2d(locations=None):
    """ 2-dimensional length (meters) of locations (only latitude and longitude, no elevation). """
    return length(locations or [], False)
def length_3d(locations=None):
    """ 3-dimensional length (meters) of locations (it uses latitude, longitude, and elevation). """
    return length(locations or [], True)
def calculate_max_speed(speeds_and_distances):
    """
    Estimated maximum speed of a track segment.

    speeds_and_distances -- list of (speed, distance) pairs, one per point.

    Distance outliers (usually speed outliers too) are discarded, then the
    95th-percentile of the remaining sorted speeds is returned.  Returns
    None for segments with fewer than 20 points or with no usable data.
    """
    assert speeds_and_distances
    if len(speeds_and_distances) > 0:
        assert len(speeds_and_distances[0]) == 2
        # ...
        assert len(speeds_and_distances[-1]) == 2

    size = float(len(speeds_and_distances))
    if size < 20:
        log.debug('Segment too small to compute speed, size=%s', size)
        return None

    distances = [pair[1] for pair in speeds_and_distances]
    average_distance = sum(distances) / float(size)
    variance = sum((d - average_distance) ** 2 for d in distances) / size
    standard_distance_deviation = mod_math.sqrt(variance)

    # Ignore entries whose distance is a statistical outlier:
    kept = [pair for pair in speeds_and_distances
            if abs(pair[1] - average_distance) <= standard_distance_deviation * 1.5]
    speeds = [pair[0] for pair in kept]
    if not speeds:
        return None
    speeds.sort()

    # Even here there may be some extremes => ignore the last 5%:
    index = int(len(speeds) * 0.95)
    if index >= len(speeds):
        index = -1
    return speeds[index]
def calculate_uphill_downhill(elevations):
    """
    Total climb and descent over a sequence of elevations.

    elevations -- sequence of numbers, with None for missing values.
    Returns an (uphill, downhill) tuple (both non-negative).

    Elevations are smoothed with a 0.3/0.4/0.3 weighted window to reduce
    GPS noise before the deltas are summed.
    """
    if not elevations:
        return 0, 0
    size = len(elevations)

    def _smoothed(n):
        """Weighted average of elevations[n] with its neighbors, or None."""
        current_ele = elevations[n]
        if current_ele is None:
            # BUG FIX: this used to return False, which then took part in
            # the arithmetic below as 0 and skewed both totals.
            return None
        if 0 < n < size - 1:
            previous_ele = elevations[n - 1]
            next_ele = elevations[n + 1]
            if previous_ele is not None and next_ele is not None:
                return previous_ele * .3 + current_ele * .4 + next_ele * .3
        return current_ele

    smoothed_elevations = [_smoothed(n) for n in range(size)]

    uphill, downhill = 0., 0.
    for n in range(1, size):
        previous, current = smoothed_elevations[n - 1], smoothed_elevations[n]
        # BUG FIX: the original tested `smoothed_elevations is not None`
        # (always true) instead of checking the previous smoothed value;
        # pairs with a missing endpoint are now skipped.
        if previous is None or current is None:
            continue
        d = current - previous
        if d > 0:
            uphill += d
        else:
            downhill -= d
    return uphill, downhill
def distance(latitude_1, longitude_1, elevation_1, latitude_2, longitude_2, elevation_2,
             haversine=None):
    """
    Distance between two points, in meters.

    If both elevations are given (and differ) the result is a 3d distance.
    haversine=True forces the haversine formula for every computation;
    otherwise it is only used for distant points, where elevation makes a
    small difference -- haversine is 5-6 times slower than the flat
    approximation (which is OK for most GPS tracks).
    """
    # Far apart (or explicitly requested) -- use the haversine formula:
    far_apart = abs(latitude_1 - latitude_2) > .2 or abs(longitude_1 - longitude_2) > .2
    if haversine or far_apart:
        return haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2)

    # Flat approximation: degrees scaled to meters, with the longitude
    # compressed by the latitude's cosine.
    coef = mod_math.cos(latitude_1 / 180. * mod_math.pi)
    lat_delta = latitude_1 - latitude_2
    lon_delta = (longitude_1 - longitude_2) * coef
    distance_2d = mod_math.sqrt(lat_delta * lat_delta + lon_delta * lon_delta) * ONE_DEGREE

    if elevation_1 is None or elevation_2 is None or elevation_1 == elevation_2:
        return distance_2d
    return mod_math.sqrt(distance_2d ** 2 + (elevation_1 - elevation_2) ** 2)
def elevation_angle(location1, location2, radians=False):
    """ Uphill/downhill angle between two locations (degrees by default). """
    if location1.elevation is None or location2.elevation is None:
        return None
    rise = float(location2.elevation - location1.elevation)
    run = location2.distance_2d(location1)
    if run == 0:
        return 0
    angle = mod_math.atan(rise / run)
    return angle if radians else 180 * angle / mod_math.pi
def distance_from_line(point, line_point_1, line_point_2):
    """ Distance of point from a line given with two points. """
    assert point, point
    assert line_point_1, line_point_1
    assert line_point_2, line_point_2

    base = line_point_1.distance_2d(line_point_2)
    if base == 0:
        # Degenerate line -- both points coincide:
        return line_point_1.distance_2d(point)
    side_1 = line_point_1.distance_2d(point)
    side_2 = line_point_2.distance_2d(point)
    # Heron's formula: the height is twice the triangle area over the base.
    s = (base + side_1 + side_2) / 2.
    return 2. * mod_math.sqrt(abs(s * (s - base) * (s - side_1) * (s - side_2))) / base
def get_line_equation_coefficients(location1, location2):
    """
    Get line equation coefficients for:
        latitude * a + longitude * b + c = 0
    This is a normal cartesian line (not spherical!)
    """
    if location1.longitude == location2.longitude:
        # Vertical line:
        return float(0), float(1), float(-location1.longitude)
    slope = float(location1.latitude - location2.latitude) / (location1.longitude - location2.longitude)
    intercept = location1.latitude - location1.longitude * slope
    return float(1), float(-slope), float(-intercept)
def simplify_polyline(points, max_distance):
    """Does Ramer-Douglas-Peucker algorithm for simplification of polyline """
    if len(points) < 3:
        return points

    begin, end = points[0], points[-1]

    # Use a "normal" cartesian line just to *find* the most distant point --
    # cheaper than calling distance_from_line() for every point.  This is an
    # approximation with some error near the poles / for very distant points,
    # but good enough to pick the candidate.
    a, b, c = get_line_equation_coefficients(begin, end)

    # Pick the interior point farthest from the line (first one on ties;
    # defaults to index 1 when all distances are zero, as before):
    interior = range(1, len(points) - 1)
    farthest = max(interior, key=lambda i: abs(a * points[i].latitude + b * points[i].longitude + c))

    # Now compute the real distance of that candidate:
    real_max_distance = distance_from_line(points[farthest], begin, end)

    # Close enough to the chord -- drop every interior point:
    if real_max_distance < max_distance:
        return [begin, end]

    # Otherwise use the farthest point as an anchor and recurse on both
    # halves, dropping the duplicated anchor from the second half:
    left = simplify_polyline(points[:farthest + 1], max_distance)
    right = simplify_polyline(points[farthest:], max_distance)
    return left + right[1:]
class Location:
    """ Generic geographical location """
    # Class-level defaults, overwritten per instance in __init__:
    latitude = None
    longitude = None
    elevation = None

    def __init__(self, latitude, longitude, elevation=None):
        self.latitude = latitude
        self.longitude = longitude
        self.elevation = elevation

    def has_elevation(self):
        # Note: returns the elevation value itself when truthy (original
        # contract), True for elevation == 0, False otherwise.
        return self.elevation or self.elevation == 0

    def remove_elevation(self):
        self.elevation = None

    def distance_2d(self, location):
        """2d distance (meters) to another location, or None if missing."""
        if not location:
            return None
        return distance(self.latitude, self.longitude, None,
                        location.latitude, location.longitude, None)

    def distance_3d(self, location):
        """3d (elevation-aware) distance to another location, or None."""
        if not location:
            return None
        return distance(self.latitude, self.longitude, self.elevation,
                        location.latitude, location.longitude, location.elevation)

    def elevation_angle(self, location, radians=False):
        """Uphill/downhill angle towards another location."""
        return elevation_angle(self, location, radians)

    def move(self, location_delta):
        """Move this location in place by the given LocationDelta."""
        self.latitude, self.longitude = location_delta.move(self)

    def __add__(self, location_delta):
        moved_latitude, moved_longitude = location_delta.move(self)
        return Location(moved_latitude, moved_longitude)

    def __str__(self):
        return '[loc:%s,%s@%s]' % (self.latitude, self.longitude, self.elevation)

    def __repr__(self):
        if self.elevation is None:
            return 'Location(%s, %s)' % (self.latitude, self.longitude)
        return 'Location(%s, %s, %s)' % (self.latitude, self.longitude, self.elevation)

    def __hash__(self):
        return mod_utils.hash_object(self, ('latitude', 'longitude', 'elevation'))
class LocationDelta:
    """
    Intended to use similar to timestamp.timedelta, but for Locations.
    """
    # Angles from north, clockwise, in degrees:
    NORTH = 0
    EAST = 90
    SOUTH = 180
    WEST = 270

    def __init__(self, distance=None, angle=None, latitude_diff=None, longitude_diff=None):
        """
        Version 1: give distance (meters) and angle (degrees from north,
        *clockwise*).
        Version 2: give latitude_diff and longitude_diff.
        The two versions are mutually exclusive.
        """
        have_polar = (distance is not None) and (angle is not None)
        have_diffs = (latitude_diff is not None) and (longitude_diff is not None)
        if have_polar:
            if (latitude_diff is not None) or (longitude_diff is not None):
                raise Exception('No lat/lon diff if using distance and angle!')
            self.distance = distance
            self.angle_from_north = angle
            self.move_function = self.move_by_angle_and_distance
        elif have_diffs:
            if (distance is not None) or (angle is not None):
                raise Exception('No distance/angle if using lat/lon diff!')
            self.latitude_diff = latitude_diff
            self.longitude_diff = longitude_diff
            self.move_function = self.move_by_lat_lon_diff

    def move(self, location):
        """Return the (latitude, longitude) of location moved by this delta."""
        return self.move_function(location)

    def move_by_angle_and_distance(self, location):
        # Longitude degrees shrink with the latitude's cosine:
        coef = mod_math.cos(location.latitude / 180. * mod_math.pi)
        bearing = (90 - self.angle_from_north) / 180. * mod_math.pi
        vertical_distance_diff = mod_math.sin(bearing) / ONE_DEGREE
        horizontal_distance_diff = mod_math.cos(bearing) / ONE_DEGREE
        lat_diff = self.distance * vertical_distance_diff
        lon_diff = self.distance * horizontal_distance_diff / coef
        return location.latitude + lat_diff, location.longitude + lon_diff

    def move_by_lat_lon_diff(self, location):
        return location.latitude + self.latitude_diff, location.longitude + self.longitude_diff
Fix ONE_DEGREE: compute it from EARTH_RADIUS (meters) instead of the kilometre value 6378.137
# -*- coding: utf-8 -*-
# Copyright 2011 Tomo Krajina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as mod_logging
import math as mod_math
from . import utils as mod_utils
log = mod_logging.getLogger(__name__)
# Generic geo related function and class(es)
# latitude/longitude in GPX files is always in WGS84 datum
# WGS84 defined the Earth semi-major axis with 6378.137 km
EARTH_RADIUS = 6378.137 * 1000
# One degree of latitude in meters (correctly derived from the radius in meters):
ONE_DEGREE = (2*mod_math.pi*EARTH_RADIUS) / 360 # ==> 111.319 km
def to_rad(x):
    """Convert an angle from degrees to radians."""
    fraction_of_half_turn = x / 180.
    return fraction_of_half_turn * mod_math.pi
def haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2):
    """
    Great-circle (haversine) distance between two points, in meters.
    Implemented from http://www.movable-type.co.uk/scripts/latlong.html
    """
    lat_delta = to_rad(latitude_1 - latitude_2)
    lon_delta = to_rad(longitude_1 - longitude_2)
    lat_1_rad = to_rad(latitude_1)
    lat_2_rad = to_rad(latitude_2)

    sin_half_lat = mod_math.sin(lat_delta / 2)
    sin_half_lon = mod_math.sin(lon_delta / 2)
    a = sin_half_lat * sin_half_lat + \
        sin_half_lon * sin_half_lon * mod_math.cos(lat_1_rad) * mod_math.cos(lat_2_rad)
    angular_distance = 2 * mod_math.atan2(mod_math.sqrt(a), mod_math.sqrt(1 - a))
    return EARTH_RADIUS * angular_distance
def length(locations=None, _3d=None):
    """
    Total length of a sequence of locations, in meters.

    locations -- sequence of objects providing distance_2d()/distance_3d().
    _3d -- when true, use the elevation-aware 3d distance between points.

    Segments whose distance is None (missing data) are skipped; returns 0
    for an empty or None sequence.
    """
    if not locations:
        return 0
    total = 0
    for previous, current in zip(locations, locations[1:]):
        d = current.distance_3d(previous) if _3d else current.distance_2d(previous)
        # The original sentinel `if d != 0 and not d: pass` skipped exactly
        # the None case -- make that explicit:
        if d is not None:
            total += d
    return total
def length_2d(locations=None):
    """ 2-dimensional length (meters) of locations (only latitude and longitude, no elevation). """
    return length(locations or [], False)
def length_3d(locations=None):
    """ 3-dimensional length (meters) of locations (it uses latitude, longitude, and elevation). """
    return length(locations or [], True)
def calculate_max_speed(speeds_and_distances):
    """
    Estimated maximum speed of a track segment.

    speeds_and_distances -- list of (speed, distance) pairs, one per point.

    Distance outliers (usually speed outliers too) are discarded, then the
    95th-percentile of the remaining sorted speeds is returned.  Returns
    None for segments with fewer than 20 points or with no usable data.
    """
    assert speeds_and_distances
    if len(speeds_and_distances) > 0:
        assert len(speeds_and_distances[0]) == 2
        # ...
        assert len(speeds_and_distances[-1]) == 2

    size = float(len(speeds_and_distances))
    if size < 20:
        log.debug('Segment too small to compute speed, size=%s', size)
        return None

    distances = [pair[1] for pair in speeds_and_distances]
    average_distance = sum(distances) / float(size)
    variance = sum((d - average_distance) ** 2 for d in distances) / size
    standard_distance_deviation = mod_math.sqrt(variance)

    # Ignore entries whose distance is a statistical outlier:
    kept = [pair for pair in speeds_and_distances
            if abs(pair[1] - average_distance) <= standard_distance_deviation * 1.5]
    speeds = [pair[0] for pair in kept]
    if not speeds:
        return None
    speeds.sort()

    # Even here there may be some extremes => ignore the last 5%:
    index = int(len(speeds) * 0.95)
    if index >= len(speeds):
        index = -1
    return speeds[index]
def calculate_uphill_downhill(elevations):
    """
    Total climb and descent over a sequence of elevations.

    elevations -- sequence of numbers, with None for missing values.
    Returns an (uphill, downhill) tuple (both non-negative).

    Elevations are smoothed with a 0.3/0.4/0.3 weighted window to reduce
    GPS noise before the deltas are summed.
    """
    if not elevations:
        return 0, 0
    size = len(elevations)

    def _smoothed(n):
        """Weighted average of elevations[n] with its neighbors, or None."""
        current_ele = elevations[n]
        if current_ele is None:
            # BUG FIX: this used to return False, which then took part in
            # the arithmetic below as 0 and skewed both totals.
            return None
        if 0 < n < size - 1:
            previous_ele = elevations[n - 1]
            next_ele = elevations[n + 1]
            if previous_ele is not None and next_ele is not None:
                return previous_ele * .3 + current_ele * .4 + next_ele * .3
        return current_ele

    smoothed_elevations = [_smoothed(n) for n in range(size)]

    uphill, downhill = 0., 0.
    for n in range(1, size):
        previous, current = smoothed_elevations[n - 1], smoothed_elevations[n]
        # BUG FIX: the original tested `smoothed_elevations is not None`
        # (always true) instead of checking the previous smoothed value;
        # pairs with a missing endpoint are now skipped.
        if previous is None or current is None:
            continue
        d = current - previous
        if d > 0:
            uphill += d
        else:
            downhill -= d
    return uphill, downhill
def distance(latitude_1, longitude_1, elevation_1, latitude_2, longitude_2, elevation_2,
             haversine=None):
    """
    Distance between two points, in meters.

    If both elevations are given (and differ) the result is a 3d distance.
    haversine=True forces the haversine formula for every computation;
    otherwise it is only used for distant points, where elevation makes a
    small difference -- haversine is 5-6 times slower than the flat
    approximation (which is OK for most GPS tracks).
    """
    # Far apart (or explicitly requested) -- use the haversine formula:
    far_apart = abs(latitude_1 - latitude_2) > .2 or abs(longitude_1 - longitude_2) > .2
    if haversine or far_apart:
        return haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2)

    # Flat approximation: degrees scaled to meters, with the longitude
    # compressed by the latitude's cosine.
    coef = mod_math.cos(latitude_1 / 180. * mod_math.pi)
    lat_delta = latitude_1 - latitude_2
    lon_delta = (longitude_1 - longitude_2) * coef
    distance_2d = mod_math.sqrt(lat_delta * lat_delta + lon_delta * lon_delta) * ONE_DEGREE

    if elevation_1 is None or elevation_2 is None or elevation_1 == elevation_2:
        return distance_2d
    return mod_math.sqrt(distance_2d ** 2 + (elevation_1 - elevation_2) ** 2)
def elevation_angle(location1, location2, radians=False):
    """ Uphill/downhill angle between two locations (degrees by default). """
    if location1.elevation is None or location2.elevation is None:
        return None
    rise = float(location2.elevation - location1.elevation)
    run = location2.distance_2d(location1)
    if run == 0:
        return 0
    angle = mod_math.atan(rise / run)
    return angle if radians else 180 * angle / mod_math.pi
def distance_from_line(point, line_point_1, line_point_2):
    """ Distance of point from a line given with two points. """
    assert point, point
    assert line_point_1, line_point_1
    assert line_point_2, line_point_2

    base = line_point_1.distance_2d(line_point_2)
    if base == 0:
        # Degenerate line -- both points coincide:
        return line_point_1.distance_2d(point)
    side_1 = line_point_1.distance_2d(point)
    side_2 = line_point_2.distance_2d(point)
    # Heron's formula: the height is twice the triangle area over the base.
    s = (base + side_1 + side_2) / 2.
    return 2. * mod_math.sqrt(abs(s * (s - base) * (s - side_1) * (s - side_2))) / base
def get_line_equation_coefficients(location1, location2):
    """
    Get line equation coefficients for:
        latitude * a + longitude * b + c = 0
    This is a normal cartesian line (not spherical!)
    """
    if location1.longitude == location2.longitude:
        # Vertical line:
        return float(0), float(1), float(-location1.longitude)
    slope = float(location1.latitude - location2.latitude) / (location1.longitude - location2.longitude)
    intercept = location1.latitude - location1.longitude * slope
    return float(1), float(-slope), float(-intercept)
def simplify_polyline(points, max_distance):
    """Does Ramer-Douglas-Peucker algorithm for simplification of polyline """
    if len(points) < 3:
        return points

    begin, end = points[0], points[-1]

    # Use a "normal" cartesian line just to *find* the most distant point --
    # cheaper than calling distance_from_line() for every point.  This is an
    # approximation with some error near the poles / for very distant points,
    # but good enough to pick the candidate.
    a, b, c = get_line_equation_coefficients(begin, end)

    # Pick the interior point farthest from the line (first one on ties;
    # defaults to index 1 when all distances are zero, as before):
    interior = range(1, len(points) - 1)
    farthest = max(interior, key=lambda i: abs(a * points[i].latitude + b * points[i].longitude + c))

    # Now compute the real distance of that candidate:
    real_max_distance = distance_from_line(points[farthest], begin, end)

    # Close enough to the chord -- drop every interior point:
    if real_max_distance < max_distance:
        return [begin, end]

    # Otherwise use the farthest point as an anchor and recurse on both
    # halves, dropping the duplicated anchor from the second half:
    left = simplify_polyline(points[:farthest + 1], max_distance)
    right = simplify_polyline(points[farthest:], max_distance)
    return left + right[1:]
class Location:
    """ Generic geographical location """
    # Class-level defaults, overwritten per instance in __init__:
    latitude = None
    longitude = None
    elevation = None

    def __init__(self, latitude, longitude, elevation=None):
        self.latitude = latitude
        self.longitude = longitude
        self.elevation = elevation

    def has_elevation(self):
        # Note: returns the elevation value itself when truthy (original
        # contract), True for elevation == 0, False otherwise.
        return self.elevation or self.elevation == 0

    def remove_elevation(self):
        self.elevation = None

    def distance_2d(self, location):
        """2d distance (meters) to another location, or None if missing."""
        if not location:
            return None
        return distance(self.latitude, self.longitude, None,
                        location.latitude, location.longitude, None)

    def distance_3d(self, location):
        """3d (elevation-aware) distance to another location, or None."""
        if not location:
            return None
        return distance(self.latitude, self.longitude, self.elevation,
                        location.latitude, location.longitude, location.elevation)

    def elevation_angle(self, location, radians=False):
        """Uphill/downhill angle towards another location."""
        return elevation_angle(self, location, radians)

    def move(self, location_delta):
        """Move this location in place by the given LocationDelta."""
        self.latitude, self.longitude = location_delta.move(self)

    def __add__(self, location_delta):
        moved_latitude, moved_longitude = location_delta.move(self)
        return Location(moved_latitude, moved_longitude)

    def __str__(self):
        return '[loc:%s,%s@%s]' % (self.latitude, self.longitude, self.elevation)

    def __repr__(self):
        if self.elevation is None:
            return 'Location(%s, %s)' % (self.latitude, self.longitude)
        return 'Location(%s, %s, %s)' % (self.latitude, self.longitude, self.elevation)

    def __hash__(self):
        return mod_utils.hash_object(self, ('latitude', 'longitude', 'elevation'))
class LocationDelta:
    """
    Intended to use similar to timestamp.timedelta, but for Locations.
    """
    # Angles from north, clockwise, in degrees:
    NORTH = 0
    EAST = 90
    SOUTH = 180
    WEST = 270

    def __init__(self, distance=None, angle=None, latitude_diff=None, longitude_diff=None):
        """
        Version 1: give distance (meters) and angle (degrees from north,
        *clockwise*).
        Version 2: give latitude_diff and longitude_diff.
        The two versions are mutually exclusive.
        """
        have_polar = (distance is not None) and (angle is not None)
        have_diffs = (latitude_diff is not None) and (longitude_diff is not None)
        if have_polar:
            if (latitude_diff is not None) or (longitude_diff is not None):
                raise Exception('No lat/lon diff if using distance and angle!')
            self.distance = distance
            self.angle_from_north = angle
            self.move_function = self.move_by_angle_and_distance
        elif have_diffs:
            if (distance is not None) or (angle is not None):
                raise Exception('No distance/angle if using lat/lon diff!')
            self.latitude_diff = latitude_diff
            self.longitude_diff = longitude_diff
            self.move_function = self.move_by_lat_lon_diff

    def move(self, location):
        """Return the (latitude, longitude) of location moved by this delta."""
        return self.move_function(location)

    def move_by_angle_and_distance(self, location):
        # Longitude degrees shrink with the latitude's cosine:
        coef = mod_math.cos(location.latitude / 180. * mod_math.pi)
        bearing = (90 - self.angle_from_north) / 180. * mod_math.pi
        vertical_distance_diff = mod_math.sin(bearing) / ONE_DEGREE
        horizontal_distance_diff = mod_math.cos(bearing) / ONE_DEGREE
        lat_diff = self.distance * vertical_distance_diff
        lon_diff = self.distance * horizontal_distance_diff / coef
        return location.latitude + lat_diff, location.longitude + lon_diff

    def move_by_lat_lon_diff(self, location):
        return location.latitude + self.latitude_diff, location.longitude + self.longitude_diff
|
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound
from eve_proxy.models import CachedDocument
def retrieve_xml(request):
    """
    A view that forwards EVE API requests through the cache system, either
    retrieving a cached document or querying and caching as needed.
    """
    # The URL path relative to this view's mount point (parameters excluded).
    url_path = request.META['PATH_INFO'].replace(reverse('eve_proxy.views.retrieve_xml'), "/")
    # Parameters come from the POST body or from the query string.
    source = request.POST if request.method == 'POST' else request.GET
    # Convert the QueryDict into a plain dict.
    params = dict(source.items())

    if url_path in ('/', ''):
        # If they don't provide any kind of query, shoot a quick error message.
        return HttpResponse('No API query specified.')
    if 'service' not in params:
        return HttpResponse('No Service ID provided.')

    # The query system will retrieve a cached_doc that was either previously
    # or newly cached depending on cache intervals.
    cached_doc = CachedDocument.objects.api_query(url_path, params)
    if cached_doc:
        # Return the document's body as XML.
        return HttpResponse(cached_doc.body, mimetype='text/xml')
    return HttpResponseNotFound('Error retrieving the document')
Only require a service ID when asking for user-specific info (i.e. when a userID parameter is present)
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound
from eve_proxy.models import CachedDocument
def retrieve_xml(request):
    """
    A view that forwards EVE API requests through the cache system, either
    retrieving a cached document or querying and caching as needed.
    """
    # The URL path relative to this view's mount point (parameters excluded).
    url_path = request.META['PATH_INFO'].replace(reverse('eve_proxy.views.retrieve_xml'), "/")
    # Parameters come from the POST body or from the query string.
    source = request.POST if request.method == 'POST' else request.GET
    # Convert the QueryDict into a plain dict.
    params = dict(source.items())

    if url_path in ('/', ''):
        # If they don't provide any kind of query, shoot a quick error message.
        return HttpResponse('No API query specified.')
    # A service ID is only required for user-specific queries:
    if 'userID' in params and 'service' not in params:
        return HttpResponse('No Service ID provided.')

    # The query system will retrieve a cached_doc that was either previously
    # or newly cached depending on cache intervals.
    cached_doc = CachedDocument.objects.api_query(url_path, params)
    if cached_doc:
        # Return the document's body as XML.
        return HttpResponse(cached_doc.body, mimetype='text/xml')
    return HttpResponseNotFound('Error retrieving the document')
|
#!/usr/bin/env python
"""Event Man(ager)
Your friendly manager of attendees at an event.
Copyright 2015 Davide Alberani <da@erlug.linux.it>
RaspiBO <info@raspibo.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
from tornado import gen, escape
import utils
import backend
class BaseHandler(tornado.web.RequestHandler):
    """Base class for request handlers."""
    # Lowercased strings considered false by tobool(); anything else falls
    # back to a plain bool() conversion.
    _bool_convert = {
        '0': False,
        'n': False,
        'f': False,
        'no': False,
        'off': False,
        'false': False
    }

    def tobool(self, obj):
        """Convert obj (a string, or a list of strings as produced by
        request arguments) to a boolean."""
        if isinstance(obj, (list, tuple)):
            # Request arguments come as lists; only the first value is used.
            obj = obj[0]
        if isinstance(obj, (str, unicode)):
            # NOTE(review): `unicode` (and iteritems below) means this file
            # targets Python 2.
            obj = obj.lower()
        return bool(self._bool_convert.get(obj, obj))

    def initialize(self, **kwargs):
        """Add every passed (key, value) as attributes of the instance."""
        # NOTE(review): this is how self.db is expected to be injected into
        # the handlers below -- confirm against the application setup.
        for key, value in kwargs.iteritems():
            setattr(self, key, value)
class RootHandler(BaseHandler):
    """Handler for the / path."""
    # Directory holding the client-side (Angular) application.
    angular_app_path = os.path.join(os.path.dirname(__file__), "angular_app")

    @gen.coroutine
    def get(self, *args, **kwargs):
        """Serve the ./angular_app/index.html file."""
        # serve the ./angular_app/index.html file
        with open(self.angular_app_path + "/index.html", 'r') as fd:
            self.write(fd.read())
class CollectionHandler(BaseHandler):
    """Base class for handlers that need to interact with the database backend.

    Introduce basic CRUD operations."""
    # set of documents we're managing (a collection in MongoDB or a table in a SQL database)
    collection = None

    @gen.coroutine
    def get(self, id_=None, resource=None, resource_id=None, **kwargs):
        """Read a single document, a sub-resource of it, or the whole
        collection."""
        if resource:
            # Handle access to sub-resources: dispatch to an (optional)
            # handle_get_<resource> method defined by the subclass.
            method = getattr(self, 'handle_get_%s' % resource, None)
            if method and callable(method):
                self.write(method(id_, resource_id, **kwargs))
                return
        if id_ is not None:
            # read a single document
            self.write(self.db.get(self.collection, id_))
        else:
            # return an object containing the list of all objects in the collection;
            # e.g.: {'events': [{'_id': 'obj1-id, ...}, {'_id': 'obj2-id, ...}, ...]}
            # Please, never return JSON lists that are not encapsulated into an object,
            # to avoid XSS vulnerabilities.
            self.write({self.collection: self.db.query(self.collection)})

    @gen.coroutine
    def post(self, id_=None, resource=None, resource_id=None, **kwargs):
        """Create a new document, or update an existing one (PUT aliases
        here, see below)."""
        data = escape.json_decode(self.request.body or '{}')
        if resource:
            # Handle access to sub-resources (handle_post_<resource> or
            # handle_put_<resource>, depending on the HTTP method).
            method = getattr(self, 'handle_%s_%s' % (self.request.method.lower(), resource), None)
            if method and callable(method):
                self.write(method(id_, resource_id, data, **kwargs))
                return
        if id_ is None:
            # No ID: create a new document.
            newData = self.db.add(self.collection, data)
        else:
            # Existing ID: update (merge) the document.
            merged, newData = self.db.update(self.collection, id_, data)
        self.write(newData)

    # PUT (update an existing document) is handled by the POST (create a new document) method
    put = post

    @gen.coroutine
    def delete(self, id_=None, resource=None, resource_id=None, **kwargs):
        """Delete a document, or a sub-resource of it."""
        if resource:
            # Handle access to sub-resources.
            method = getattr(self, 'handle_delete_%s' % resource, None)
            if method and callable(method):
                self.write(method(id_, resource_id, **kwargs))
                return
        if id_:
            self.db.delete(self.collection, id_)
        self.write({'success': True})
class PersonsHandler(CollectionHandler):
    """Handle requests for Persons."""
    collection = 'persons'
    object_id = 'person_id'

    def handle_get_events(self, id_, resource_id=None, **kwargs):
        """List the events attended by person `id_`.

        Inside the data of each event, a 'person_data' dictionary is
        created, duplicating the entry for the current person (so that
        there's no need to parse the 'persons' list on the client).

        If resource_id is given, only the specified event is considered.

        If the 'all' parameter is given, every event (also unattended
        ones) is returned.
        """
        args = self.request.arguments
        query = {}
        if id_ and not self.tobool(args.get('all')):
            # Restrict to events that list this person as an attendee.
            query = {'persons.person_id': id_}
        if resource_id:
            query['_id'] = resource_id
        events = self.db.query('events', query)
        for event in events:
            person_data = {}
            # Find this person's entry in the event's 'persons' list.
            for persons in event.get('persons') or []:
                if str(persons.get('person_id')) == id_:
                    person_data = persons
                    break
            event['person_data'] = person_data
        if resource_id and events:
            # A single event was requested: return it directly.
            return events[0]
        return {'events': events}
class EventsHandler(CollectionHandler):
    """Handle requests for Events."""
    collection = 'events'
    object_id = 'event_id'

    def handle_get_persons(self, id_, resource_id=None):
        """Return every person registered at this event, or the information
        about a specific person (when resource_id is given)."""
        query = {'_id': id_}
        event = self.db.query('events', query)[0]
        if resource_id:
            for person in event.get('persons', []):
                if str(person.get('person_id')) == resource_id:
                    return {'person': person}
        return {'persons': event.get('persons') or []}

    def handle_post_persons(self, id_, person_id, data):
        """Add a person to the list of persons registered at this event."""
        # Check whether the person is already registered at this event.
        doc = self.db.query('events',
                            {'_id': id_, 'persons.person_id': person_id})
        if '_id' in data:
            # Never let the payload overwrite the event's own _id.
            del data['_id']
        if not doc:
            # Not present yet: append the person to the event.
            merged, doc = self.db.update('events',
                                         {'_id': id_},
                                         {'persons': data},
                                         operation='append',
                                         create=False)
        # NOTE(review): when the person was already registered, `doc` here is
        # the query result (presumably a list), not the updated event --
        # confirm callers accept both shapes.
        return {'event': doc}

    def handle_put_persons(self, id_, person_id, data):
        """Update an existing entry for a person registered at this event."""
        merged, doc = self.db.update('events',
                                     {'_id': id_, 'persons.person_id': person_id},
                                     data, create=False)
        return {'event': doc}

    def handle_delete_persons(self, id_, person_id):
        """Remove a specific person from the list of persons registered at this event."""
        merged, doc = self.db.update('events',
                                     {'_id': id_},
                                     {'persons': {'person_id': person_id}},
                                     operation='delete',
                                     create=False)
        return {'event': doc}
class EbCSVImportPersonsHandler(BaseHandler):
    """Importer for CSV files exported from eventbrite."""
    # Map of the (Italian) eventbrite CSV column headers to our field names.
    csvRemap = {
        'Nome evento': 'event_title',
        'ID evento': 'event_id',
        'N. codice a barre': 'ebqrcode',
        'Cognome acquirente': 'surname',
        'Nome acquirente': 'name',
        'E-mail acquirente': 'email',
        'Cognome': 'surname',
        'Nome': 'name',
        'E-mail': 'email',
        'Indirizzo e-mail': 'email',
        'Tipologia biglietto': 'ticket_kind',
        'Data partecipazione': 'attending_datetime',
        'Data check-in': 'checkin_datetime',
        'Ordine n.': 'order_nr',
        'ID ordine': 'order_nr',
        'Prefisso (Sig., Sig.ra, ecc.)': 'name_title',
    }
    # Subset of fields that is copied into the persons collection.
    keepPersonData = ('name', 'surname', 'email', 'name_title')

    @gen.coroutine
    def post(self, **kwargs):
        """Import every uploaded CSV file into the persons collection.

        If the optional 'targetEvent' body argument is present, each
        imported person is also registered at that event.  The reply
        carries counters: total/valid parsed rows, merged persons and
        persons newly added to the target event.
        """
        targetEvent = None
        try:
            targetEvent = self.get_body_argument('targetEvent')
        except Exception:
            # 'targetEvent' is optional: a missing argument raises
            # tornado.web.MissingArgumentError.  Narrowed from a bare
            # except so real errors (KeyboardInterrupt etc.) propagate.
            pass
        reply = dict(total=0, valid=0, merged=0, new_in_event=0)
        for fieldname, contents in self.request.files.iteritems():
            for content in contents:
                filename = content['filename']
                parseStats, persons = utils.csvParse(content['body'], remap=self.csvRemap)
                reply['total'] += parseStats['total']
                reply['valid'] += parseStats['valid']
                for person in persons:
                    person_data = dict([(k, person[k]) for k in self.keepPersonData
                                        if k in person])
                    # Merge on email first, then on (name, surname).
                    merged, person = self.db.update('persons',
                            [('email',), ('name', 'surname')],
                            person_data)
                    if merged:
                        reply['merged'] += 1
                    if targetEvent and person:
                        event_id = targetEvent
                        person_id = person['_id']
                        registered_data = {
                            'person_id': person_id,
                            'attended': False,
                            'from_file': filename}
                        person.update(registered_data)
                        # Only register the person if not already present.
                        if not self.db.query('events',
                                {'_id': event_id, 'persons.person_id': person_id}):
                            self.db.update('events', {'_id': event_id},
                                    {'persons': person},
                                    operation='appendUnique')
                            reply['new_in_event'] += 1
        self.write(reply)
def run():
    """Run the Tornado web application."""
    # command line arguments; can also be written in a configuration file,
    # specified with the --config argument.
    define("port", default=5242, help="run on the given port", type=int)
    define("data", default=os.path.join(os.path.dirname(__file__), "data"),
            help="specify the directory used to store the data")
    define("mongodbURL", default=None,
            help="URL to MongoDB server", type=str)
    define("dbName", default='eventman',
            help="Name of the MongoDB database to use", type=str)
    define("debug", default=False, help="run in debug mode")
    # --config is handled through a callback so the file is parsed as soon
    # as the option is seen; final=False lets later command line options
    # override values read from the file.
    define("config", help="read configuration file",
            callback=lambda path: tornado.options.parse_config_file(path, final=False))
    tornado.options.parse_command_line()
    # database backend connector
    db_connector = backend.EventManDB(url=options.mongodbURL, dbName=options.dbName)
    init_params = dict(db=db_connector)
    # URL routes: REST-style collection handlers (with optional sub-resource
    # path components), the Angular app entry point, the eventbrite CSV
    # importer and a static-file fallback for everything else.
    application = tornado.web.Application([
            (r"/persons/?(?P<id_>\w+)?/?(?P<resource>\w+)?/?(?P<resource_id>\w+)?", PersonsHandler, init_params),
            (r"/events/?(?P<id_>\w+)?/?(?P<resource>\w+)?/?(?P<resource_id>\w+)?", EventsHandler, init_params),
            (r"/(?:index.html)?", RootHandler, init_params),
            (r"/ebcsvpersons", EbCSVImportPersonsHandler, init_params),
            (r'/(.*)', tornado.web.StaticFileHandler, {"path": "angular_app"})
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        debug=options.debug)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    # Blocks here serving requests until the process is interrupted.
    tornado.ioloop.IOLoop.instance().start()


if __name__ == '__main__':
    run()
# return empty person data if needed
#!/usr/bin/env python
"""Event Man(ager)
Your friendly manager of attendees at an event.
Copyright 2015 Davide Alberani <da@erlug.linux.it>
RaspiBO <info@raspibo.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
from tornado import gen, escape
import utils
import backend
class BaseHandler(tornado.web.RequestHandler):
    """Base class for request handlers."""
    # String values that explicitly mean False; anything not listed here
    # falls back to plain bool() truthiness in tobool().
    _bool_convert = {
        '0': False,
        'n': False,
        'f': False,
        'no': False,
        'off': False,
        'false': False
    }

    def tobool(self, obj):
        # Coerce a request argument to bool.  Tornado hands arguments over
        # as lists, so only the first element is considered.
        if isinstance(obj, (list, tuple)):
            obj = obj[0]
        if isinstance(obj, (str, unicode)):
            obj = obj.lower()
        return bool(self._bool_convert.get(obj, obj))

    def initialize(self, **kwargs):
        """Add every passed (key, value) as attributes of the instance."""
        # Used by the Application routes to inject e.g. the db connector.
        for key, value in kwargs.iteritems():
            setattr(self, key, value)
class RootHandler(BaseHandler):
    """Handler for the / path."""
    # Directory holding the AngularJS client application.
    angular_app_path = os.path.join(os.path.dirname(__file__), "angular_app")

    @gen.coroutine
    def get(self, *args, **kwargs):
        # serve the ./angular_app/index.html file
        with open(self.angular_app_path + "/index.html", 'r') as fd:
            self.write(fd.read())
class CollectionHandler(BaseHandler):
    """Base class for handlers that need to interact with the database backend.

    Introduce basic CRUD operations."""
    # set of documents we're managing (a collection in MongoDB or a table in a SQL database)
    collection = None

    @gen.coroutine
    def get(self, id_=None, resource=None, resource_id=None, **kwargs):
        # GET /<collection>[/<id_>[/<resource>[/<resource_id>]]]
        if resource:
            # Handle access to sub-resources.
            # Dispatch to a subclass-provided handle_get_<resource> method.
            method = getattr(self, 'handle_get_%s' % resource, None)
            if method and callable(method):
                self.write(method(id_, resource_id, **kwargs))
                return
        if id_ is not None:
            # read a single document
            self.write(self.db.get(self.collection, id_))
        else:
            # return an object containing the list of all objects in the collection;
            # e.g.: {'events': [{'_id': 'obj1-id, ...}, {'_id': 'obj2-id, ...}, ...]}
            # Please, never return JSON lists that are not encapsulated into an object,
            # to avoid XSS vulnerabilities.
            self.write({self.collection: self.db.query(self.collection)})

    @gen.coroutine
    def post(self, id_=None, resource=None, resource_id=None, **kwargs):
        # Create (no id_) or update (with id_) a document; the request body
        # is a JSON object.  Also dispatches sub-resource POST/PUT methods.
        data = escape.json_decode(self.request.body or '{}')
        if resource:
            # Handle access to sub-resources.
            method = getattr(self, 'handle_%s_%s' % (self.request.method.lower(), resource), None)
            if method and callable(method):
                self.write(method(id_, resource_id, data, **kwargs))
                return
        if id_ is None:
            newData = self.db.add(self.collection, data)
        else:
            merged, newData = self.db.update(self.collection, id_, data)
        self.write(newData)

    # PUT (update an existing document) is handled by the POST (create a new document) method
    put = post

    @gen.coroutine
    def delete(self, id_=None, resource=None, resource_id=None, **kwargs):
        # Delete a document, or a sub-resource of it via handle_delete_<resource>.
        if resource:
            # Handle access to sub-resources.
            method = getattr(self, 'handle_delete_%s' % resource, None)
            if method and callable(method):
                self.write(method(id_, resource_id, **kwargs))
                return
        if id_:
            self.db.delete(self.collection, id_)
        self.write({'success': True})
class PersonsHandler(CollectionHandler):
    """Handle requests for Persons."""
    collection = 'persons'
    object_id = 'person_id'

    def handle_get_events(self, id_, resource_id=None, **kwargs):
        """Return the events attended by this person.

        Each returned event carries a 'person_data' dictionary with a
        copy of this person's entry from its 'persons' list, so clients
        never have to scan that list themselves.  With resource_id only
        that event is considered; with the 'all' request argument every
        event (also unattended ones) is returned.
        """
        params = self.request.arguments
        query = {}
        if id_ and not self.tobool(params.get('all')):
            query = {'persons.person_id': id_}
        if resource_id:
            query['_id'] = resource_id
        events = self.db.query('events', query)
        for event in events:
            match = {}
            for entry in event.get('persons') or []:
                if str(entry.get('person_id')) == id_:
                    match = entry
                    break
            event['person_data'] = match
        if resource_id and events:
            return events[0]
        return {'events': events}
class EventsHandler(CollectionHandler):
    """Handle requests for Events."""
    collection = 'events'
    object_id = 'event_id'

    def handle_get_persons(self, id_, resource_id=None):
        """Return the persons registered at this event, or the single
        person identified by resource_id (an empty object when absent)."""
        event = self.db.query('events', {'_id': id_})[0]
        if resource_id:
            for entry in event.get('persons', []):
                if str(entry.get('person_id')) == resource_id:
                    return {'person': entry}
            # Requested person is not registered at this event.
            return {'person': {}}
        return {'persons': event.get('persons') or []}

    def handle_post_persons(self, id_, person_id, data):
        """Register a person at this event; a no-op if already present."""
        doc = self.db.query('events',
                            {'_id': id_, 'persons.person_id': person_id})
        # Never let a stray _id in the payload clash with the event's own.
        data.pop('_id', None)
        if not doc:
            merged, doc = self.db.update('events',
                                         {'_id': id_},
                                         {'persons': data},
                                         operation='append',
                                         create=False)
        return {'event': doc}

    def handle_put_persons(self, id_, person_id, data):
        """Update the entry of a person registered at this event."""
        merged, doc = self.db.update(
                'events',
                {'_id': id_, 'persons.person_id': person_id},
                data, create=False)
        return {'event': doc}

    def handle_delete_persons(self, id_, person_id):
        """Remove a person from the registered list of this event."""
        merged, doc = self.db.update(
                'events',
                {'_id': id_},
                {'persons': {'person_id': person_id}},
                operation='delete',
                create=False)
        return {'event': doc}
class EbCSVImportPersonsHandler(BaseHandler):
    """Importer for CSV files exported from eventbrite."""
    # Map of the (Italian) eventbrite CSV column headers to our field names.
    csvRemap = {
        'Nome evento': 'event_title',
        'ID evento': 'event_id',
        'N. codice a barre': 'ebqrcode',
        'Cognome acquirente': 'surname',
        'Nome acquirente': 'name',
        'E-mail acquirente': 'email',
        'Cognome': 'surname',
        'Nome': 'name',
        'E-mail': 'email',
        'Indirizzo e-mail': 'email',
        'Tipologia biglietto': 'ticket_kind',
        'Data partecipazione': 'attending_datetime',
        'Data check-in': 'checkin_datetime',
        'Ordine n.': 'order_nr',
        'ID ordine': 'order_nr',
        'Prefisso (Sig., Sig.ra, ecc.)': 'name_title',
    }
    # Subset of fields that is copied into the persons collection.
    keepPersonData = ('name', 'surname', 'email', 'name_title')

    @gen.coroutine
    def post(self, **kwargs):
        """Import every uploaded CSV file into the persons collection.

        If the optional 'targetEvent' body argument is present, each
        imported person is also registered at that event.  The reply
        carries counters: total/valid parsed rows, merged persons and
        persons newly added to the target event.
        """
        targetEvent = None
        try:
            targetEvent = self.get_body_argument('targetEvent')
        except Exception:
            # 'targetEvent' is optional: a missing argument raises
            # tornado.web.MissingArgumentError.  Narrowed from a bare
            # except so real errors (KeyboardInterrupt etc.) propagate.
            pass
        reply = dict(total=0, valid=0, merged=0, new_in_event=0)
        for fieldname, contents in self.request.files.iteritems():
            for content in contents:
                filename = content['filename']
                parseStats, persons = utils.csvParse(content['body'], remap=self.csvRemap)
                reply['total'] += parseStats['total']
                reply['valid'] += parseStats['valid']
                for person in persons:
                    person_data = dict([(k, person[k]) for k in self.keepPersonData
                                        if k in person])
                    # Merge on email first, then on (name, surname).
                    merged, person = self.db.update('persons',
                            [('email',), ('name', 'surname')],
                            person_data)
                    if merged:
                        reply['merged'] += 1
                    if targetEvent and person:
                        event_id = targetEvent
                        person_id = person['_id']
                        registered_data = {
                            'person_id': person_id,
                            'attended': False,
                            'from_file': filename}
                        person.update(registered_data)
                        # Only register the person if not already present.
                        if not self.db.query('events',
                                {'_id': event_id, 'persons.person_id': person_id}):
                            self.db.update('events', {'_id': event_id},
                                    {'persons': person},
                                    operation='appendUnique')
                            reply['new_in_event'] += 1
        self.write(reply)
def run():
    """Run the Tornado web application."""
    # command line arguments; can also be written in a configuration file,
    # specified with the --config argument.
    define("port", default=5242, help="run on the given port", type=int)
    define("data", default=os.path.join(os.path.dirname(__file__), "data"),
            help="specify the directory used to store the data")
    define("mongodbURL", default=None,
            help="URL to MongoDB server", type=str)
    define("dbName", default='eventman',
            help="Name of the MongoDB database to use", type=str)
    define("debug", default=False, help="run in debug mode")
    # --config is handled through a callback so the file is parsed as soon
    # as the option is seen; final=False lets later command line options
    # override values read from the file.
    define("config", help="read configuration file",
            callback=lambda path: tornado.options.parse_config_file(path, final=False))
    tornado.options.parse_command_line()
    # database backend connector
    db_connector = backend.EventManDB(url=options.mongodbURL, dbName=options.dbName)
    init_params = dict(db=db_connector)
    # URL routes: REST-style collection handlers (with optional sub-resource
    # path components), the Angular app entry point, the eventbrite CSV
    # importer and a static-file fallback for everything else.
    application = tornado.web.Application([
            (r"/persons/?(?P<id_>\w+)?/?(?P<resource>\w+)?/?(?P<resource_id>\w+)?", PersonsHandler, init_params),
            (r"/events/?(?P<id_>\w+)?/?(?P<resource>\w+)?/?(?P<resource_id>\w+)?", EventsHandler, init_params),
            (r"/(?:index.html)?", RootHandler, init_params),
            (r"/ebcsvpersons", EbCSVImportPersonsHandler, init_params),
            (r'/(.*)', tornado.web.StaticFileHandler, {"path": "angular_app"})
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        debug=options.debug)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    # Blocks here serving requests until the process is interrupted.
    tornado.ioloop.IOLoop.instance().start()


if __name__ == '__main__':
    run()
# |  (stray concatenation artifact, neutralized into a comment)
#Note: Keep name-type-value-numchild-extra order
#return
import sys
import traceback
import gdb
#import base64
import types
import curses.ascii
# Verbosity of the warn() helper: 0 silences warnings, anything higher
# prints them.
verbosity = 0
# Development toggle: the reassignment below switches warnings on; the
# line above is kept as the quick off switch.
verbosity = 1
def select(condition, if_expr, else_expr):
    """Ternary helper: *if_expr* when *condition* holds, else *else_expr*."""
    return if_expr if condition else else_expr
def qmin(n, m):
    """Return the smaller of *n* and *m* (two-argument min)."""
    return n if n < m else m
def isSimpleType(typeobj):
    """Return True for plain C/C++ arithmetic type names, including
    qualified spellings such as "unsigned char" or "long long"."""
    name = str(typeobj)
    if name in ("bool", "char", "double", "float", "int"):
        return True
    if name in ("long", "short", "signed", "unsigned"):
        return True
    return name.startswith(("long ", "short ", "signed ", "unsigned "))
def isStringType(d, typeobj):
    """Return True for string types we know how to encode: Qt's QString
    and QByteArray (prefixed with the namespace recorded on the dumper
    *d*) and the standard C++ string types."""
    name = str(typeobj)
    if name in (d.ns + "QString", d.ns + "QByteArray"):
        return True
    return name in ("std::string", "std::wstring", "wstring")
def warn(message):
    # Debug logging helper: prints only when the module-level `verbosity`
    # flag is non-zero.  (Python 2 print statement; the message is encoded
    # to latin1 before printing.)
    if verbosity > 0:
        print "XXX: %s " % message.encode("latin1")
    pass
def check(exp):
    """Assert-like helper: raise RuntimeError when *exp* is falsy."""
    if exp:
        return
    raise RuntimeError("Check failed")
#def couldBePointer(p, align):
# type = gdb.lookup_type("unsigned int")
# ptr = gdb.Value(p).cast(type)
# d = int(str(ptr))
# warn("CHECKING : %s %d " % (p, ((d & 3) == 0 and (d > 1000 or d == 0))))
# return (d & (align - 1)) and (d > 1000 or d == 0)
def checkAccess(p, align = 1):
    """Probe that pointer *p* can be dereferenced (gdb raises when the
    memory is inaccessible) and return the pointed-to value.  *align* is
    accepted for symmetry with the other check helpers but unused."""
    value = p.dereference()
    return value
def checkContents(p, expected, align = 1):
    """Raise RuntimeError unless *p* points at *expected* (as int)."""
    if int(p.dereference()) == expected:
        return
    raise RuntimeError("Contents check failed")
def checkPointer(p, align = 1):
    """Sanity-check a pointer: null is accepted, anything else must be
    dereferenceable (gdb raises on inaccessible memory)."""
    if isNull(p):
        return
    p.dereference()
def isNull(p):
    """True when gdb renders *p* as a null pointer: exactly "0x0", or
    "0x0 " followed by annotation text."""
    rep = str(p)
    if rep == "0x0":
        return True
    return rep.startswith("0x0 ")
# Qt value classes whose instances may be relocated in memory without
# breaking internal invariants; consulted by Dumper.isMovableType().
movableTypes = set([
    "QBrush", "QBitArray", "QByteArray",
    "QCustomTypeInfo", "QChar",
    "QDate", "QDateTime",
    "QFileInfo", "QFixed", "QFixedPoint", "QFixedSize",
    "QHashDummyValue",
    "QIcon", "QImage",
    "QLine", "QLineF", "QLatin1Char", "QLocal",
    "QMatrix", "QModelIndex",
    "QPoint", "QPointF", "QPen", "QPersistentModelIndex",
    "QResourceRoot", "QRect", "QRectF", "QRegExp",
    "QSize", "QSizeF", "QString",
    "QTime", "QTextBlock",
    "QUrl",
    "QVariant",
    "QXmlStreamAttribute", "QXmlStreamNamespaceDeclaration",
    "QXmlStreamNotationDeclaration", "QXmlStreamEntityDeclaration"])
def stripClassTag(type):
    """Remove gdb's leading "class " tag from a type name, if present."""
    tag = "class "
    if type.startswith(tag):
        return type[len(tag):]
    return type
def checkPointerRange(p, n):
    """Sanity-check *n* consecutive pointers starting at *p*.

    Each slot may be null; non-null slots must be dereferenceable."""
    for i in xrange(0, n):
        checkPointer(p)
        # BUG FIX: the original "++p" is not an increment in Python (it
        # parses as a double unary plus and discards the result), so the
        # same pointer was checked n times.  Advance for real.
        p += 1
def call(value, func):
    """Call member function *func* on gdb value *value* by evaluating a
    casted this-pointer expression in the inferior; return the result."""
    typename = stripClassTag(str(value.type))
    if ':' in typename:
        # Namespaced types must be quoted for gdb's expression parser.
        typename = "'" + typename + "'"
    exp = "((%s*)%s)->%s" % (typename, value.address, func)
    result = gdb.parse_and_eval(exp)
    return result
def qtNamespace():
    """Return the namespace Qt was built into ("" when not namespaced),
    derived from the fully qualified type of QString::null."""
    suffix = "QString::null"
    try:
        name = str(gdb.parse_and_eval("&QString::null").type.target().unqualified())
        return name[0:len(name) - len(suffix)]
    except RuntimeError:
        # No Qt (or no debug symbols) loaded: assume no namespace.
        return ""
def encodeByteArray(value):
    # Hex-encode the payload of a QByteArray gdb value, one "%02x" pair
    # per byte.  Field layout (d->data/size/alloc and the atomic refcount
    # at d->ref._q_value) matches Qt 4's QByteArray::Data — TODO confirm
    # against the Qt version in use.
    d_ptr = value['d'].dereference()
    data = d_ptr['data']
    size = d_ptr['size']
    alloc = d_ptr['alloc']
    # Guard against reading garbage: sane size/alloc and a live refcount.
    check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
    check(d_ptr["ref"]["_q_value"] > 0)
    if size > 0:
        checkAccess(data, 4)
        # NOTE(review): the "== 0" comparison result is discarded; this
        # line only probes that the byte past the data is readable.
        checkAccess(data + size) == 0
    innerType = gdb.lookup_type("char")
    p = gdb.Value(data.cast(innerType.pointer()))
    s = ""
    for i in xrange(0, size):
        s += "%02x" % int(p.dereference())
        p += 1
    return s
def encodeString(value):
    # Hex-encode a QString gdb value: two "%02x" pairs per QChar, low
    # byte first (little-endian UTF-16).
    d_ptr = value['d'].dereference()
    data = d_ptr['data']
    size = d_ptr['size']
    alloc = d_ptr['alloc']
    # Guard against reading garbage: sane size/alloc and a live refcount.
    check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
    if size > 0:
        checkAccess(data, 4)
        # NOTE(review): the "== 0" comparison result is discarded; this
        # line only probes that memory past the data is readable.
        checkAccess(data + size * 2) == 0
    check(d_ptr["ref"]["_q_value"] > 0)
    p = gdb.Value(d_ptr["data"])
    s = ""
    for i in xrange(0, size):
        val = int(p.dereference())
        s += "%02x" % (val % 256)
        s += "%02x" % (val / 256)  # Python 2 integer division
        p += 1
    return s
#######################################################################
#
# Item
#
#######################################################################
class Item:
    """A (gdb value, internal name, display name) triple to be dumped.

    The internal name (iname) forms a dotted path: it is the parent's
    iname alone, or "parentiname.iname" when a child component is given.
    """
    def __init__(self, value, parentiname, iname, name):
        self.value = value
        self.name = name
        if iname is None:
            self.iname = parentiname
        else:
            self.iname = "%s.%s" % (parentiname, iname)
#######################################################################
#
# FrameCommand
#
#######################################################################
class FrameCommand(gdb.Command):
    """Do fancy stuff. Usage bb --verbose expandedINames"""
    def __init__(self):
        super(FrameCommand, self).__init__("bb", gdb.COMMAND_OBSCURE)

    def invoke(self, arg, from_tty):
        # Space separated arguments: useFancy passExceptions
        # [comma-separated expanded inames] [comma-separated watchers]
        args = arg.split(' ')
        #warn("ARG: %s" % arg)
        #warn("ARGS: %s" % args)
        useFancy = int(args[0])
        passExceptions = int(args[1])
        expandedINames = set()
        if len(args) > 2:
            expandedINames = set(args[2].split(','))
        watchers = set()
        if len(args) > 3:
            watchers = set(args[3].split(','))
        #warn("EXPANDED INAMES: %s" % expandedINames)
        #warn("WATCHERS: %s" % watchers)
        module = sys.modules[__name__]
        self.dumpers = {}
        if useFancy == -1:
            # Capability query: report the available qqDump* dumpers and
            # static metadata instead of dumping locals.
            output = "dumpers=["
            for key, value in module.__dict__.items():
                if key.startswith("qqDump"):
                    if output != "dumpers=[":
                        output += ","
                    output += '"' + key[6:] + '"'
            output += "],"
            #output += "qtversion=[%d,%d,%d]"
            output += "qtversion=[4,6,0],"
            output += "namespace=\"%s\"," % qtNamespace()
            output += "dumperversion=\"2.0\","
            output += "sizes=[],"
            output += "expressions=[]"
            output += "]"
            print output
            return
        if useFancy:
            # Collect the qqDump* helpers, keyed by the type name they handle.
            for key, value in module.__dict__.items():
                #if callable(value):
                if key.startswith("qqDump"):
                    self.dumpers[key[6:]] = value
        try:
            frame = gdb.selected_frame()
        except RuntimeError:
            # No frame selected (e.g. the inferior is not running).
            return ""
        d = Dumper()
        d.dumpers = self.dumpers
        d.passExceptions = passExceptions
        d.ns = qtNamespace()
        block = frame.block()
        #warn(" NAMESPACE IS: '%s'" % d.ns)
        #warn("FRAME %s: " % frame)
        # Walk the lexical blocks from the innermost outwards, dumping
        # every local symbol found on the way.
        while True:
            if block is None:
                warn("UNEXPECTED 'None' BLOCK")
                break
            for symbol in block:
                name = symbol.print_name
                if name == "__in_chrg":
                    continue
                # "NotImplementedError: Symbol type not yet supported in
                # Python scripts."
                #warn("SYMBOL %s: " % symbol.value)
                #warn("SYMBOL %s (%s): " % (symbol, name))
                item = Item(0, "local", name, name)
                try:
                    item.value = frame.read_var(name)  # this is a gdb value
                except RuntimeError:
                    # happens for void foo() { std::string s; std::wstring w; }
                    #warn("  FRAME READ VAR ERROR: %s (%s): " % (symbol, name))
                    continue
                #warn("ITEM %s: " % item.value)
                d.expandedINames = expandedINames
                d.useFancy = useFancy
                d.beginHash()
                d.putField("iname", item.iname)
                d.put(",")
                d.safePutItemHelper(item)
                d.endHash()
            # The outermost block in a function has the function member
            # FIXME: check whether this is guaranteed.
            if not block.function is None:
                break
            block = block.superblock
            #warn("BLOCK %s: " % block)
        d.pushOutput()
        print('locals={iname="local",name="Locals",value=" ",type=" ",'
            + 'children=[%s]}' % d.safeoutput)
# Instantiating the command registers "bb" with gdb.
FrameCommand()
#######################################################################
#
# The Dumper Class
#
#######################################################################
class Dumper:
    """Accumulates the GDB/MI-style output emitted by the bb command.

    Text is appended to self.output and committed to self.safeoutput by
    pushOutput(); a failing type dumper thereby loses only its partial
    text.  childTypes/childNumChilds are stacks holding the default
    type/numchild announced for the current children list, so repeated
    per-child fields can be omitted.
    """
    def __init__(self):
        self.output = ""
        self.safeoutput = ""
        self.childTypes = [""]
        self.childNumChilds = [-1]

    def put(self, value):
        # Raw append to the pending output buffer.
        self.output += value

    def putCommaIfNeeded(self):
        # Insert a separator when the buffer ends with a closed item.
        c = self.output[-1:]
        if c == '}' or c == '"' or c == ']' or c == '\n':
            self.put(',')
        #warn("C:'%s' COND:'%d' OUT:'%s'" %
        # (c, c == '}' or c == '"' or c == ']' or c == '\n', self.output))

    def putField(self, name, value):
        # Emit name="value".
        self.putCommaIfNeeded()
        self.put('%s="%s"' % (name, value))

    def beginHash(self):
        self.putCommaIfNeeded()
        self.put('{')

    def endHash(self):
        self.put('}')

    def beginItem(self, name):
        # Open a name="..." field whose value is emitted piecewise.
        self.putCommaIfNeeded()
        self.put(name)
        self.put('="')

    def endItem(self):
        self.put('"')

    def beginChildren(self, numChild = 1, type = None, children = None):
        # Open a children=[...] list, announcing the default child type
        # and child count so per-child dumpers can omit them.
        childType = ""
        childNumChild = -1
        if numChild == 0:
            type = None
        if not type is None:
            childType = stripClassTag(str(type))
            self.putField("childtype", childType)
            if isSimpleType(type) or isStringType(self, type):
                self.putField("childnumchild", "0")
                childNumChild = 0
            elif type.code == gdb.TYPE_CODE_PTR:
                self.putField("childnumchild", "1")
                childNumChild = 1
        if not children is None:
            self.putField("childnumchild", children)
            childNumChild = children
        self.childTypes.append(childType)
        self.childNumChilds.append(childNumChild)
        #warn("BEGIN: %s" % self.childTypes)
        self.putCommaIfNeeded()
        self.put("children=[")

    def endChildren(self):
        #warn("END: %s" % self.childTypes)
        self.childTypes.pop()
        self.childNumChilds.pop()
        self.put(']')

    # convenience
    def putItemCount(self, count):
        self.putCommaIfNeeded()
        self.put('value="<%s items>"' % count)

    def putEllipsis(self):
        self.putCommaIfNeeded()
        self.put('{name="<incomplete>",value="",type="",numchild="0"}')

    def putType(self, type):
        #warn("TYPES: '%s' '%s'" % (type, self.childTypes))
        #warn(" EQUAL 2: %s " % (str(type) == self.childTypes[-1]))
        # Only emit the type when it differs from the announced child type.
        type = stripClassTag(str(type))
        if len(type) > 0 and type != self.childTypes[-1]:
            self.putField("type", type)
        #self.putField("type", str(type.unqualified()))

    def putNumChild(self, numchild):
        #warn("NUM CHILD: '%s' '%s'" % (numchild, self.childNumChilds[-1]))
        # Only emit numchild when it differs from the announced default.
        if int(numchild) != int(self.childNumChilds[-1]):
            self.putField("numchild", numchild)

    def putValue(self, value, encoding = None):
        if not encoding is None:
            self.putField("valueencoded", encoding)
        self.putField("value", value)

    def putStringValue(self, value):
        # Encoding 7: UTF-16 hex produced by encodeString().
        str = encodeString(value)
        self.putCommaIfNeeded()
        self.put('valueencoded="%d",value="%s"' % (7, str))

    def putByteArrayValue(self, value):
        # Encoding 6: byte hex produced by encodeByteArray().
        str = encodeByteArray(value)
        self.putCommaIfNeeded()
        self.put('valueencoded="%d",value="%s"' % (6, str))

    def putName(self, name):
        self.putCommaIfNeeded()
        self.put('name="%s"' % name)

    def isExpanded(self, item):
        #warn("IS EXPANDED: %s in %s" % (item.iname, self.expandedINames))
        # NOTE(review): "raise <string>" is Python 2 string-exception
        # syntax, long deprecated — confirm intended.
        if item.iname is None:
            raise "Illegal iname 'None'"
        if item.iname.startswith("None"):
            raise "Illegal iname '%s'" % item.iname
        #warn("   --> %s" % (item.iname in self.expandedINames))
        return item.iname in self.expandedINames

    def isExpandedIName(self, iname):
        return iname in self.expandedINames

    def unputField(self, name):
        # Drop the most recently emitted field if it is the named one.
        pos = self.output.rfind(",")
        if self.output[pos + 1:].startswith(name):
            self.output = self.output[0:pos]

    def stripNamespaceFromType(self, typeobj):
        # FIXME: pass ns from plugin
        # Strip the Qt namespace prefix and any template arguments.
        type = stripClassTag(str(typeobj))
        if len(self.ns) > 0 and type.startswith(self.ns):
            type = type[len(self.ns):]
        pos = type.find("<")
        if pos != -1:
            type = type[0:pos]
        return type

    def isMovableType(self, type):
        # Pointers, arithmetic types and the whitelisted Qt value
        # classes are considered movable.
        if type.code == gdb.TYPE_CODE_PTR:
            return True
        if isSimpleType(type):
            return True
        return self.stripNamespaceFromType(type) in movableTypes

    def putIntItem(self, name, value):
        self.beginHash()
        self.putName(name)
        self.putValue(value)
        self.putType("int")
        self.putNumChild(0)
        self.endHash()

    def putBoolItem(self, name, value):
        self.beginHash()
        self.putName(name)
        self.putValue(value)
        self.putType("bool")
        self.putNumChild(0)
        self.endHash()

    def pushOutput(self):
        #warn("PUSH OUTPUT: %s " % self.output)
        # Commit the pending buffer to the kept-on-error output.
        self.safeoutput += self.output
        self.output = ""

    def dumpInnerValueHelper(self, item):
        # Only simple children are dumped here; everything else is skipped.
        if isSimpleType(item.value.type):
            self.safePutItemHelper(item)

    def safePutItemHelper(self, item):
        self.pushOutput()
        # This is only used at the top level to ensure continuation
        # after failures due to uninitialized or corrupted data.
        if self.passExceptions:
            # for debugging reasons propagate errors.
            self.putItemHelper(item)
        else:
            try:
                self.putItemHelper(item)
            except RuntimeError:
                # Scrap the half-written item and emit a placeholder.
                self.output = ""
                # FIXME: Only catch debugger related exceptions
                #exType, exValue, exTraceback = sys.exc_info()
                #tb = traceback.format_exception(exType, exValue, exTraceback)
                #warn("Exception: %s" % ex.message)
                # DeprecationWarning: BaseException.message
                # has been deprecated
                #warn("Exception.")
                #for line in tb:
                #    warn("%s" % line)
                self.putName(item.name)
                self.putValue("<invalid>")
                self.putType(str(item.value.type))
                self.putNumChild(0)
                #if self.isExpanded(item):
                self.beginChildren()
                self.endChildren()
        self.pushOutput()

    def putItem(self, item):
        self.beginHash()
        self.safePutItemHelper(item)
        self.endHash()

    def putItemOrPointer(self, item):
        self.beginHash()
        self.putItemOrPointerHelper(item)
        self.endHash()

    def putCallItem(self, name, item, func):
        # Dump the result of calling item.value->func() in the inferior.
        result = call(item.value, func)
        self.putItem(Item(result, item.iname, name, name))

    def putItemOrPointerHelper(self, item):
        # Auto-dereference non-char pointers so the pointed-to value is
        # shown directly.
        # NOTE(review): ".unqualified" is not called (missing "()"), so
        # the str() of the method object never equals "char" — confirm
        # whether char pointers were meant to be excluded here.
        if item.value.type.code == gdb.TYPE_CODE_PTR \
                and str(item.value.type.unqualified) != "char":
            if not isNull(item.value):
                self.putItemOrPointerHelper(
                    Item(item.value.dereference(), item.iname, None, None))
            else:
                self.putValue("(null)")
                self.putNumChild(0)
        else:
            self.safePutItemHelper(item)

    def putItemHelper(self, item):
        # Main dispatch: emit name/type/value/numchild (and children when
        # expanded) for a single item.
        name = getattr(item, "name", None)
        if not name is None:
            self.putName(name)
        self.putType(item.value.type)
        # FIXME: Gui shows references stripped?
        #warn("REAL INAME: %s " % item.iname)
        #warn("REAL TYPE: %s " % item.value.type)
        #warn("REAL VALUE: %s " % item.value)
        value = item.value
        type = value.type
        # Strip references and typedefs before dispatching on the type code.
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
            value = value.cast(type)
        if type.code == gdb.TYPE_CODE_TYPEDEF:
            type = type.target()
        strippedType = self.stripNamespaceFromType(
            type.strip_typedefs().unqualified()).replace("::", "__")
        #warn(" STRIPPED: %s" % strippedType)
        #warn(" DUMPERS: %s" % self.dumpers)
        #warn(" DUMPERS: %s" % (strippedType in self.dumpers))
        if isSimpleType(type):
            self.putValue(value)
            self.putNumChild(0)
        elif strippedType in self.dumpers:
            # A type-specific qqDump* dumper takes over completely.
            self.dumpers[strippedType](self, item)
        elif type.code == gdb.TYPE_CODE_ENUM:
            #warn("GENERIC ENUM: %s" % value)
            self.putValue(value)
            self.putNumChild(0)
        elif type.code == gdb.TYPE_CODE_PTR:
            isHandled = False
            #warn("GENERIC POINTER: %s" % value)
            if isNull(value):
                self.putValue("0x0")
                self.putNumChild(0)
                isHandled = True
            target = str(type.target().unqualified())
            #warn("TARGET: %s" % target)
            if target == "char" and not isHandled:
                # Display values up to given length directly
                firstNul = -1
                p = value
                for i in xrange(0, 10):
                    if p.dereference() == 0:
                        # Found terminating NUL
                        self.putField("valueencoded", "6")
                        self.put(',value="')
                        p = value
                        for j in xrange(0, i):
                            self.put('%02x' % int(p.dereference()))
                            p += 1
                        self.put('"')
                        self.putNumChild(0)
                        isHandled = True
                        break
                    p += 1
            if not isHandled:
                # Generic pointer type.
                #warn("GENERIC POINTER: %s" % value)
                if self.isExpanded(item):
                    #warn("GENERIC POINTER: %s" % item.value.type.target())
                    self.put(',')
                    # Temporary change to target type.
                    self.childTypes.append(
                        stripClassTag(str(item.value.type.target())))
                    self.putItemOrPointerHelper(
                        Item(item.value.dereference(), item.iname, None, None))
                    self.childTypes.pop()
                else:
                    self.putValue(str(value.address))
                    self.putNumChild(1)
        else:
            #warn("COMMON TYPE: %s " % value.type)
            #warn("INAME: %s " % item.iname)
            #warn("INAMES: %s " % self.expandedINames)
            #warn("EXPANDED: %self " % (item.iname in self.expandedINames))
            # insufficient, see http://sourceware.org/bugzilla/show_bug.cgi?id=10953
            #fields = value.type.fields()
            fields = value.type.strip_typedefs().fields()
            self.putValue("{...}")
            # (dead branch kept from upstream; note "++numfields" would be
            # a no-op double unary plus anyway)
            if False:
                numfields = 0
                for field in fields:
                    bitpos = getattr(field, "bitpos", None)
                    if not bitpos is None:
                        ++numfields
            else:
                numfields = len(fields)
            self.putNumChild(numfields)
            if self.isExpanded(item):
                innerType = None
                if len(fields) == 1 and fields[0].name is None:
                    innerType = value.type.target()
                self.beginChildren(1, innerType)
                for field in fields:
                    #warn("FIELD: %s" % field)
                    #warn("  BITSIZE: %s" % field.bitsize)
                    #warn("  ARTIFICIAL: %s" % field.artificial)
                    bitpos = getattr(field, "bitpos", None)
                    if bitpos is None: # FIXME: Is check correct?
                        continue # A static class member(?).
                    if field.name is None:
                        # Nameless field: dump the elements by index.
                        innerType = value.type.target()
                        p = value.cast(innerType.pointer())
                        for i in xrange(0, value.type.sizeof / innerType.sizeof):
                            self.putItem(Item(p.dereference(), item.iname, i, None))
                            p = p + 1
                        continue
                    # ignore vtable pointers for virtual inheritance
                    if field.name.startswith("_vptr."):
                        continue
                    child = Item(None, item.iname, field.name, field.name)
                    #warn("FIELD NAME: %s" % field.name)
                    #warn("FIELD TYPE: %s" % field.type)
                    if field.name == stripClassTag(str(field.type)):
                        # Field is base type.
                        child.value = value.cast(field.type)
                    else:
                        # Data member.
                        child.value = value[field.name]
                    if not child.name:
                        child.name = "<anon>"
                    self.beginHash()
                    #d.putField("iname", child.iname)
                    #d.putName(child.name)
                    #d.putType(child.value.type)
                    self.safePutItemHelper(child)
                    self.endHash()
                self.endChildren()
# debugger: fix new dumpers in the context of base class types containing
# spaces in their string representation
#Note: Keep name-type-value-numchild-extra order
#return
import sys
import traceback
import gdb
import curses.ascii
# Verbosity of the warn() helper: 0 silences warnings, anything higher
# prints them.
verbosity = 0
# Development toggle: the reassignment below switches warnings on; the
# line above is kept as the quick off switch.
verbosity = 1
def select(condition, if_expr, else_expr):
    """Ternary helper: *if_expr* when *condition* holds, else *else_expr*."""
    return if_expr if condition else else_expr
def qmin(n, m):
    """Return the smaller of *n* and *m* (two-argument min)."""
    return n if n < m else m
def isSimpleType(typeobj):
    """Return True for plain C/C++ arithmetic type names, including
    qualified spellings such as "unsigned char" or "long long"."""
    name = str(typeobj)
    if name in ("bool", "char", "double", "float", "int"):
        return True
    if name in ("long", "short", "signed", "unsigned"):
        return True
    return name.startswith(("long ", "short ", "signed ", "unsigned "))
def isStringType(d, typeobj):
    """Return True for string types we know how to encode: Qt's QString
    and QByteArray (prefixed with the namespace recorded on the dumper
    *d*) and the standard C++ string types."""
    name = str(typeobj)
    if name in (d.ns + "QString", d.ns + "QByteArray"):
        return True
    return name in ("std::string", "std::wstring", "wstring")
def warn(message):
    # Debug logging helper: prints only when the module-level `verbosity`
    # flag is non-zero.  (Python 2 print statement; the message is encoded
    # to latin1 before printing.)
    if verbosity > 0:
        print "XXX: %s " % message.encode("latin1")
    pass
def check(exp):
    """Assert-like helper: raise RuntimeError when *exp* is falsy."""
    if exp:
        return
    raise RuntimeError("Check failed")
#def couldBePointer(p, align):
# type = gdb.lookup_type("unsigned int")
# ptr = gdb.Value(p).cast(type)
# d = int(str(ptr))
# warn("CHECKING : %s %d " % (p, ((d & 3) == 0 and (d > 1000 or d == 0))))
# return (d & (align - 1)) and (d > 1000 or d == 0)
def checkAccess(p, align = 1):
    """Probe that pointer *p* can be dereferenced (gdb raises when the
    memory is inaccessible) and return the pointed-to value.  *align* is
    accepted for symmetry with the other check helpers but unused."""
    value = p.dereference()
    return value
def checkContents(p, expected, align = 1):
    """Raise RuntimeError unless *p* points at *expected* (as int)."""
    if int(p.dereference()) == expected:
        return
    raise RuntimeError("Contents check failed")
def checkPointer(p, align = 1):
    """Sanity-check a pointer: null is accepted, anything else must be
    dereferenceable (gdb raises on inaccessible memory)."""
    if isNull(p):
        return
    p.dereference()
def isNull(p):
    """True when gdb renders *p* as a null pointer: exactly "0x0", or
    "0x0 " followed by annotation text."""
    rep = str(p)
    if rep == "0x0":
        return True
    return rep.startswith("0x0 ")
# Qt value classes whose instances may be relocated in memory without
# breaking internal invariants; consulted by Dumper.isMovableType().
movableTypes = set([
    "QBrush", "QBitArray", "QByteArray",
    "QCustomTypeInfo", "QChar",
    "QDate", "QDateTime",
    "QFileInfo", "QFixed", "QFixedPoint", "QFixedSize",
    "QHashDummyValue",
    "QIcon", "QImage",
    "QLine", "QLineF", "QLatin1Char", "QLocal",
    "QMatrix", "QModelIndex",
    "QPoint", "QPointF", "QPen", "QPersistentModelIndex",
    "QResourceRoot", "QRect", "QRectF", "QRegExp",
    "QSize", "QSizeF", "QString",
    "QTime", "QTextBlock",
    "QUrl",
    "QVariant",
    "QXmlStreamAttribute", "QXmlStreamNamespaceDeclaration",
    "QXmlStreamNotationDeclaration", "QXmlStreamEntityDeclaration"])
def stripClassTag(type):
    """Remove gdb's leading "class " tag from a type name, if present."""
    tag = "class "
    if type.startswith(tag):
        return type[len(tag):]
    return type
def checkPointerRange(p, n):
    """Sanity-check *n* consecutive pointers starting at *p*.

    Each slot may be null; non-null slots must be dereferenceable."""
    for i in xrange(0, n):
        checkPointer(p)
        # BUG FIX: the original "++p" is not an increment in Python (it
        # parses as a double unary plus and discards the result), so the
        # same pointer was checked n times.  Advance for real.
        p += 1
def call(value, func):
    """Call member function *func* on gdb value *value* by evaluating a
    casted this-pointer expression in the inferior; return the result."""
    typename = stripClassTag(str(value.type))
    if ':' in typename:
        # Namespaced types must be quoted for gdb's expression parser.
        typename = "'" + typename + "'"
    exp = "((%s*)%s)->%s" % (typename, value.address, func)
    result = gdb.parse_and_eval(exp)
    return result
def qtNamespace():
    """Return the namespace Qt was built into ("" when not namespaced),
    derived from the fully qualified type of QString::null."""
    suffix = "QString::null"
    try:
        name = str(gdb.parse_and_eval("&QString::null").type.target().unqualified())
        return name[0:len(name) - len(suffix)]
    except RuntimeError:
        # No Qt (or no debug symbols) loaded: assume no namespace.
        return ""
def encodeByteArray(value):
    # Hex-encode the payload of a QByteArray gdb value for transfer to the
    # IDE, after sanity-checking the private data so corrupted objects are
    # reported instead of producing garbage.
    d_ptr = value['d'].dereference()
    data = d_ptr['data']
    size = d_ptr['size']
    alloc = d_ptr['alloc']
    check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
    check(d_ptr["ref"]["_q_value"] > 0)
    if size > 0:
        checkAccess(data, 4)
        # FIX: the original compared the probe's result against 0 and
        # discarded it; the checkAccess() call itself is the probe.
        checkAccess(data + size)
    innerType = gdb.lookup_type("char")
    p = gdb.Value(data.cast(innerType.pointer()))
    # Collect hex digits in a list and join once (the original's "s +="
    # loop was quadratic).
    chunks = []
    for i in xrange(0, size):
        # FIX: '% 256' normalizes negative signed chars, which would
        # otherwise produce invalid hex like "-3f" (cf. encodeString()).
        chunks.append("%02x" % (int(p.dereference()) % 256))
        p += 1
    return "".join(chunks)
def encodeString(value):
    # Hex-encode a QString's UTF-16 payload (two bytes per code unit,
    # little-endian: low byte first) after sanity-checking the private data.
    d_ptr = value['d'].dereference()
    data = d_ptr['data']
    size = d_ptr['size']
    alloc = d_ptr['alloc']
    check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
    if size > 0:
        checkAccess(data, 4)
        # FIX: the original compared the probe's result against 0 and
        # discarded it; the checkAccess() call itself is the probe.
        checkAccess(data + size * 2)
    check(d_ptr["ref"]["_q_value"] > 0)
    p = gdb.Value(d_ptr["data"])
    # Collect hex digits in a list and join once (avoids quadratic "+=").
    chunks = []
    for i in xrange(0, size):
        val = int(p.dereference())
        chunks.append("%02x" % (val % 256))
        # '//' keeps the division integral (identical to py2 '/' on ints).
        chunks.append("%02x" % (val // 256))
        p += 1
    return "".join(chunks)
#######################################################################
#
# Item
#
#######################################################################
class Item:
    """Bundle of a gdb value, its dotted internal name, and display name.

    The internal name ('iname') is the dotted path from the root of the
    locals tree; passing iname=None reuses the parent's path unchanged.
    """
    def __init__(self, value, parentiname, iname, name):
        self.value = value
        self.iname = parentiname if iname is None \
            else "%s.%s" % (parentiname, iname)
        self.name = name
#######################################################################
#
# FrameCommand
#
#######################################################################
class FrameCommand(gdb.Command):
    """Do fancy stuff. Usage bb --verbose expandedINames"""
    # Custom gdb command 'bb': dumps all locals of the selected frame in
    # the IDE's machine-readable format, using the qqDump* helpers where
    # available.
    def __init__(self):
        super(FrameCommand, self).__init__("bb", gdb.COMMAND_OBSCURE)
    def invoke(self, arg, from_tty):
        # Positional arguments (space separated):
        #   args[0] useFancy: 1 = use type dumpers, -1 = report capabilities
        #   args[1] passExceptions: non-zero propagates dumper errors
        #   args[2] comma-separated inames the GUI has expanded
        #   args[3] comma-separated watcher expressions (currently unused)
        args = arg.split(' ')
        #warn("ARG: %s" % arg)
        #warn("ARGS: %s" % args)
        useFancy = int(args[0])
        passExceptions = int(args[1])
        expandedINames = set()
        if len(args) > 2:
            expandedINames = set(args[2].split(','))
        watchers = set()
        if len(args) > 3:
            watchers = set(args[3].split(','))
        #warn("EXPANDED INAMES: %s" % expandedINames)
        #warn("WATCHERS: %s" % watchers)
        module = sys.modules[__name__]
        self.dumpers = {}
        if useFancy == -1:
            # Capability query: list the available qqDump* dumpers and
            # static version information, then bail out.
            output = "dumpers=["
            for key, value in module.__dict__.items():
                if key.startswith("qqDump"):
                    if output != "dumpers=[":
                        output += ","
                    output += '"' + key[6:] + '"'
            output += "],"
            #output += "qtversion=[%d,%d,%d]"
            output += "qtversion=[4,6,0],"
            output += "namespace=\"%s\"," % qtNamespace()
            output += "dumperversion=\"2.0\","
            output += "sizes=[],"
            output += "expressions=[]"
            output += "]"
            print output
            return
        if useFancy:
            # Index the module-level qqDumpFoo helpers by type name "Foo".
            for key, value in module.__dict__.items():
                #if callable(value):
                if key.startswith("qqDump"):
                    self.dumpers[key[6:]] = value
        try:
            frame = gdb.selected_frame()
        except RuntimeError:
            # No frame selected (e.g. inferior not running).
            return ""
        d = Dumper()
        d.dumpers = self.dumpers
        d.passExceptions = passExceptions
        d.ns = qtNamespace()
        block = frame.block()
        #warn(" NAMESPACE IS: '%s'" % d.ns)
        #warn("FRAME %s: " % frame)
        # Walk the lexical blocks from the innermost outwards, dumping
        # each symbol found along the way.
        while True:
            if block is None:
                warn("UNEXPECTED 'None' BLOCK")
                break
            for symbol in block:
                name = symbol.print_name
                if name == "__in_chrg":
                    continue
                # "NotImplementedError: Symbol type not yet supported in
                # Python scripts."
                #warn("SYMBOL %s: " % symbol.value)
                #warn("SYMBOL %s (%s): " % (symbol, name))
                item = Item(0, "local", name, name)
                try:
                    item.value = frame.read_var(name)  # this is a gdb value
                except RuntimeError:
                    # happens for void foo() { std::string s; std::wstring w; }
                    #warn("  FRAME READ VAR ERROR: %s (%s): " % (symbol, name))
                    continue
                #warn("ITEM %s: " % item.value)
                d.expandedINames = expandedINames
                d.useFancy = useFancy
                d.beginHash()
                d.put('iname="%s",' % item.iname)
                d.safePutItemHelper(item)
                d.endHash()
            # The outermost block in a function has the function member
            # FIXME: check whether this is guaranteed.
            if not block.function is None:
                break
            block = block.superblock
            #warn("BLOCK %s: " % block)
        d.pushOutput()
        print('locals={iname="local",name="Locals",value=" ",type=" ",'
            + 'children=[%s]}' % d.safeoutput)
# Register the 'bb' command with gdb at script load time.
FrameCommand()
#######################################################################
#
# The Dumper Class
#
#######################################################################
class Dumper:
    """Accumulates the machine-readable description of watched values.

    The output is a flat string in the IDE's name="value",children=[...]
    format.  The put*() helpers take care of comma placement and suppress
    type/numchild fields that merely repeat the enclosing list's defaults
    (tracked via the childTypes/childNumChilds stacks).
    """
    def __init__(self):
        self.output = ""      # text produced by the current (unsafe) item
        self.safeoutput = ""  # text already committed via pushOutput()
        # Parallel stacks: declared child type / child count of the
        # enclosing children=[...] list.
        self.childTypes = [""]
        self.childNumChilds = [-1]

    def put(self, value):
        """Append raw text to the pending output."""
        self.output += value

    def putCommaIfNeeded(self):
        # A separating comma is needed whenever the previous character
        # closed a value, a hash, or a list.
        c = self.output[-1:]
        if c == '}' or c == '"' or c == ']' or c == '\n':
            self.put(',')

    def putField(self, name, value):
        """Append a name="value" field, comma-separated from the previous one."""
        self.putCommaIfNeeded()
        self.put('%s="%s"' % (name, value))

    def beginHash(self):
        self.putCommaIfNeeded()
        self.put('{')

    def endHash(self):
        self.put('}')

    def beginItem(self, name):
        """Open a name="... field whose value is appended with put()."""
        self.putCommaIfNeeded()
        self.put(name)
        self.put('="')

    def endItem(self):
        self.put('"')

    def beginChildren(self, numChild = 1, type = None, children = None):
        """Open a children=[...] list, declaring defaults for the children.

        'type'/'children', when given, are announced as childtype/
        childnumchild so the individual children may omit them.
        """
        childType = ""
        childNumChild = -1
        if numChild == 0:
            type = None
        self.putCommaIfNeeded()
        if not type is None:
            childType = stripClassTag(str(type))
            self.put('childtype="%s",' % childType)
            if isSimpleType(type) or isStringType(self, type):
                self.put('childnumchild="0",')
                childNumChild = 0
            elif type.code == gdb.TYPE_CODE_PTR:
                self.put('childnumchild="1",')
                childNumChild = 1
        if not children is None:
            self.put('childnumchild="%s",' % children)
            childNumChild = children
        self.childTypes.append(childType)
        self.childNumChilds.append(childNumChild)
        self.put("children=[")

    def endChildren(self):
        """Close a children=[...] list and pop the declared defaults."""
        self.childTypes.pop()
        self.childNumChilds.pop()
        self.put(']')

    # convenience
    def putItemCount(self, count):
        self.putCommaIfNeeded()
        self.put('value="<%s items>"' % count)

    def putEllipsis(self):
        self.putCommaIfNeeded()
        self.put('{name="<incomplete>",value="",type="",numchild="0"}')

    def putType(self, type):
        # Omit the type if it matches the childtype the enclosing list
        # already declared.
        type = stripClassTag(str(type))
        if len(type) > 0 and type != self.childTypes[-1]:
            self.putCommaIfNeeded()
            self.put('type="%s"' % type)  # str(type.unqualified()) ?

    def putNumChild(self, numchild):
        # Omit numchild if it matches the enclosing list's default.
        if numchild != self.childNumChilds[-1]:
            self.put(',numchild="%s"' % numchild)

    def putValue(self, value, encoding = None):
        if not encoding is None:
            self.putField("valueencoded", encoding)
        self.putField("value", value)

    def putStringValue(self, value):
        # Encoding 7 == hex-encoded UTF-16 (see encodeString()).
        encoded = encodeString(value)
        self.putCommaIfNeeded()
        self.put('valueencoded="%d",value="%s"' % (7, encoded))

    def putByteArrayValue(self, value):
        # Encoding 6 == hex-encoded Latin-1 (see encodeByteArray()).
        encoded = encodeByteArray(value)
        self.putCommaIfNeeded()
        self.put('valueencoded="%d",value="%s"' % (6, encoded))

    def putName(self, name):
        self.putCommaIfNeeded()
        self.put('name="%s"' % name)

    def isExpanded(self, item):
        """Return True if the GUI has this item's subtree expanded."""
        # FIX: raising a plain string is illegal (TypeError since py2.6),
        # so the intended guard could never be caught as a RuntimeError by
        # safePutItemHelper(); raise a proper exception instead.
        if item.iname is None:
            raise RuntimeError("Illegal iname 'None'")
        if item.iname.startswith("None"):
            raise RuntimeError("Illegal iname '%s'" % item.iname)
        return item.iname in self.expandedINames

    def isExpandedIName(self, iname):
        return iname in self.expandedINames

    def unputField(self, name):
        """Remove the trailing field if it is named *name*."""
        pos = self.output.rfind(",")
        if self.output[pos + 1:].startswith(name):
            self.output = self.output[0:pos]

    def stripNamespaceFromType(self, typeobj):
        """Strip the Qt namespace prefix and any template arguments."""
        # FIXME: pass ns from plugin
        type = stripClassTag(str(typeobj))
        if len(self.ns) > 0 and type.startswith(self.ns):
            type = type[len(self.ns):]
        pos = type.find("<")
        if pos != -1:
            type = type[0:pos]
        return type

    def isMovableType(self, type):
        """Return True for types safe to relocate (pointers, PODs, known Qt types)."""
        if type.code == gdb.TYPE_CODE_PTR:
            return True
        if isSimpleType(type):
            return True
        return self.stripNamespaceFromType(type) in movableTypes

    def putIntItem(self, name, value):
        """Emit a complete {name,value,type=int,numchild=0} hash."""
        self.beginHash()
        self.putName(name)
        self.putValue(value)
        self.putType("int")
        self.putNumChild(0)
        self.endHash()

    def putBoolItem(self, name, value):
        """Emit a complete {name,value,type=bool,numchild=0} hash."""
        self.beginHash()
        self.putName(name)
        self.putValue(value)
        self.putType("bool")
        self.putNumChild(0)
        self.endHash()

    def pushOutput(self):
        """Commit the pending output so a later failure cannot discard it."""
        self.safeoutput += self.output
        self.output = ""

    def dumpInnerValueHelper(self, item):
        if isSimpleType(item.value.type):
            self.safePutItemHelper(item)

    def safePutItemHelper(self, item):
        """putItemHelper() wrapped so corrupted data yields an <invalid> entry."""
        self.pushOutput()
        # This is only used at the top level to ensure continuation
        # after failures due to uninitialized or corrupted data.
        if self.passExceptions:
            # for debugging reasons propagate errors.
            self.putItemHelper(item)
        else:
            try:
                self.putItemHelper(item)
            except RuntimeError:
                # Discard the partial output and emit a placeholder.
                self.output = ""
                # FIXME: Only catch debugger related exceptions
                self.putName(item.name)
                self.putValue("<invalid>")
                self.putType(str(item.value.type))
                self.putNumChild(0)
                self.beginChildren()
                self.endChildren()
        self.pushOutput()

    def putItem(self, item):
        self.beginHash()
        self.safePutItemHelper(item)
        self.endHash()

    def putItemOrPointer(self, item):
        self.beginHash()
        self.putItemOrPointerHelper(item)
        self.endHash()

    def putCallItem(self, name, item, func):
        """Emit the result of calling member *func* on the item's value."""
        result = call(item.value, func)
        self.putItem(Item(result, item.iname, name, name))

    def putItemOrPointerHelper(self, item):
        # Transparently dereference non-null pointers, except char*
        # (which is rendered as a string elsewhere).
        # FIX: the original tested str(type.unqualified) -- the repr of a
        # bound method, never equal to "char" -- so char* was dereferenced
        # too.  Compare the pointee type instead.
        # NOTE(review): assumed intent; confirm against upstream dumpers.
        if item.value.type.code == gdb.TYPE_CODE_PTR \
                and str(item.value.type.target().unqualified()) != "char":
            if not isNull(item.value):
                self.putItemOrPointerHelper(
                    Item(item.value.dereference(), item.iname, None, None))
            else:
                self.putValue("(null)")
                self.putNumChild(0)
        else:
            self.safePutItemHelper(item)

    def putItemHelper(self, item):
        """Core dispatcher: render one value (and, if expanded, its children)."""
        name = getattr(item, "name", None)
        if not name is None:
            self.putName(name)
        self.putType(item.value.type)
        # FIXME: Gui shows references stripped?
        value = item.value
        type = value.type
        # Strip references and one level of typedef before dispatching.
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
            value = value.cast(type)
        if type.code == gdb.TYPE_CODE_TYPEDEF:
            type = type.target()
        strippedType = self.stripNamespaceFromType(
            type.strip_typedefs().unqualified()).replace("::", "__")
        if isSimpleType(type):
            self.putValue(value)
            self.putNumChild(0)
        elif strippedType in self.dumpers:
            # A dedicated qqDump* helper exists for this type.
            self.dumpers[strippedType](self, item)
        elif type.code == gdb.TYPE_CODE_ENUM:
            self.putValue(value)
            self.putNumChild(0)
        elif type.code == gdb.TYPE_CODE_PTR:
            isHandled = False
            if isNull(value):
                self.putValue("0x0")
                self.putNumChild(0)
                isHandled = True
            target = str(type.target().unqualified())
            if target == "char" and not isHandled:
                # Display NUL-terminated char* contents (up to 10 chars)
                # inline, hex-encoded (encoding 6).
                p = value
                for i in xrange(0, 10):
                    if p.dereference() == 0:
                        # Found terminating NUL
                        self.putField("valueencoded", "6")
                        self.put(',value="')
                        p = value
                        for j in xrange(0, i):
                            self.put('%02x' % int(p.dereference()))
                            p += 1
                        self.put('"')
                        self.putNumChild(0)
                        isHandled = True
                        break
                    p += 1
            if not isHandled:
                # Generic pointer type.
                if self.isExpanded(item):
                    self.put(',')
                    # Temporary change to target type.
                    self.childTypes.append(
                        stripClassTag(str(item.value.type.target())))
                    self.putItemOrPointerHelper(
                        Item(item.value.dereference(), item.iname, None, None))
                    self.childTypes.pop()
                else:
                    # NOTE(review): value.address is the address of the
                    # pointer variable itself -- confirm intent upstream.
                    self.putValue(str(value.address))
                    self.putNumChild(1)
        else:
            # Compound type: enumerate its fields.
            # insufficient, see http://sourceware.org/bugzilla/show_bug.cgi?id=10953
            #fields = value.type.fields()
            fields = value.type.strip_typedefs().fields()
            self.putValue("{...}")
            if False:
                # (Disabled) count only real members, skipping statics.
                numfields = 0
                for field in fields:
                    bitpos = getattr(field, "bitpos", None)
                    if not bitpos is None:
                        numfields += 1  # FIX: '++numfields' was a no-op
            else:
                numfields = len(fields)
            self.putNumChild(numfields)
            if self.isExpanded(item):
                innerType = None
                if len(fields) == 1 and fields[0].name is None:
                    # Anonymous single field: a plain array.
                    innerType = value.type.target()
                self.beginChildren(1, innerType)
                baseNumber = 0
                for field in fields:
                    bitpos = getattr(field, "bitpos", None)
                    if bitpos is None:  # FIXME: Is check correct?
                        continue  # A static class member(?).
                    if field.name is None:
                        # Array: emit each element as an indexed child.
                        innerType = value.type.target()
                        p = value.cast(innerType.pointer())
                        for i in xrange(0, value.type.sizeof // innerType.sizeof):
                            self.putItem(Item(p.dereference(), item.iname, i, None))
                            p = p + 1
                        continue
                    # ignore vtable pointers for virtual inheritance
                    if field.name.startswith("_vptr."):
                        continue
                    if field.name == stripClassTag(str(field.type)):
                        # Field is base type. We cannot use field.name as part
                        # of the iname as it might contain spaces and other
                        # strange characters.
                        child = Item(value.cast(field.type),
                            item.iname, "@%d" % baseNumber, field.name)
                        baseNumber += 1
                        self.beginHash()
                        self.putField("iname", child.iname)
                        self.safePutItemHelper(child)
                        self.endHash()
                    else:
                        # Data member.
                        child = Item(value[field.name],
                            item.iname, field.name, field.name)
                        if not child.name:
                            child.name = "<anon>"
                        self.beginHash()
                        self.safePutItemHelper(child)
                        self.endHash()
                self.endChildren()
|
#!/usr/bin/env python3
"""EventMan(ager)
Your friendly manager of attendees at an event.
Copyright 2015-2017 Davide Alberani <da@erlug.linux.it>
RaspiBO <info@raspibo.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import glob
import json
import time
import string
import random
import logging
import datetime
import dateutil.tz
import dateutil.parser
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
import tornado.websocket
from tornado import gen, escape, process
import utils
import monco
import collections
# Character encoding used when serializing values for subprocesses.
ENCODING = 'utf-8'
# Seconds a trigger subprocess may run before being killed.
PROCESS_TIMEOUT = 60
# API version prefix used in URLs (/v1.0/...).
API_VERSION = '1.0'
# Matches runs of characters NOT allowed in environment variable names
# (anything other than A-Z and underscore gets stripped).
re_env_key = re.compile('[^A-Z_]+')
# Collapses runs of consecutive slashes in paths.
re_slashes = re.compile(r'//+')
# Keep track of WebSocket connections.
_ws_clients = {}
def authenticated(method):
    """Decorator to handle forced authentication.

    Wraps tornado's own ``authenticated`` decorator so that:
    - authentication can be disabled globally (``self.authentication`` falsy);
    - unauthenticated API calls get redirected to the versioned login URL.
    """
    # FIX: the original used @tornado.web.functools.wraps, which relies on
    # tornado.web's *internal* import of functools; import it directly.
    import functools
    original_wrapper = tornado.web.authenticated(method)
    @functools.wraps(method)
    def my_wrapper(self, *args, **kwargs):
        # If no authentication was required from the command line or config file.
        if not self.authentication:
            return method(self, *args, **kwargs)
        # unauthenticated API calls gets redirected to /v1.0/[...]
        if self.is_api() and not self.current_user:
            self.redirect('/v%s%s' % (API_VERSION, self.get_login_url()))
            return
        return original_wrapper(self, *args, **kwargs)
    return my_wrapper
class BaseException(Exception):
    """Root of EventMan's custom exception hierarchy.

    NOTE(review): the name shadows the builtin BaseException; it is kept
    because existing callers catch this class by name.

    :param message: text message
    :type message: str
    :param status: numeric HTTP status code (default 400)
    :type status: int"""
    def __init__(self, message, status=400):
        super().__init__(message)
        self.message = message
        self.status = status
class InputException(BaseException):
    """Exception raised by errors in input handling (HTTP 400 by default)."""
    pass
class BaseHandler(tornado.web.RequestHandler):
    """Base class for request handlers."""
    # Default permissions; a boolean True grants the action to everybody.
    permissions = {
        'event|read': True,
        'event:tickets|read': True,
        'event:tickets|create': True,
        'event:tickets|update': True,
        'event:tickets-all|create': True,
        'events|read': True,
        'users|create': True
    }
    # Cache currently connected users.
    _users_cache = {}

    # A property to access the first value of each argument.
    arguments = property(lambda self: dict([(k, v[0].decode('utf-8'))
        for k, v in self.request.arguments.items()]))

    # A property to access both the UUID and the clean arguments.
    @property
    def uuid_arguments(self):
        """Return (uuid, arguments); 'uuid' is removed from the arguments."""
        uuid = None
        arguments = self.arguments
        if 'uuid' in arguments:
            uuid = arguments['uuid']
            del arguments['uuid']
        return uuid, arguments

    # Textual values recognized by tobool(); anything else passes through.
    _bool_convert = {
        '0': False,
        'n': False,
        'f': False,
        'no': False,
        'off': False,
        'false': False,
        '1': True,
        'y': True,
        't': True,
        'on': True,
        'yes': True,
        'true': True
    }

    # Splits a stored password of the form "$salt$hash".
    _re_split_salt = re.compile(r'\$(?P<salt>.+)\$(?P<hash>.+)')

    def write_error(self, status_code, **kwargs):
        """Default error handler."""
        if isinstance(kwargs.get('exc_info', (None, None))[1], BaseException):
            exc = kwargs['exc_info'][1]
            status_code = exc.status
            message = exc.message
        else:
            message = 'internal error'
        self.build_error(message, status=status_code)

    def is_api(self):
        """Return True if the path is from an API call."""
        return self.request.path.startswith('/v%s' % API_VERSION)

    def tobool(self, obj):
        """Convert some textual values to boolean."""
        if isinstance(obj, (list, tuple)):
            obj = obj[0]
        if isinstance(obj, str):
            obj = obj.lower()
        return self._bool_convert.get(obj, obj)

    def arguments_tobool(self):
        """Return a dictionary of arguments, converted to booleans where possible."""
        return dict([(k, self.tobool(v)) for k, v in self.arguments.items()])

    def initialize(self, **kwargs):
        """Add every passed (key, value) as attributes of the instance."""
        for key, value in kwargs.items():
            setattr(self, key, value)

    @property
    def current_user(self):
        """Retrieve current user name from the secure cookie."""
        current_user = self.get_secure_cookie("user")
        if isinstance(current_user, bytes):
            current_user = current_user.decode('utf-8')
        return current_user

    @property
    def current_user_info(self):
        """Information about the current user, including their permissions."""
        current_user = self.current_user
        if current_user in self._users_cache:
            return self._users_cache[current_user]
        # Start from the default permissions, then merge the stored ones.
        permissions = set([k for (k, v) in self.permissions.items() if v is True])
        user_info = {'permissions': permissions}
        if current_user:
            user_info['_id'] = current_user
            user = self.db.getOne('users', {'_id': current_user})
            if user:
                user_info = user
                permissions.update(set(user.get('permissions') or []))
                user_info['permissions'] = permissions
                user_info['isRegistered'] = True
        self._users_cache[current_user] = user_info
        return user_info

    def has_permission(self, permission):
        """Check permissions of the current user.

        :param permission: the permission to check
        :type permission: str

        :returns: True if the user is allowed to perform the action or False
        :rtype: bool
        """
        user_info = self.current_user_info or {}
        user_permissions = user_info.get('permissions') or []
        # "collection|all" grants every action on that collection.
        global_permission = '%s|all' % permission.split('|')[0]
        if 'admin|all' in user_permissions or global_permission in user_permissions or permission in user_permissions:
            return True
        collection_permission = self.permissions.get(permission)
        if isinstance(collection_permission, bool):
            return collection_permission
        # FIX: collections.Callable was removed in Python 3.10; use the
        # callable() builtin instead.
        if callable(collection_permission):
            return collection_permission(permission)
        return False

    def user_authorized(self, username, password):
        """Check if a combination of username/password is valid.

        :param username: username or email
        :type username: str
        :param password: password
        :type password: str

        :returns: tuple like (bool_user_is_authorized, dict_user_info)
        :rtype: dict"""
        query = [{'username': username}, {'email': username}]
        res = self.db.query('users', query)
        if not res:
            return (False, {})
        user = res[0]
        db_password = user.get('password') or ''
        if not db_password:
            return (False, {})
        match = self._re_split_salt.match(db_password)
        if not match:
            return (False, {})
        salt = match.group('salt')
        # Re-hash the supplied password with the stored salt and compare.
        if utils.hash_password(password, salt=salt) == db_password:
            return (True, user)
        return (False, {})

    def build_error(self, message='', status=400):
        """Build and write an error message.

        :param message: textual message
        :type message: str
        :param status: HTTP status code
        :type status: int
        """
        self.set_status(status)
        self.write({'error': True, 'message': message})

    def logout(self):
        """Remove the secure cookie used for authentication."""
        if self.current_user in self._users_cache:
            del self._users_cache[self.current_user]
        self.clear_cookie("user")
class RootHandler(BaseHandler):
    """Handler for the / path."""
    # Directory containing the client-side (AngularJS) application,
    # resolved relative to this source file.
    angular_app_path = os.path.join(os.path.dirname(__file__), "angular_app")

    @gen.coroutine
    def get(self, *args, **kwargs):
        # serve the ./angular_app/index.html file
        with open(self.angular_app_path + "/index.html", 'r') as fd:
            self.write(fd.read())
class CollectionHandler(BaseHandler):
    """Base class for handlers that need to interact with the database backend.

    Introduce basic CRUD operations."""
    # set of documents we're managing (a collection in MongoDB or a table in a SQL database)
    document = None
    collection = None

    # set of documents used to store incremental sequences
    counters_collection = 'counters'

    # Alphabet used for the random part of generated IDs.
    _id_chars = string.ascii_lowercase + string.digits

    def get_next_seq(self, seq):
        """Increment and return the new value of a ever-incrementing counter.

        :param seq: unique name of the sequence
        :type seq: str

        :returns: the next value of the sequence
        :rtype: int
        """
        if not self.db.query(self.counters_collection, {'seq_name': seq}):
            self.db.add(self.counters_collection, {'seq_name': seq, 'seq': 0})
        merged, doc = self.db.update(self.counters_collection,
                {'seq_name': seq},
                {'seq': 1},
                operation='increment')
        return doc.get('seq', 0)

    def gen_id(self, seq='ids', random_alpha=32):
        """Generate a unique, non-guessable ID.

        :param seq: the scope of the ever-incrementing sequence
        :type seq: str
        :param random_alpha: number of random lowercase alphanumeric chars
        :type random_alpha: int

        :returns: unique ID
        :rtype: str"""
        # timestamp + sequence guarantee uniqueness; the random tail
        # makes the ID non-guessable.
        t = str(time.time()).replace('.', '_')
        seq = str(self.get_next_seq(seq))
        rand = ''.join([random.choice(self._id_chars) for x in range(random_alpha)])
        return '-'.join((t, seq, rand))

    def _filter_results(self, results, params):
        """Filter a list using keys and values from a dictionary.

        :param results: the list to be filtered
        :type results: list
        :param params: a dictionary of items that must all be present in an original list item to be included in the return
        :type params: dict

        :returns: list of items that have all the keys with the same values as params
        :rtype: list"""
        if not params:
            return results
        params = monco.convert(params)
        filtered = []
        for result in results:
            add = True
            for key, value in params.items():
                if key not in result or result[key] != value:
                    add = False
                    break
            if add:
                filtered.append(result)
        return filtered

    def _clean_dict(self, data):
        """Filter a dictionary (in place) to remove unwanted keywords in db queries.

        :param data: dictionary to clean
        :type data: dict"""
        # '$'-prefixed keys are MongoDB operators: never accept them from clients.
        if isinstance(data, dict):
            for key in list(data.keys()):
                if isinstance(key, str) and key.startswith('$'):
                    del data[key]
        return data

    def _dict2env(self, data):
        """Convert a dictionary into a form suitable to be passed as environment variables.

        :param data: dictionary to convert
        :type data: dict"""
        ret = {}
        for key, value in data.items():
            if isinstance(value, (list, tuple, dict)):
                continue
            try:
                # FIX: keep keys and values as str.  The original encoded
                # them to bytes, which made re_env_key.sub() raise
                # TypeError on Python 3 (str pattern vs bytes key); the
                # exception was swallowed below, silently dropping every
                # variable.
                key = key.upper().encode('ascii', 'ignore').decode('ascii')
                key = re_env_key.sub('', key)
                if not key:
                    continue
                ret[key] = str(value)
            except Exception:
                continue
        return ret

    def apply_filter(self, data, filter_name):
        """Apply a filter to the data.

        :param data: the data to filter
        :returns: the modified (possibly also in place) data
        """
        filter_method = getattr(self, 'filter_%s' % filter_name, None)
        if filter_method is not None:
            data = filter_method(data)
        return data

    @gen.coroutine
    @authenticated
    def get(self, id_=None, resource=None, resource_id=None, acl=True, **kwargs):
        if resource:
            # Handle access to sub-resources.
            permission = '%s:%s%s|read' % (self.document, resource, '-all' if resource_id is None else '')
            if acl and not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            handler = getattr(self, 'handle_get_%s' % resource, None)
            # FIX: collections.Callable was removed in Python 3.10.
            if handler and callable(handler):
                output = handler(id_, resource_id, **kwargs) or {}
                output = self.apply_filter(output, 'get_%s' % resource)
                self.write(output)
                return
            return self.build_error(status=404, message='unable to access resource: %s' % resource)
        if id_ is not None:
            # read a single document
            permission = '%s|read' % self.document
            if acl and not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            output = self.db.get(self.collection, id_)
            output = self.apply_filter(output, 'get')
            self.write(output)
        else:
            # return an object containing the list of all objects in the collection;
            # e.g.: {'events': [{'_id': 'obj1-id, ...}, {'_id': 'obj2-id, ...}, ...]}
            # Please, never return JSON lists that are not encapsulated into an object,
            # to avoid XSS vulnerabilities.
            permission = '%s|read' % self.collection
            if acl and not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            output = {self.collection: self.db.query(self.collection, self.arguments)}
            output = self.apply_filter(output, 'get_all')
            self.write(output)

    @gen.coroutine
    @authenticated
    def post(self, id_=None, resource=None, resource_id=None, **kwargs):
        data = escape.json_decode(self.request.body or '{}')
        self._clean_dict(data)
        method = self.request.method.lower()
        crud_method = 'create' if method == 'post' else 'update'
        now = datetime.datetime.now()
        user_info = self.current_user_info
        user_id = user_info.get('_id')
        # Environment handed to trigger scripts.
        env = {}
        if id_ is not None:
            env['%s_ID' % self.document.upper()] = id_
        if crud_method == 'create':
            data['created_by'] = user_id
            data['created_at'] = now
        data['updated_by'] = user_id
        data['updated_at'] = now
        if resource:
            permission = '%s:%s%s|%s' % (self.document, resource, '-all' if resource_id is None else '', crud_method)
            if not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            # Handle access to sub-resources.
            handler = getattr(self, 'handle_%s_%s' % (method, resource), None)
            # FIX: collections.Callable was removed in Python 3.10.
            if handler and callable(handler):
                data = self.apply_filter(data, 'input_%s_%s' % (method, resource))
                output = handler(id_, resource_id, data, **kwargs)
                output = self.apply_filter(output, 'get_%s' % resource)
                env['RESOURCE'] = resource
                if resource_id:
                    env['%s_ID' % resource] = resource_id
                self.run_triggers('%s_%s_%s' % ('create' if resource_id is None else 'update', self.document, resource),
                                  stdin_data=output, env=env)
                self.write(output)
                return
            return self.build_error(status=404, message='unable to access resource: %s' % resource)
        if id_ is not None:
            permission = '%s|%s' % (self.document, crud_method)
            if not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            data = self.apply_filter(data, 'input_%s' % method)
            merged, newData = self.db.update(self.collection, id_, data)
            newData = self.apply_filter(newData, method)
            self.run_triggers('update_%s' % self.document, stdin_data=newData, env=env)
        else:
            permission = '%s|%s' % (self.collection, crud_method)
            if not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            data = self.apply_filter(data, 'input_%s_all' % method)
            newData = self.db.add(self.collection, data, _id=self.gen_id())
            newData = self.apply_filter(newData, '%s_all' % method)
            self.run_triggers('create_%s' % self.document, stdin_data=newData, env=env)
        self.write(newData)

    # PUT (update an existing document) is handled by the POST (create a new document) method;
    # in subclasses you can always separate sub-resources handlers like handle_post_tickets and handle_put_tickets
    put = post

    @gen.coroutine
    @authenticated
    def delete(self, id_=None, resource=None, resource_id=None, **kwargs):
        env = {}
        if id_ is not None:
            env['%s_ID' % self.document.upper()] = id_
        if resource:
            # Handle access to sub-resources.
            permission = '%s:%s%s|delete' % (self.document, resource, '-all' if resource_id is None else '')
            if not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            method = getattr(self, 'handle_delete_%s' % resource, None)
            # FIX: collections.Callable was removed in Python 3.10.
            if method and callable(method):
                output = method(id_, resource_id, **kwargs)
                env['RESOURCE'] = resource
                if resource_id:
                    env['%s_ID' % resource] = resource_id
                self.run_triggers('delete_%s_%s' % (self.document, resource), stdin_data=env, env=env)
                self.write(output)
                return
            return self.build_error(status=404, message='unable to access resource: %s' % resource)
        if id_ is not None:
            permission = '%s|delete' % self.document
            if not self.has_permission(permission):
                return self.build_error(status=401, message='insufficient permissions: %s' % permission)
            howMany = self.db.delete(self.collection, id_)
            env['DELETED_ITEMS'] = howMany
            self.run_triggers('delete_%s' % self.document, stdin_data=env, env=env)
        else:
            # FIX: the original fell through and also wrote
            # {'success': True}, emitting two concatenated JSON objects.
            self.write({'success': False})
            return
        self.write({'success': True})

    def on_timeout(self, cmd, pipe):
        """Kill a process that is taking too long to complete."""
        logging.debug('cmd %s is taking too long: killing it' % ' '.join(cmd))
        try:
            pipe.proc.kill()
        except Exception:
            # Best effort: the process may already be gone.
            pass

    def on_exit(self, returncode, cmd, pipe):
        """Callback executed when a subprocess execution is over."""
        self.ioloop.remove_timeout(self.timeout)
        logging.debug('cmd: %s returncode: %d' % (' '.join(cmd), returncode))

    @gen.coroutine
    def run_subprocess(self, cmd, stdin_data=None, env=None):
        """Execute the given action.

        :param cmd: the command to be run with its command line arguments
        :type cmd: list
        :param stdin_data: data to be sent over stdin
        :type stdin_data: str
        :param env: environment of the process
        :type env: dict
        """
        self.ioloop = tornado.ioloop.IOLoop.instance()
        processed_env = self._dict2env(env)
        p = process.Subprocess(cmd, close_fds=True, stdin=process.Subprocess.STREAM,
                stdout=process.Subprocess.STREAM, stderr=process.Subprocess.STREAM, env=processed_env)
        p.set_exit_callback(lambda returncode: self.on_exit(returncode, cmd, p))
        self.timeout = self.ioloop.add_timeout(datetime.timedelta(seconds=PROCESS_TIMEOUT),
                lambda: self.on_timeout(cmd, p))
        # FIX: tornado streams require bytes on Python 3; encode str payloads.
        payload = stdin_data or ''
        if isinstance(payload, str):
            payload = payload.encode(ENCODING)
        yield gen.Task(p.stdin.write, payload)
        p.stdin.close()
        out, err = yield [gen.Task(p.stdout.read_until_close),
                gen.Task(p.stderr.read_until_close)]
        logging.debug('cmd: %s' % ' '.join(cmd))
        logging.debug('cmd stdout: %s' % out)
        logging.debug('cmd strerr: %s' % err)
        raise gen.Return((out, err))

    @gen.coroutine
    def run_triggers(self, action, stdin_data=None, env=None):
        """Asynchronously execute triggers for the given action.

        :param action: action name; scripts in directory ./data/triggers/{action}.d will be run
        :type action: str
        :param stdin_data: a python dictionary that will be serialized in JSON and sent to the process over stdin
        :type stdin_data: dict
        :param env: environment of the process
        :type env: dict
        """
        if not hasattr(self, 'data_dir'):
            return
        logging.debug('running triggers for action "%s"' % action)
        stdin_data = stdin_data or {}
        try:
            stdin_data = json.dumps(stdin_data)
        except Exception:
            # Unserializable payload: run the trigger with an empty document.
            stdin_data = '{}'
        for script in glob.glob(os.path.join(self.data_dir, 'triggers', '%s.d' % action, '*')):
            # Only run regular, executable files.
            if not (os.path.isfile(script) and os.access(script, os.X_OK)):
                continue
            out, err = yield gen.Task(self.run_subprocess, [script], stdin_data, env)

    def build_ws_url(self, path, proto='ws', host=None):
        """Return a WebSocket url from a path."""
        # NOTE(review): 'proto' and 'host' are accepted but unused; the URL
        # is hard-wired to the local WebSocket server -- confirm intent.
        try:
            args = '?uuid=%s' % self.get_argument('uuid')
        except Exception:
            args = ''
        return 'ws://127.0.0.1:%s/ws/%s%s' % (self.listen_port + 1, path, args)

    @gen.coroutine
    def send_ws_message(self, path, message):
        """Send a WebSocket message to all the connected clients.

        :param path: partial path used to build the WebSocket url
        :type path: str
        :param message: message to send
        :type message: str
        """
        try:
            ws = yield tornado.websocket.websocket_connect(self.build_ws_url(path))
            ws.write_message(message)
            ws.close()
        except Exception as e:
            self.logger.error('Error yielding WebSocket message: %s', e)
class EventsHandler(CollectionHandler):
    """Handle requests for Events."""
    document = 'event'
    collection = 'events'

    def _mangle_event(self, event):
        """Apply some in-place changes to an event document.

        Adds the 'tickets_sold' and 'no_tickets_for_sale' keys and hides the
        tickets list from users lacking the tickets-all|read permission.

        :param event: the event to update (modified in place)
        :type event: dict

        :returns: the updated event
        :rtype: dict"""
        if 'tickets' in event:
            event['tickets_sold'] = len([t for t in event['tickets'] if not t.get('cancelled')])
            event['no_tickets_for_sale'] = False
            try:
                self._check_sales_datetime(event)
                self._check_number_of_tickets(event)
            except InputException:
                event['no_tickets_for_sale'] = True
            if not self.has_permission('tickets-all|read'):
                event['tickets'] = []
        return event

    def filter_get(self, output):
        return self._mangle_event(output)

    def filter_get_all(self, output):
        for event in output.get('events') or []:
            self._mangle_event(event)
        return output

    def filter_input_post(self, data):
        # Auto-generate the group_id, if missing.
        if 'group_id' not in data:
            data['group_id'] = self.gen_id()
        return data

    filter_input_post_all = filter_input_post
    filter_input_put = filter_input_post

    def filter_input_post_tickets(self, data):
        # Avoid users being able to auto-update their 'attended' status.
        if not self.has_permission('event|update'):
            if 'attended' in data:
                del data['attended']
        return data

    filter_input_put_tickets = filter_input_post_tickets

    def handle_get_group_persons(self, id_, resource_id=None):
        """Return the persons who have a ticket in other events of the same
        group, excluding the ones already present in this event.

        :param id_: the event _id
        :param resource_id: unused

        :returns: dictionary with the 'persons' key
        :rtype: dict"""
        persons = []
        this_query = {'_id': id_}
        this_event = self.db.query('events', this_query)[0]
        group_id = this_event.get('group_id')
        if group_id is None:
            return {'persons': persons}
        this_persons = [p for p in (this_event.get('tickets') or []) if not p.get('cancelled')]
        this_emails = [_f for _f in [p.get('email') for p in this_persons] if _f]
        all_query = {'group_id': group_id}
        events = self.db.query('events', all_query)
        for event in events:
            if id_ is not None and str(event.get('_id')) == id_:
                continue
            persons += [p for p in (event.get('tickets') or []) if p.get('email') and p.get('email') not in this_emails]
        return {'persons': persons}

    def _get_ticket_data(self, ticket_id_or_query, tickets):
        """Filter a list of tickets returning the first item with a given _id
        or which set of keys specified in a dictionary match their respective values.

        :param ticket_id_or_query: ticket _id (str) or a query (dict)
        :param tickets: the tickets to search
        :type tickets: list

        :returns: the first matching ticket, or an empty dict
        :rtype: dict"""
        for ticket in tickets:
            if isinstance(ticket_id_or_query, dict):
                if all(ticket.get(k) == v for k, v in ticket_id_or_query.items()):
                    return ticket
            else:
                if str(ticket.get('_id')) == ticket_id_or_query:
                    return ticket
        return {}

    def handle_get_tickets(self, id_, resource_id=None):
        # Return every ticket registered at this event, or the information
        # about a specific ticket.
        query = {'_id': id_}
        event = self.db.query('events', query)[0]
        if resource_id:
            return {'ticket': self._get_ticket_data(resource_id, event.get('tickets') or [])}
        tickets = self._filter_results(event.get('tickets') or [], self.arguments)
        return {'tickets': tickets}

    def _check_number_of_tickets(self, event):
        """Raise an InputException if the event has no tickets left.

        Admins are never limited.

        :param event: the event to check
        :type event: dict"""
        if self.has_permission('admin|all'):
            return
        number_of_tickets = event.get('number_of_tickets')
        if number_of_tickets is None:
            return
        try:
            number_of_tickets = int(number_of_tickets)
        except (TypeError, ValueError):
            return
        tickets = event.get('tickets') or []
        tickets = [t for t in tickets if not t.get('cancelled')]
        # Fix: compare against the converted integer; the raw
        # event['number_of_tickets'] may be a string, and in Python 3
        # comparing int to str raises a TypeError.
        if len(tickets) >= number_of_tickets:
            raise InputException('no more tickets available')

    def _check_sales_datetime(self, event):
        """Raise an InputException if ticket sales are not open right now.

        Admins are never limited.

        :param event: the event to check
        :type event: dict"""
        if self.has_permission('admin|all'):
            return
        begin_date = event.get('ticket_sales_begin_date')
        begin_time = event.get('ticket_sales_begin_time')
        end_date = event.get('ticket_sales_end_date')
        end_time = event.get('ticket_sales_end_time')
        utc = dateutil.tz.tzutc()
        is_dst = time.daylight and time.localtime().tm_isdst > 0
        utc_offset = - (time.altzone if is_dst else time.timezone)
        if begin_date is None:
            # Default: sales begin at the beginning of today.
            begin_date = datetime.datetime.now(tz=utc).replace(hour=0, minute=0, second=0, microsecond=0)
        else:
            begin_date = dateutil.parser.parse(begin_date)
            # Compensate UTC and DST offset, that otherwise would be added 2 times (one for date, one for time)
            begin_date = begin_date + datetime.timedelta(seconds=utc_offset)
        if begin_time is None:
            begin_time_h = 0
            begin_time_m = 0
        else:
            begin_time = dateutil.parser.parse(begin_time)
            begin_time_h = begin_time.hour
            begin_time_m = begin_time.minute
        now = datetime.datetime.now(tz=utc)
        begin_datetime = begin_date + datetime.timedelta(hours=begin_time_h, minutes=begin_time_m)
        if now < begin_datetime:
            raise InputException('ticket sales not yet started')
        if end_date is None:
            end_date = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=utc)
        else:
            end_date = dateutil.parser.parse(end_date)
            end_date = end_date + datetime.timedelta(seconds=utc_offset)
        if end_time is None:
            end_time = end_date
            end_time_h = 23
            end_time_m = 59
        else:
            end_time = dateutil.parser.parse(end_time, yearfirst=True)
            end_time_h = end_time.hour
            end_time_m = end_time.minute
        # The extra minute closes the interval at the end of the last minute.
        end_datetime = end_date + datetime.timedelta(hours=end_time_h, minutes=end_time_m+1)
        if now > end_datetime:
            raise InputException('ticket sales has ended')

    def handle_post_tickets(self, id_, resource_id, data):
        """Add a new ticket to an event; runs the create_ticket_in_event triggers."""
        event = self.db.query('events', {'_id': id_})[0]
        self._check_sales_datetime(event)
        self._check_number_of_tickets(event)
        uuid, arguments = self.uuid_arguments
        self._clean_dict(data)
        data['seq'] = self.get_next_seq('event_%s_tickets' % id_)
        data['seq_hex'] = '%06X' % data['seq']
        data['_id'] = ticket_id = self.gen_id()
        ret = {'action': 'add', 'ticket': data, 'uuid': uuid}
        merged, doc = self.db.update('events',
                                     {'_id': id_},
                                     {'tickets': data},
                                     operation='appendUnique',
                                     create=False)
        if doc:
            self.send_ws_message('event/%s/tickets/updates' % id_, json.dumps(ret))
            ticket = self._get_ticket_data(ticket_id, doc.get('tickets') or [])
            env = dict(ticket)
            # The misspelled TICKED_ID is kept for backward compatibility
            # with existing trigger scripts; TICKET_ID is the correct name.
            env.update({'PERSON_ID': ticket_id, 'TICKET_ID': ticket_id, 'TICKED_ID': ticket_id,
                        'EVENT_ID': id_,
                        'EVENT_TITLE': doc.get('title', ''), 'WEB_USER': self.current_user_info.get('username', ''),
                        'WEB_REMOTE_IP': self.request.remote_ip})
            stdin_data = {'new': ticket,
                          'event': doc,
                          'merged': merged
                         }
            self.run_triggers('create_ticket_in_event', stdin_data=stdin_data, env=env)
        return ret

    def handle_put_tickets(self, id_, ticket_id, data):
        # Update an existing entry for a ticket registered at this event.
        self._clean_dict(data)
        uuid, arguments = self.uuid_arguments
        query = dict([('tickets.%s' % k, v) for k, v in arguments.items()])
        query['_id'] = id_
        if ticket_id is not None:
            query['tickets._id'] = ticket_id
            ticket_query = {'_id': ticket_id}
        else:
            ticket_query = self.arguments
        old_ticket_data = {}
        current_event = self.db.query(self.collection, query)
        if current_event:
            current_event = current_event[0]
        else:
            current_event = {}
        self._check_sales_datetime(current_event)
        tickets = current_event.get('tickets') or []
        old_ticket_data = self._get_ticket_data(ticket_query, tickets)
        # We have changed the "cancelled" status of a ticket to False; check if we still have a ticket available
        if 'number_of_tickets' in current_event and old_ticket_data.get('cancelled') and not data.get('cancelled'):
            self._check_number_of_tickets(current_event)
        merged, doc = self.db.update('events', query,
                                     data, updateList='tickets', create=False)
        new_ticket_data = self._get_ticket_data(ticket_query,
                                                doc.get('tickets') or [])
        env = dict(new_ticket_data)
        # always takes the ticket_id from the new ticket
        ticket_id = str(new_ticket_data.get('_id'))
        # The misspelled TICKED_ID is kept for backward compatibility
        # with existing trigger scripts; TICKET_ID is the correct name.
        env.update({'PERSON_ID': ticket_id, 'TICKET_ID': ticket_id, 'TICKED_ID': ticket_id,
                    'EVENT_ID': id_,
                    'EVENT_TITLE': doc.get('title', ''), 'WEB_USER': self.current_user_info.get('username', ''),
                    'WEB_REMOTE_IP': self.request.remote_ip})
        stdin_data = {'old': old_ticket_data,
                      'new': new_ticket_data,
                      'event': doc,
                      'merged': merged
                     }
        self.run_triggers('update_ticket_in_event', stdin_data=stdin_data, env=env)
        # Run the 'attends' triggers only when the attendance flag flips to True.
        if old_ticket_data and old_ticket_data.get('attended') != new_ticket_data.get('attended'):
            if new_ticket_data.get('attended'):
                self.run_triggers('attends', stdin_data=stdin_data, env=env)
        ret = {'action': 'update', '_id': ticket_id, 'ticket': new_ticket_data,
               'uuid': uuid, 'username': self.current_user_info.get('username', '')}
        if old_ticket_data != new_ticket_data:
            self.send_ws_message('event/%s/tickets/updates' % id_, json.dumps(ret))
        return ret

    def handle_delete_tickets(self, id_, ticket_id):
        # Remove a specific ticket from the list of tickets registered at this event.
        uuid, arguments = self.uuid_arguments
        doc = self.db.query('events',
                            {'_id': id_, 'tickets._id': ticket_id})
        ret = {'action': 'delete', '_id': ticket_id, 'uuid': uuid}
        if doc:
            ticket = self._get_ticket_data(ticket_id, doc[0].get('tickets') or [])
            merged, rdoc = self.db.update('events',
                                          {'_id': id_},
                                          {'tickets': {'_id': ticket_id}},
                                          operation='delete',
                                          create=False)
            self.send_ws_message('event/%s/tickets/updates' % id_, json.dumps(ret))
            env = dict(ticket)
            # The misspelled TICKED_ID is kept for backward compatibility
            # with existing trigger scripts; TICKET_ID is the correct name.
            env.update({'PERSON_ID': ticket_id, 'TICKET_ID': ticket_id, 'TICKED_ID': ticket_id,
                        'EVENT_ID': id_,
                        'EVENT_TITLE': rdoc.get('title', ''), 'WEB_USER': self.current_user_info.get('username', ''),
                        'WEB_REMOTE_IP': self.request.remote_ip})
            stdin_data = {'old': ticket,
                          'event': rdoc,
                          'merged': merged
                         }
            self.run_triggers('delete_ticket_in_event', stdin_data=stdin_data, env=env)
        return ret
class UsersHandler(CollectionHandler):
    """Handle requests for Users."""
    document = 'user'
    collection = 'users'

    def filter_get(self, data):
        """Hide the password and add the tickets created by this user."""
        if 'password' in data:
            del data['password']
        if '_id' in data:
            # Also add a 'tickets' list with all the tickets created by this user
            tickets = []
            events = self.db.query('events', {'tickets.created_by': data['_id']})
            for event in events:
                event_title = event.get('title') or ''
                event_id = str(event.get('_id'))
                evt_tickets = self._filter_results(event.get('tickets') or [], {'created_by': data['_id']})
                for evt_ticket in evt_tickets:
                    evt_ticket['event_title'] = event_title
                    evt_ticket['event_id'] = event_id
                tickets.extend(evt_tickets)
            data['tickets'] = tickets
        return data

    def filter_get_all(self, data):
        """Hide the password of every returned user."""
        if 'users' not in data:
            return data
        for user in data['users']:
            if 'password' in user:
                del user['password']
        return data

    @gen.coroutine
    @authenticated
    def get(self, id_=None, resource=None, resource_id=None, acl=True, **kwargs):
        # Users are always allowed to read their own data.
        if id_ is not None:
            if (self.has_permission('user|read') or self.current_user == id_):
                acl = False
        super(UsersHandler, self).get(id_, resource, resource_id, acl=acl, **kwargs)

    def filter_input_post_all(self, data):
        """Validate a new user registration.

        :raises InputException: if username or password is missing, or the
            username is already taken."""
        username = (data.get('username') or '').strip()
        password = (data.get('password') or '').strip()
        email = (data.get('email') or '').strip()
        if not (username and password):
            raise InputException('missing username or password')
        res = self.db.query('users', {'username': username})
        if res:
            raise InputException('username already exists')
        return {'username': username, 'password': utils.hash_password(password),
                'email': email, '_id': self.gen_id()}

    def filter_input_put(self, data):
        """Filter an update of a user, handling password changes and the
        isAdmin flag; protected keys are stripped."""
        old_pwd = data.get('old_password')
        new_pwd = data.get('new_password')
        if old_pwd is not None:
            del data['old_password']
        if new_pwd is not None:
            del data['new_password']
            # Fix: verify authorization and hash the password only when a new
            # password was actually submitted; previously this code always
            # ran, hashing a None password on unrelated updates.
            authorized, user = self.user_authorized(data['username'], old_pwd)
            if not (self.has_permission('user|update') or (authorized and
                    self.current_user_info.get('username') == data['username'])):
                raise InputException('not authorized to change password')
            data['password'] = utils.hash_password(new_pwd)
        if '_id' in data:
            del data['_id']
        if 'username' in data:
            del data['username']
        if not self.has_permission('admin|all'):
            if 'permissions' in data:
                del data['permissions']
        else:
            # Translate the isAdmin flag into the admin|all permission.
            if 'isAdmin' in data:
                if 'permissions' not in data:
                    data['permissions'] = []
                if 'admin|all' in data['permissions'] and not data['isAdmin']:
                    data['permissions'].remove('admin|all')
                elif 'admin|all' not in data['permissions'] and data['isAdmin']:
                    data['permissions'].append('admin|all')
                del data['isAdmin']
        return data

    @gen.coroutine
    @authenticated
    def put(self, id_=None, resource=None, resource_id=None, **kwargs):
        # Users are always allowed to update their own data.
        if id_ is None:
            return self.build_error(status=404, message='unable to access the resource')
        if not (self.has_permission('user|update') or self.current_user == id_):
            return self.build_error(status=401, message='insufficient permissions: user|update or current user')
        super(UsersHandler, self).put(id_, resource, resource_id, **kwargs)
class EbCSVImportPersonsHandler(BaseHandler):
    """Importer for CSV files exported from Eventbrite."""
    # Map of Eventbrite CSV column names (both the Italian and the English
    # export formats) to our ticket keys.
    csvRemap = {
        'Nome evento': 'event_title',
        'ID evento': 'event_id',
        'N. codice a barre': 'ebqrcode',
        'Cognome acquirente': 'surname',
        'Nome acquirente': 'name',
        'E-mail acquirente': 'email',
        'Cognome': 'surname',
        'Nome': 'name',
        'E-mail': 'email',
        'Indirizzo e-mail': 'email',
        'Tipologia biglietto': 'ticket_kind',
        'Data partecipazione': 'attending_datetime',
        'Data check-in': 'checkin_datetime',
        'Ordine n.': 'order_nr',
        'ID ordine': 'order_nr',
        'Titolo professionale': 'job title',
        'Azienda': 'company',
        'Prefisso': 'name_title',
        'Prefisso (Sig., Sig.ra, ecc.)': 'name title',
        'Order #': 'order_nr',
        'Prefix': 'name title',
        'First Name': 'name',
        'Last Name': 'surname',
        'Suffix': 'name suffix',
        'Email': 'email',
        'Attendee #': 'attendee_nr',
        'Barcode #': 'ebqrcode',
        'Company': 'company'
    }

    @gen.coroutine
    @authenticated
    def post(self, **kwargs):
        """Import a CSV list of persons as tickets of an existing event.

        The target event is given with the 'targetEvent' body argument;
        entries with the same (name, surname, email) triple as an existing
        ticket are skipped.  Writes a summary of the import as reply."""
        event_handler = EventsHandler(self.application, self.request)
        event_handler.db = self.db
        event_handler.logger = self.logger
        event_id = None
        try:
            event_id = self.get_body_argument('targetEvent')
        except:
            pass
        if event_id is None:
            return self.build_error('invalid event')
        reply = dict(total=0, valid=0, merged=0, new_in_event=0)
        event_details = event_handler.db.query('events', {'_id': event_id})
        if not event_details:
            return self.build_error('invalid event')
        # Collect a duplicate-detection key for every existing ticket.
        all_emails = set()
        for ticket in (event_details[0].get('tickets') or []):
            all_emails.add('%s_%s_%s' % (ticket.get('name'), ticket.get('surname'), ticket.get('email')))
        for fieldname, contents in self.request.files.items():
            for content in contents:
                filename = content['filename']
                # NOTE(review): assumes utils.csvParse returns (stats, rows) —
                # stats having at least a 'total' key; confirm in utils.
                parseStats, persons = utils.csvParse(content['body'], remap=self.csvRemap)
                reply['total'] += parseStats['total']
                for person in persons:
                    if not person:
                        continue
                    reply['valid'] += 1
                    person['attended'] = False
                    person['from_file'] = filename
                    duplicate_check = '%s_%s_%s' % (person.get('name'), person.get('surname'), person.get('email'))
                    if duplicate_check in all_emails:
                        continue
                    all_emails.add(duplicate_check)
                    event_handler.handle_post_tickets(event_id, None, person)
                    reply['new_in_event'] += 1
        self.write(reply)
class SettingsHandler(BaseHandler):
    """Handle requests for Settings."""
    @gen.coroutine
    @authenticated
    def get(self, **kwargs):
        # Filter the settings with the (boolean-converted) query arguments.
        self.write({'settings': self.db.query('settings', self.arguments_tobool())})
class InfoHandler(BaseHandler):
    """Handle requests for information about the logged in user."""
    @gen.coroutine
    def get(self, **kwargs):
        # Include user information only when somebody is logged in.
        user_info = self.current_user_info
        info = {'user': user_info} if user_info else {}
        info['authentication_required'] = self.authentication
        self.write({'info': info})
class WebSocketEventUpdatesHandler(tornado.websocket.WebSocketHandler):
    """Manage WebSockets."""
    def _clean_url(self, url):
        # Collapse duplicated slashes and strip the query string.
        url = re_slashes.sub('/', url)
        ridx = url.rfind('?')
        return url if ridx == -1 else url[:ridx]

    def open(self, event_id, *args, **kwargs):
        # Register this client under the cleaned URL, keyed by its UUID.
        try:
            self.uuid = self.get_argument('uuid')
        except:
            self.uuid = None
        url = self._clean_url(self.request.uri)
        logging.debug('WebSocketEventUpdatesHandler.on_open event_id:%s url:%s' % (event_id, url))
        clients = _ws_clients.setdefault(url, {})
        if self.uuid and self.uuid not in clients:
            clients[self.uuid] = self
        logging.debug('WebSocketEventUpdatesHandler.on_open %s clients connected' % len(clients))

    def on_message(self, message):
        # Broadcast the message to every client registered for this URL,
        # dropping the ones whose connection is gone.
        url = self._clean_url(self.request.uri)
        logging.debug('WebSocketEventUpdatesHandler.on_message url:%s' % url)
        count = 0
        stale = set()
        for uuid, client in _ws_clients.get(url, {}).items():
            try:
                client.write_message(message)
            except:
                stale.add(uuid)
            else:
                count += 1
        for uuid in stale:
            _ws_clients[url].pop(uuid, None)
        logging.debug('WebSocketEventUpdatesHandler.on_message sent message to %d clients' % count)
class LoginHandler(RootHandler):
    """Handle user authentication requests."""
    @gen.coroutine
    def get(self, **kwargs):
        # show the login page; API calls receive a plain 401 instead
        if self.is_api():
            self.set_status(401)
            self.write({'error': True,
                        'message': 'authentication required'})

    @gen.coroutine
    def post(self, *args, **kwargs):
        # Authenticate a user; credentials are accepted both as form
        # arguments and as a JSON body.
        try:
            password = self.get_body_argument('password')
            username = self.get_body_argument('username')
        except tornado.web.MissingArgumentError:
            data = escape.json_decode(self.request.body or '{}')
            username = data.get('username')
            password = data.get('password')
        if not (username and password):
            self.set_status(401)
            self.write({'error': True, 'message': 'missing username or password'})
            return
        authorized, user = self.user_authorized(username, password)
        if not (authorized and 'username' in user and '_id' in user):
            logging.info('login failed for user %s' % username)
            self.set_status(401)
            self.write({'error': True, 'message': 'wrong username and password'})
            return
        id_ = str(user['_id'])
        username = user['username']
        logging.info('successful login for user %s (id: %s)' % (username, id_))
        self.set_secure_cookie("user", id_)
        self.write({'error': False, 'message': 'successful login'})
class LogoutHandler(BaseHandler):
    """Handle user logout requests."""
    @gen.coroutine
    def get(self, **kwargs):
        # log the user out: drop the cache entry and clear the secure cookie
        logging.info('logout')
        self.logout()
        self.write({'error': False, 'message': 'logged out'})
def run():
    """Run the Tornado web application."""
    # command line arguments; can also be written in a configuration file,
    # specified with the --config argument.
    define("port", default=5242, help="run on the given port", type=int)
    define("address", default='', help="bind the server at the given address", type=str)
    define("data_dir", default=os.path.join(os.path.dirname(__file__), "data"),
           help="specify the directory used to store the data")
    define("ssl_cert", default=os.path.join(os.path.dirname(__file__), 'ssl', 'eventman_cert.pem'),
           help="specify the SSL certificate to use for secure connections")
    define("ssl_key", default=os.path.join(os.path.dirname(__file__), 'ssl', 'eventman_key.pem'),
           help="specify the SSL private key to use for secure connections")
    define("mongo_url", default=None,
           help="URL to MongoDB server", type=str)
    define("db_name", default='eventman',
           help="Name of the MongoDB database to use", type=str)
    define("authentication", default=False, help="if set to true, authentication is required")
    define("debug", default=False, help="run in debug mode")
    define("config", help="read configuration file",
           callback=lambda path: tornado.options.parse_config_file(path, final=False))
    tornado.options.parse_command_line()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if options.debug:
        logger.setLevel(logging.DEBUG)
    # SSL is enabled only when both the key and the certificate files exist.
    ssl_options = {}
    if os.path.isfile(options.ssl_key) and os.path.isfile(options.ssl_cert):
        ssl_options = dict(certfile=options.ssl_cert, keyfile=options.ssl_key)
    # database backend connector
    db_connector = monco.Monco(url=options.mongo_url, dbName=options.db_name)
    init_params = dict(db=db_connector, data_dir=options.data_dir, listen_port=options.port,
                       authentication=options.authentication, logger=logger, ssl_options=ssl_options)
    # If not present, we store a user 'admin' with password 'eventman' into the database.
    if not db_connector.query('users', {'username': 'admin'}):
        db_connector.add('users',
                         {'username': 'admin', 'password': utils.hash_password('eventman'),
                          'permissions': ['admin|all']})
    # If present, use the cookie_secret stored into the database.
    cookie_secret = db_connector.query('settings', {'setting': 'server_cookie_secret'})
    if cookie_secret:
        cookie_secret = cookie_secret[0]['cookie_secret']
    else:
        # the salt guarantees its uniqueness
        cookie_secret = utils.hash_password('__COOKIE_SECRET__')
        db_connector.add('settings',
                         {'setting': 'server_cookie_secret', 'cookie_secret': cookie_secret})
    # URL routing; every API endpoint is also reachable under /v{API_VERSION}/.
    _ws_handler = (r"/ws/+event/+(?P<event_id>[\w\d_-]+)/+tickets/+updates/?", WebSocketEventUpdatesHandler)
    _events_path = r"/events/?(?P<id_>[\w\d_-]+)?/?(?P<resource>[\w\d_-]+)?/?(?P<resource_id>[\w\d_-]+)?"
    _users_path = r"/users/?(?P<id_>[\w\d_-]+)?/?(?P<resource>[\w\d_-]+)?/?(?P<resource_id>[\w\d_-]+)?"
    application = tornado.web.Application([
            (_events_path, EventsHandler, init_params),
            (r'/v%s%s' % (API_VERSION, _events_path), EventsHandler, init_params),
            (_users_path, UsersHandler, init_params),
            (r'/v%s%s' % (API_VERSION, _users_path), UsersHandler, init_params),
            (r"/(?:index.html)?", RootHandler, init_params),
            (r"/ebcsvpersons", EbCSVImportPersonsHandler, init_params),
            (r"/settings", SettingsHandler, init_params),
            (r"/info", InfoHandler, init_params),
            _ws_handler,
            (r'/login', LoginHandler, init_params),
            (r'/v%s/login' % API_VERSION, LoginHandler, init_params),
            (r'/logout', LogoutHandler),
            (r'/v%s/logout' % API_VERSION, LogoutHandler),
            (r'/(.*)', tornado.web.StaticFileHandler, {"path": "angular_app"})
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        cookie_secret=cookie_secret,
        login_url='/login',
        debug=options.debug)
    http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options or None)
    logger.info('Start serving on %s://%s:%d', 'https' if ssl_options else 'http',
                options.address if options.address else '127.0.0.1',
                options.port)
    http_server.listen(options.port, options.address)
    # Also listen on options.port+1 for our local ws connection.
    ws_application = tornado.web.Application([_ws_handler], debug=options.debug)
    ws_http_server = tornado.httpserver.HTTPServer(ws_application)
    ws_http_server.listen(options.port+1, address='127.0.0.1')
    logger.debug('Starting WebSocket on ws://127.0.0.1:%d', options.port+1)
    tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
    # Run the server until interrupted (CTRL-C).
    try:
        run()
    except KeyboardInterrupt:
        print('Stop server')
Improve collection of modification time/author information.
#!/usr/bin/env python3
"""EventMan(ager)
Your friendly manager of attendees at an event.
Copyright 2015-2017 Davide Alberani <da@erlug.linux.it>
RaspiBO <info@raspibo.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import datetime
import functools
import glob
import json
import logging
import os
import random
import re
import string
import time

import dateutil.parser
import dateutil.tz
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from tornado import gen, escape, process
from tornado.options import define, options

import monco
import utils
# Encoding used for values passed to trigger subprocesses.
ENCODING = 'utf-8'
# Maximum lifetime (in seconds) of a trigger subprocess.
PROCESS_TIMEOUT = 60
API_VERSION = '1.0'
# Characters not allowed in environment variable names.
re_env_key = re.compile('[^A-Z_]+')
re_slashes = re.compile(r'//+')
# Keep track of WebSocket connections.
_ws_clients = {}
def authenticated(method):
    """Decorator to handle forced authentication.

    Falls back to the plain method when authentication is disabled, and
    redirects unauthenticated API calls to the versioned login URL.

    :param method: the handler method to wrap
    :returns: the wrapped method"""
    original_wrapper = tornado.web.authenticated(method)

    # Fix: use functools.wraps directly instead of reaching it through
    # tornado.web's namespace (tornado.web.functools is an implementation
    # detail and not a public API).
    @functools.wraps(method)
    def my_wrapper(self, *args, **kwargs):
        # If no authentication was required from the command line or config file.
        if not self.authentication:
            return method(self, *args, **kwargs)
        # unauthenticated API calls get redirected to /v1.0/[...]
        if self.is_api() and not self.current_user:
            self.redirect('/v%s%s' % (API_VERSION, self.get_login_url()))
            return
        return original_wrapper(self, *args, **kwargs)
    return my_wrapper
class BaseException(Exception):
    """Base class for EventMan custom exceptions.

    NOTE: the name shadows the Python builtin BaseException; it is kept
    unchanged for backward compatibility with existing callers.

    :param message: text message
    :type message: str
    :param status: numeric http status code
    :type status: int"""
    def __init__(self, message, status=400):
        self.message = message
        self.status = status
        super(BaseException, self).__init__(message)
class InputException(BaseException):
    """Exception raised by errors in input handling."""
class BaseHandler(tornado.web.RequestHandler):
    """Base class for request handlers."""
    # Default permissions, granted also to unauthenticated users.
    permissions = {
        'event|read': True,
        'event:tickets|read': True,
        'event:tickets|create': True,
        'event:tickets|update': True,
        'event:tickets-all|create': True,
        'events|read': True,
        'users|create': True
    }
    # Cache currently connected users.
    _users_cache = {}

    # A property to access the first value of each argument.
    arguments = property(lambda self: dict([(k, v[0].decode('utf-8'))
                                            for k, v in self.request.arguments.items()]))

    # Textual values considered equivalent to True or False.
    _bool_convert = {
        '0': False,
        'n': False,
        'f': False,
        'no': False,
        'off': False,
        'false': False,
        '1': True,
        'y': True,
        't': True,
        'on': True,
        'yes': True,
        'true': True
    }

    # Matches the salt and hash parts of a stored password.
    _re_split_salt = re.compile(r'\$(?P<salt>.+)\$(?P<hash>.+)')

    # A property to access both the UUID and the clean arguments.
    @property
    def uuid_arguments(self):
        """Return the client UUID (if any) and the remaining arguments.

        :returns: tuple like (uuid_or_None, arguments_dict)
        :rtype: tuple"""
        uuid = None
        arguments = self.arguments
        if 'uuid' in arguments:
            uuid = arguments['uuid']
            del arguments['uuid']
        return uuid, arguments

    def write_error(self, status_code, **kwargs):
        """Default error handler."""
        # Our custom exceptions carry their own status and message.
        if isinstance(kwargs.get('exc_info', (None, None))[1], BaseException):
            exc = kwargs['exc_info'][1]
            status_code = exc.status
            message = exc.message
        else:
            message = 'internal error'
        self.build_error(message, status=status_code)

    def is_api(self):
        """Return True if the path is from an API call."""
        return self.request.path.startswith('/v%s' % API_VERSION)

    def tobool(self, obj):
        """Convert some textual values to boolean."""
        if isinstance(obj, (list, tuple)):
            obj = obj[0]
        if isinstance(obj, str):
            obj = obj.lower()
        return self._bool_convert.get(obj, obj)

    def arguments_tobool(self):
        """Return a dictionary of arguments, converted to booleans where possible."""
        return dict([(k, self.tobool(v)) for k, v in self.arguments.items()])

    def initialize(self, **kwargs):
        """Add every passed (key, value) as attributes of the instance."""
        for key, value in kwargs.items():
            setattr(self, key, value)

    @property
    def current_user(self):
        """Retrieve current user name from the secure cookie."""
        current_user = self.get_secure_cookie("user")
        if isinstance(current_user, bytes):
            current_user = current_user.decode('utf-8')
        return current_user

    @property
    def current_user_info(self):
        """Information about the current user, including their permissions."""
        current_user = self.current_user
        if current_user in self._users_cache:
            return self._users_cache[current_user]
        # Start from the default permissions granted to everybody.
        permissions = set([k for (k, v) in self.permissions.items() if v is True])
        user_info = {'permissions': permissions}
        if current_user:
            user_info['_id'] = current_user
            user = self.db.getOne('users', {'_id': current_user})
            if user:
                user_info = user
                permissions.update(set(user.get('permissions') or []))
                user_info['permissions'] = permissions
                user_info['isRegistered'] = True
        self._users_cache[current_user] = user_info
        return user_info

    def add_access_info(self, doc):
        """Add created/updated by/at to a document (modified in place and returned).

        :param doc: the doc to be updated
        :type doc: dict
        :returns: the updated document
        :rtype: dict"""
        user_id = self.current_user
        # NOTE(review): naive UTC timestamps; switching to timezone-aware
        # values would change what is stored in the database — left as is.
        now = datetime.datetime.utcnow()
        if 'created_by' not in doc:
            doc['created_by'] = user_id
        if 'created_at' not in doc:
            doc['created_at'] = now
        doc['updated_by'] = user_id
        doc['updated_at'] = now
        return doc

    def has_permission(self, permission):
        """Check permissions of the current user.

        :param permission: the permission to check
        :type permission: str

        :returns: True if the user is allowed to perform the action or False
        :rtype: bool
        """
        user_info = self.current_user_info or {}
        user_permissions = user_info.get('permissions') or []
        global_permission = '%s|all' % permission.split('|')[0]
        if 'admin|all' in user_permissions or global_permission in user_permissions or permission in user_permissions:
            return True
        collection_permission = self.permissions.get(permission)
        if isinstance(collection_permission, bool):
            return collection_permission
        # Fix: collections.Callable was removed from the collections module
        # in Python 3.10; the callable() builtin performs the same check.
        if callable(collection_permission):
            return collection_permission(permission)
        return False

    def user_authorized(self, username, password):
        """Check if a combination of username/password is valid.

        :param username: username or email
        :type username: str
        :param password: password
        :type password: str

        :returns: tuple like (bool_user_is_authorized, dict_user_info)
        :rtype: tuple"""
        query = [{'username': username}, {'email': username}]
        res = self.db.query('users', query)
        if not res:
            return (False, {})
        user = res[0]
        db_password = user.get('password') or ''
        if not db_password:
            return (False, {})
        match = self._re_split_salt.match(db_password)
        if not match:
            return (False, {})
        salt = match.group('salt')
        # Hash the provided password with the stored salt and compare.
        if utils.hash_password(password, salt=salt) == db_password:
            return (True, user)
        return (False, {})

    def build_error(self, message='', status=400):
        """Build and write an error message.

        :param message: textual message
        :type message: str
        :param status: HTTP status code
        :type status: int
        """
        self.set_status(status)
        self.write({'error': True, 'message': message})

    def logout(self):
        """Remove the secure cookie used for authentication."""
        if self.current_user in self._users_cache:
            del self._users_cache[self.current_user]
        self.clear_cookie("user")
class RootHandler(BaseHandler):
    """Handler for the / path."""
    angular_app_path = os.path.join(os.path.dirname(__file__), "angular_app")

    @gen.coroutine
    def get(self, *args, **kwargs):
        # serve the ./angular_app/index.html file
        index_file = os.path.join(self.angular_app_path, "index.html")
        with open(index_file, 'r') as fd:
            self.write(fd.read())
class CollectionHandler(BaseHandler):
    """Base class for handlers that need to interact with the database backend.

    Introduce basic CRUD operations."""
    # set of documents we're managing (a collection in MongoDB or a table in a SQL database)
    document = None
    collection = None
    # set of documents used to store incremental sequences
    counters_collection = 'counters'
    # alphabet used for the random part of generated IDs
    _id_chars = string.ascii_lowercase + string.digits
def get_next_seq(self, seq):
    """Increment and return the new value of a ever-incrementing counter.

    :param seq: unique name of the sequence
    :type seq: str

    :returns: the next value of the sequence
    :rtype: int
    """
    # Lazily create the counter document on first use.
    if not self.db.query(self.counters_collection, {'seq_name': seq}):
        self.db.add(self.counters_collection, {'seq_name': seq, 'seq': 0})
    merged, doc = self.db.update(self.counters_collection,
                                 {'seq_name': seq},
                                 {'seq': 1},
                                 operation='increment')
    return doc.get('seq', 0)
def gen_id(self, seq='ids', random_alpha=32):
    """Generate a unique, non-guessable ID.

    :param seq: the scope of the ever-incrementing sequence
    :type seq: str
    :param random_alpha: number of random lowercase alphanumeric chars
    :type random_alpha: int

    :returns: unique ID
    :rtype: str"""
    t = str(time.time()).replace('.', '_')
    seq = str(self.get_next_seq(seq))
    # Fix: use a cryptographically strong source for the random part; the
    # default random PRNG is predictable, which would make the IDs guessable.
    rng = random.SystemRandom()
    rand = ''.join([rng.choice(self._id_chars) for x in range(random_alpha)])
    return '-'.join((t, seq, rand))
def _filter_results(self, results, params):
    """Filter a list using keys and values from a dictionary.

    :param results: the list to be filtered
    :type results: list
    :param params: a dictionary of items that must all be present in an original list item to be included in the return
    :type params: dict

    :returns: list of items that have all the keys with the same values as params
    :rtype: list"""
    if not params:
        return results
    params = monco.convert(params)
    # Keep only the items matching every (key, value) pair.
    return [item for item in results
            if all(key in item and item[key] == value
                   for key, value in params.items())]
def _clean_dict(self, data):
"""Filter a dictionary (in place) to remove unwanted keywords in db queries.
:param data: dictionary to clean
:type data: dict"""
if isinstance(data, dict):
for key in list(data.keys()):
if (isinstance(key, str) and key.startswith('$')) or key in ('_id', 'created_by', 'created_at',
'updated_by', 'updated_at', 'isRegistered'):
del data[key]
return data
def _dict2env(self, data):
"""Convert a dictionary into a form suitable to be passed as environment variables.
:param data: dictionary to convert
:type data: dict"""
ret = {}
for key, value in data.items():
if isinstance(value, (list, tuple, dict)):
continue
try:
key = key.upper().encode('ascii', 'ignore')
key = re_env_key.sub('', key)
if not key:
continue
ret[key] = str(value).encode(ENCODING)
except:
continue
return ret
def apply_filter(self, data, filter_name):
"""Apply a filter to the data.
:param data: the data to filter
:returns: the modified (possibly also in place) data
"""
filter_method = getattr(self, 'filter_%s' % filter_name, None)
if filter_method is not None:
data = filter_method(data)
return data
@gen.coroutine
@authenticated
def get(self, id_=None, resource=None, resource_id=None, acl=True, **kwargs):
if resource:
# Handle access to sub-resources.
permission = '%s:%s%s|read' % (self.document, resource, '-all' if resource_id is None else '')
if acl and not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
handler = getattr(self, 'handle_get_%s' % resource, None)
if handler and isinstance(handler, collections.Callable):
output = handler(id_, resource_id, **kwargs) or {}
output = self.apply_filter(output, 'get_%s' % resource)
self.write(output)
return
return self.build_error(status=404, message='unable to access resource: %s' % resource)
if id_ is not None:
# read a single document
permission = '%s|read' % self.document
if acl and not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
output = self.db.get(self.collection, id_)
output = self.apply_filter(output, 'get')
self.write(output)
else:
# return an object containing the list of all objects in the collection;
# e.g.: {'events': [{'_id': 'obj1-id, ...}, {'_id': 'obj2-id, ...}, ...]}
# Please, never return JSON lists that are not encapsulated into an object,
# to avoid XSS vulnerabilities.
permission = '%s|read' % self.collection
if acl and not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
output = {self.collection: self.db.query(self.collection, self.arguments)}
output = self.apply_filter(output, 'get_all')
self.write(output)
@gen.coroutine
@authenticated
def post(self, id_=None, resource=None, resource_id=None, **kwargs):
data = escape.json_decode(self.request.body or '{}')
self._clean_dict(data)
method = self.request.method.lower()
crud_method = 'create' if method == 'post' else 'update'
user_info = self.current_user_info
user_id = user_info.get('_id')
env = {}
if id_ is not None:
env['%s_ID' % self.document.upper()] = id_
self.add_access_info(data)
if resource:
permission = '%s:%s%s|%s' % (self.document, resource, '-all' if resource_id is None else '', crud_method)
if not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
# Handle access to sub-resources.
handler = getattr(self, 'handle_%s_%s' % (method, resource), None)
if handler and isinstance(handler, collections.Callable):
data = self.apply_filter(data, 'input_%s_%s' % (method, resource))
output = handler(id_, resource_id, data, **kwargs)
output = self.apply_filter(output, 'get_%s' % resource)
env['RESOURCE'] = resource
if resource_id:
env['%s_ID' % resource] = resource_id
self.run_triggers('%s_%s_%s' % ('create' if resource_id is None else 'update', self.document, resource),
stdin_data=output, env=env)
self.write(output)
return
return self.build_error(status=404, message='unable to access resource: %s' % resource)
if id_ is not None:
permission = '%s|%s' % (self.document, crud_method)
if not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
data = self.apply_filter(data, 'input_%s' % method)
merged, newData = self.db.update(self.collection, id_, data)
newData = self.apply_filter(newData, method)
self.run_triggers('update_%s' % self.document, stdin_data=newData, env=env)
else:
permission = '%s|%s' % (self.collection, crud_method)
if not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
data = self.apply_filter(data, 'input_%s_all' % method)
newData = self.db.add(self.collection, data, _id=self.gen_id())
newData = self.apply_filter(newData, '%s_all' % method)
self.run_triggers('create_%s' % self.document, stdin_data=newData, env=env)
self.write(newData)
# PUT (update an existing document) is handled by the POST (create a new document) method;
# in subclasses you can always separate sub-resources handlers like handle_post_tickets and handle_put_tickets
put = post
@gen.coroutine
@authenticated
def delete(self, id_=None, resource=None, resource_id=None, **kwargs):
env = {}
if id_ is not None:
env['%s_ID' % self.document.upper()] = id_
if resource:
# Handle access to sub-resources.
permission = '%s:%s%s|delete' % (self.document, resource, '-all' if resource_id is None else '')
if not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
method = getattr(self, 'handle_delete_%s' % resource, None)
if method and isinstance(method, collections.Callable):
output = method(id_, resource_id, **kwargs)
env['RESOURCE'] = resource
if resource_id:
env['%s_ID' % resource] = resource_id
self.run_triggers('delete_%s_%s' % (self.document, resource), stdin_data=env, env=env)
self.write(output)
return
return self.build_error(status=404, message='unable to access resource: %s' % resource)
if id_ is not None:
permission = '%s|delete' % self.document
if not self.has_permission(permission):
return self.build_error(status=401, message='insufficient permissions: %s' % permission)
howMany = self.db.delete(self.collection, id_)
env['DELETED_ITEMS'] = howMany
self.run_triggers('delete_%s' % self.document, stdin_data=env, env=env)
else:
self.write({'success': False})
self.write({'success': True})
def on_timeout(self, cmd, pipe):
"""Kill a process that is taking too long to complete."""
logging.debug('cmd %s is taking too long: killing it' % ' '.join(cmd))
try:
pipe.proc.kill()
except:
pass
def on_exit(self, returncode, cmd, pipe):
"""Callback executed when a subprocess execution is over."""
self.ioloop.remove_timeout(self.timeout)
logging.debug('cmd: %s returncode: %d' % (' '.join(cmd), returncode))
@gen.coroutine
def run_subprocess(self, cmd, stdin_data=None, env=None):
"""Execute the given action.
:param cmd: the command to be run with its command line arguments
:type cmd: list
:param stdin_data: data to be sent over stdin
:type stdin_data: str
:param env: environment of the process
:type env: dict
"""
self.ioloop = tornado.ioloop.IOLoop.instance()
processed_env = self._dict2env(env)
p = process.Subprocess(cmd, close_fds=True, stdin=process.Subprocess.STREAM,
stdout=process.Subprocess.STREAM, stderr=process.Subprocess.STREAM, env=processed_env)
p.set_exit_callback(lambda returncode: self.on_exit(returncode, cmd, p))
self.timeout = self.ioloop.add_timeout(datetime.timedelta(seconds=PROCESS_TIMEOUT),
lambda: self.on_timeout(cmd, p))
yield gen.Task(p.stdin.write, stdin_data or '')
p.stdin.close()
out, err = yield [gen.Task(p.stdout.read_until_close),
gen.Task(p.stderr.read_until_close)]
logging.debug('cmd: %s' % ' '.join(cmd))
logging.debug('cmd stdout: %s' % out)
logging.debug('cmd strerr: %s' % err)
raise gen.Return((out, err))
@gen.coroutine
def run_triggers(self, action, stdin_data=None, env=None):
"""Asynchronously execute triggers for the given action.
:param action: action name; scripts in directory ./data/triggers/{action}.d will be run
:type action: str
:param stdin_data: a python dictionary that will be serialized in JSON and sent to the process over stdin
:type stdin_data: dict
:param env: environment of the process
:type stdin_data: dict
"""
if not hasattr(self, 'data_dir'):
return
logging.debug('running triggers for action "%s"' % action)
stdin_data = stdin_data or {}
try:
stdin_data = json.dumps(stdin_data)
except:
stdin_data = '{}'
for script in glob.glob(os.path.join(self.data_dir, 'triggers', '%s.d' % action, '*')):
if not (os.path.isfile(script) and os.access(script, os.X_OK)):
continue
out, err = yield gen.Task(self.run_subprocess, [script], stdin_data, env)
def build_ws_url(self, path, proto='ws', host=None):
"""Return a WebSocket url from a path."""
try:
args = '?uuid=%s' % self.get_argument('uuid')
except:
args = ''
return 'ws://127.0.0.1:%s/ws/%s%s' % (self.listen_port + 1, path, args)
@gen.coroutine
def send_ws_message(self, path, message):
"""Send a WebSocket message to all the connected clients.
:param path: partial path used to build the WebSocket url
:type path: str
:param message: message to send
:type message: str
"""
try:
ws = yield tornado.websocket.websocket_connect(self.build_ws_url(path))
ws.write_message(message)
ws.close()
except Exception as e:
self.logger.error('Error yielding WebSocket message: %s', e)
class EventsHandler(CollectionHandler):
    """Handle requests for Events."""
    document = 'event'
    collection = 'events'
    def _mangle_event(self, event):
        # Some in-place changes to an event
        if 'tickets' in event:
            # Cancelled tickets do not count as sold.
            event['tickets_sold'] = len([t for t in event['tickets'] if not t.get('cancelled')])
            event['no_tickets_for_sale'] = False
            try:
                self._check_sales_datetime(event)
                self._check_number_of_tickets(event)
            except InputException:
                event['no_tickets_for_sale'] = True
            # Hide the ticket list from users not allowed to read it.
            if not self.has_permission('tickets-all|read'):
                event['tickets'] = []
        return event
    def filter_get(self, output):
        # Output filter applied when a single event is read.
        return self._mangle_event(output)
    def filter_get_all(self, output):
        # Output filter applied when the whole collection is read.
        for event in output.get('events') or []:
            self._mangle_event(event)
        return output
    def filter_input_post(self, data):
        # Auto-generate the group_id, if missing.
        if 'group_id' not in data:
            data['group_id'] = self.gen_id()
        return data
    filter_input_post_all = filter_input_post
    filter_input_put = filter_input_post
    def filter_input_post_tickets(self, data):
        # Avoid users to be able to auto-update their 'attendee' status.
        if not self.has_permission('event|update'):
            if 'attended' in data:
                del data['attended']
        self.add_access_info(data)
        return data
    filter_input_put_tickets = filter_input_post_tickets
    def handle_get_group_persons(self, id_, resource_id=None):
        """Return the persons registered at other events of the same group,
        excluding those already registered at this event (matched by email)."""
        persons = []
        this_query = {'_id': id_}
        # NOTE(review): raises IndexError when the event does not exist.
        this_event = self.db.query('events', this_query)[0]
        group_id = this_event.get('group_id')
        if group_id is None:
            return {'persons': persons}
        this_persons = [p for p in (this_event.get('tickets') or []) if not p.get('cancelled')]
        this_emails = [_f for _f in [p.get('email') for p in this_persons] if _f]
        all_query = {'group_id': group_id}
        events = self.db.query('events', all_query)
        for event in events:
            # Skip the current event itself.
            if id_ is not None and str(event.get('_id')) == id_:
                continue
            persons += [p for p in (event.get('tickets') or []) if p.get('email') and p.get('email') not in this_emails]
        return {'persons': persons}
    def _get_ticket_data(self, ticket_id_or_query, tickets):
        """Filter a list of tickets returning the first item with a given _id
        or which set of keys specified in a dictionary match their respective values."""
        for ticket in tickets:
            if isinstance(ticket_id_or_query, dict):
                if all(ticket.get(k) == v for k, v in ticket_id_or_query.items()):
                    return ticket
            else:
                if str(ticket.get('_id')) == ticket_id_or_query:
                    return ticket
        return {}
    def handle_get_tickets(self, id_, resource_id=None):
        # Return every ticket registered at this event, or the information
        # about a specific ticket.
        query = {'_id': id_}
        # NOTE(review): raises IndexError when the event does not exist.
        event = self.db.query('events', query)[0]
        if resource_id:
            return {'ticket': self._get_ticket_data(resource_id, event.get('tickets') or [])}
        tickets = self._filter_results(event.get('tickets') or [], self.arguments)
        return {'tickets': tickets}
    def _check_number_of_tickets(self, event):
        """Raise InputException when the event has no more tickets available."""
        # Admins can always register new tickets.
        if self.has_permission('admin|all'):
            return
        number_of_tickets = event.get('number_of_tickets')
        if number_of_tickets is None:
            return
        try:
            number_of_tickets = int(number_of_tickets)
        except ValueError:
            return
        tickets = event.get('tickets') or []
        tickets = [t for t in tickets if not t.get('cancelled')]
        # NOTE(review): this compares against the raw stored value instead of
        # the int() conversion computed above; if the value is stored as a
        # string this raises TypeError on Python 3 -- the local
        # number_of_tickets was probably meant here.
        if len(tickets) >= event['number_of_tickets']:
            raise InputException('no more tickets available')
    def _check_sales_datetime(self, event):
        """Raise InputException when now is outside the ticket sales window."""
        # Admins can always buy tickets.
        if self.has_permission('admin|all'):
            return
        begin_date = event.get('ticket_sales_begin_date')
        begin_time = event.get('ticket_sales_begin_time')
        end_date = event.get('ticket_sales_end_date')
        end_time = event.get('ticket_sales_end_time')
        utc = dateutil.tz.tzutc()
        # Current local offset from UTC, accounting for DST.
        is_dst = time.daylight and time.localtime().tm_isdst > 0
        utc_offset = - (time.altzone if is_dst else time.timezone)
        if begin_date is None:
            # Default: sales begin at midnight (UTC) of the current day.
            begin_date = datetime.datetime.now(tz=utc).replace(hour=0, minute=0, second=0, microsecond=0)
        else:
            begin_date = dateutil.parser.parse(begin_date)
            # Compensate UTC and DST offset, that otherwise would be added 2 times (one for date, one for time)
            begin_date = begin_date + datetime.timedelta(seconds=utc_offset)
        if begin_time is None:
            begin_time_h = 0
            begin_time_m = 0
        else:
            begin_time = dateutil.parser.parse(begin_time)
            begin_time_h = begin_time.hour
            begin_time_m = begin_time.minute
        now = datetime.datetime.now(tz=utc)
        begin_datetime = begin_date + datetime.timedelta(hours=begin_time_h, minutes=begin_time_m)
        if now < begin_datetime:
            raise InputException('ticket sales not yet started')
        if end_date is None:
            # Default: sales end today; the minutes+1 below makes the bound inclusive.
            end_date = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=utc)
        else:
            end_date = dateutil.parser.parse(end_date)
            end_date = end_date + datetime.timedelta(seconds=utc_offset)
        if end_time is None:
            end_time = end_date
            end_time_h = 23
            end_time_m = 59
        else:
            end_time = dateutil.parser.parse(end_time, yearfirst=True)
            end_time_h = end_time.hour
            end_time_m = end_time.minute
        end_datetime = end_date + datetime.timedelta(hours=end_time_h, minutes=end_time_m+1)
        if now > end_datetime:
            raise InputException('ticket sales has ended')
    def handle_post_tickets(self, id_, resource_id, data):
        """Register a new ticket at this event, notify WS clients and triggers."""
        # NOTE(review): raises IndexError when the event does not exist.
        event = self.db.query('events', {'_id': id_})[0]
        self._check_sales_datetime(event)
        self._check_number_of_tickets(event)
        uuid, arguments = self.uuid_arguments
        self._clean_dict(data)
        # Per-event incremental sequence, also exposed as a 6-digit hex string.
        data['seq'] = self.get_next_seq('event_%s_tickets' % id_)
        data['seq_hex'] = '%06X' % data['seq']
        data['_id'] = ticket_id = self.gen_id()
        self.add_access_info(data)
        ret = {'action': 'add', 'ticket': data, 'uuid': uuid}
        merged, doc = self.db.update('events',
                {'_id': id_},
                {'tickets': data},
                operation='appendUnique',
                create=False)
        if doc:
            # Broadcast the update to connected WebSocket clients.
            self.send_ws_message('event/%s/tickets/updates' % id_, json.dumps(ret))
            ticket = self._get_ticket_data(ticket_id, doc.get('tickets') or [])
            env = dict(ticket)
            # NOTE(review): 'TICKED_ID' looks like a typo for 'TICKET_ID', but
            # trigger scripts may depend on it; kept as-is.
            env.update({'PERSON_ID': ticket_id, 'TICKED_ID': ticket_id, 'EVENT_ID': id_,
                'EVENT_TITLE': doc.get('title', ''), 'WEB_USER': self.current_user_info.get('username', ''),
                'WEB_REMOTE_IP': self.request.remote_ip})
            stdin_data = {'new': ticket,
                    'event': doc,
                    'merged': merged
            }
            self.run_triggers('create_ticket_in_event', stdin_data=stdin_data, env=env)
        return ret
    def handle_put_tickets(self, id_, ticket_id, data):
        # Update an existing entry for a ticket registered at this event.
        self._clean_dict(data)
        uuid, arguments = self.uuid_arguments
        # Any extra query argument must also match the stored ticket.
        query = dict([('tickets.%s' % k, v) for k, v in arguments.items()])
        query['_id'] = id_
        if ticket_id is not None:
            query['tickets._id'] = ticket_id
            ticket_query = {'_id': ticket_id}
        else:
            ticket_query = self.arguments
        old_ticket_data = {}
        current_event = self.db.query(self.collection, query)
        if current_event:
            current_event = current_event[0]
        else:
            current_event = {}
        self._check_sales_datetime(current_event)
        tickets = current_event.get('tickets') or []
        old_ticket_data = self._get_ticket_data(ticket_query, tickets)
        # We have changed the "cancelled" status of a ticket to False; check if we still have a ticket available
        if 'number_of_tickets' in current_event and old_ticket_data.get('cancelled') and not data.get('cancelled'):
            self._check_number_of_tickets(current_event)
        self.add_access_info(data)
        merged, doc = self.db.update('events', query,
                data, updateList='tickets', create=False)
        new_ticket_data = self._get_ticket_data(ticket_query,
                doc.get('tickets') or [])
        env = dict(new_ticket_data)
        # always takes the ticket_id from the new ticket
        ticket_id = str(new_ticket_data.get('_id'))
        env.update({'PERSON_ID': ticket_id, 'TICKED_ID': ticket_id, 'EVENT_ID': id_,
            'EVENT_TITLE': doc.get('title', ''), 'WEB_USER': self.current_user_info.get('username', ''),
            'WEB_REMOTE_IP': self.request.remote_ip})
        stdin_data = {'old': old_ticket_data,
                'new': new_ticket_data,
                'event': doc,
                'merged': merged
        }
        self.run_triggers('update_ticket_in_event', stdin_data=stdin_data, env=env)
        # Fire the 'attends' trigger only when a ticket flips to attended.
        if old_ticket_data and old_ticket_data.get('attended') != new_ticket_data.get('attended'):
            if new_ticket_data.get('attended'):
                self.run_triggers('attends', stdin_data=stdin_data, env=env)
        ret = {'action': 'update', '_id': ticket_id, 'ticket': new_ticket_data,
               'uuid': uuid, 'username': self.current_user_info.get('username', '')}
        # Only broadcast when something actually changed.
        if old_ticket_data != new_ticket_data:
            self.send_ws_message('event/%s/tickets/updates' % id_, json.dumps(ret))
        return ret
    def handle_delete_tickets(self, id_, ticket_id):
        # Remove a specific ticket from the list of tickets registered at this event.
        uuid, arguments = self.uuid_arguments
        doc = self.db.query('events',
                {'_id': id_, 'tickets._id': ticket_id})
        ret = {'action': 'delete', '_id': ticket_id, 'uuid': uuid}
        if doc:
            # Snapshot the ticket before deleting it, for the trigger scripts.
            ticket = self._get_ticket_data(ticket_id, doc[0].get('tickets') or [])
            merged, rdoc = self.db.update('events',
                    {'_id': id_},
                    {'tickets': {'_id': ticket_id}},
                    operation='delete',
                    create=False)
            self.send_ws_message('event/%s/tickets/updates' % id_, json.dumps(ret))
            env = dict(ticket)
            env.update({'PERSON_ID': ticket_id, 'TICKED_ID': ticket_id, 'EVENT_ID': id_,
                'EVENT_TITLE': rdoc.get('title', ''), 'WEB_USER': self.current_user_info.get('username', ''),
                'WEB_REMOTE_IP': self.request.remote_ip})
            stdin_data = {'old': ticket,
                    'event': rdoc,
                    'merged': merged
            }
            self.run_triggers('delete_ticket_in_event', stdin_data=stdin_data, env=env)
        return ret
class UsersHandler(CollectionHandler):
    """Handle requests for Users."""
    document = 'user'
    collection = 'users'
    def filter_get(self, data):
        # Never leak the (hashed) password.
        if 'password' in data:
            del data['password']
        if '_id' in data:
            # Also add a 'tickets' list with all the tickets created by this user
            tickets = []
            events = self.db.query('events', {'tickets.created_by': data['_id']})
            for event in events:
                event_title = event.get('title') or ''
                event_id = str(event.get('_id'))
                evt_tickets = self._filter_results(event.get('tickets') or [], {'created_by': data['_id']})
                # Decorate each ticket with the event it belongs to.
                for evt_ticket in evt_tickets:
                    evt_ticket['event_title'] = event_title
                    evt_ticket['event_id'] = event_id
                tickets.extend(evt_tickets)
            data['tickets'] = tickets
        return data
    def filter_get_all(self, data):
        # Strip the password from every user in the collection listing.
        if 'users' not in data:
            return data
        for user in data['users']:
            if 'password' in user:
                del user['password']
        return data
    @gen.coroutine
    @authenticated
    def get(self, id_=None, resource=None, resource_id=None, acl=True, **kwargs):
        # A user can always read their own document, regardless of ACLs.
        if id_ is not None:
            if (self.has_permission('user|read') or self.current_user == id_):
                acl = False
        super(UsersHandler, self).get(id_, resource, resource_id, acl=acl, **kwargs)
    def filter_input_post_all(self, data):
        """Validate a new user registration; only username, password and email
        are kept, and the password is stored hashed."""
        username = (data.get('username') or '').strip()
        password = (data.get('password') or '').strip()
        email = (data.get('email') or '').strip()
        if not (username and password):
            raise InputException('missing username or password')
        res = self.db.query('users', {'username': username})
        if res:
            raise InputException('username already exists')
        return {'username': username, 'password': utils.hash_password(password),
                'email': email, '_id': self.gen_id()}
    def filter_input_put(self, data):
        """Validate an update of an existing user (password change, permissions)."""
        old_pwd = data.get('old_password')
        new_pwd = data.get('new_password')
        if old_pwd is not None:
            del data['old_password']
        if new_pwd is not None:
            del data['new_password']
        # NOTE(review): data['username'] is accessed unconditionally (KeyError
        # when absent) and the password is re-hashed even when new_password
        # was not sent -- confirm callers always provide both fields.
        authorized, user = self.user_authorized(data['username'], old_pwd)
        if not (self.has_permission('user|update') or (authorized and
                self.current_user_info.get('username') == data['username'])):
            raise InputException('not authorized to change password')
        data['password'] = utils.hash_password(new_pwd)
        # _id and username are immutable.
        if '_id' in data:
            del data['_id']
        if 'username' in data:
            del data['username']
        if not self.has_permission('admin|all'):
            # Non-admins must not change permissions.
            if 'permissions' in data:
                del data['permissions']
        else:
            # Translate the isAdmin flag into the admin|all permission.
            if 'isAdmin' in data:
                if not 'permissions' in data:
                    data['permissions'] = []
                if 'admin|all' in data['permissions'] and not data['isAdmin']:
                    data['permissions'].remove('admin|all')
                elif 'admin|all' not in data['permissions'] and data['isAdmin']:
                    data['permissions'].append('admin|all')
                del data['isAdmin']
        return data
    @gen.coroutine
    @authenticated
    def put(self, id_=None, resource=None, resource_id=None, **kwargs):
        # Users can only update their own document (unless they hold user|update).
        if id_ is None:
            return self.build_error(status=404, message='unable to access the resource')
        if not (self.has_permission('user|update') or self.current_user == id_):
            return self.build_error(status=401, message='insufficient permissions: user|update or current user')
        super(UsersHandler, self).put(id_, resource, resource_id, **kwargs)
class EbCSVImportPersonsHandler(BaseHandler):
    """Importer for CSV files exported from Eventbrite."""
    # Map Eventbrite CSV column headers (both Italian and English exports) to
    # our ticket attribute names.
    csvRemap = {
        'Nome evento': 'event_title',
        'ID evento': 'event_id',
        'N. codice a barre': 'ebqrcode',
        'Cognome acquirente': 'surname',
        'Nome acquirente': 'name',
        'E-mail acquirente': 'email',
        'Cognome': 'surname',
        'Nome': 'name',
        'E-mail': 'email',
        'Indirizzo e-mail': 'email',
        'Tipologia biglietto': 'ticket_kind',
        'Data partecipazione': 'attending_datetime',
        'Data check-in': 'checkin_datetime',
        'Ordine n.': 'order_nr',
        'ID ordine': 'order_nr',
        'Titolo professionale': 'job title',
        'Azienda': 'company',
        'Prefisso': 'name_title',
        'Prefisso (Sig., Sig.ra, ecc.)': 'name title',
        'Order #': 'order_nr',
        'Prefix': 'name title',
        'First Name': 'name',
        'Last Name': 'surname',
        'Suffix': 'name suffix',
        'Email': 'email',
        'Attendee #': 'attendee_nr',
        'Barcode #': 'ebqrcode',
        'Company': 'company'
    }
    @gen.coroutine
    @authenticated
    def post(self, **kwargs):
        # import a CSV list of persons
        # Reuse EventsHandler so imported persons go through the same code path
        # used when a ticket is added by hand.
        event_handler = EventsHandler(self.application, self.request)
        event_handler.db = self.db
        event_handler.logger = self.logger
        event_id = None
        try:
            event_id = self.get_body_argument('targetEvent')
        except:
            pass
        if event_id is None:
            # NOTE(review): build_error is called elsewhere with keyword
            # arguments (status=..., message=...); here the message is passed
            # as the first positional argument -- verify its signature.
            return self.build_error('invalid event')
        reply = dict(total=0, valid=0, merged=0, new_in_event=0)
        event_details = event_handler.db.query('events', {'_id': event_id})
        if not event_details:
            return self.build_error('invalid event')
        all_emails = set()
        #[x.get('email') for x in (event_details[0].get('tickets') or []) if x.get('email')])
        # Duplicate-detection key: name + surname + email.
        for ticket in (event_details[0].get('tickets') or []):
            all_emails.add('%s_%s_%s' % (ticket.get('name'), ticket.get('surname'), ticket.get('email')))
        for fieldname, contents in self.request.files.items():
            for content in contents:
                filename = content['filename']
                parseStats, persons = utils.csvParse(content['body'], remap=self.csvRemap)
                reply['total'] += parseStats['total']
                for person in persons:
                    if not person:
                        continue
                    reply['valid'] += 1
                    person['attended'] = False
                    person['from_file'] = filename
                    self.add_access_info(person)
                    duplicate_check = '%s_%s_%s' % (person.get('name'), person.get('surname'), person.get('email'))
                    if duplicate_check in all_emails:
                        continue
                    all_emails.add(duplicate_check)
                    event_handler.handle_post_tickets(event_id, None, person)
                    reply['new_in_event'] += 1
        self.write(reply)
class SettingsHandler(BaseHandler):
    """Expose the 'settings' collection over authenticated GET requests."""
    @gen.coroutine
    @authenticated
    def get(self, **kwargs):
        # Boolean-ify the query-string arguments and use them as the filter.
        filter_ = self.arguments_tobool()
        self.write({'settings': self.db.query('settings', filter_)})
class InfoHandler(BaseHandler):
    """Report details about the currently logged-in user."""
    @gen.coroutine
    def get(self, **kwargs):
        # Include the 'user' key only when somebody is actually logged in.
        current = self.current_user_info
        info = {'user': current} if current else {}
        info['authentication_required'] = self.authentication
        self.write({'info': info})
class WebSocketEventUpdatesHandler(tornado.websocket.WebSocketHandler):
    """Track connected WebSocket clients and fan incoming messages out to them."""
    def _clean_url(self, url):
        # Collapse duplicated slashes and strip any query string, so the same
        # logical endpoint always maps to the same _ws_clients key.
        collapsed = re_slashes.sub('/', url)
        qmark = collapsed.rfind('?')
        return collapsed if qmark == -1 else collapsed[:qmark]
    def open(self, event_id, *args, **kwargs):
        # Remember the client's uuid (if provided) so it can be addressed later.
        try:
            self.uuid = self.get_argument('uuid')
        except:
            self.uuid = None
        url = self._clean_url(self.request.uri)
        logging.debug('WebSocketEventUpdatesHandler.on_open event_id:%s url:%s' % (event_id, url))
        clients = _ws_clients.setdefault(url, {})
        if self.uuid and self.uuid not in clients:
            clients[self.uuid] = self
        logging.debug('WebSocketEventUpdatesHandler.on_open %s clients connected' % len(clients))
    def on_message(self, message):
        # Relay the message to every client registered for this endpoint,
        # dropping the ones whose connection is gone.
        url = self._clean_url(self.request.uri)
        logging.debug('WebSocketEventUpdatesHandler.on_message url:%s' % url)
        delivered = 0
        stale = set()
        for uuid, client in _ws_clients.get(url, {}).items():
            try:
                client.write_message(message)
            except:
                stale.add(uuid)
            else:
                delivered += 1
        for uuid in stale:
            try:
                del _ws_clients[url][uuid]
            except KeyError:
                pass
        logging.debug('WebSocketEventUpdatesHandler.on_message sent message to %d clients' % delivered)
class LoginHandler(RootHandler):
    """Handle user authentication requests."""
    @gen.coroutine
    def get(self, **kwargs):
        # show the login page
        # For API clients there is no page: just signal that auth is required.
        if self.is_api():
            self.set_status(401)
            self.write({'error': True,
                'message': 'authentication required'})
    @gen.coroutine
    def post(self, *args, **kwargs):
        # authenticate a user
        # Credentials may come as form fields or as a JSON body.
        try:
            password = self.get_body_argument('password')
            username = self.get_body_argument('username')
        except tornado.web.MissingArgumentError:
            data = escape.json_decode(self.request.body or '{}')
            username = data.get('username')
            password = data.get('password')
        if not (username and password):
            self.set_status(401)
            self.write({'error': True, 'message': 'missing username or password'})
            return
        authorized, user = self.user_authorized(username, password)
        if authorized and 'username' in user and '_id' in user:
            id_ = str(user['_id'])
            username = user['username']
            logging.info('successful login for user %s (id: %s)' % (username, id_))
            # The session is a signed cookie carrying the user id.
            self.set_secure_cookie("user", id_)
            self.write({'error': False, 'message': 'successful login'})
            return
        logging.info('login failed for user %s' % username)
        self.set_status(401)
        # NOTE(review): message likely means "wrong username or password";
        # left unchanged since clients may match on the exact text.
        self.write({'error': True, 'message': 'wrong username and password'})
class LogoutHandler(BaseHandler):
    """Terminate the current user's session."""
    @gen.coroutine
    def get(self, **kwargs):
        # Drop the session, then acknowledge with a JSON reply.
        logging.info('logout')
        self.logout()
        reply = {'error': False, 'message': 'logged out'}
        self.write(reply)
def run():
    """Run the Tornado web application."""
    # command line arguments; can also be written in a configuration file,
    # specified with the --config argument.
    define("port", default=5242, help="run on the given port", type=int)
    define("address", default='', help="bind the server at the given address", type=str)
    define("data_dir", default=os.path.join(os.path.dirname(__file__), "data"),
            help="specify the directory used to store the data")
    define("ssl_cert", default=os.path.join(os.path.dirname(__file__), 'ssl', 'eventman_cert.pem'),
            help="specify the SSL certificate to use for secure connections")
    define("ssl_key", default=os.path.join(os.path.dirname(__file__), 'ssl', 'eventman_key.pem'),
            help="specify the SSL private key to use for secure connections")
    define("mongo_url", default=None,
            help="URL to MongoDB server", type=str)
    define("db_name", default='eventman',
            help="Name of the MongoDB database to use", type=str)
    define("authentication", default=False, help="if set to true, authentication is required")
    define("debug", default=False, help="run in debug mode")
    define("config", help="read configuration file",
            callback=lambda path: tornado.options.parse_config_file(path, final=False))
    tornado.options.parse_command_line()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if options.debug:
        logger.setLevel(logging.DEBUG)
    # Enable SSL only when both the key and the certificate files exist.
    ssl_options = {}
    if os.path.isfile(options.ssl_key) and os.path.isfile(options.ssl_cert):
        ssl_options = dict(certfile=options.ssl_cert, keyfile=options.ssl_key)
    # database backend connector
    db_connector = monco.Monco(url=options.mongo_url, dbName=options.db_name)
    # Parameters passed to every request handler.
    init_params = dict(db=db_connector, data_dir=options.data_dir, listen_port=options.port,
            authentication=options.authentication, logger=logger, ssl_options=ssl_options)
    # If not present, we store a user 'admin' with password 'eventman' into the database.
    if not db_connector.query('users', {'username': 'admin'}):
        db_connector.add('users',
                {'username': 'admin', 'password': utils.hash_password('eventman'),
                 'permissions': ['admin|all']})
    # If present, use the cookie_secret stored into the database.
    cookie_secret = db_connector.query('settings', {'setting': 'server_cookie_secret'})
    if cookie_secret:
        cookie_secret = cookie_secret[0]['cookie_secret']
    else:
        # the salt guarantees its uniqueness
        cookie_secret = utils.hash_password('__COOKIE_SECRET__')
        db_connector.add('settings',
                {'setting': 'server_cookie_secret', 'cookie_secret': cookie_secret})
    _ws_handler = (r"/ws/+event/+(?P<event_id>[\w\d_-]+)/+tickets/+updates/?", WebSocketEventUpdatesHandler)
    _events_path = r"/events/?(?P<id_>[\w\d_-]+)?/?(?P<resource>[\w\d_-]+)?/?(?P<resource_id>[\w\d_-]+)?"
    _users_path = r"/users/?(?P<id_>[\w\d_-]+)?/?(?P<resource>[\w\d_-]+)?/?(?P<resource_id>[\w\d_-]+)?"
    # Every API endpoint is served both unversioned and under /v<API_VERSION>/.
    application = tornado.web.Application([
            (_events_path, EventsHandler, init_params),
            (r'/v%s%s' % (API_VERSION, _events_path), EventsHandler, init_params),
            (_users_path, UsersHandler, init_params),
            (r'/v%s%s' % (API_VERSION, _users_path), UsersHandler, init_params),
            (r"/(?:index.html)?", RootHandler, init_params),
            (r"/ebcsvpersons", EbCSVImportPersonsHandler, init_params),
            (r"/settings", SettingsHandler, init_params),
            (r"/info", InfoHandler, init_params),
            _ws_handler,
            (r'/login', LoginHandler, init_params),
            (r'/v%s/login' % API_VERSION, LoginHandler, init_params),
            (r'/logout', LogoutHandler),
            (r'/v%s/logout' % API_VERSION, LogoutHandler),
            (r'/(.*)', tornado.web.StaticFileHandler, {"path": "angular_app"})
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        cookie_secret=cookie_secret,
        login_url='/login',
        debug=options.debug)
    http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options or None)
    logger.info('Start serving on %s://%s:%d', 'https' if ssl_options else 'http',
            options.address if options.address else '127.0.0.1',
            options.port)
    http_server.listen(options.port, options.address)
    # Also listen on options.port+1 for our local ws connection.
    ws_application = tornado.web.Application([_ws_handler], debug=options.debug)
    ws_http_server = tornado.httpserver.HTTPServer(ws_application)
    ws_http_server.listen(options.port+1, address='127.0.0.1')
    logger.debug('Starting WebSocket on ws://127.0.0.1:%d', options.port+1)
    tornado.ioloop.IOLoop.instance().start()
# Script entry point: start the server and exit cleanly on Ctrl-C.
if __name__ == '__main__':
    try:
        run()
    except KeyboardInterrupt:
        print('Stop server')
|
from django_summernote.utils import SUMMERNOTE_THEME_FILES

# Override django-summernote's Bootstrap 3 theme file lists.
# Fix: bootstrap's CSS and JS must be served from the project's static assets
# tree, consistently with the jquery entry below; the bare package-relative
# names ('bootstrap.min.css', 'summernote/bootstrap.min.js') do not resolve
# there.
SUMMERNOTE_THEME_FILES['bs3'] = {
    'base_css': (
        '/static/assets/summernote/bootstrap.min.css',
    ),
    'base_js': (
        '/static/assets/admin/js/vendor/jquery/jquery.min.js',
        '/static/assets/summernote/bootstrap.min.js'
    ),
    'default_css': (
        'summernote/summernote.min.css',
        'summernote/django_summernote.css',
    ),
    'default_js': (
        'summernote/jquery.ui.widget.js',
        'summernote/jquery.iframe-transport.js',
        'summernote/jquery.fileupload.js',
        'summernote/summernote.min.js',
        'summernote/ResizeSensor.js',
    ),
}
Fix summernote: load the bs3 theme's bootstrap CSS and JS from the project's static assets path
from django_summernote.utils import SUMMERNOTE_THEME_FILES
SUMMERNOTE_THEME_FILES['bs3'] = {
'base_css': (
'/static/assets/summernote/bootstrap.min.css',
),
'base_js': (
'/static/assets/admin/js/vendor/jquery/jquery.min.js',
'/static/assets/summernote/bootstrap.min.js'
),
'default_css': (
'summernote/summernote.min.css',
'summernote/django_summernote.css',
),
'default_js': (
'summernote/jquery.ui.widget.js',
'summernote/jquery.iframe-transport.js',
'summernote/jquery.fileupload.js',
'summernote/summernote.min.js',
'summernote/ResizeSensor.js',
),
}
|
# -*- coding: utf-8 -*-
timestamp = 1395616822 #1395609292 # 1395616822#1395609952 #1395608362.0 #get the time when user kick in(talk to Brittany)
def school_tweet_rate(timestamp): #time in number of minutes, this function calculates tweet rate of each school every minute #time=part of data, taimstamp=specific time
#timestamp=entered time by a user/no timestamp
dataset = []
with open('/data/graphData.txt', 'r') as f:
for aline in f:
data = aline.split(" ")
dataset.append(data)
# print len(dataset)
for i in range(0,len(dataset)):
if float(dataset[i][2]) == timestamp:
schoolname = dataset[i][0]
smallesttime = float(dataset[i][2])
for j in range(0,len(dataset)):
if dataset[j][0] == schoolname:
if float(dataset[j][2]) < smallesttime:
smallesttime = float(dataset[j][2])
schools = dataset[i][0].split('$')
school_1 = schools[0]
school_2 = schools[1]
if timestamp-smallesttime == 0:
print "The Game between" + school_1 + " and " + school_2 + "is starting now."
return 0
else:
average_rate = float(dataset[i][1])/(abs(timestamp-smallesttime)/2) #use minute as time format 1st loop down
s = "The Game between " + school_1 + " and " + school_2 + ' has ' + 'average tweet rate '+str("%.2f" % average_rate)
print s
return average_rate
school_tweet_rate = school_tweet_rate(timestamp)
def ave_tweet_rate_total(timestamp): #updating every minute, requires the given time in
dataset_1=[]
with open('/Users/Sunhwa/Desktop/testing/totalPerInterval.txt', 'r') as f:
for aline in f:
data=aline.split(" ")
dataset_1.append(data)
timestamp=1395608272.0
smallesttime=dataset_1[0][1]
for i in range(0,len(dataset_1)):
if smallesttime> dataset_1[i][1]:
smallesttime=dataset_1[i][1]
print smallesttime
smallesttime = float(smallesttime)
tweets=0
for j in range(0,len(dataset_1)):
if (float(dataset_1[j][1]) <= timestamp) and (float(dataset_1[j][1])>=smallesttime):
tweets = tweets + float(dataset_1[j][0])
print "The number of tweets is " + str(tweets)
if timestamp-smallesttime == 0:
print "No game has started. "
return 0
else:
average_rate = float(tweets)/(abs(timestamp-smallesttime)/2) #use minute as time format 1st loop down
s = "The average tweet rate for all games is " + str("%.2f" % average_rate)
print s
return average_rate
ave_tweet_rate_total = ave_tweet_rate_total(timestamp)
def compare_tweet_rate (school_tweet_rate, ave_tweet_rate_total):# works for both whole day v. a given time
# for a particular school
if school_tweet_rate > ave_tweet_rate_total:
print "Tweet rate for this school is above average tweet rate for all school. So this game is worth watching."
else:
print "The game must be boring since the tweet rate is below average."
print compare_tweet_rate(school_tweet_rate, ave_tweet_rate_total)
def school_tweet_volume(school):
dataset = []
with open('/Users/Sunhwa/Desktop/testing/graphData.txt', 'r') as f:
for aline in f:
data = aline.split(" ")
dataset.append(data)
outfile=open('/Users/Sunhwa/Desktop/school_tweet_volume.txt', 'w')
for i in range (0, len(dataset)):
if school == dataset[i][0]:
time = dataset[i][1]
tweet = dataset[i][2]
string_to_write = school + " "+ str(time) + " " + str(tweet)# modify,, data structure which enables to show muti graphs
outfile.write(string_to_write)
outfile.close()
print school_tweet_volume("Iowa$Unc")
import numpy as np
import pylab as pl
infile=open('/Users/Sunhwa/Desktop/graphData.txt', 'r')
outfile=open('/Users/Sunhwa/Desktop/dataset_2.txt', 'w')
aline=infile.readline()
#user = raw_input("Which school game are you interested in?: ")
print "Would you like to see game changing moments by checking out tweet volume graph?"
print "There are five games to choose from: "
print "Enter 1 for WichitaSt$Kentucky; 2 for IowaSt$UNC; 3 for Tennessee$Mercer;"
print "4 for UCLA$StephenFAustin; 5 for Creighton$Baylor"
user = int(raw_input("Which school game are you interested in?: "))
if user==1:
user="WichitaSt$Kentucky"
elif user==2:
user="IowaSt$UNC"
elif user==3:
user="Tennessee$Mercer"
print user
elif user==4:
user="UCLA$StephenFAustin"
print user
elif user==5:
user="Creighton$Baylor"
else:
print "Whoops! That is an invalid value. Please reenter the valid one."
#user codes end here
data = ""
while aline:
items=aline.split()
if items[0] == user:
data = data + "; " + items[2]+ " " +items[1]
#outfile.write(dataline + '\n')
aline=infile.readline()
#print(dataline)
# add codes here for multiple graph
#print data
data = np.array(np.mat(data[1:]))
#print data
pl.plot(data[:,0],data[:,1])
pl.title(user)
#pl.legend("user")
pl.xlabel("Time")
pl.ylabel("Number of Tweet")
pl.show()
open('text59.txt', 'w').close()
Changed save paths
# -*- coding: utf-8 -*-
timestamp = 1395616822 #1395609292 # 1395616822#1395609952 #1395608362.0 #get the time when user kick in(talk to Brittany)
def school_tweet_rate(timestamp): #time in number of minutes, this function calculates tweet rate of each school every minute #time=part of data, taimstamp=specific time
#timestamp=entered time by a user/no timestamp
dataset = []
with open('/data/graphData.txt', 'r') as f:
for aline in f:
data = aline.split(" ")
dataset.append(data)
# print len(dataset)
for i in range(0,len(dataset)):
if float(dataset[i][2]) == timestamp:
schoolname = dataset[i][0]
smallesttime = float(dataset[i][2])
for j in range(0,len(dataset)):
if dataset[j][0] == schoolname:
if float(dataset[j][2]) < smallesttime:
smallesttime = float(dataset[j][2])
schools = dataset[i][0].split('$')
school_1 = schools[0]
school_2 = schools[1]
if timestamp-smallesttime == 0:
print "The Game between" + school_1 + " and " + school_2 + "is starting now."
return 0
else:
average_rate = float(dataset[i][1])/(abs(timestamp-smallesttime)/2) #use minute as time format 1st loop down
s = "The Game between " + school_1 + " and " + school_2 + ' has ' + 'average tweet rate '+str("%.2f" % average_rate)
print s
return average_rate
school_tweet_rate = school_tweet_rate(timestamp)
def ave_tweet_rate_total(timestamp): #updating every minute, requires the given time in
dataset_1=[]
with open('/data/totalPerInterval.txt', 'r') as f:
for aline in f:
data=aline.split(" ")
dataset_1.append(data)
timestamp=1395608272.0
smallesttime=dataset_1[0][1]
for i in range(0,len(dataset_1)):
if smallesttime> dataset_1[i][1]:
smallesttime=dataset_1[i][1]
print smallesttime
smallesttime = float(smallesttime)
tweets=0
for j in range(0,len(dataset_1)):
if (float(dataset_1[j][1]) <= timestamp) and (float(dataset_1[j][1])>=smallesttime):
tweets = tweets + float(dataset_1[j][0])
print "The number of tweets is " + str(tweets)
if timestamp-smallesttime == 0:
print "No game has started. "
return 0
else:
average_rate = float(tweets)/(abs(timestamp-smallesttime)/2) #use minute as time format 1st loop down
s = "The average tweet rate for all games is " + str("%.2f" % average_rate)
print s
return average_rate
ave_tweet_rate_total = ave_tweet_rate_total(timestamp)
def compare_tweet_rate (school_tweet_rate, ave_tweet_rate_total):# works for both whole day v. a given time
# for a particular school
if school_tweet_rate > ave_tweet_rate_total:
print "Tweet rate for this school is above average tweet rate for all school. So this game is worth watching."
else:
print "The game must be boring since the tweet rate is below average."
print compare_tweet_rate(school_tweet_rate, ave_tweet_rate_total)
import numpy as np
import pylab as pl
save_path = raw_input("Enter a save path to be able run the graphing: for instance '/Users/Desktop/yourname'")
infile=open('/data/graphData.txt', 'r')
outfile=open(save_path + '/dataset_2.txt', 'w')
#outfile=open('/Users/Sunhwa/Desktop/dataset_2.txt', 'w')
aline=infile.readline()
#user = raw_input("Which school game are you interested in?: ")
print "Would you like to see game changing moments by checking out tweet volume graph?"
print "There are five games to choose from: "
print "Enter 1 for WichitaSt$Kentucky; 2 for IowaSt$UNC; 3 for Tennessee$Mercer;"
print "4 for UCLA$StephenFAustin; 5 for Creighton$Baylor"
user = int(raw_input("Which school game are you interested in?: "))
if user==1:
user="WichitaSt$Kentucky"
elif user==2:
user="IowaSt$UNC"
elif user==3:
user="Tennessee$Mercer"
print user
elif user==4:
user="UCLA$StephenFAustin"
print user
elif user==5:
user="Creighton$Baylor"
else:
print "Whoops! That is an invalid value. Please reenter the valid one."
#user codes end here
data = ""
while aline:
items=aline.split()
if items[0] == user:
data = data + "; " + items[2]+ " " +items[1]
#outfile.write(dataline + '\n')
aline=infile.readline()
#print(dataline)
# add codes here for multiple graph
#print data
data = np.array(np.mat(data[1:]))
#print data
pl.plot(data[:,0],data[:,1])
pl.title(user)
#pl.legend("user")
pl.xlabel("Time")
pl.ylabel("Number of Tweet")
pl.show()
open('text59.txt', 'w').close()
|
from collections import defaultdict
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
_NONE_TYPE = type(None)
_EMPTY_TYPE = type('', (object,), {})
_MIXED_TYPE = type('<mixed-type>', (object,), {})
class AttrDict(dict):
"""A dict with keys accessible as attributes."""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Structure(object):
"""An object that reflects the structure of any python data structure.
Any object may be passed to the constructor and a complete traverse
of the structure occurs, making all the appropriate links so that
a simple hierarchy may be retrieved. The str function will turn these
objects into a simple hierarchy with notations for lists and whether or
not an attribute will be guaranteed for each particular branch. Also
supported is adding different Structure objects, which will show only
the structure common to both. Wherever the structure differs, it will
be noted as a '<mixed-type>'
"""
def __init__(self, value, key=None, parent=None):
self.key = key
self.parent = parent
self.type_ = type(value)
self.key_guaranteed = True
self.val_guaranteed = True
self.children = []
if self.is_list:
# Make a structure out of each item in the list
list_items = [Structure(item, parent=self) for item in value]
if list_items:
# Add all structures together to get the common structure
merged_structure = list_items[0]
for item in list_items[1:]:
merged_structure += item
# Set the only list child to the common structure
merged_structure.parent = self
self.children.append(merged_structure)
else:
self.children.append(Structure(_EMPTY_TYPE(), parent=self))
elif self.is_tuple:
# Make a structure out of each item in the tuple
tuple_items = [Structure(item, parent=self) for item in value]
if tuple_items:
self.children = tuple_items
elif self.is_dict:
for key, val in value.items():
self.children.append(Structure(val, key, self))
self.children.sort(key=lambda child: child.key)
def __add__(self, other):
assert self.key == other.key
key_guaranteed = self.key_guaranteed and other.key_guaranteed
val_guaranteed = self.val_guaranteed and other.val_guaranteed
if self.type_ is other.type_:
new = Structure(None, key=self.key)
new.type_ = self.type_
new.key_guaranteed = key_guaranteed
new.val_guaranteed = val_guaranteed
if self.is_list and other.is_list:
listchild = self.children[0] + other.children[0]
listchild.parent = new
new.children.append(listchild)
return new
elif self.is_tuple and other.is_tuple:
zipped = zip_longest(self.children, other.children)
for schild, ochild in zipped:
if schild is None:
newchild = ochild.copy(new)
newchild.val_guaranteed = False
new.children.append(newchild)
elif ochild is None:
newchild = schild.copy(new)
newchild.val_guaranteed = False
new.children.append(newchild)
else:
newchild = schild + ochild
newchild.parent = new
new.children.append(newchild)
return new
elif self.is_dict and other.is_dict:
keysdict = defaultdict(lambda: [None, None])
for child in self.children:
keysdict[child.key][0] = child
for child in other.children:
keysdict[child.key][1] = child
for c1, c2 in keysdict.values():
if c1 is None:
newchild = c2.copy(new)
newchild.key_guaranteed = False
elif c2 is None:
newchild = c1.copy(new)
newchild.key_guaranteed = False
else:
newchild = c1 + c2
newchild.parent = new
new.children.append(newchild)
new.children.sort(key=lambda child: child.key)
return new
else:
return new
else:
if self.type_ is _EMPTY_TYPE:
new = other.copy()
elif other.type_ is _EMPTY_TYPE:
new = self.copy()
elif self.type_ is _NONE_TYPE:
new = other.copy()
new.key_guaranteed = key_guaranteed
new.val_guaranteed = False
elif other.type_ is _NONE_TYPE:
new = self.copy()
new.key_guaranteed = key_guaranteed
new.val_guaranteed = False
else:
new = Structure(None, key=self.key)
new.key_guaranteed = key_guaranteed
new.val_guaranteed = val_guaranteed
new.type_ = _MIXED_TYPE
return new
def __contains__(self, item):
for child in self.children:
if child.key == item.key:
return True
return False
def __str__(self):
if self.parent:
string = '{}{}{} - {}\n'.format(
' ' * (self.generation - 1),
'' if self.key_guaranteed else '*',
self.key,
self.type_string)
else:
string = '=== {} ===\n'.format(self.type_string)
if self.children:
if self.is_list:
sub = self.children[0]
while sub.is_list:
sub = sub.children[0]
if sub.is_dict:
for child in sub.children:
string += str(child) + '\n'
elif self.is_dict:
for child in self.children:
string += str(child) + '\n'
return string[:-1]
def copy(self, parent=None):
new = Structure(None, parent=parent)
new.key = self.key
new.type_ = self.type_
new.val_guaranteed = self.val_guaranteed
new.key_guaranteed = self.key_guaranteed
for child in self.children:
new.children.append(child.copy(new))
return new
@property
def generation(self):
if not self.parent:
return 0
elif self.parent.is_dict:
return 1 + self.parent.generation
else:
return self.parent.generation
@property
def type_string(self):
if self.is_tuple:
subtypes = [item.type_string for item in self.children]
return '{}({})'.format(
'' if self.val_guaranteed else '*',
', '.join(subtypes))
elif self.is_list:
return '{}[{}]'.format(
'' if self.val_guaranteed else '*',
self.children[0].type_string)
else:
return '{}{}'.format(
'' if self.val_guaranteed else '*',
self.type_.__name__)
@property
def is_list(self):
return issubclass(self.type_, list)
@property
def is_tuple(self):
return issubclass(self.type_, tuple)
@property
def is_dict(self):
return issubclass(self.type_, dict)
Addition of some docstrings
from collections import defaultdict
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
_NONE_TYPE = type(None)
_EMPTY_TYPE = type('', (object,), {})
_MIXED_TYPE = type('<mixed-type>', (object,), {})
class AttrDict(dict):
"""A dict with keys accessible as attributes."""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Structure(object):
"""An object that reflects the structure of any python data structure.
Any object may be passed to the constructor and a complete traverse of the
structure occurs, making all the appropriate links so that the hierarchy
may be examined. The str function will turn these objects into a simple
representation of the structure with notations for lists and whether or
not an attribute will be guaranteed for each particular branch.
Also supported is adding different Structure objects, which will show only
the structure common to both. Wherever the structure differs, it will
be noted as a '<mixed-type>'
"""
def __init__(self, value, key=None, parent=None):
"""Initialize a structure
Takes the type of the value, and recursively creates sub-structures
as children if the type is a list, tuple, or dict.
"""
self.key = key
self.parent = parent
self.type_ = type(value)
self.key_guaranteed = True
self.val_guaranteed = True
self.children = []
if self.is_list:
# Make a structure out of each item in the list
list_items = [Structure(item, parent=self) for item in value]
if list_items:
# Add all structures together to get the common structure
merged_structure = list_items[0]
for item in list_items[1:]:
merged_structure += item
# Set the only list child to the common structure
merged_structure.parent = self
self.children.append(merged_structure)
else:
self.children.append(Structure(_EMPTY_TYPE(), parent=self))
elif self.is_tuple:
# Make a structure out of each item in the tuple
tuple_items = [Structure(item, parent=self) for item in value]
if tuple_items:
self.children = tuple_items
elif self.is_dict:
for key, val in value.items():
self.children.append(Structure(val, key, self))
self.children.sort(key=lambda child: child.key)
def __add__(self, other):
"""Add structures to get a new structure that is common to both.
The returned structure is a newly created structure that reflects the
similarities and differences between the two structures added.
"""
key_guaranteed = self.key_guaranteed and other.key_guaranteed
val_guaranteed = self.val_guaranteed and other.val_guaranteed
if self.type_ is other.type_:
new = Structure(None, key=self.key)
new.type_ = self.type_
new.key_guaranteed = key_guaranteed
new.val_guaranteed = val_guaranteed
if self.is_list and other.is_list:
listchild = self.children[0] + other.children[0]
listchild.parent = new
new.children.append(listchild)
return new
elif self.is_tuple and other.is_tuple:
zipped = zip_longest(self.children, other.children)
for schild, ochild in zipped:
if schild is None:
newchild = ochild.copy(new)
newchild.val_guaranteed = False
new.children.append(newchild)
elif ochild is None:
newchild = schild.copy(new)
newchild.val_guaranteed = False
new.children.append(newchild)
else:
newchild = schild + ochild
newchild.parent = new
new.children.append(newchild)
return new
elif self.is_dict and other.is_dict:
keysdict = defaultdict(lambda: [None, None])
for child in self.children:
keysdict[child.key][0] = child
for child in other.children:
keysdict[child.key][1] = child
for c1, c2 in keysdict.values():
if c1 is None:
newchild = c2.copy(new)
newchild.key_guaranteed = False
elif c2 is None:
newchild = c1.copy(new)
newchild.key_guaranteed = False
else:
newchild = c1 + c2
newchild.parent = new
new.children.append(newchild)
new.children.sort(key=lambda child: child.key)
return new
else:
return new
else:
if self.type_ is _EMPTY_TYPE:
new = other.copy()
elif other.type_ is _EMPTY_TYPE:
new = self.copy()
elif self.type_ is _NONE_TYPE:
new = other.copy()
new.key_guaranteed = key_guaranteed
new.val_guaranteed = False
elif other.type_ is _NONE_TYPE:
new = self.copy()
new.key_guaranteed = key_guaranteed
new.val_guaranteed = False
else:
new = Structure(None, key=self.key)
new.key_guaranteed = key_guaranteed
new.val_guaranteed = val_guaranteed
new.type_ = _MIXED_TYPE
return new
def __str__(self):
"""A structured representation of the underlying structure"""
if self.parent:
string = '{}{}{} - {}\n'.format(
' ' * (self.generation - 1),
'' if self.key_guaranteed else '*',
self.key,
self.type_string)
else:
string = '=== {} ===\n'.format(self.type_string)
if self.children:
if self.is_list:
sub = self.children[0]
while sub.is_list:
sub = sub.children[0]
if sub.is_dict:
for child in sub.children:
string += str(child) + '\n'
elif self.is_dict:
for child in self.children:
string += str(child) + '\n'
return string[:-1]
def copy(self, parent=None):
"""Copies an existing structure and all of it's children"""
new = Structure(None, parent=parent)
new.key = self.key
new.type_ = self.type_
new.val_guaranteed = self.val_guaranteed
new.key_guaranteed = self.key_guaranteed
for child in self.children:
new.children.append(child.copy(new))
return new
@property
def generation(self):
"""Returns the number of ancestors that are dictionaries"""
if not self.parent:
return 0
elif self.parent.is_dict:
return 1 + self.parent.generation
else:
return self.parent.generation
@property
def type_string(self):
"""Returns a string representing the type of the structure"""
if self.is_tuple:
subtypes = [item.type_string for item in self.children]
return '{}({})'.format(
'' if self.val_guaranteed else '*',
', '.join(subtypes))
elif self.is_list:
return '{}[{}]'.format(
'' if self.val_guaranteed else '*',
self.children[0].type_string)
else:
return '{}{}'.format(
'' if self.val_guaranteed else '*',
self.type_.__name__)
@property
def is_list(self):
return issubclass(self.type_, list)
@property
def is_tuple(self):
return issubclass(self.type_, tuple)
@property
def is_dict(self):
return issubclass(self.type_, dict)
|
import os, sys
from time import sleep
import pyDMCC
import bot.lib.lib as lib
class Rail_Mover:
def __init__(self):
self.bot_config = lib.get_config()
rail_motor_conf = self.bot_config["dagu_arm"]["rail_cape"]["rail_motor"]
board_num = rail_motor_conf["board_num"]
motor_num = rail_motor_conf["motor_num"]
self.rail_DMCC = pyDMCC.DMCC(1)
self.rail_motor = self.rail_DMCC.motors[motor_num]
def Orientor(self,Position):
self.rail_motor.power = 0
if Position == 1:
Displacement = 200 - self.rail_motor.position
return self.DisplacementMover(Displacement)
elif Position == 2:
Displacement = 2250 - self.rail_motor.position
return self.DisplacementMover(Displacement)
elif Position == 3:
Displacement = 4600 - self.rail_motor.position
return self.DisplacementMover(Displacement)
elif Position == 4:
Displacement = 7100 - self.rail_motor.position
return self.DisplacementMover(Displacement)
def DisplacementMover(self,Displacement):
if (self.rail_motor.position + Displacement) > 7200:
print "Cannot move beyond range"
return 0
elif (self.rail_motor.position + Displacement) < 0:
print "Cannot move beyond range"
return 0
StartPOS = self.rail_motor.position
if Displacement == 0:
return 1
elif Displacement < 0: #Negative movement
power = 40
self.rail_motor.power = power
print self.rail_motor.position
i = 0
while self.rail_motor.position > (StartPOS + Displacement):
i = i +1
print self.rail_motor.position
self.rail_motor.power = 0
elif Displacement > 0: #Positive movement
power = -40
self.rail_motor.power = power
print self.rail_motor.position
i = 0
while self.rail_motor.position < (StartPOS + Displacement):
i = i +1
print self.rail_motor.position
self.rail_motor.power = 0
return 1
def DisplacementConverter(self,RawDisplacement):
ConversionRatio = 1020
Displacement = RawDisplacement*ConversionRatio
return self.DisplacementMover(Displacement)
def ResetToHome(self):
if self.rail_motor.position < 0:
self.rail_motor.reset()
return 0
power = 40
self.rail_motor.power = power
while self.rail_motor.position > 20:
print self.rail_motor.position
print "velocity: ", self.rail_motor.velocity
self.rail_motor.power = 0
self.rail_motor.reset()
return 1
def RunIntoWall(self):
print "I am trying to run into the wall."
power = 40
self.rail_motor.power = power
print "velocity: ", self.rail_motor.velocity
sleep(.5)
print "velocity: ", self.rail_motor.velocity
while self.rail_motor.velocity < 0:
print self.rail_motor.position
print "velocity: ", self.rail_motor.velocity
self.rail_motor.power = 0
self.rail_motor.reset()
return 1
@lib.api_call
def MoveToPosition(self,Position):
power = 60
current_position = self.rail_motor.position
if Position < 0 or Position > 7150:
print "Invalid Position"
return 0
if Position == current_position:
return 1
if Position < current_position:
self.rail_motor.power = power
while self.rail_motor.position > (Position - 20):
print self.rail_motor.position
if Position > current_position:
self.rail_motor.power = -power
while self.rail_motor.position < (Position + 20):
print self.rail_motor.position
self.rail_motor.power = 0
fixing bugs
import os, sys
from time import sleep
import pyDMCC
import bot.lib.lib as lib
class Rail_Mover:
def __init__(self):
self.bot_config = lib.get_config()
rail_motor_conf = self.bot_config["dagu_arm"]["rail_cape"]["rail_motor"]
board_num = rail_motor_conf["board_num"]
motor_num = rail_motor_conf["motor_num"]
self.rail_DMCC = pyDMCC.DMCC(1)
self.rail_motor = self.rail_DMCC.motors[motor_num]
def Orientor(self,Position):
self.rail_motor.power = 0
if Position == 1:
Displacement = 200 - self.rail_motor.position
return self.DisplacementMover(Displacement)
elif Position == 2:
Displacement = 2250 - self.rail_motor.position
return self.DisplacementMover(Displacement)
elif Position == 3:
Displacement = 4600 - self.rail_motor.position
return self.DisplacementMover(Displacement)
elif Position == 4:
Displacement = 7000 - self.rail_motor.position
return self.DisplacementMover(Displacement)
def DisplacementMover(self,Displacement):
if (self.rail_motor.position + Displacement) > 7200:
print "Cannot move beyond range"
return 0
elif (self.rail_motor.position + Displacement) < 0:
print "Cannot move beyond range"
return 0
StartPOS = self.rail_motor.position
if Displacement == 0:
return 1
elif Displacement < 0: #Negative movement
power = 40
self.rail_motor.power = power
print self.rail_motor.position
i = 0
while self.rail_motor.position > (StartPOS + Displacement):
i = i +1
print self.rail_motor.position
self.rail_motor.power = 0
elif Displacement > 0: #Positive movement
power = -40
self.rail_motor.power = power
print self.rail_motor.position
i = 0
while self.rail_motor.position < (StartPOS + Displacement):
i = i +1
print self.rail_motor.position
self.rail_motor.power = 0
return 1
def DisplacementConverter(self,RawDisplacement):
ConversionRatio = 1020
Displacement = RawDisplacement*ConversionRatio
return self.DisplacementMover(Displacement)
def ResetToHome(self):
if self.rail_motor.position < 0:
self.rail_motor.reset()
return 0
power = 40
self.rail_motor.power = power
while self.rail_motor.position > 20:
print self.rail_motor.position
print "velocity: ", self.rail_motor.velocity
self.rail_motor.power = 0
self.rail_motor.reset()
return 1
def RunIntoWall(self):
print "I am trying to run into the wall."
power = 40
self.rail_motor.power = power
print "velocity: ", self.rail_motor.velocity
sleep(.5)
print "velocity: ", self.rail_motor.velocity
while self.rail_motor.velocity < 0:
print self.rail_motor.position
print "velocity: ", self.rail_motor.velocity
self.rail_motor.power = 0
self.rail_motor.reset()
return 1
@lib.api_call
def MoveToPosition(self,Position):
power = 60
current_position = self.rail_motor.position
if Position < 0 or Position > 7150:
print "Invalid Position"
return 0
if Position == current_position:
return 1
if Position < current_position:
self.rail_motor.power = power
while self.rail_motor.position > (Position - 20):
print self.rail_motor.position
if Position > current_position:
self.rail_motor.power = -power
while self.rail_motor.position < (Position + 20):
print self.rail_motor.position
self.rail_motor.power = 0
|
import os
import sys
import hashlib
import json
import requests
import socket
import boto
from app import requests_cache_bucket
MAX_PAYLOAD_SIZE_BYTES = 1000*1000 # 1mb
CACHE_FOLDER_NAME = "tng-requests-cache"
class CachedResponse:
def __init__(self, **kwargs):
self.headers = {}
self.status_code = 200
for (k, v) in kwargs.iteritems():
setattr(self, k, v)
@property
def content(self):
return self.file_contents
@property
def text(self):
return self.file_contents
# allows it to be treated the same way as a streaming response object
def close(self):
pass
def http_get(url, headers={}, timeout=20, stream=False, cache_enabled=True, allow_redirects=True, doi=None):
if not requests_cache_bucket:
cache_enabled = False
if cache_enabled:
cached_response = get_page_from_cache(url)
if cached_response:
print u"CACHE HIT on {url}".format(url=url)
return cached_response
try:
try:
print u"LIVE GET on {url}".format(url=url)
except UnicodeDecodeError:
print u"LIVE GET on an url that throws UnicodeDecodeError"
r = requests.get(url,
headers=headers,
timeout=timeout,
stream=stream,
allow_redirects=allow_redirects,
verify=False)
if r and not r.encoding:
r.encoding = "utf-8"
if r and cache_enabled:
store_page_in_cache(url, r, doi)
except (requests.exceptions.Timeout, socket.timeout) as e:
print u"timed out on GET on {url}".format(url=url)
raise
except requests.exceptions.RequestException as e:
print u"RequestException on GET on {url}".format(url=url)
raise
return r
def get_page_from_cache(url):
cache_key = url
cache_data = get_cache_entry(cache_key)
if cache_data:
url = cache_data["headers"].get("url", None)
requested_url = cache_data["headers"].get("requested-url", None)
return CachedResponse(**{"content": cache_data["content"],
"requested-url": requested_url,
"url": url,
"headers": cache_data["headers"]})
return None
def store_page_in_cache(url, response, doi):
metadata = {}
for (k, v) in response.headers.iteritems():
if k.lower() in ["content-type", "content-disposition"]:
metadata[k] = v
metadata["url"] = response.url
metadata["doi"] = doi
metadata["requested-url"] = url
cache_key = url
set_cache_entry(cache_key, response.content, metadata)
def _build_hash_key(key):
json_key = json.dumps(key)
hash_key = hashlib.md5(json_key.encode("utf-8")).hexdigest()
return hash_key
def get_cache_entry(url):
""" Get an entry from the cache, returns None if not found """
hash_key = _build_hash_key(url)
k = boto.s3.key.Key(requests_cache_bucket)
k.key = hash_key
headers = {}
try:
file_contents = k.get_contents_as_string()
remote_key = requests_cache_bucket.get_key(hash_key)
headers = remote_key.metadata
headers["content-type"] = k.content_type
headers["content-disposition"] = k.content_disposition
except boto.exception.S3ResponseError:
# print u"CACHE MISS: couldn't find {}, aka {}".format(hash_key, url)
# not in cache
return None
# print "***", url, hash_key, headers
return {"content": file_contents, "headers": headers}
def set_cache_entry(url, content, metadata):
if sys.getsizeof(content) > MAX_PAYLOAD_SIZE_BYTES:
print u"Not caching because payload is too large"
return
hash_key = _build_hash_key(url)
# print "***", url, hash_key
k = boto.s3.key.Key(requests_cache_bucket)
k.key = hash_key
k.set_contents_from_string(content)
k.set_remote_metadata(metadata, {}, True)
# remote_key = requests_cache_bucket.get_key(hash_key)
# print "metadata:", remote_key.metadata
return
only send metatadata if not null
import os
import sys
import hashlib
import json
import requests
import socket
import boto
from app import requests_cache_bucket
MAX_PAYLOAD_SIZE_BYTES = 1000*1000 # 1mb
CACHE_FOLDER_NAME = "tng-requests-cache"
class CachedResponse:
def __init__(self, **kwargs):
self.headers = {}
self.status_code = 200
for (k, v) in kwargs.iteritems():
setattr(self, k, v)
@property
def content(self):
return self.file_contents
@property
def text(self):
return self.file_contents
# allows it to be treated the same way as a streaming response object
def close(self):
pass
def http_get(url, headers={}, timeout=20, stream=False, cache_enabled=True, allow_redirects=True, doi=None):
if not requests_cache_bucket:
cache_enabled = False
if cache_enabled:
cached_response = get_page_from_cache(url)
if cached_response:
print u"CACHE HIT on {url}".format(url=url)
return cached_response
try:
try:
print u"LIVE GET on {url}".format(url=url)
except UnicodeDecodeError:
print u"LIVE GET on an url that throws UnicodeDecodeError"
r = requests.get(url,
headers=headers,
timeout=timeout,
stream=stream,
allow_redirects=allow_redirects,
verify=False)
if r and not r.encoding:
r.encoding = "utf-8"
if r and cache_enabled:
store_page_in_cache(url, r, doi)
except (requests.exceptions.Timeout, socket.timeout) as e:
print u"timed out on GET on {url}".format(url=url)
raise
except requests.exceptions.RequestException as e:
print u"RequestException on GET on {url}".format(url=url)
raise
return r
def get_page_from_cache(url):
    """Look up *url* in the requests cache.

    Returns a CachedResponse built from the stored body and headers,
    or None when the URL is not cached.
    """
    entry = get_cache_entry(url)
    if not entry:
        return None
    stored_headers = entry["headers"]
    response_kwargs = {
        "content": entry["content"],
        "requested-url": stored_headers.get("requested-url", None),
        "url": stored_headers.get("url", None),
        "headers": stored_headers,
    }
    return CachedResponse(**response_kwargs)
def store_page_in_cache(url, response, doi):
    """Persist a live HTTP *response* for *url* into the requests cache.

    Only the content-type and content-disposition headers are kept,
    plus the final URL, the DOI, and the originally requested URL.
    """
    metadata = {}
    # items() instead of iteritems(): works on both Python 2 and 3.
    for (k, v) in response.headers.items():
        if k.lower() in ["content-type", "content-disposition"]:
            metadata[k] = v
    metadata["url"] = response.url
    metadata["doi"] = doi
    metadata["requested-url"] = url
    cache_key = url
    set_cache_entry(cache_key, response.content, metadata)
def _build_hash_key(key):
json_key = json.dumps(key)
hash_key = hashlib.md5(json_key.encode("utf-8")).hexdigest()
return hash_key
def get_cache_entry(url):
    """Get an entry from the cache; returns None if not found.

    On a hit returns {"content": <bytes>, "headers": <dict>} where the
    headers dict is the S3 key's metadata plus content-type and
    content-disposition.
    """
    hash_key = _build_hash_key(url)
    k = boto.s3.key.Key(requests_cache_bucket)
    k.key = hash_key
    headers = {}
    try:
        file_contents = k.get_contents_as_string()
        # Re-fetch the key to read its metadata (not populated above).
        remote_key = requests_cache_bucket.get_key(hash_key)
        headers = remote_key.metadata
        headers["content-type"] = k.content_type
        headers["content-disposition"] = k.content_disposition
    except boto.exception.S3ResponseError:
        # Deliberate: an S3 error here is treated as a cache miss.
        # print u"CACHE MISS: couldn't find {}, aka {}".format(hash_key, url)
        # not in cache
        return None
    # print "***", url, hash_key, headers
    return {"content": file_contents, "headers": headers}
def set_cache_entry(url, content, metadata):
    """Store *content* for *url* in the S3-backed requests cache.

    Payloads over MAX_PAYLOAD_SIZE_BYTES are not cached; *metadata* is
    attached to the key only when non-empty.
    """
    if sys.getsizeof(content) > MAX_PAYLOAD_SIZE_BYTES:
        print u"Not caching because payload is too large"
        return
    hash_key = _build_hash_key(url)
    # print "***", url, hash_key
    k = boto.s3.key.Key(requests_cache_bucket)
    k.key = hash_key
    k.set_contents_from_string(content)
    # Only send metadata when there is something to send.
    if metadata:
        k.set_remote_metadata(metadata, {}, True)
    # remote_key = requests_cache_bucket.get_key(hash_key)
    # print "metadata:", remote_key.metadata
    return
|
# -*- coding: utf-8 -*-
"""Provides Attribute class."""
__docformat__ = "restructuredtext en"
__copyright__ = """
Copyright (C) 2008-2013 Hendrikx-ITC B.V.
Distributed under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option) any later
version. The full license is in the file COPYING, distributed as part of
this software.
"""
from psycopg2.extensions import adapt, register_adapter
from minerva.storage.attribute import schema
class Attribute(object):
    """Represents a single attribute of an attribute store."""

    def __init__(self, name, datatype="smallint", description=None):
        # `id` and `attributestore` are attached after construction
        # (by `get` or by the owning attribute store).
        self.id = None
        self.attributestore = None
        self.name = name
        self.description = description
        self.datatype = datatype

    @classmethod
    def get(cls, cursor, id):
        """Load and return the attribute identified by `id`."""
        cursor.execute(
            "SELECT name, datatype, description "
            "FROM attribute.attribute "
            "WHERE id = %s", (id,))
        name, datatype, description = cursor.fetchone()
        attribute = Attribute(name, datatype, description)
        attribute.id = id
        return attribute

    def create(self, cursor):
        """Insert this attribute into the database.

        Raises Exception when no attributestore has been attached.
        """
        if self.attributestore is None:
            raise Exception("attributestore not set")
        query = (
            "INSERT INTO {0.name}.attribute "
            "(attributestore_id, name, datatype, description) "
            "VALUES (%s, %s, %s, %s)").format(schema)
        params = (self.attributestore.id, self.name, self.datatype,
                  self.description)
        cursor.execute(query, params)

    def __repr__(self):
        return "<Attribute({0} {1})>".format(self.name, self.datatype)

    def __str__(self):
        return "{0.name}({0.datatype})".format(self)
def adapt_attribute(attribute):
    """Return psycopg2 compatible representation of `attribute`."""
    # Idiom fix: `is not None` instead of `not ... is None` (PEP 8).
    if attribute.attributestore is not None:
        attributestore_id = attribute.attributestore.id
    else:
        # The attribute may not be attached to a store yet.
        attributestore_id = None
    attrs = (attribute.id, attributestore_id, attribute.description,
             attribute.name, attribute.datatype)
    return adapt(attrs)

register_adapter(Attribute, adapt_attribute)
Adapt Python code for attribute.
# -*- coding: utf-8 -*-
"""Provides Attribute class."""
__docformat__ = "restructuredtext en"
__copyright__ = """
Copyright (C) 2008-2013 Hendrikx-ITC B.V.
Distributed under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option) any later
version. The full license is in the file COPYING, distributed as part of
this software.
"""
from psycopg2.extensions import adapt, register_adapter
class Attribute(object):
    """Represents a single attribute of an attribute store."""

    def __init__(self, name, datatype="smallint", description=None):
        # `id` and `attributestore` are attached after construction
        # (by `get` or by the owning attribute store).
        self.id = None
        self.attributestore = None
        self.name = name
        self.description = description
        self.datatype = datatype

    @classmethod
    def get(cls, cursor, id):
        """Load and return the attribute identified by `id`."""
        cursor.execute(
            "SELECT name, datatype, description "
            "FROM attribute_directory.attribute "
            "WHERE id = %s", (id,))
        name, datatype, description = cursor.fetchone()
        attribute = Attribute(name, datatype, description)
        attribute.id = id
        return attribute

    def create(self, cursor):
        """Insert this attribute into the database.

        Raises Exception when no attributestore has been attached.
        """
        if self.attributestore is None:
            raise Exception("attributestore not set")
        cursor.execute(
            "INSERT INTO attribute_directory.attribute "
            "(attributestore_id, name, datatype, description) "
            "VALUES (%s, %s, %s, %s)",
            (self.attributestore.id, self.name, self.datatype,
             self.description))

    def __repr__(self):
        return "<Attribute({0} {1})>".format(self.name, self.datatype)

    def __str__(self):
        return "{0.name}({0.datatype})".format(self)
def adapt_attribute(attribute):
    """Return psycopg2 compatible representation of `attribute`."""
    # Idiom fix: `is not None` instead of `not ... is None` (PEP 8).
    if attribute.attributestore is not None:
        attributestore_id = attribute.attributestore.id
    else:
        # The attribute may not be attached to a store yet.
        attributestore_id = None
    attrs = (attribute.id, attributestore_id, attribute.description,
             attribute.name, attribute.datatype)
    return adapt(attrs)

register_adapter(Attribute, adapt_attribute)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Utility functions to work with Accept-* headers as defined by HTTP 1.1.
This module provides some utility functions useful for writing
websites which want to deal with some of the HTTP protocol specifics;
especially the correct interpretation of the various Accept-* style
headers, content negotiation, and so forth.
The main functions this modules defines are:
* parse_accept_header()
* parse_media_type()
* acceptable_content_type()
* acceptable_charset()
* acceptable_language()
It also defines a helper class, language_tag, which can be used to
help interpret and compare languages.
See also:
* RFC 2616, "Hypertext Transfer Protocol -- HTTP/1.1", June 1999.
<http://www.ietf.org/rfc/rfc2616.txt>
* RFC 3066, "Tags for the Identification of Languages", January 2001.
<http://www.ietf.org/rfc/rfc3066.txt>
"""
__author__ = """Deron Meranda <http://deron.meranda.us/>"""
__date__ = "2005-12-19"
__version__ = "1.0"
__credits__ = """Copyright (c) 2005 Deron E. Meranda <http://deron.meranda.us/>
Licensed under GNU LGPL 2.1 or later. See <http://www.fsf.org/>.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
def _split_at_qfactor( s ):
"""Splits a string at the quality factor (;q=) parameter.
Returns the left and right substrings as a two-member tuple.
"""
# It may be faster, but incorrect, to use s.split(';q=',1), since
# HTTP allows any amount of linear white space (LWS) to appear
# between the parts, so it could also be "; q = ".
# We do this parsing 'manually' for speed rather than using a
# regex, which would be r';[ \t\r\n]*q[ \t\r\n]*=[ \t\r\n]*'
LWS = ' \t\n\r'
pos = 0
while 0 <= pos < len(s):
pos = s.find(';', pos)
if pos < 0:
break # no more parameters
startpos = pos
pos = pos + 1
while pos < len(s) and s[pos] in LWS:
pos = pos + 1
if pos < len(s) and s[pos] == 'q':
pos = pos + 1
while pos < len(s) and s[pos] in LWS:
pos = pos + 1
if pos < len(s) and s[pos] == '=':
pos = pos + 1
while pos < len(s) and s[pos] in LWS:
pos = pos + 1
return ( s[:startpos], s[pos:] )
return (s, '')
def parse_accept_header( header_value ):
    """Parses the value of an HTTP Accept-*: style header with quality factors.

    The value of the header as a string should be passed in, without the
    header name itself.  Works for "Accept", "Accept-Charset",
    "Accept-Encoding" and "Accept-Language".

    Returns a list of (item_name, qvalue, accept_parms) tuples, sorted
    from most-preferred to least-preferred.  Item strings are not
    processed further (MIME types may still carry parameters).  The
    qvalue is a float clamped to [0.0, 1.0]; a malformed qvalue is
    treated as the low quality 0.1.  A qvalue of 0 means the item is
    explicitly NOT acceptable.  accept_parms is usually '', otherwise a
    semicolon-separated list of param=value qualifiers.

    Empty list items are removed; duplicates are not detected.
    """
    accept_list = []
    # itemnum is used to insure a stable-sort later. Could use enumerate(),
    # but we want to preserve Python 2.2 compatibility.
    itemnum = 0
    for item, qvalue in [ _split_at_qfactor(v.strip()) for v in header_value.split(',') ]:
        if not item:
            # Fix: test the whole string, not item[0], so an empty list
            # item (e.g. "a,,b") is skipped instead of raising IndexError.
            continue
        if not qvalue:
            qvalue, accept_ext = 1, ''
        else:
            if ';' in qvalue:
                qvalue, accept_ext = qvalue.split(';', 1)
            else:
                accept_ext = ''
            try:
                qvalue = float(qvalue.strip())
            except ValueError:
                qvalue = 0.1 # Mangled q-value, assume low quality
            if qvalue < 0: # Insure in range 0 to 1.
                qvalue = 0
            elif qvalue > 1:
                qvalue = 1
        accept_list.append( (qvalue, itemnum, item, accept_ext) )
        itemnum = itemnum + 1
    accept_list.sort()
    accept_list.reverse()
    # Reformat the tuples in the list, so the name comes first. We also
    # discard the itemnum ordinal, since it was only there to enforce
    # a stable sort.
    accept_list = [(x[2], x[0], x[3]) for x in accept_list]
    return accept_list
class content_type(object):
    """Represents a parsed MIME media type such as 'text/plain;charset=x'.

    Exposes .major, .minor and .parmdict (the parsed parameters).
    NOTE(review): _parse_media_type returns the parameters as a *list*
    of (name, value) tuples, yet __str__ calls self.parmdict.items() as
    if it were a dict — likely a latent bug; confirm before relying on
    str() of a parameterized type.
    """
    def __init__(self, content_type_string):
        major, minor, pdict = self._parse_media_type( content_type_string )
        self.major = major
        self.minor = minor
        self.parmdict = pdict
    def __str__(self):
        """Re-serialize as 'major/minor; p=v; ...' (quoting values as needed)."""
        s = '%s/%s' % (self.major, self.minor)
        if self.parmdict:
            extra = '; '.join([ '%s=%s' % (a[0],self._quote(a[1])) \
                                for a in self.parmdict.items()])
            s += '; ' + extra
        return s
    def __repr__(self):
        s = '%s(%s)' % (self.__class__.__name__, repr(self.__str__()))
        return s
    # Character classes used while parsing.
    separators = '()<>@,;:\\"/[]?={} \t' # RFC 2616 sect 2.2
    lws = '\r\n \t'
    def _quote(self, val):
        """Produces a token, or a quoted string if necessary.
        """
        need_quotes = False
        s = ''
        for c in val:
            if c in self.separators or ord(c)<32 or ord(c)>127:
                # Any separator/control/non-ASCII char forces quoting;
                # the char itself is backslash-escaped.
                need_quotes = True
                s += "\\%s" % c
            else:
                s += c
        if need_quotes:
            s = '"%s"' % s
        return s
    def _parse_token(self, s, start=0, allow_quoted=True):
        """Parses a token or a quoted string.

        Returns (token, end_position) — note the second element is the
        absolute position in `s` where parsing stopped, not a count of
        characters consumed (callers below rely on that).
        """
        if start >= len(s):
            return ('',0)
        has_quote = (s[start] == '"')
        if has_quote and not allow_quoted:
            return ('',0)
        s2 = ''
        if has_quote:
            start += 1
        pos = start
        while pos < len(s):
            c = s[pos]
            if c == '\\' and has_quote:
                # Backslash escapes take the next char literally.
                pos += 1
                s2 += s[pos]
            elif c == '"' and has_quote:
                pos += 1
                break
            elif c in self.separators or ord(c)<32 or ord(c)>127:
                break
            else:
                s2 += c
                pos += 1
        # NOTE(review): when the loop exits via the closing-quote branch,
        # pos has already been advanced past the quote, so s[pos] here is
        # the char *after* it — this check looks like it raises even for
        # well-formed quoted strings; confirm against the newer revision.
        if has_quote and (pos >= len(s) or s[pos] != '"'):
            raise ValueError('Quoted string is missing closing quote mark')
        return s2, pos
    def _parse_media_type(self, media_type):
        """Parses a media type (MIME type) designator into it's parts.

        Given a media type string, returns a tuple of it's parts
        (major, minor, parmlist).

        Examples:
            image/png -> ('image','png',[])
            text/plain; charset="utf-16be" -> ('text','plain',[('charset','utf-16be')])
        """
        ctmaj, ctmin = media_type.split('/', 1)
        parmlist = []
        if ';' in ctmin:
            ctmin, ctparms = ctmin.split(';', 1)
            i = 0
            while i < len(ctparms):
                # Skip leading linear white space before the param name.
                while i < len(ctparms) and ctparms[i] in self.lws:
                    i += 1
                pname, i = self._parse_token( ctparms, start=i, allow_quoted=False )
                while i < len(ctparms) and ctparms[i] in self.lws:
                    i += 1
                #print 'pname=[%s]' % pname, 'at', i
                if i < len(ctparms) and ctparms[i] == '=':
                    i += 1
                    while i < len(ctparms) and ctparms[i] in self.lws:
                        i += 1
                    #print 'found = at', i
                    pval, i = self._parse_token( ctparms, start=i, allow_quoted=True )
                else:
                    # Parameter without '=' gets an empty value.
                    pval = ''
                #print 'pval=[%s]' % pval, 'at', i
                while i < len(ctparms) and ctparms[i] in self.lws:
                    i += 1
                if i < len(ctparms):
                    if ctparms[i] == ';':
                        i += 1
                    else:
                        raise ValueError('Content type parmeters not separated with semicolons at "%s"' % ctparms[i:])
                parmlist.append( (pname, pval) )
            if i < len(ctparms):
                raise ValueError('Syntax error in content type parmeters')
        return (ctmaj, ctmin, parmlist)
    def is_universal_wildcard(self):
        """True when this type is the full wildcard '*/*'."""
        return self.major == '*' and self.minor == '*'
def acceptable_content_type( accept_header, content_types, ignore_wildcard=True ):
    """Determines if the given content type is acceptable to the user agent.

    The accept_header should be the value present in the HTTP "Accept:"
    header, or the list returned from parse_accept_header().  The
    content_types argument is a single MIME media type string or a
    sequence of them — the set the caller (server) is willing to send.

    Returns (content_type, accept_parms) for the negotiated type, or
    None when nothing is acceptable (caller should typically send HTTP
    406).  The wildcard "*/*" is ignored unless ignore_wildcard=False;
    partial wildcards like "image/*" are processed at lower priority
    than an exact match.

    See also: RFC 2616 section 14.1, and
    <http://www.iana.org/assignments/media-types/>

    NOTE(review): this revision calls a module-level parse_media_type()
    which is not defined anywhere in this module (parsing lives in
    content_type._parse_media_type), so this function appears broken as
    written; confirm against a newer revision before use.
    """
    if isinstance(accept_header,str) or isinstance(accept_header,unicode):
        accept_list = parse_accept_header(accept_header)
    else:
        accept_list = accept_header
    if isinstance(content_types,str) or isinstance(content_types,unicode):
        content_types = [content_types]
    server_ctlist = [parse_media_type(ct) for ct in content_types]
    best = None
    for ct, qvalue, aargs in accept_list:
        try:
            # The content type is like "major/minor;parms...", parse it apart.
            ctmaj, ctmin, ctparms = parse_media_type(ct)
        except:
            continue # content type is malformed, skip it
        if ignore_wildcard and ctmaj=='*' and ctmin=='*':
            continue # */* being ignored
        for server_ct in server_ctlist:
            test_ctmaj, test_ctmin, test_ctparms = server_ct
            # The best match is determined first by the quality factor,
            # and then by the most specific match.
            # NOTE(review): leftover debug print (Python 2 statement form).
            print "comparing", server_ct
            matchlen = 0 # how specifically this one matches (0 is a non-match)
            if ctmaj == '*' and ctmin == '*':
                matchlen = 1 # */* is a 1
            elif ctmaj == test_ctmaj:
                if ctmin == '*': # something/* is a 2
                    matchlen = 2
                elif ctmin == test_ctmin: # something/something is a 3
                    matchlen = 3
                    if ctparms: # must make sure all the parms match too
                        for pname, pval in ctparms.items():
                            if test_ctparms.get(pname) == pval:
                                matchlen = matchlen + 1
                            else:
                                matchlen = 0
                                break
            else:
                matchlen = 0
            if matchlen:
                if not best \
                       or matchlen > best[3] \
                       or (matchlen == best[3] and qvalue > best[1]):
                    # This match is better
                    best = (ct, qvalue, aargs, matchlen)
    # qvalue 0 means explicitly not acceptable.
    if not best or best[1] <= 0:
        return None
    return (best[0], best[2])
def _canonical_charset( charset ):
return charset.upper()
def acceptable_charset( accept_charset_header, charsets, ignore_wildcard=True, default='ISO-8859-1' ):
    """
    Determines if the given charset is acceptable to the user agent.

    The accept_charset_header should be the value present in the HTTP
    "Accept-Charset:" header, or the list returned from
    parse_accept_header().  The charsets argument is a charset
    identifier string or a sequence of them.

    Returns the most preferred charset acceptable to both sides, or the
    default value if no charset is negotiable.  The wildcard "*" is
    ignored unless ignore_wildcard=False.

    See also: RFC 2616 section 14.2, and
    <http://www.iana.org/assignments/character-sets>
    """
    if default:
        default = _canonical_charset(default)
    if type(accept_charset_header) is type('') or type(accept_charset_header) is type(u''):
        accept_list = parse_accept_header(accept_charset_header)
    else:
        accept_list = accept_charset_header
    if type(charsets) is type('') or type(charsets) is type(u''):
        charsets = [_canonical_charset(charsets)]
    else:
        charsets = [_canonical_charset(c) for c in charsets]
    # Note per RFC that 'ISO-8859-1' is special, and is implictly in the
    # accept list with q=1; unless it is already in the list, or '*' is in the list.
    best = None
    for c, qvalue, junk in accept_list:
        if c == '*':
            # A '*' entry disables the implicit default.
            default = None
            if ignore_wildcard:
                continue
            if not best or qvalue > best[1]:
                best = (c, qvalue)
        else:
            c = _canonical_charset(c)
            for test_c in charsets:
                # Seeing the default explicitly also disables the implicit one.
                if c == default:
                    default = None
                if c == test_c and (not best or best[0]=='*' or qvalue > best[1]):
                    best = (c, qvalue)
    # Apply the implicit q=1 default if it survived and is offered.
    if default and default in [test_c.upper() for test_c in charsets]:
        best = (default, 1)
    # NOTE(review): if nothing matched and the default was eliminated,
    # best is still None here and best[0] raises TypeError — confirm
    # intended behavior (docstring promises the default is returned).
    if best[0] == '*':
        best = (charsets[0], best[1])
    return best
class language_tag:
    """This class represents an RFC 3066 language tag.

    Initialize objects of this class with a single string representing
    the language tag, such as "en-US".

    Case is insensitive. Wildcarded subtags are ignored or stripped as
    they have no significance, so that "en-*" is the same as "en".
    However the universal wildcard "*" language tag is kept as-is.

    Note that although relational operators such as < are defined,
    they only form a partial order based upon specialization.

    Thus for example,
         "en" <= "en-US"
    but,
         not "en" <= "de", and
         not "de" <= "en".
    """
    def __init__(self, tagname):
        """Initialize objects of this class with a single string representing
        the language tag, such as "en-US".  Case is insensitive.
        """
        self.parts = tagname.lower().split('-')
        # Trailing wildcard subtags carry no information: "en-*" == "en".
        while len(self.parts) > 1 and self.parts[-1] == '*':
            del self.parts[-1]
    def __len__(self):
        """Number of subtags in this tag."""
        if len(self.parts) == 1 and self.parts[0] == '*':
            return 0
        return len(self.parts)
    def __str__(self):
        """The standard string form of this language tag."""
        a = []
        if len(self.parts) >= 1:
            a.append(self.parts[0])
        if len(self.parts) >= 2:
            # Two-letter second subtags are country codes: upper-case them.
            if len(self.parts[1]) == 2:
                a.append( self.parts[1].upper() )
            else:
                a.append( self.parts[1] )
        a.extend( self.parts[2:] )
        return '-'.join(a)
    def __unicode__(self):
        """The unicode string form of this language tag (Python 2 only)."""
        return unicode(self.__str__())
    def __repr__(self):
        """The python representation of this language tag."""
        s = '%s("%s")' % (self.__class__.__name__, self.__str__())
        return s
    def superior(self):
        """Returns another instance of language_tag which is the superior.

        Thus en-US gives en, and en gives *.
        """
        if len(self) <= 1:
            return self.__class__('*')
        return self.__class__( '-'.join(self.parts[:-1]) )
    def all_superiors(self, include_wildcard=False):
        """Returns a list of this language and all it's superiors.

        If include_wildcard is False, then "*" will not be among the
        output list, unless this language is itself "*".
        """
        langlist = [ self ]
        l = self
        while not l.is_universal_wildcard():
            l = l.superior()
            if l.is_universal_wildcard() and not include_wildcard:
                continue
            langlist.append(l)
        return langlist
    def is_universal_wildcard(self):
        """Returns True if this language tag represents all possible
        languages, by using the reserved tag of "*".
        """
        return len(self.parts) == 1 and self.parts[0] == '*'
    def dialect_of(self, other, ignore_wildcard=True):
        """Is this language a dialect (or subset/specialization) of another.

        This method returns True if this language is the same as or a
        specialization (dialect) of the other language_tag.

        If ignore_wildcard is False, then all languages will be
        considered to be a dialect of the special language tag of "*".
        """
        if not ignore_wildcard and self.is_universal_wildcard():
            return True
        for i in range( min(len(self), len(other)) ):
            if self.parts[i] != other.parts[i]:
                return False
        if len(self) >= len(other):
            return True
        return False
    def __eq__(self, other):
        """== operator. Are the two languages the same?"""
        return self.parts == other.parts
    def __ne__(self, other):
        """!= operator. Are the two languages different?

        Fix: this was previously named __neq__, which is not a Python
        special method, so != never invoked it.
        """
        return not self.__eq__(other)
    # Backwards-compatible alias for callers of the old (misspelled) name.
    __neq__ = __ne__
    def __lt__(self, other):
        """< operator. Returns True if the other language is a more
        specialized dialect of this one."""
        return other.dialect_of(self) and self != other
    def __le__(self, other):
        """<= operator. Returns True if the other language is the same
        as or a more specialized dialect of this one."""
        return other.dialect_of(self)
    def __gt__(self, other):
        """> operator. Returns True if this language is a more
        specialized dialect of the other one."""
        return self.dialect_of(other) and self != other
    def __ge__(self, other):
        """>= operator. Returns True if this language is the same as
        or a more specialized dialect of the other one."""
        return self.dialect_of(other)
def acceptable_language( accept_language, languages, ignore_wildcard=True, assume_superiors=True ):
    """Determines if the given language is acceptable to the user agent.

    The accept_language should be the value present in the HTTP
    "Accept-Language:" header, or the list resulting from the
    parse_accept_header() function defined in this module.

    The languages argument should either be a single language string, a
    language_tag object, or a sequence of them.  It represents the set
    of languages that the caller is willing to send.

    The wildcarded language tag "*" will be ignored; to override this,
    call with ignore_wildcard=False, and even then it will be the
    lowest-priority choice regardless of its quality factor (per the
    HTTP spec).

    If assume_superiors is True then the languages the browser accepts
    will automatically include all superior languages, each added with
    one half the qvalue of the language which is present (so "en-US" is
    treated as "en-US, en;q=0.5").  Setting it to False insures strict
    adherence to HTTP 1.1: accepting "en-US" then does NOT make plain
    "en" acceptable.

    Returns the language which is the most preferred and is acceptable
    to both the user agent and the caller, as a language_tag instance,
    or None if no language is negotiable.

    See also: RFC 3066 <http://www.ietf.org/rfc/rfc3066.txt>, and
    ISO 639, links at <http://en.wikipedia.org/wiki/ISO_639>, and
    <http://www.iana.org/assignments/language-tags>.
    """
    # Note special instructions from RFC 2616 sect. 14.1:
    #   "The language quality factor assigned to a language-tag by the
    #   Accept-Language field is the quality value of the longest
    #   language- range in the field that matches the language-tag."
    if isinstance(accept_language,str) or isinstance(accept_language,unicode):
        accept_list = parse_accept_header(accept_language)
    else:
        # Fix: this previously read the undefined name `accept_header`,
        # raising NameError whenever a pre-parsed list was passed in.
        accept_list = accept_language
    # Possibly add in any "missing" languages that the browser may
    # have forgotten to include in the list. Insure list is sorted so
    # more general languages come before more specific ones.
    accept_list.sort()
    all_tags = [a[0] for a in accept_list]
    if assume_superiors:
        to_add = []
        for lang, qvalue, aargs in accept_list:
            try:
                langtag = language_tag(lang)
            except:
                continue
            if len(langtag) >= 2:
                for sup in langtag.all_superiors( include_wildcard=False ):
                    suptag = str(sup)
                    if suptag not in all_tags:
                        # Fix: divide by 2.0 — under Python 2 integer
                        # division turned an integral q=1 into q=0,
                        # which marks the superior as NOT acceptable.
                        to_add.append( (suptag, qvalue / 2.0, '') )
                        all_tags.append( suptag )
        accept_list.extend( to_add )
    if isinstance(languages,str) or isinstance(languages,unicode):
        server_languages = [language_tag(languages)]
    else:
        server_languages = [language_tag(lang) for lang in languages]
    best = None # tuple (langtag, qvalue, matchlen)
    for lang, qvalue, aargs in accept_list:
        # aargs is ignored for Accept-Language
        if qvalue <= 0:
            continue # UA doesn't accept this language
        try:
            langtag = language_tag(lang)
        except:
            continue # language tag is malformed, skip it
        if ignore_wildcard and langtag.is_universal_wildcard():
            continue # "*" being ignored
        for svrlang in server_languages:
            # The best match is determined first by the quality factor,
            # and then by the most specific match.
            matchlen = -1 # how specifically this one matches (0 is a non-match)
            if svrlang.dialect_of( langtag, ignore_wildcard=ignore_wildcard ):
                matchlen = len(langtag)
                if not best \
                       or matchlen > best[2] \
                       or (matchlen == best[2] and qvalue > best[1]):
                    # This match is better
                    best = (langtag, qvalue, matchlen)
    if not best:
        return None
    return best[0]
Import version 0.2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Utility functions to work with Accept-* headers as defined by HTTP 1.1.
This module provides some utility functions useful for writing
websites which want to deal with some of the HTTP protocol headers;
especially the correct interpretation of the various Accept-* style
headers, content negotiation, and so forth.
There are a few classes defined by this module:
* class content_type -- media types such as 'text/plain'
* class language_tag -- language tags such as 'en-US'
The primary functions this modules may be categorized as follows:
* Content negotiation functions...
* acceptable_content_type()
* acceptable_language()
* acceptable_charset()
* acceptable_encoding()
* Low-level string parsing functions...
* parse_accept_header()
* parse_comment()
* parse_token_or_quoted_string()
See also:
* RFC 2046, "(MIME) Part Two: Media Types", November 1996.
<http://www.ietf.org/rfc/rfc2046.txt>
* RFC 2616, "Hypertext Transfer Protocol -- HTTP/1.1", June 1999.
<http://www.ietf.org/rfc/rfc2616.txt>
* RFC 3066, "Tags for the Identification of Languages", January 2001.
<http://www.ietf.org/rfc/rfc3066.txt>
"""
__author__ = """Deron Meranda <http://deron.meranda.us/>"""
__date__ = "2005-12-19"
__version__ = "1.0"
__credits__ = """Copyright (c) 2005 Deron E. Meranda <http://deron.meranda.us/>
Licensed under GNU LGPL 2.1 or later. See <http://www.fsf.org/>.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
# Character classes from RFC 2616 section 2.2
separators = '()<>@,;:\\"/[]?={} \t'
LWS = ' \t\n\r'  # linear white space
CRLF = '\r\n'
DIGIT = '0123456789'
HEX = '0123456789ABCDEFabcdef'
try:
    # Turn into set types (for Python 2.4 or greater)
    separators = frozenset([c for c in separators])
    LWS = frozenset([c for c in LWS])
    CRLF = frozenset([c for c in CRLF])
    DIGIT = frozenset([c for c in DIGIT])
    HEX = frozenset([c for c in HEX])
    # `del c` relies on the Python 2 behavior of list comprehensions
    # leaking their loop variable; under Python 3 it raises NameError,
    # which the handler below conveniently absorbs after the frozensets
    # have already been assigned.
    del c
except NameError:
    # Python 2.3 or earlier, leave as simple strings
    pass
def _is_string( obj ):
"""Returns True if the object is a string or unicode type."""
return isinstance(obj,str) or isinstance(obj,unicode)
def is_token(s):
    """Determines if the string is a legal RFC 2616 token.

    A token consists of CHARs (octets 0-127) excluding controls (0-31
    and DEL, 127) and the separator characters.  Note: an empty string
    yields True here, although the RFC grammar requires at least one
    character — callers relying on that should check len(s) themselves.
    """
    for c in s:
        # Fix: RFC 2616 CHAR is 0-127 and CTL includes DEL (127); the
        # previous bound (> 128) wrongly admitted chr(127) and chr(128).
        if ord(c) < 32 or ord(c) > 126 or c in separators:
            return False
    return True
def parse_token_or_quoted_string(s, start=0, allow_quoted=True, allow_token=True):
    """Parses a token or a quoted-string.

    's' is the string to parse, while start is the position within the
    string where parsing should begin.  Returns a tuple
    (token, chars_consumed), with all backslash-escapes and quotation
    already processed.

    Syntax is according to BNF rules in RFC 2616 section 2.2,
    specifically the 'token' and 'quoted-string' declarations.
    Syntax errors in the input string result in ValueError being raised
    (and IndexError when 'start' is past the end of the string).

    If allow_quoted is False, then only tokens will be parsed instead
    of either a token or quoted-string.

    If allow_token is False, then only quoted-strings will be parsed
    instead of either a token or quoted-string.
    """
    if not allow_quoted and not allow_token:
        raise ValueError('Parsing can not continue with options provided')
    if start >= len(s):
        # Fix: these error objects were previously *returned* instead of
        # raised, handing callers an exception instance as the result.
        raise IndexError('Starting position is beyond the end of the string')
    has_quote = (s[start] == '"')
    if has_quote and not allow_quoted:
        raise ValueError('A quoted string was not expected')
    if not has_quote and not allow_token:
        raise ValueError('Expected a quotation mark')
    s2 = ''
    pos = start
    if has_quote:
        pos += 1
    while pos < len(s):
        c = s[pos]
        if c == '\\' and has_quote:
            # Note this is not C-style escaping; the character after the \ is
            # taken literally.
            pos += 1
            if pos == len(s):
                raise ValueError("End of string while expecting a character after '\\'")
            s2 += s[pos]
            pos += 1
        elif c == '"' and has_quote:
            break
        elif not has_quote and (c in separators or ord(c)<32 or ord(c)>127):
            # Fix: referenced self.separators, but this is a module-level
            # function with no `self`; use the module-level character class.
            break
        else:
            s2 += c
            pos += 1
    if has_quote:
        # Make sure we have a closing quote mark
        if pos >= len(s) or s[pos] != '"':
            raise ValueError('Quoted string is missing closing quote mark')
        else:
            pos += 1
    return s2, (pos - start)
def parse_comment(s, start=0):
    """Parses a ()-style comment from a header value.

    Returns tuple (comment, chars_consumed), where the comment will
    have had the outer-most parentheses and white space stripped.  Any
    nested comments will still have their parentheses and whitespace
    left intact.

    All \\-escaped quoted pairs will have been replaced with the actual
    characters they represent, even within the inner nested comments.

    You should note that only a few HTTP headers, such as User-Agent
    or Via, allow ()-style comments within the header value.

    Raises ValueError on syntax errors, and IndexError if 'start' is
    past the end of the string.
    """
    if start >= len(s):
        # Was 'return IndexError(...)', which returned the exception object
        # instead of raising it.
        raise IndexError('Starting position is beyond the end of the string')
    if s[start] != '(':
        raise ValueError('Comment must begin with opening parenthesis')
    s2 = ''
    nestlevel = 1
    pos = start + 1
    # Skip leading linear white space just inside the opening paren.
    while pos < len(s) and s[pos] in LWS:
        pos += 1
    while pos < len(s):
        c = s[pos]
        if c == '\\':
            # Note this is not C-style escaping; the character after the \ is
            # taken literally.
            pos += 1
            if pos == len(s):
                raise ValueError("End of string while expecting a character after '\\'")
            s2 += s[pos]
            pos += 1
        elif c == '(':
            nestlevel += 1
            s2 += c
            pos += 1
        elif c == ')':
            nestlevel -= 1
            if nestlevel >= 1:
                s2 += c
                pos += 1
            else:
                # NOTE(review): the final ')' is NOT consumed here, so
                # chars_consumed excludes the closing parenthesis and a
                # caller resuming at start+chars_consumed lands on it --
                # confirm whether that is the intended contract.
                break
        else:
            s2 += c
            pos += 1
    if nestlevel > 0:
        raise ValueError('End of string reached before comment was closed')
    # Now rstrip s2 of all LWS chars.
    while len(s2) and s2[-1] in LWS:
        s2 = s2[:-1]
    return s2, (pos - start)
def _split_at_qfactor( s ):
"""Splits a string at the quality factor (;q=) parameter.
Returns the left and right substrings as a two-member tuple.
"""
# It may be faster, but incorrect, to use s.split(';q=',1), since
# HTTP allows any amount of linear white space (LWS) to appear
# between the parts, so it could also be "; q = ".
# We do this parsing 'manually' for speed rather than using a
# regex, which would be r';[ \t\r\n]*q[ \t\r\n]*=[ \t\r\n]*'
pos = 0
while 0 <= pos < len(s):
pos = s.find(';', pos)
if pos < 0:
break # no more parameters
startpos = pos
pos = pos + 1
while pos < len(s) and s[pos] in LWS:
pos = pos + 1
if pos < len(s) and s[pos] == 'q':
pos = pos + 1
while pos < len(s) and s[pos] in LWS:
pos = pos + 1
if pos < len(s) and s[pos] == '=':
pos = pos + 1
while pos < len(s) and s[pos] in LWS:
pos = pos + 1
return ( s[:startpos], s[pos:] )
return (s, '')
def parse_accept_header(header_value):
    """Parses the value of an HTTP Accept-*: style header with quality factors.

    The value of the header as a string should be passed in, without the
    header name itself.  Handles "Accept", "Accept-Charset",
    "Accept-Encoding" and "Accept-Language" style values: a
    comma-separated list of items, each with an optional quality factor
    (qvalue) in the range [0.0..1.0] indicating relative preference.

    Returns a list of (item_name, qvalue, accept_ext) tuples, sorted from
    most-preferred to least-preferred.  (The original docstring described
    a 4-tuple with separately parsed item parameters; the code has never
    produced that -- any ";attr=val" text before "q=" stays attached to
    item_name, and any after it is returned verbatim in accept_ext.)

    All empty list items are removed.  qvalues outside [0, 1] are capped
    to the closest extreme, and a malformed qvalue is treated as the low
    quality 0.1.  (!) Note that a qvalue of 0 indicates the item is
    explicitly NOT acceptable to the user agent, and should be handled
    specially by the caller.  Duplicate or conflicting values are not
    detected or handled in any way by this function.
    """
    accept_list = []
    # itemnum is used to insure a stable-sort later. Could use enumerate(),
    # but we want to preserve Python 2.2 compatibility.
    itemnum = 0
    for item, qvalue in [_split_at_qfactor(v.strip()) for v in header_value.split(',')]:
        if not item:
            # An empty list item, e.g. "a,,b".  (The original tested
            # 'item[0]', which raised IndexError on an empty item instead
            # of skipping it as documented.)
            continue
        if not qvalue:
            qvalue, accept_ext = 1.0, ''  # float, consistent with parsed qvalues
        else:
            if ';' in qvalue:
                qvalue, accept_ext = qvalue.split(';', 1)
            else:
                accept_ext = ''
            try:
                qvalue = float(qvalue.strip())
            except ValueError:
                qvalue = 0.1  # Mangled q-value, assume low quality
            if qvalue < 0:  # Insure in range 0 to 1.
                qvalue = 0
            elif qvalue > 1:
                qvalue = 1
        accept_list.append((qvalue, itemnum, item, accept_ext))
        itemnum = itemnum + 1
    accept_list.sort()
    accept_list.reverse()
    # Reformat the tuples in the list, so the name comes first. We also
    # discard the itemnum ordinal, since it was only there to enforce
    # a stable sort.
    accept_list = [(x[2], x[0], x[3]) for x in accept_list]
    return accept_list
class content_type(object):
    """This class represents a media type (aka a MIME content type).

    You initialize these by passing in a content-type declaration string,
    such as "text/plain", to the constructor or to the set() method.

    Normally you will get the value by using str(), or optionally you
    can access the components via the 'major', 'minor', and 'parmdict'
    members.
    """
    def __init__(self, content_type_string=None):
        """Initialize from a media type string, defaulting to '*/*'."""
        if content_type_string:
            self.set( content_type_string )
        else:
            self.set( '*/*' )

    def set(self, content_type_string):
        """Parses the content type string and sets this object to it's value."""
        major, minor, pdict = self._parse_media_type( content_type_string )
        self._set_major( major )
        self._set_minor( minor )
        # _parse_media_type() returns a list of (name, value) pairs.
        # Convert to a dict: __str__() and callers iterate
        # self.parmdict.items().  (The original stored the list directly,
        # so str() of any media type with parameters raised
        # AttributeError.)
        self.parmdict = dict(pdict)

    def _get_major(self):
        return self._major

    def _set_major(self, s):
        s = s.lower()  # case-insensitive
        if not is_token(s):
            raise ValueError('Major media type contains an illegal character')
        self._major = s

    def _get_minor(self):
        return self._minor

    def _set_minor(self, s):
        s = s.lower()  # case-insensitive
        if not is_token(s):
            raise ValueError('Minor media type contains an illegal character')
        self._minor = s

    major = property(_get_major,_set_major,doc="Major media classification")
    minor = property(_get_minor,_set_minor,doc="Minor media sub-classification")

    def __str__(self):
        """String value."""
        s = '%s/%s' % (self.major, self.minor)
        if self.parmdict:
            extra = '; '.join([ '%s=%s' % (a[0],self._quote(a[1])) \
                                for a in self.parmdict.items()])
            s += '; ' + extra
        return s

    def __unicode__(self):
        """Unicode string value (Python 2 only)."""
        return unicode(self.__str__())

    def __repr__(self):
        """Python representation of this object."""
        s = '%s(%s)' % (self.__class__.__name__, repr(self.__str__()))
        return s

    def __hash__(self):
        """Hash this object; the hash is dependent only upon the value."""
        return hash(str(self))

    def __getstate__(self):
        """Pickler"""
        return str(self)

    def __setstate__(self, state):
        """Unpickler"""
        self.set(state)

    def __len__(self):
        """Logical length of this media type.
        For example:
           len('*/*') -> 0
           len('image/*') -> 1
           len('image/png') -> 2
           len('text/plain; charset=utf-8') -> 3
           len('text/plain; charset=utf-8; filename=xyz.txt') -> 4
        """
        if self.major == '*':
            return 0
        elif self.minor == '*':
            return 1
        else:
            return 2 + len(self.parmdict)

    def __eq__(self, other):
        """Equality test."""
        return self.major == other.major and \
               self.minor == other.minor and \
               self.parmdict == other.parmdict

    def __ne__(self, other):
        """Inequality test."""
        return not self.__eq__(other)

    def _quote(self, val):
        """Produces a token, or a quoted string if necessary, from the input string value.
        """
        need_quotes = False
        s = ''
        for c in val:
            # 'separators' is the module-level separator set; the original
            # referenced the nonexistent attribute self.separators, which
            # raised AttributeError for any parameterized media type.
            if c in separators or ord(c) < 32 or ord(c) > 127:
                need_quotes = True
                s += "\\%s" % c
            else:
                s += c
        if need_quotes:
            s = '"%s"' % s
        return s

    def _parse_media_type(self, media_type):
        """Parses a media type (MIME type) designator into it's parts.

        Given a media type string, returns a tuple of it's parts
        (major, minor, parmlist), where parmlist is a list of
        (attribute, value) pairs.

        Examples:
            image/png -> ('image','png',[])
            text/plain; charset="utf-16be"
                      -> ('text','plain',[('charset','utf-16be')])
        """
        ctmaj, ctmin = media_type.split('/', 1)
        parmlist = []
        if ';' in ctmin:
            ctmin, ctparms = ctmin.split(';', 1)
            i = 0
            while i < len(ctparms):
                # Skip LWS, then read the attribute name (a token).
                while i < len(ctparms) and ctparms[i] in LWS:
                    i += 1
                pname, i = parse_token_or_quoted_string( ctparms, start=i, allow_quoted=False )
                while i < len(ctparms) and ctparms[i] in LWS:
                    i += 1
                if i < len(ctparms) and ctparms[i] == '=':
                    # "attr=value"; the value may be a token or quoted-string.
                    i += 1
                    while i < len(ctparms) and ctparms[i] in LWS:
                        i += 1
                    pval, i = parse_token_or_quoted_string( ctparms, start=i, allow_quoted=True )
                else:
                    # Bare attribute with no '=value' part.
                    pval = ''
                while i < len(ctparms) and ctparms[i] in LWS:
                    i += 1
                if i < len(ctparms):
                    if ctparms[i] == ';':
                        i += 1
                    else:
                        raise ValueError('Content type parmeters not separated with semicolons at "%s"' % ctparms[i:])
                parmlist.append( (pname, pval) )
            if i < len(ctparms):
                raise ValueError('Syntax error in content type parmeters')
        return (ctmaj, ctmin, parmlist)

    def media_type(self):
        """Returns the media 'type/subtype' string, without parameters."""
        return '%s/%s' % (self.major, self.minor)

    def is_wildcard(self):
        """Returns True if this is a 'something/*' media type.
        """
        return self.minor == '*'

    def is_universal_wildcard(self):
        """Returns True if this is the unspecified '*/*' media type.
        """
        return self.major == '*' and self.minor == '*'

    def is_composite(self):
        """Is this media type composed of multiple parts.
        """
        return self.major == 'multipart' or self.major == 'message'

    def is_xml(self):
        """Returns True if this media type is XML-based.

        Note this does not consider text/html to be XML, but
        application/xhtml+xml is.
        """
        return self.minor == 'xml' or self.minor.endswith('+xml')
# Some common media types
# Pre-built content_type instances for frequently used media types, so
# callers can compare against these without re-parsing the strings.
enctype_formdata = content_type('multipart/form-data')
enctype_urlencoded = content_type('application/x-www-form-urlencoded')
octet_stream = content_type('application/octet-stream')
html = content_type('text/html')
xhtml = content_type('application/xhtml+xml')
def acceptable_content_type( accept_header, content_types, ignore_wildcard=True ):
    """Determines if the given content type is acceptable to the user agent.

    The accept_header should be the value present in the HTTP "Accept:"
    header, or, optionally, the list returned from the
    parse_accept_header() function in this module.

    The content_types argument should either be a single MIME media type
    string, or a sequence of them.  It represents the set of content
    types that the caller (server) is willing to send.

    This function negotiates the most preferred content type acceptable
    to both sides and returns a tuple (content_type, accept_parms); in
    most cases accept_parms will be an empty string (see
    parse_accept_header() for details).  If no content type could be
    negotiated it returns None (and the caller should typically respond
    with HTTP 406 Not Acceptable).

    Note that the wildcarded content type "*/*" will be ignored, since
    it is often incorrectly sent by web browsers that don't really mean
    it.  To override this, call with ignore_wildcard=False.  Partial
    wildcards such as "image/*" will always be processed, but at a lower
    priority than a complete matching type.

    See also: RFC 2616 section 14.1, and
    <http://www.iana.org/assignments/media-types/>
    """
    if _is_string(accept_header):
        accept_list = parse_accept_header(accept_header)
    else:
        accept_list = accept_header
    if _is_string(content_types):
        content_types = [content_types]

    best = None   # tuple (content_type_string, qvalue, accept_parms, matchlen)
    for ct, qvalue, aargs in accept_list:
        try:
            # The content type is like "major/minor;parms..."; parse it
            # apart.  (The original called a nonexistent module-level
            # parse_media_type(), whose NameError was swallowed by the
            # bare except, silently skipping EVERY entry.)
            parsed = content_type(ct)
        except Exception:
            continue   # content type is malformed, skip it
        ctmaj, ctmin = parsed.major, parsed.minor
        # dict() accepts either a dict or a list of (name, value) pairs,
        # so this works regardless of the internal type of parmdict.
        ctparms = dict(parsed.parmdict)
        if ignore_wildcard and ctmaj == '*' and ctmin == '*':
            continue   # */* being ignored
        for server_ct_string in content_types:
            server_ct = content_type(server_ct_string)
            # (The original tuple-unpacked the content_type object itself,
            # which fails; read the attributes instead.)
            test_ctmaj, test_ctmin = server_ct.major, server_ct.minor
            test_ctparms = dict(server_ct.parmdict)
            # The best match is determined first by the quality factor,
            # and then by the most specific match.
            matchlen = 0   # how specifically this one matches (0 is a non-match)
            if ctmaj == '*' and ctmin == '*':
                matchlen = 1   # */* is a 1
            elif ctmaj == test_ctmaj:
                if ctmin == '*':   # something/* is a 2
                    matchlen = 2
                elif ctmin == test_ctmin:   # something/something is a 3
                    matchlen = 3
                    if ctparms:   # must make sure all the parms match too
                        for pname, pval in ctparms.items():
                            if test_ctparms.get(pname) == pval:
                                matchlen = matchlen + 1
                            else:
                                matchlen = 0
                                break
                else:
                    matchlen = 0
            if matchlen:
                if not best \
                   or matchlen > best[3] \
                   or (matchlen == best[3] and qvalue > best[1]):
                    # This match is better
                    best = (ct, qvalue, aargs, matchlen)
    if not best or best[1] <= 0:
        return None
    return (best[0], best[2])
def _canonical_charset( charset ):
return charset.upper()
def acceptable_charset( accept_charset_header, charsets, ignore_wildcard=True, default='ISO-8859-1' ):
    """Determines if the given charset is acceptable to the user agent.

    The accept_charset_header should be the value present in the HTTP
    "Accept-Charset:" header, or, optionally, the list returned from the
    parse_accept_header() function in this module.

    The charsets argument should either be a charset identifier string,
    or a sequence of them: the set the caller is willing to send.

    Returns a (charset, qvalue) tuple for the most preferred mutually
    acceptable charset, falling back to the default if that is still
    acceptable, or None if no charset is negotiable.

    Note that the wildcarded charset "*" will be ignored.  To override
    this, call with ignore_wildcard=False.

    See also: RFC 2616 section 14.2, and
    <http://www.iana.org/assignments/character-sets>
    """
    if default:
        default = _canonical_charset(default)
    if _is_string(accept_charset_header):
        accept_list = parse_accept_header(accept_charset_header)
    else:
        accept_list = accept_charset_header
    if _is_string(charsets):
        charsets = [_canonical_charset(charsets)]
    else:
        charsets = [_canonical_charset(c) for c in charsets]
    # Note per RFC that 'ISO-8859-1' is special, and is implicitly in the
    # accept list with q=1; unless it is already in the list, or '*' is in the list.
    best = None
    for c, qvalue, junk in accept_list:
        if c == '*':
            default = None
            if ignore_wildcard:
                continue
            if not best or qvalue > best[1]:
                best = (c, qvalue)
        else:
            c = _canonical_charset(c)
            if c == default:
                # The UA listed the default explicitly, so don't add it
                # implicitly later.  (Hoisted out of the loop below -- the
                # comparison does not depend on test_c.)
                default = None
            for test_c in charsets:
                if c == test_c and (not best or best[0]=='*' or qvalue > best[1]):
                    best = (c, qvalue)
    if default and default in [test_c.upper() for test_c in charsets]:
        best = (default, 1)
    # Nothing negotiable at all: the original indexed best[0]
    # unconditionally here and raised TypeError when best was None.
    if best is None:
        return None
    if best[0] == '*':
        best = (charsets[0], best[1])
    return best
class language_tag(object):
    """This class represents an RFC 3066 language tag.

    Initialize objects of this class with a single string representing
    the language tag, such as "en-US".

    Case is insensitive. Wildcarded subtags are ignored or stripped as
    they have no significance, so that "en-*" is the same as "en".
    However the universal wildcard "*" language tag is kept as-is.

    Note that although relational operators such as < are defined,
    they only form a partial order based upon specialization.

    Thus for example,
         "en" <= "en-US"
    but,
         not "en" <= "de", and
         not "de" <= "en".
    """

    def __init__(self, tagname):
        """Initialize objects of this class with a single string representing
        the language tag, such as "en-US".  Case is insensitive.
        """
        self.parts = tagname.lower().split('-')
        # Trailing wildcard subtags carry no meaning ("en-*" == "en"),
        # but the universal wildcard "*" itself is preserved.
        while len(self.parts) > 1 and self.parts[-1] == '*':
            del self.parts[-1]

    def __len__(self):
        """Number of subtags in this tag; the universal wildcard "*" has 0."""
        if len(self.parts) == 1 and self.parts[0] == '*':
            return 0
        return len(self.parts)

    def __str__(self):
        """The standard string form of this language tag."""
        a = []
        if len(self.parts) >= 1:
            a.append(self.parts[0])
        if len(self.parts) >= 2:
            # A two-letter second subtag is conventionally a country code
            # and is upper-cased ("en-US"); longer subtags stay lower-case.
            if len(self.parts[1]) == 2:
                a.append( self.parts[1].upper() )
            else:
                a.append( self.parts[1] )
            a.extend( self.parts[2:] )
        return '-'.join(a)

    def __unicode__(self):
        """The unicode string form of this language tag (Python 2 only)."""
        return unicode(self.__str__())

    def __repr__(self):
        """The python representation of this language tag."""
        s = '%s("%s")' % (self.__class__.__name__, self.__str__())
        return s

    def superior(self):
        """Returns another instance of language_tag which is the superior.

        Thus en-US gives en, and en gives *.
        """
        if len(self) <= 1:
            return self.__class__('*')
        return self.__class__( '-'.join(self.parts[:-1]) )

    def all_superiors(self, include_wildcard=False):
        """Returns a list of this language and all it's superiors.

        If include_wildcard is False, then "*" will not be among the
        output list, unless this language is itself "*".
        """
        langlist = [ self ]
        l = self
        while not l.is_universal_wildcard():
            l = l.superior()
            if l.is_universal_wildcard() and not include_wildcard:
                continue
            langlist.append(l)
        return langlist

    def is_universal_wildcard(self):
        """Returns True if this language tag represents all possible
        languages, by using the reserved tag of "*".
        """
        return len(self.parts) == 1 and self.parts[0] == '*'

    def dialect_of(self, other, ignore_wildcard=True):
        """Is this language a dialect (or subset/specialization) of another.

        This method returns True if this language is the same as or a
        specialization (dialect) of the other language_tag.

        If ignore_wildcard is False, then all languages will be
        considered to be a dialect of the special language tag of "*".
        """
        if not ignore_wildcard and self.is_universal_wildcard():
            return True
        for i in range( min(len(self), len(other)) ):
            if self.parts[i] != other.parts[i]:
                return False
        if len(self) >= len(other):
            return True
        return False

    def __eq__(self, other):
        """== operator. Are the two languages the same?"""
        return self.parts == other.parts

    def __ne__(self, other):
        """!= operator. Are the two languages different?

        (The original misspelled this as __neq__, a name Python never
        invokes, so != silently fell back to the default comparison.)
        """
        return not self.__eq__(other)

    def __lt__(self, other):
        """< operator. Returns True if the other language is a more
        specialized dialect of this one."""
        return other.dialect_of(self) and self != other

    def __le__(self, other):
        """<= operator. Returns True if the other language is the same
        as or a more specialized dialect of this one."""
        return other.dialect_of(self)

    def __gt__(self, other):
        """> operator. Returns True if this language is a more
        specialized dialect of the other one."""
        return self.dialect_of(other) and self != other

    def __ge__(self, other):
        """>= operator. Returns True if this language is the same as
        or a more specialized dialect of the other one."""
        return self.dialect_of(other)
def acceptable_language( accept_language, languages, ignore_wildcard=True, assume_superiors=True ):
    """Determines if the given language is acceptable to the user agent.

    The accept_language argument should be the value present in the HTTP
    "Accept-Language:" header, or, optionally, the list resulting from
    the parse_accept_header() function defined in this module.

    The languages argument should either be a single language string, a
    language_tag object, or a sequence of them.  It represents the set
    of languages that the caller is willing to send.

    Note that the wildcarded language tag "*" will be ignored.  To
    override this, call with ignore_wildcard=False, and even then it
    will be the lowest-priority choice regardless of it's quality factor
    (as per HTTP spec).

    If assume_superiors is True then the languages that the browser
    accepts will automatically include all superior languages, each
    added with half the qvalue of the language that is present; e.g.
    "en-US" is treated as "en-US, en;q=0.5".  Setting it to False
    insures strict adherence to the HTTP 1.1 spec, in which case a
    browser accepting "en-US" may not be sent just "en".

    This function returns the language which is the most prefered and is
    acceptable to both the user agent and the caller.  It will return
    None if no language is negotiable, otherwise the return value is
    always an instance of language_tag.

    See also: RFC 3066 <http://www.ietf.org/rfc/rfc3066.txt>, and
    ISO 639, links at <http://en.wikipedia.org/wiki/ISO_639>, and
    <http://www.iana.org/assignments/language-tags>.
    """
    # Note special instructions from RFC 2616 sect. 14.1:
    #   "The language quality factor assigned to a language-tag by the
    #   Accept-Language field is the quality value of the longest
    #   language- range in the field that matches the language-tag."
    if _is_string(accept_language):
        accept_list = parse_accept_header(accept_language)
    else:
        # (The original assigned from the undefined name 'accept_header'
        # here, raising NameError whenever a pre-parsed list was passed.)
        accept_list = accept_language

    # Possibly add in any "missing" languages that the browser may
    # have forgotten to include in the list. Insure list is sorted so
    # more general languages come before more specific ones.
    accept_list.sort()
    all_tags = [a[0] for a in accept_list]
    if assume_superiors:
        to_add = []
        for lang, qvalue, aargs in accept_list:
            try:
                langtag = language_tag(lang)
            except Exception:
                continue
            if len(langtag) >= 2:
                for sup in langtag.all_superiors( include_wildcard=False ):
                    suptag = str(sup)
                    if suptag not in all_tags:
                        # Use true division: under Python 2, 'qvalue / 2'
                        # with an integer qvalue of 1 truncated to 0,
                        # silently discarding the superior language.
                        to_add.append( (suptag, qvalue / 2.0, '') )
                        all_tags.append( suptag )
        accept_list.extend( to_add )

    if _is_string(languages):
        server_languages = [language_tag(languages)]
    else:
        server_languages = [language_tag(lang) for lang in languages]

    best = None  # tuple (langtag, qvalue, matchlen)
    for lang, qvalue, aargs in accept_list:
        # aargs is ignored for Accept-Language
        if qvalue <= 0:
            continue  # UA doesn't accept this language
        try:
            langtag = language_tag(lang)
        except Exception:
            continue  # language tag is malformed, skip it
        if ignore_wildcard and langtag.is_universal_wildcard():
            continue  # "*" being ignored
        for svrlang in server_languages:
            # The best match is determined first by the quality factor,
            # and then by the most specific match.
            matchlen = -1  # how specifically this one matches (0 is a non-match)
            if svrlang.dialect_of( langtag, ignore_wildcard=ignore_wildcard ):
                matchlen = len(langtag)
                if not best \
                   or matchlen > best[2] \
                   or (matchlen == best[2] and qvalue > best[1]):
                    # This match is better
                    best = (langtag, qvalue, matchlen)
    if not best:
        return None
    return best[0]
|
import sys
from approver.models import *
from approver.signals.bridge.model_signals import model_push
import django
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import fields, signals
from django.test import TestCase, Client
def check_fields(ModelName, fieldname, type, len):
    """Return True if `ModelName` declares a field `fieldname` whose class is
    django.db.models.fields.<type>Field, False otherwise.

    NOTE(review): the `len` argument (expected max_length) is accepted but
    never checked -- confirm whether max_length verification was intended.
    `type` and `len` shadow builtins; the names are kept so keyword callers
    keep working.
    """
    model_meta = getattr(ModelName, "_meta")
    for field in model_meta.fields:
        if field.name == fieldname:
            # isinstance() already returns a bool; no '== True' needed.
            return isinstance(field, getattr(django.db.models.fields, type + "Field"))
    # Field not found: return False explicitly (the original fell off the
    # end and implicitly returned None).
    return False
class OrganizationModel(TestCase):
    """Field-definition tests for the Organization model."""

    def test_organization_model(self):
        """The 'title' field must be a CharField with max_length 30."""
        for field in Organization._meta.fields:
            # The original used `field.name in "title"`, a substring test
            # that would also match fields named e.g. 't' or 'it'.
            # (The unused `org = Organization()` local was removed.)
            if field.name == "title":
                self.assertEqual(isinstance(field, django.db.models.fields.CharField), True)
                self.assertEqual(field.max_length, 30)
class SpecialityModel(TestCase):
    """Field-definition tests for the Speciality model."""

    def test_speciality_model(self):
        # 'name' should be declared as a CharField (expected length 50).
        self.assertTrue(check_fields(Speciality, "name", "Char", 50))
class PositionModel(TestCase):
    """Field-definition tests for the Position model."""

    def test_position_model(self):
        # 'name' should be declared as a CharField (expected length 50).
        self.assertTrue(check_fields(Position, "name", "Char", 50))
class KeywordModel(TestCase):
    """Field-definition tests for the Keyword model."""

    def test_keyword_model(self):
        # 'name' should be declared as a CharField (expected length 50).
        self.assertTrue(check_fields(Keyword, "name", "Char", 50))
class SuffixModel(TestCase):
    """Field-definition tests for the Suffix model."""

    def test_suffix_model(self):
        # Both attributes should be CharFields (lengths 50 and 100).
        self.assertTrue(check_fields(Suffix, "name", "Char", 50))
        self.assertTrue(check_fields(Suffix, "description", "Char", 100))
class QIInterestModel(TestCase):
    """Field-definition tests for the QIInterest model."""

    def test_qi_interest_model(self):
        # Renamed from 'test_category_model', a copy-paste leftover.
        # NOTE(review): these assertions were copied from the Suffix tests
        # and still check the Suffix model, not QIInterest.  They are left
        # unchanged because QIInterest's actual fields are not visible
        # here -- TODO: point them at QIInterest once its fields are
        # confirmed.
        self.assertEqual(check_fields(Suffix, "name", "Char", 50), True)
        self.assertEqual(check_fields(Suffix, "description", "Char", 100), True)
class CategoryModel(TestCase):
    """Field-definition tests for the Category model."""

    def test_category_model(self):
        # Both attributes should be CharFields (lengths 50 and 100).
        self.assertTrue(check_fields(Category, "name", "Char", 50))
        self.assertTrue(check_fields(Category, "description", "Char", 100))
class BigAimModel(TestCase):
    """Field-definition tests for the BigAim model."""

    def test_big_aim_model(self):
        # 'name' should be declared as a CharField (expected length 100).
        self.assertTrue(check_fields(BigAim, "name", "Char", 100))
class FocusAreaModel(TestCase):
    """Field-definition tests for the FocusArea model."""

    def test_focus_area_model(self):
        # 'name' should be declared as a CharField (expected length 100).
        self.assertTrue(check_fields(FocusArea, "name", "Char", 100))
class ClinicalDepartmentModel(TestCase):
    """Field-definition tests for the ClinicalDepartment model."""

    def test_clinical_department_model(self):
        # 'name' should be declared as a CharField (expected length 100).
        self.assertTrue(check_fields(ClinicalDepartment, "name", "Char", 100))
Adding some tests for Models
import sys
from approver.models import *
from approver.signals.bridge.model_signals import model_push
import django
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import fields, signals
from django.test import TestCase, Client
def check_fields(ModelName, fieldname, type, len):
    """Return True if `ModelName` declares a field `fieldname` whose class is
    django.db.models.fields.<type>Field, False otherwise.

    NOTE(review): the `len` argument (expected max_length) is accepted but
    never checked -- confirm whether max_length verification was intended.
    `type` and `len` shadow builtins; names kept for caller compatibility.
    The local is renamed from 'fields', which shadowed the
    django.db.models 'fields' import.
    """
    model_meta = getattr(ModelName, "_meta")
    model_fields = model_meta.fields
    for field in model_fields:
        if field.name == fieldname:
            # isinstance() already returns a bool; no '== True' needed.
            return isinstance(field, getattr(django.db.models.fields, type + "Field"))
    # Field not found: return False explicitly (the original fell off the
    # end and implicitly returned None).
    return False
class OrganizationModel(TestCase):
    """Field-definition tests for the Organization model."""

    def test_organization_model(self):
        """The 'title' field must be a CharField with max_length 30."""
        for field in Organization._meta.fields:
            # The original used `field.name in "title"`, a substring test
            # that would also match fields named e.g. 't' or 'it'.
            # (The unused `org = Organization()` local was removed.)
            if field.name == "title":
                self.assertEqual(isinstance(field, django.db.models.fields.CharField), True)
                self.assertEqual(field.max_length, 30)
class SpecialityModel(TestCase):
    """Field-definition tests for the Speciality model."""

    def test_speciality_model(self):
        # 'name' should be declared as a CharField (expected length 50).
        self.assertTrue(check_fields(Speciality, "name", "Char", 50))
class PositionModel(TestCase):
    """Field-definition tests for the Position model."""

    def test_position_model(self):
        # 'name' should be declared as a CharField (expected length 50).
        self.assertTrue(check_fields(Position, "name", "Char", 50))
class KeywordModel(TestCase):
    """Field-definition tests for the Keyword model."""

    def test_keyword_model(self):
        # 'name' should be declared as a CharField (expected length 50).
        self.assertTrue(check_fields(Keyword, "name", "Char", 50))
class SuffixModel(TestCase):
    """Field-definition tests for the Suffix model."""

    def test_suffix_model(self):
        # Both attributes should be CharFields (lengths 50 and 100).
        self.assertTrue(check_fields(Suffix, "name", "Char", 50))
        self.assertTrue(check_fields(Suffix, "description", "Char", 100))
class QIInterestModel(TestCase):
    """Field-definition tests for the QIInterest model."""

    def test_qi_interest_model(self):
        # Renamed from 'test_category_model', a copy-paste leftover.
        # NOTE(review): these assertions were copied from the Suffix tests
        # and still check the Suffix model, not QIInterest.  They are left
        # unchanged because QIInterest's actual fields are not visible
        # here -- TODO: point them at QIInterest once its fields are
        # confirmed.
        self.assertEqual(check_fields(Suffix, "name", "Char", 50), True)
        self.assertEqual(check_fields(Suffix, "description", "Char", 100), True)
class CategoryModel(TestCase):
    """Field-definition tests for the Category model."""

    def test_category_model(self):
        # Both attributes should be CharFields (lengths 50 and 100).
        self.assertTrue(check_fields(Category, "name", "Char", 50))
        self.assertTrue(check_fields(Category, "description", "Char", 100))
class BigAimModel(TestCase):
    """Field-definition tests for the BigAim model."""

    def test_big_aim_model(self):
        # 'name' should be declared as a CharField (expected length 100).
        self.assertTrue(check_fields(BigAim, "name", "Char", 100))
class FocusAreaModel(TestCase):
    """Field-definition tests for the FocusArea model."""

    def test_focus_area_model(self):
        # 'name' should be declared as a CharField (expected length 100).
        self.assertTrue(check_fields(FocusArea, "name", "Char", 100))
class ClinicalDepartmentModel(TestCase):
    """Field-definition tests for the ClinicalDepartment model."""

    def test_clinical_department_model(self):
        # 'name' should be declared as a CharField (expected length 100).
        self.assertTrue(check_fields(ClinicalDepartment, "name", "Char", 100))
import unittest
import util.precendent_directory_cleaner as directory_cleaner
from util.constant import Path
class PrecendentDirectoryCleanerTest(unittest.TestCase):
    """Tests for util.precendent_directory_cleaner.remove_files."""

    def test_precendent_removal(self):
        """remove_files on the fact/ cluster directory reports 0 and 56 entries.

        NOTE(review): this depends on the on-disk contents of
        Path.cluster_directory + 'fact/' -- confirm the fixture.
        """
        total_files_removed = directory_cleaner.remove_files(Path.cluster_directory + 'fact/')
        # len(x) is the idiomatic spelling of x.__len__().
        self.assertEqual(len(total_files_removed[0]), 0)
        self.assertEqual(len(total_files_removed[1]), 56)
[#186] updated test
import unittest
import util.precendent_directory_cleaner as directory_cleaner
from util.constant import Path
class PrecendentDirectoryCleanerTest(unittest.TestCase):
    """Tests for util.precendent_directory_cleaner.remove_files."""

    def test_precendent_removal(self):
        """remove_files on the fact/ cluster directory reports 0 and 59 entries.

        NOTE(review): this depends on the on-disk contents of
        Path.cluster_directory + 'fact/' -- confirm the fixture.
        """
        total_files_removed = directory_cleaner.remove_files(Path.cluster_directory + 'fact/')
        # len(x) is the idiomatic spelling of x.__len__().
        self.assertEqual(len(total_files_removed[0]), 0)
        self.assertEqual(len(total_files_removed[1]), 59)
|
#!/usr/bin/env python
"""Model of a LOFAR station. Gets Jones matrix towards a given direction
and frequency.
Example:
$ pointing_jones.py print LOFAR LBA SE607 Hamaker 2012-04-01T01:02:03 60 1 6.11 1.02 60E6
This prints out the Jones matrices for the LOFAR LBA antenna at 60.e6 Hz for
the SE607 station tracking a source at RA-DEC 6.11 1.02 (radians) for 60s
starting at 2012-04-01T01:02:03 using the Hamaker model.
"""
import sys
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
from antpat.dualpolelem import plot_polcomp_dynspec
from dreambeam.rime.scenarios import on_pointing_axis_tracking
from dreambeam.telescopes.rt import TelescopesWiz
def printJonesFreq(timespy, Jnf):
    """Print one row per timestep of the 2x2 Jones matrix at one frequency.

    NOTE(review): 'freq' is not a parameter -- this relies on the module
    global 'freq' set in the __main__ block below; confirm before reusing
    this function as a library call.  (Python 2 print statement.)
    """
    #Select one frequency
    for ti in range(len(timespy)):
        print freq, timespy[ti], Jnf[ti,0,0], Jnf[ti,0,1], Jnf[ti,1,0], Jnf[ti,1,1]
        # Print out data for BST-mode comparison (ie powers of p & q channels):
        #print("{0} {1} {2}".format( (timespy[ti]-timespy[0]).total_seconds(), np.abs(Jnf[ti,0,0])**2+np.abs(Jnf[ti,0,1])**2, np.abs(Jnf[ti,1,0])**2+np.abs(Jnf[ti,1,1])**2) )
def plotJonesFreq(timespy, Jnf):
    """Plot p- and q-channel powers versus time for a single frequency.

    The channel powers are the row-wise squared magnitudes of the 2x2
    Jones matrices: |J00|^2 + |J01|^2 for p and |J10|^2 + |J11|^2 for q.
    """
    power_p = np.abs(Jnf[:, 0, 0].squeeze())**2 + np.abs(Jnf[:, 0, 1].squeeze())**2
    power_q = np.abs(Jnf[:, 1, 1].squeeze())**2 + np.abs(Jnf[:, 1, 0].squeeze())**2
    plt.figure()
    for panel, (label, power) in enumerate([('p-channel', power_p),
                                            ('q-channel', power_q)], start=1):
        plt.subplot(2, 1, panel)
        plt.plot(timespy, power)
        plt.title(label)
    plt.xlabel('Time')
    plt.show()
def printAllJones(timespy, freqs, Jn):
    """Print a header plus one row per (time, frequency) pair of the 2x2
    Jones matrix elements.  (Python 2 print statements.)
    """
    print "Time, Freq, J11, J12, J21, J22"
    #duration.seconds/ObsTimeStp.seconds
    for ti in range(0, len(timespy)):
        for fi,freq in enumerate(freqs):
            print timespy[ti].isoformat(), freq, Jn[fi,ti,0,0], Jn[fi,ti,0,1], Jn[fi,ti,1,0], Jn[fi,ti,1,1]
def plotAllJones(timespy, freqs, Jn):
    """Plot a dynamic spectrum of the polarimetric Jones components.

    Thin wrapper around antpat.dualpolelem.plot_polcomp_dynspec.
    """
    plot_polcomp_dynspec(timespy, freqs, Jn)
def getnextcmdarg(args, mes):
    """Pop and return the next command-line argument from 'args'.

    If 'args' is empty, print an error naming the missing argument
    ('mes'), print the usage string, and terminate the process.
    """
    try:
        return args.pop(0)
    except IndexError:
        print("Specify "+mes)
        print(USAGE)
        # Exit nonzero: a missing argument is a usage error.  (The
        # original called the interactive helper exit(), which
        # terminates with status 0.)
        sys.exit(2)
# Basename of this script (from argv[0]); used in the usage message.
SCRIPTNAME = sys.argv[0].split('/')[-1]
USAGE = "Usage:\n {} print|plot telescope band stnID beammodel beginUTC duration timeStep pointingRA pointingDEC [frequency]".format(SCRIPTNAME)
if __name__ == "__main__":
    #Startup a telescope wizard
    TW = TelescopesWiz()
    #Process cmd line arguments
    # NOTE: getnextcmdarg() pops each argument off the front of 'args';
    # the remaining positional arguments below are then read by index.
    args = sys.argv[1:]
    action = getnextcmdarg(args, "output-type:\n 'print' or 'plot'")
    telescopeName = getnextcmdarg(args, "telescope:\n "+', '.join(TW.get_telescopes()))
    band = getnextcmdarg(args, "band/feed:\n "+', '.join(TW.get_bands(telescopeName)))
    stnID = getnextcmdarg(args, "station-ID:\n "+ ', '.join(TW.get_stations(telescopeName, band)))
    antmodel = getnextcmdarg(args, "beam-model:\n "+', '.join(TW.get_beammodels(telescopeName, band)))
    try:
        # Start of observation, UTC.  NOTE(review): only a *missing*
        # argument (IndexError) is handled here; a malformed timestamp
        # raises an uncaught ValueError.  Same pattern for the float
        # conversions below.
        bTime = datetime.strptime(args[0], "%Y-%m-%dT%H:%M:%S")
    except IndexError:
        print("Specify start-time (UTC in ISO format: yyyy-mm-ddTHH:MM:SS ).")
        print(USAGE)
        exit()
    try:
        duration =timedelta(0,float(args[1]))
    except IndexError:
        print("Specify duration (in seconds).")
        print(USAGE)
        exit()
    try:
        stepTime =timedelta(0,float(args[2]))
    except IndexError:
        print("Specify step-time (in seconds).")
        print(USAGE)
        exit()
    try:
        # Pointing direction as (RA, DEC, frame): radians, J2000.
        CelDir=(float(args[3]), float(args[4]), 'J2000')
    except IndexError:
        print("Specify pointing direction (in radians): RA DEC")
        print(USAGE)
        exit()
    if len(args)>5:
        try:
            freq=float(args[5])
        except ValueError:
            print("Specify frequency (in Hz).")
            print(USAGE)
            exit()
    else:
        # freq == 0. is the sentinel meaning "process all frequencies".
        freq=0.
    #Get the telescopeband instance:
    telescope = TW.getTelescopeBand(telescopeName, band, antmodel)
    #Compute the Jones matrices
    timespy, freqs, Jn = on_pointing_axis_tracking(telescope, stnID, bTime, duration,
                                                   stepTime, CelDir) #fix: freq not used
    #Do something with resulting Jones according to cmdline args
    if freq == 0.:
        if action == "plot":
            plotAllJones(timespy, freqs, Jn)
        else:
            printAllJones(timespy, freqs, Jn)
    else:
        # Pick the single frequency channel closest to the requested one
        # (within 190 kHz); raises IndexError if none is close enough.
        frqIdx = np.where(np.isclose(freqs,freq,atol=190e3))[0][0]
        Jnf = Jn[frqIdx,:,:,:].squeeze()
        if action == "plot":
            plotJonesFreq(timespy, Jnf)
        else:
            printJonesFreq(timespy, Jnf)
Homogenize CSV output for single frequency vs all frequencies.
Merge branch 'creaneroDIAS-master'
#!/usr/bin/env python
"""Model of a LOFAR station. Gets Jones matrix towards a given direction
and frequency.
Example:
$ pointing_jones.py print LOFAR LBA SE607 Hamaker 2012-04-01T01:02:03 60 1 6.11 1.02 60E6
This prints out the Jones matrices for the LOFAR LBA antenna at 60.e6 Hz for
the SE607 station tracking a source at RA-DEC 6.11 1.02 (radians) for 60s
starting at 2012-04-01T01:02:03 using the Hamaker model.
"""
import sys
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
from antpat.dualpolelem import plot_polcomp_dynspec
from dreambeam.rime.scenarios import on_pointing_axis_tracking
from dreambeam.telescopes.rt import TelescopesWiz
def printJonesFreq(timespy, Jnf):
    """Print a CSV table of the Jones matrix per time step for one frequency.

    :param timespy: sequence of datetime samples along the track.
    :param Jnf: Jones matrices for one frequency, indexed [time, 2, 2].

    NOTE: reads the module-level ``freq`` set in the __main__ section.
    """
    # Py2 print statements are syntax errors on Python 3; single-argument
    # print() calls below behave identically on both versions.
    print("Time, Freq, J11, J12, J21, J22")  # header for CSV
    for ti in range(len(timespy)):
        # Creates and prints a comma separated string.
        jones_1f_outstring = ",".join(map(str, [timespy[ti].isoformat(), freq, Jnf[ti, 0, 0], Jnf[ti, 0, 1], Jnf[ti, 1, 0], Jnf[ti, 1, 1]]))
        print(jones_1f_outstring)
        # Data for BST-mode comparison (powers of p & q channels) could be
        # printed instead:
        #print("{0} {1} {2}".format( (timespy[ti]-timespy[0]).total_seconds(), np.abs(Jnf[ti,0,0])**2+np.abs(Jnf[ti,0,1])**2, np.abs(Jnf[ti,1,0])**2+np.abs(Jnf[ti,1,1])**2) )
def plotJonesFreq(timespy, Jnf):
    """Plot the p- and q-channel powers versus time for one frequency.

    :param timespy: sequence of datetime samples along the track.
    :param Jnf: Jones matrices for one frequency, indexed [time, 2, 2].
    """
    # Channel power = sum of squared magnitudes over that row of the Jones
    # matrix: row 0 feeds the p channel, row 1 the q channel.
    channels = [
        ('p-channel', np.abs(Jnf[:, 0, 0].squeeze())**2
                      + np.abs(Jnf[:, 0, 1].squeeze())**2),
        ('q-channel', np.abs(Jnf[:, 1, 1].squeeze())**2
                      + np.abs(Jnf[:, 1, 0].squeeze())**2),
    ]
    plt.figure()
    for pos, (title, power) in enumerate(channels, start=1):
        plt.subplot(2, 1, pos)
        plt.plot(timespy, power)
        plt.title(title)
    plt.xlabel('Time')
    plt.show()
def printAllJones(timespy, freqs, Jn):
    """Print a CSV table of Jones matrices for every time step and frequency.

    :param timespy: sequence of datetime samples.
    :param freqs: sequence of frequencies in Hz.
    :param Jn: Jones matrices indexed [freq, time, 2, 2].
    """
    # Py2 print statements are syntax errors on Python 3; single-argument
    # print() calls below behave identically on both versions.
    print("Time, Freq, J11, J12, J21, J22")  # header for CSV
    for ti in range(0, len(timespy)):
        for fi, freq in enumerate(freqs):
            # Creates and prints a comma-separated string.
            jones_nf_outstring = ",".join(map(str, [timespy[ti].isoformat(), freq, Jn[fi, ti, 0, 0], Jn[fi, ti, 0, 1], Jn[fi, ti, 1, 0], Jn[fi, ti, 1, 1]]))
            print(jones_nf_outstring)
def plotAllJones(timespy, freqs, Jn):
    """Plot a dynamic spectrum of all Jones components via antpat's helper."""
    plot_polcomp_dynspec(timespy, freqs, Jn)
def getnextcmdarg(args, mes):
    """Pop and return the next command-line argument.

    :param args: mutable list of remaining arguments (consumed in place).
    :param mes: description of the missing argument for the error message.

    Exits the program with the usage text when no argument remains.
    """
    try:
        arg = args.pop(0)
    except IndexError:
        print("Specify "+mes)
        print(USAGE)
        # sys.exit is always available and reports failure; the exit()
        # builtin only exists when the site module is loaded and exited
        # with status 0 even on this error path.
        sys.exit(1)
    return arg
SCRIPTNAME = sys.argv[0].split('/')[-1]
USAGE = "Usage:\n {} print|plot telescope band stnID beammodel beginUTC duration timeStep pointingRA pointingDEC [frequency]".format(SCRIPTNAME)
if __name__ == "__main__":
    # Startup a telescope wizard to enumerate telescopes/bands/stations/models.
    TW = TelescopesWiz()
    # Process cmd line arguments (consumed left to right).
    args = sys.argv[1:]
    action = getnextcmdarg(args, "output-type:\n  'print' or 'plot'")
    telescopeName = getnextcmdarg(args, "telescope:\n  "+', '.join(TW.get_telescopes()))
    band = getnextcmdarg(args, "band/feed:\n  "+', '.join(TW.get_bands(telescopeName)))
    stnID = getnextcmdarg(args, "station-ID:\n  "+', '.join(TW.get_stations(telescopeName, band)))
    antmodel = getnextcmdarg(args, "beam-model:\n  "+', '.join(TW.get_beammodels(telescopeName, band)))
    # Also catch ValueError below: malformed values previously crashed with a
    # traceback instead of printing the usage text; use sys.exit(1) so errors
    # report a nonzero status (exit() reported 0 and needs the site module).
    try:
        bTime = datetime.strptime(args[0], "%Y-%m-%dT%H:%M:%S")
    except (IndexError, ValueError):
        print("Specify start-time (UTC in ISO format: yyyy-mm-ddTHH:MM:SS ).")
        print(USAGE)
        sys.exit(1)
    try:
        duration = timedelta(0, float(args[1]))
    except (IndexError, ValueError):
        print("Specify duration (in seconds).")
        print(USAGE)
        sys.exit(1)
    try:
        stepTime = timedelta(0, float(args[2]))
    except (IndexError, ValueError):
        print("Specify step-time (in seconds).")
        print(USAGE)
        sys.exit(1)
    try:
        CelDir = (float(args[3]), float(args[4]), 'J2000')
    except (IndexError, ValueError):
        print("Specify pointing direction (in radians): RA DEC")
        print(USAGE)
        sys.exit(1)
    # Optional single frequency; 0. means "all frequencies".
    if len(args) > 5:
        try:
            freq = float(args[5])
        except ValueError:
            print("Specify frequency (in Hz).")
            print(USAGE)
            sys.exit(1)
    else:
        freq = 0.
    # Get the telescopeband instance:
    telescope = TW.getTelescopeBand(telescopeName, band, antmodel)
    # Compute the Jones matrices. NOTE: freq is not passed here — the full
    # band is computed and the requested channel is selected afterwards.
    timespy, freqs, Jn = on_pointing_axis_tracking(telescope, stnID, bTime,
                                                   duration, stepTime, CelDir)
    # Output the resulting Jones matrices per the requested action.
    if freq == 0.:
        if action == "plot":
            plotAllJones(timespy, freqs, Jn)
        else:
            printAllJones(timespy, freqs, Jn)
    else:
        # Pick the channel closest to the requested frequency (within 190 kHz).
        frqIdx = np.where(np.isclose(freqs, freq, atol=190e3))[0][0]
        Jnf = Jn[frqIdx, :, :, :].squeeze()
        if action == "plot":
            plotJonesFreq(timespy, Jnf)
        else:
            printJonesFreq(timespy, Jnf)
|
# -*- coding: UTF-8 -*-
"""
fbchat
~~~~~~
Facebook Chat (Messenger) for Python
:copyright: (c) 2015 by Taehoon Kim.
:copyright: (c) 2015-2016 by PidgeyL.
:license: BSD, see LICENSE for more details.
"""
import requests
import logging
from uuid import uuid1
import warnings
from random import choice
from datetime import datetime
from bs4 import BeautifulSoup as bs
from mimetypes import guess_type
from .utils import *
from .models import *
from .stickers import *
import time
import sys
# Python 3 does not have raw_input, whereas Python 2 has and it's more secure
try:
input = raw_input
except NameError:
pass
# Endpoints of Facebook's (unofficial) chat/web API.
LoginURL = "https://m.facebook.com/login.php?login_attempt=1"
SearchURL = "https://www.facebook.com/ajax/typeahead/search.php"
SendURL = "https://www.facebook.com/messaging/send/"
ThreadsURL = "https://www.facebook.com/ajax/mercury/threadlist_info.php"
ThreadSyncURL = "https://www.facebook.com/ajax/mercury/thread_sync.php"
MessagesURL = "https://www.facebook.com/ajax/mercury/thread_info.php"
ReadStatusURL = "https://www.facebook.com/ajax/mercury/change_read_status.php"
DeliveredURL = "https://www.facebook.com/ajax/mercury/delivery_receipts.php"
MarkSeenURL = "https://www.facebook.com/ajax/mercury/mark_seen.php"
BaseURL = "https://www.facebook.com"
MobileURL = "https://m.facebook.com/"
StickyURL = "https://0-edge-chat.facebook.com/pull"
PingURL = "https://0-channel-proxy-06-ash2.facebook.com/active_ping"
UploadURL = "https://upload.facebook.com/ajax/mercury/upload.php"
UserInfoURL = "https://www.facebook.com/chat/user_info/"
ConnectURL = "https://www.facebook.com/ajax/add_friend/action.php?dpr=1"
RemoveUserURL = "https://www.facebook.com/chat/remove_participants/"
LogoutURL = "https://www.facebook.com/logout.php"
AllUsersURL = "https://www.facebook.com/chat/user_info_all"
SaveDeviceURL = "https://m.facebook.com/login/save-device/cancel/"
CheckpointURL = "https://m.facebook.com/login/checkpoint/"

# Encoding used when decoding Facebook's responses.
facebookEncoding = 'UTF-8'

# Module-level logger; Client.__init__ attaches a console handler to it.
log = logging.getLogger("client")
class Client(object):
"""A client for the Facebook Chat (Messenger).
See http://github.com/carpedm20/fbchat for complete
documentation for the API.
"""
    def __init__(self, email, password, debug=True, info_log=True, user_agent=None, max_retries=5, do_login=True):
        """A client for the Facebook Chat (Messenger).

        :param email: Facebook `email` or `id` or `phone number`
        :param password: Facebook account password
        :param debug: log at DEBUG level (takes precedence over info_log)
        :param info_log: log at INFO level when debug is False
        :param user_agent: HTTP User-Agent string; a random one from
            USER_AGENTS is chosen when None
        :param max_retries: number of login attempts before giving up
        :param do_login: log in immediately; pass False to defer (e.g. to
            restore cookies with loadSession())

        Example::

            import fbchat
            chat = fbchat.Client(email, password)
        """
        if do_login and not (email and password):
            raise Exception("Email and password not found.")
        self.is_def_recipient_set = False
        self.debug = debug
        # Pull-channel tokens; filled in by start_listening().
        self.sticky, self.pool = (None, None)
        self._session = requests.session()
        self.req_counter = 1
        self.seq = "0"
        self.payloadDefault={}
        self.client = 'mercury'
        self.listening = False
        # Facebook's numeric gender codes mapped to readable labels.
        self.GENDERS = {
            0: 'unknown',
            1: 'female_singular',
            2: 'male_singular',
            3: 'female_singular_guess',
            4: 'male_singular_guess',
            5: 'mixed',
            6: 'neuter_singular',
            7: 'unknown_singular',
            8: 'female_plural',
            9: 'male_plural',
            10: 'neuter_plural',
            11: 'unknown_plural',
        }
        if not user_agent:
            user_agent = choice(USER_AGENTS)
        self._header = {
            'Content-Type' : 'application/x-www-form-urlencoded',
            'Referer' : BaseURL,
            'Origin' : BaseURL,
            'User-Agent' : user_agent,
            'Connection' : 'keep-alive',
        }
        # Configure the logger differently based on the 'debug' parameter
        if debug:
            logging_level = logging.DEBUG
        elif info_log:
            logging_level = logging.INFO
        else:
            logging_level = logging.WARNING
        # Creates the console handler
        handler = logging.StreamHandler()
        handler.setLevel(logging_level)
        log.addHandler(handler)
        log.setLevel(logging.DEBUG)
        if do_login:
            self.login(email, password, max_retries)
        # Threads seen so far; populated/extended by getThreadList().
        self.threads = []
def _console(self, msg):
"""Assumes an INFO level and log it.
This method shouldn't be used anymore.
Use the log itself:
>>> import logging
>>> from fbchat.client import log
>>> log.setLevel(logging.DEBUG)
You can do the same thing by adding the 'debug' argument:
>>> from fbchat import Client
>>> client = Client("...", "...", debug=True)
"""
warnings.warn(
"Client._console shouldn't be used. Use 'log.<level>'",
DeprecationWarning)
log.debug(msg)
def _setttstamp(self):
for i in self.fb_dtsg:
self.ttstamp += str(ord(i))
self.ttstamp += '2'
def _generatePayload(self, query):
"""Adds the following defaults to the payload:
__rev, __user, __a, ttstamp, fb_dtsg, __req
"""
payload = self.payloadDefault.copy()
if query:
payload.update(query)
payload['__req'] = str_base(self.req_counter, 36)
payload['seq'] = self.seq
self.req_counter += 1
return payload
def _get(self, url, query=None, timeout=30):
payload=self._generatePayload(query)
return self._session.get(url, headers=self._header, params=payload, timeout=timeout)
def _post(self, url, query=None, timeout=30):
payload=self._generatePayload(query)
return self._session.post(url, headers=self._header, data=payload, timeout=timeout)
    def _cleanGet(self, url, query=None, timeout=30):
        """GET *url* passing *query* as-is (no payload defaults, no counter bump)."""
        return self._session.get(url, headers=self._header, params=query, timeout=timeout)
    def _cleanPost(self, url, query=None, timeout=30):
        """POST *query* as-is; note it still bumps the request counter (unlike _cleanGet)."""
        self.req_counter += 1
        return self._session.post(url, headers=self._header, data=query, timeout=timeout)
    def _postFile(self, url, files=None, timeout=30):
        """POST multipart *files* with the default payload.

        Note: self._header is not sent here, so requests can set the
        multipart Content-Type (with boundary) itself instead of the
        urlencoded default.
        """
        payload=self._generatePayload(None)
        return self._session.post(url, data=payload, timeout=timeout, files=files)
    def _post_login(self):
        """Initialize session state after a successful login.

        Scrapes the fb_dtsg/h tokens from the home page, derives the
        ttstamp, and prepares the default payload and the pull-channel
        form used while listening.
        """
        self.payloadDefault = {}
        self.client_id = hex(int(random()*2147483648))[2:]
        self.start_time = now()
        # The c_user cookie holds the logged-in account's user id.
        self.uid = int(self._session.cookies['c_user'])
        self.user_channel = "p_" + str(self.uid)
        self.ttstamp = ''
        r = self._get(BaseURL)
        soup = bs(r.text, "lxml")
        log.debug(r.text)
        log.debug(r.url)
        # Hidden form inputs carry the CSRF-style tokens.
        self.fb_dtsg = soup.find("input", {'name':'fb_dtsg'})['value']
        self.fb_h = soup.find("input", {'name':'h'})['value']
        self._setttstamp()
        # Set default payload
        self.payloadDefault['__rev'] = int(r.text.split('"revision":',1)[1].split(",",1)[0])
        self.payloadDefault['__user'] = self.uid
        self.payloadDefault['__a'] = '1'
        self.payloadDefault['ttstamp'] = self.ttstamp
        self.payloadDefault['fb_dtsg'] = self.fb_dtsg
        # Form template for the pull (listening) channel.
        self.form = {
            'channel' : self.user_channel,
            'partition' : '-2',
            'clientid' : self.client_id,
            'viewer_uid' : self.uid,
            'uid' : self.uid,
            'state' : 'active',
            'format' : 'json',
            'idle' : 0,
            'cap' : '8'
        }
        self.prev = now()
        self.tmp_prev = now()
        self.last_sync = now()
    def _login(self):
        """Perform one login attempt; return True on success.

        Posts the mobile login form, then follows the 2FA checkpoint and
        the "save device" interstitial when they appear.
        """
        if not (self.email and self.password):
            raise Exception("Email and password not found.")
        # Collect all hidden inputs from the mobile login form.
        soup = bs(self._get(MobileURL).text, "lxml")
        data = dict((elem['name'], elem['value']) for elem in soup.findAll("input") if elem.has_attr('value') and elem.has_attr('name'))
        data['email'] = self.email
        data['pass'] = self.password
        data['login'] = 'Log In'
        r = self._cleanPost(LoginURL, data)
        # Usually, 'Checkpoint' will refer to 2FA
        if 'checkpoint' in r.url and 'Enter Security Code to Continue' in r.text:
            r = self._2FA(r)
        # Sometimes Facebook tries to show the user a "Save Device" dialog
        if 'save-device' in r.url:
            r = self._cleanGet(SaveDeviceURL)
        # Landing on the home page means the login succeeded.
        if 'home' in r.url:
            self._post_login()
            return True
        else:
            return False
    def _2FA(self, r):
        """Walk the 2FA checkpoint flow; return the last HTTP response.

        Prompts on stdin for the approval code, then steps through the
        successive checkpoint pages, returning as soon as one of them
        redirects to the home page (login complete).
        """
        soup = bs(r.text, "lxml")
        data = dict()
        s = input('Please enter your 2FA code --> ')
        data['approvals_code'] = s
        data['fb_dtsg'] = soup.find("input", {'name':'fb_dtsg'})['value']
        data['nh'] = soup.find("input", {'name':'nh'})['value']
        data['submit[Submit Code]'] = 'Submit Code'
        data['codes_submitted'] = 0
        log.info('Submitting 2FA code.')
        r = self._cleanPost(CheckpointURL, data)
        if 'home' in r.url:
            return r
        del(data['approvals_code'])
        del(data['submit[Submit Code]'])
        del(data['codes_submitted'])
        data['name_action_selected'] = 'save_device'
        data['submit[Continue]'] = 'Continue'
        log.info('Saving browser.')  # At this stage, we have dtsg, nh, name_action_selected, submit[Continue]
        r = self._cleanPost(CheckpointURL, data)
        if 'home' in r.url:
            return r
        del(data['name_action_selected'])
        log.info('Starting Facebook checkup flow.')  # At this stage, we have dtsg, nh, submit[Continue]
        r = self._cleanPost(CheckpointURL, data)
        if 'home' in r.url:
            return r
        del(data['submit[Continue]'])
        data['submit[This was me]'] = 'This Was Me'
        log.info('Verifying login attempt.')  # At this stage, we have dtsg, nh, submit[This was me]
        r = self._cleanPost(CheckpointURL, data)
        if 'home' in r.url:
            return r
        del(data['submit[This was me]'])
        data['submit[Continue]'] = 'Continue'
        data['name_action_selected'] = 'save_device'
        log.info('Saving device again.')  # At this stage, we have dtsg, nh, submit[Continue], name_action_selected
        r = self._cleanPost(CheckpointURL, data)
        return r
def saveSession(self, sessionfile):
"""Dumps the session cookies to (sessionfile).
WILL OVERWRITE ANY EXISTING FILE
:param sessionfile: location of saved session file
"""
log.info('Saving session')
with open(sessionfile, 'w') as f:
# Grab cookies from current session, and save them as JSON
f.write(json.dumps(self._session.cookies.get_dict(), ensure_ascii=False))
    def loadSession(self, sessionfile):
        """Loads session cookies from (sessionfile).

        :param sessionfile: location of saved session file
        :returns: True when cookies were loaded and post-login setup ran;
            False when the file holds no usable session (no c_user cookie).
        :raises Exception: on unreadable JSON or cookie-merge failure.
        """
        log.info('Loading session')
        with open(sessionfile, 'r') as f:
            try:
                j = json.load(f)
                if not j or 'c_user' not in j:
                    # Saved state does not contain a logged-in user.
                    return False
                # Load cookies into current session
                self._session.cookies = requests.cookies.merge_cookies(self._session.cookies, j)
                self._post_login()
                return True
            except Exception as e:
                raise Exception('Invalid json in {}, or bad merging of cookies'.format(sessionfile))
def login(self, email, password, max_retries=5):
# Logging in
log.info("Logging in...")
self.email = email
self.password = password
for i in range(1, max_retries+1):
if not self._login():
log.warning("Attempt #{} failed{}".format(i,{True:', retrying'}.get(i<5,'')))
time.sleep(1)
continue
else:
log.info("Login successful.")
break
else:
raise Exception("Login failed. Check email/password.")
    def logout(self, timeout=30):
        """Log the session out on the server and reset client request state.

        :param timeout: request timeout in seconds
        :returns: the HTTP response of the logout request
        """
        data = {
            'ref': "mb",
            'h': self.fb_h
        }
        payload=self._generatePayload(data)
        r = self._session.get(LogoutURL, headers=self._header, params=payload, timeout=timeout)
        # reset value
        self.payloadDefault={}
        self._session = requests.session()
        self.req_counter = 1
        self.seq = "0"
        return r
def setDefaultRecipient(self, recipient_id, is_user=True):
"""Sets default recipient to send messages and images to.
:param recipient_id: the user id or thread id that you want to send a message to
:param is_user: determines if the recipient_id is for user or thread
"""
self.def_recipient_id = recipient_id
self.def_is_user = is_user
self.is_def_recipient_set = True
def _adapt_user_in_chat_to_user_model(self, user_in_chat):
""" Adapts user info from chat to User model acceptable initial dict
:param user_in_chat: user info from chat
'dir': None,
'mThumbSrcSmall': None,
'is_friend': False,
'is_nonfriend_messenger_contact': True,
'alternateName': '',
'i18nGender': 16777216,
'vanity': '',
'type': 'friend',
'searchTokens': ['Voznesenskij', 'Sergej'],
'thumbSrc': 'https://fb-s-b-a.akamaihd.net/h-ak-xfa1/v/t1.0-1/c9.0.32.32/p32x32/10354686_10150004552801856_220367501106153455_n.jpg?oh=71a87d76d4e4d17615a20c43fb8dbb47&oe=59118CE4&__gda__=1493753268_ae75cef40e9785398e744259ccffd7ff',
'mThumbSrcLarge': None,
'firstName': 'Sergej',
'name': 'Sergej Voznesenskij',
'uri': 'https://www.facebook.com/profile.php?id=100014812758264',
'id': '100014812758264',
'gender': 2
"""
return {
'type': 'user',
'uid': user_in_chat['id'],
'photo': user_in_chat['thumbSrc'],
'path': user_in_chat['uri'],
'text': user_in_chat['name'],
'score': '',
'data': user_in_chat,
}
    def getAllUsers(self):
        """Get all chat users with their info included.

        :returns: list of User objects, or None when the request fails or
            the payload is empty.
        """
        data = {
            'viewer': self.uid,
        }
        r = self._post(AllUsersURL, query=data)
        if not r.ok or len(r.text) == 0:
            return None
        j = get_json(r.text)
        if not j['payload']:
            return None
        payload = j['payload']
        users = []
        for k in payload.keys():
            try:
                user = self._adapt_user_in_chat_to_user_model(payload[k])
            except KeyError:
                # Skip payload entries that lack the expected user fields.
                continue
            users.append(User(user))
        return users
    def getUsers(self, name):
        """Find and get user by his/her name.

        :param name: name of a person
        :returns: list of User objects for the matching typeahead entries
        """
        payload = {
            'value' : name.lower(),
            'viewer' : self.uid,
            'rsp' : "search",
            'context' : "search",
            'path' : "/home.php",
            'request_id' : str(uuid1()),
        }
        r = self._get(SearchURL, payload)
        # Also kept on self.j for debugging of the raw search response.
        self.j = j = get_json(r.text)
        users = []
        for entry in j['payload']['entries']:
            if entry['type'] == 'user':
                users.append(User(entry))
        return users # have bug TypeError: __repr__ returned non-string (type bytes)
    def send(self, recipient_id=None, message=None, is_user=True, like=None, image_id=None, add_user_ids=None):
        """Send a message with given thread id.

        :param recipient_id: the user id or thread id that you want to send a message to
        :param message: a text that you want to send
        :param is_user: determines if the recipient_id is for user or thread
        :param like: size of the like sticker you want to send
        :param image_id: id for the image to send, gotten from the UploadURL
        :param add_user_ids: a list of user ids to add to a chat
        :returns: True when the POST carried no error payload, False otherwise
        """
        # A default recipient (setDefaultRecipient) overrides the arguments.
        if self.is_def_recipient_set:
            recipient_id = self.def_recipient_id
            is_user = self.def_is_user
        elif recipient_id is None:
            raise Exception('Recipient ID is not set.')
        messageAndOTID = generateOfflineThreadingID()
        timestamp = now()
        date = datetime.now()
        data = {
            'client': self.client,
            'author' : 'fbid:' + str(self.uid),
            'timestamp' : timestamp,
            'timestamp_absolute' : 'Today',
            'timestamp_relative' : str(date.hour) + ":" + str(date.minute).zfill(2),
            'timestamp_time_passed' : '0',
            'is_unread' : False,
            'is_cleared' : False,
            'is_forward' : False,
            'is_filtered_content' : False,
            'is_filtered_content_bh': False,
            'is_filtered_content_account': False,
            'is_filtered_content_quasar': False,
            'is_filtered_content_invalid_app': False,
            'is_spoof_warning' : False,
            'source' : 'source:chat:web',
            'source_tags[0]' : 'source:chat',
            'html_body' : False,
            'ui_push_phase' : 'V3',
            'status' : '0',
            'offline_threading_id':messageAndOTID,
            'message_id' : messageAndOTID,
            'threading_id':generateMessageID(self.client_id),
            # NOTE(review): this key carries a trailing ':' on the wire
            # ('ephemeral_ttl_mode:') — looks odd; confirm against the web
            # client's payload before changing it.
            'ephemeral_ttl_mode:': '0',
            'manual_retry_cnt' : '0',
            'signatureID' : getSignatureID()
        }
        if is_user:
            data["other_user_fbid"] = recipient_id
        else:
            data["thread_fbid"] = recipient_id
        if add_user_ids:
            # Adding participants is sent as a "log message" instead of a
            # user-generated message.
            data['action_type'] = 'ma-type:log-message'
            # It's possible to add multiple users
            for i, add_user_id in enumerate(add_user_ids):
                data['log_message_data[added_participants][' + str(i) + ']'] = "fbid:" + str(add_user_id)
            data['log_message_type'] = 'log:subscribe'
        else:
            data['action_type'] = 'ma-type:user-generated-message'
            data['body'] = message
            data['has_attachment'] = image_id is not None
            data['specific_to_list[0]'] = 'fbid:' + str(recipient_id)
            data['specific_to_list[1]'] = 'fbid:' + str(self.uid)
        if image_id:
            data['image_ids[0]'] = image_id
        if like:
            try:
                sticker = LIKES[like.lower()]
            except KeyError:
                # if user doesn't enter l or m or s, then use the large one
                sticker = LIKES['l']
            data["sticker_id"] = sticker
        r = self._post(SendURL, data)
        if r.ok:
            log.info('Message sent.')
        else:
            log.info('Message not sent.')
        # The body may arrive as bytes; decode before JSON parsing.
        if isinstance(r._content, str) is False:
            r._content = r._content.decode(facebookEncoding)
        j = get_json(r._content)
        if 'error' in j:
            # 'errorDescription' is in the users own language!
            log.warning('Error #{} when sending message: {}'.format(j['error'], j['errorDescription']))
            return False
        log.debug("Sending {}".format(r))
        log.debug("With data {}".format(data))
        return True
def sendRemoteImage(self, recipient_id=None, message=None, is_user=True, image=''):
"""Send an image from a URL
:param recipient_id: the user id or thread id that you want to send a message to
:param message: a text that you want to send
:param is_user: determines if the recipient_id is for user or thread
:param image: URL for an image to download and send
"""
mimetype = guess_type(image)[0]
remote_image = requests.get(image).content
image_id = self.uploadImage({'file': (image, remote_image, mimetype)})
return self.send(recipient_id, message, is_user, None, image_id)
def sendLocalImage(self, recipient_id=None, message=None, is_user=True, image=''):
"""Send an image from a file path
:param recipient_id: the user id or thread id that you want to send a message to
:param message: a text that you want to send
:param is_user: determines if the recipient_id is for user or thread
:param image: path to a local image to send
"""
mimetype = guess_type(image)[0]
image_id = self.uploadImage({'file': (image, open(image, 'rb'), mimetype)})
return self.send(recipient_id, message, is_user, None, image_id)
    def uploadImage(self, image):
        """Upload an image and get the image_id for sending in a message.

        :param image: a tuple of (file name, data, mime type) to upload to facebook
        :returns: the image_id assigned by the upload endpoint
        """
        r = self._postFile(UploadURL, image)
        # The body may arrive as bytes; decode before JSON parsing.
        if isinstance(r._content, str) is False:
            r._content = r._content.decode(facebookEncoding)
        # Strip the first 9 characters — presumably Facebook's
        # anti-hijacking prefix ("for (;;);") — before parsing; TODO confirm.
        return json.loads(r._content[9:])['payload']['metadata'][0]['image_id']
    def getThreadInfo(self, userID, last_n=20, start=None, is_user=True):
        """Get the info of one Thread.

        :param userID: ID of the user you want the messages from
        :param last_n: (optional) number of retrieved messages from start
        :param start: (optional) the start index of a thread (Deprecated)
        :param is_user: (optional) determines if the userID is for user or thread
        :returns: list of Message objects in chronological order, or None
            when the request fails or the payload is empty.
        """
        assert last_n > 0, 'length must be positive integer, got %d' % last_n
        assert start is None, '`start` is deprecated, always 0 offset querry is returned'
        if is_user:
            key = 'user_ids'
        else:
            key = 'thread_fbids'
        # deprecated
        # `start` doesn't matter, always returns from the last
        # data['messages[{}][{}][offset]'.format(key, userID)] = start
        data = {'messages[{}][{}][offset]'.format(key, userID): 0,
                'messages[{}][{}][limit]'.format(key, userID): last_n,
                'messages[{}][{}][timestamp]'.format(key, userID): now()}
        r = self._post(MessagesURL, query=data)
        if not r.ok or len(r.text) == 0:
            return None
        j = get_json(r.text)
        if not j['payload']:
            return None
        messages = []
        for message in j['payload']['actions']:
            messages.append(Message(**message))
        # The endpoint returns newest-first; flip to chronological order.
        return list(reversed(messages))
def getThreadList(self, start, length=20):
"""Get thread list of your facebook account.
:param start: the start index of a thread
:param length: (optional) the length of a thread
"""
assert length < 21, '`length` is deprecated, max. last 20 threads are returned'
data = {
'client' : self.client,
'inbox[offset]' : start,
'inbox[limit]' : length,
}
r = self._post(ThreadsURL, data)
if not r.ok or len(r.text) == 0:
return None
j = get_json(r.text)
# Get names for people
participants = {}
try:
for participant in j['payload']['participants']:
participants[participant["fbid"]] = participant["name"]
except Exception as e:
log.warning(str(j))
# Prevent duplicates in self.threads
threadIDs = [getattr(x, "thread_id") for x in self.threads]
for thread in j['payload']['threads']:
if thread["thread_id"] not in threadIDs:
try:
thread["other_user_name"] = participants[int(thread["other_user_fbid"])]
except:
thread["other_user_name"] = ""
t = Thread(**thread)
self.threads.append(t)
return self.threads
def getUnread(self):
form = {
'client': 'mercury_sync',
'folders[0]': 'inbox',
'last_action_timestamp': now() - 60*1000
# 'last_action_timestamp': 0
}
r = self._post(ThreadSyncURL, form)
if not r.ok or len(r.text) == 0:
return None
j = get_json(r.text)
result = {
"message_counts": j['payload']['message_counts'],
"unseen_threads": j['payload']['unseen_thread_ids']
}
return result
def markAsDelivered(self, userID, threadID):
data = {
"message_ids[0]": threadID,
"thread_ids[%s][0]" % userID: threadID
}
r = self._post(DeliveredURL, data)
return r.ok
def markAsRead(self, userID):
data = {
"watermarkTimestamp": now(),
"shouldSendReadReceipt": True,
"ids[%s]" % userID: True
}
r = self._post(ReadStatusURL, data)
return r.ok
def markAsSeen(self):
r = self._post(MarkSeenURL, {"seen_timestamp": 0})
return r.ok
def friend_connect(self, friend_id):
data = {
"to_friend": friend_id,
"action": "confirm"
}
r = self._post(ConnectURL, data)
return r.ok
def ping(self, sticky):
data = {
'channel': self.user_channel,
'clientid': self.client_id,
'partition': -2,
'cap': 0,
'uid': self.uid,
'sticky': sticky,
'viewer_uid': self.uid
}
r = self._get(PingURL, data)
return r.ok
def _getSticky(self):
"""Call pull api to get sticky and pool parameter,
newer api needs these parameter to work.
"""
data = {
"msgs_recv": 0,
"channel": self.user_channel,
"clientid": self.client_id
}
r = self._get(StickyURL, data)
j = get_json(r.text)
if 'lb_info' not in j:
raise Exception('Get sticky pool error')
sticky = j['lb_info']['sticky']
pool = j['lb_info']['pool']
return sticky, pool
    def _pullMessage(self, sticky, pool):
        """Call pull api with seq value to get message data.

        :param sticky: sticky token from _getSticky()
        :param pool: sticky pool from _getSticky()
        :returns: the decoded JSON payload; also updates self.seq from it
        """
        data = {
            "msgs_recv": 0,
            "sticky_token": sticky,
            "sticky_pool": pool,
            "clientid": self.client_id,
        }
        r = self._get(StickyURL, data)
        r.encoding = facebookEncoding
        j = get_json(r.text)
        # Remember the sequence number so the next pull resumes from here.
        self.seq = j.get('seq', '0')
        return j
    def _parseMessage(self, content):
        """Get message and author name from content.

        The content may contain multiple events; each is dispatched to the
        matching on_* callback according to its 'type' field.
        """
        if 'ms' not in content: return
        log.debug("Received {}".format(content["ms"]))
        for m in content['ms']:
            try:
                if m['type'] in ['m_messaging', 'messaging']:
                    # Classic message-delivery event.
                    if m['event'] in ['deliver']:
                        mid = m['message']['mid']
                        message = m['message']['body']
                        fbid = m['message']['sender_fbid']
                        name = m['message']['sender_name']
                        self.on_message(mid, fbid, name, message, m)
                elif m['type'] in ['typ']:
                    self.on_typing(m.get("from"))
                elif m['type'] in ['m_read_receipt']:
                    self.on_read(m.get('realtime_viewer_fbid'), m.get('reader'), m.get('time'))
                elif m['type'] in ['inbox']:
                    viewer = m.get('realtime_viewer_fbid')
                    unseen = m.get('unseen')
                    unread = m.get('unread')
                    other_unseen = m.get('other_unseen')
                    other_unread = m.get('other_unread')
                    timestamp = m.get('seen_timestamp')
                    self.on_inbox(viewer, unseen, unread, other_unseen, other_unread, timestamp)
                elif m['type'] in ['qprimer']:
                    self.on_qprimer(m.get('made'))
                elif m['type'] in ['delta']:
                    # Newer "delta" events cover group membership changes
                    # and new messages.
                    if 'leftParticipantFbId' in m['delta']:
                        user_id = m['delta']['leftParticipantFbId']
                        actor_id = m['delta']['messageMetadata']['actorFbId']
                        thread_id = m['delta']['messageMetadata']['threadKey']['threadFbId']
                        self.on_person_removed(user_id, actor_id, thread_id)
                    elif 'addedParticipants' in m['delta']:
                        user_ids = [x['userFbId'] for x in m['delta']['addedParticipants']]
                        actor_id = m['delta']['messageMetadata']['actorFbId']
                        thread_id = m['delta']['messageMetadata']['threadKey']['threadFbId']
                        self.on_people_added(user_ids, actor_id, thread_id)
                    elif 'messageMetadata' in m['delta']:
                        # The threadKey shape distinguishes group vs 1-on-1.
                        recipient_id = 0
                        thread_type = None
                        if 'threadKey' in m['delta']['messageMetadata']:
                            if 'threadFbId' in m['delta']['messageMetadata']['threadKey']:
                                recipient_id = m['delta']['messageMetadata']['threadKey']['threadFbId']
                                thread_type = 'group'
                            elif 'otherUserFbId' in m['delta']['messageMetadata']['threadKey']:
                                recipient_id = m['delta']['messageMetadata']['threadKey']['otherUserFbId']
                                thread_type = 'user'
                        mid = m['delta']['messageMetadata']['messageId']
                        message = m['delta'].get('body','')
                        fbid = m['delta']['messageMetadata']['actorFbId']
                        self.on_message_new(mid, fbid, message, m, recipient_id, thread_type)
                elif m['type'] in ['jewel_requests_add']:
                    from_id = m['from']
                    self.on_friend_request(from_id)
                else:
                    log.debug("Unknown type {}".format(m))
            except Exception as e:
                # Never let one malformed event kill the listen loop; route
                # the failure to the error callback instead.
                # ex_type, ex, tb = sys.exc_info()
                self.on_message_error(sys.exc_info(), m)
    def start_listening(self):
        """Start listening from an external event loop.

        Fetches the sticky/pool tokens the pull API requires and flips the
        listening flag; pair with do_one_listen()/stop_listening().
        """
        self.listening = True
        self.sticky, self.pool = self._getSticky()
    def do_one_listen(self, markAlive=True):
        """Does one cycle of the listening loop.

        This method is only useful if you want to control fbchat from an
        external event loop.

        :param markAlive: also ping presence so the account shows as active
        """
        try:
            if markAlive: self.ping(self.sticky)
            try:
                content = self._pullMessage(self.sticky, self.pool)
                if content: self._parseMessage(content)
            except requests.exceptions.RequestException as e:
                # Transient network trouble: skip this cycle and carry on.
                pass
        except KeyboardInterrupt:
            # Ctrl-C stops the enclosing listen() loop gracefully.
            self.listening = False
        except requests.exceptions.Timeout:
            pass
def stop_listening(self):
"""Cleans up the variables from start_listening."""
self.listening = False
self.sticky, self.pool = (None, None)
    def listen(self, markAlive=True):
        """Block and dispatch events until listening stops.

        Runs start_listening(), then loops do_one_listen() until the
        listening flag is cleared (e.g. by KeyboardInterrupt).

        :param markAlive: forwarded to do_one_listen()
        """
        self.start_listening()
        log.info("Listening...")
        while self.listening:
            self.do_one_listen(markAlive)
        self.stop_listening()
def getUserInfo(self, *user_ids):
"""Get user info from id. Unordered.
:param user_ids: one or more user id(s) to query
"""
def fbidStrip(_fbid):
# Stripping of `fbid:` from author_id
if type(_fbid) == int:
return _fbid
if type(_fbid) == str and 'fbid:' in _fbid:
return int(_fbid[5:])
user_ids = [fbidStrip(uid) for uid in user_ids]
data = {"ids[{}]".format(i):uid for i,uid in enumerate(user_ids)}
r = self._post(UserInfoURL, data)
info = get_json(r.text)
full_data= [details for profile,details in info['payload']['profiles'].items()]
if len(full_data)==1:
full_data=full_data[0]
return full_data
def remove_user_from_chat(self, threadID, userID):
"""Remove user (userID) from group chat (threadID)
:param threadID: group chat id
:param userID: user id to remove from chat
"""
data = {
"uid" : userID,
"tid" : threadID
}
r = self._post(RemoveUserURL, data)
return r.ok
    def add_users_to_chat(self, threadID, userID):
        """Add user (userID) to group chat (threadID).

        :param threadID: group chat id
        :param userID: user id to add to chat
        """
        # Delegates to send() in log-message mode (participant add).
        return self.send(threadID, is_user=False, add_user_ids=[userID])
def changeThreadTitle(self, threadID, newTitle):
"""Change title of a group conversation
:param threadID: group chat id
:param newTitle: new group chat title
"""
messageAndOTID = generateOfflineThreadingID()
timestamp = now()
date = datetime.now()
data = {
'client' : self.client,
'action_type' : 'ma-type:log-message',
'author' : 'fbid:' + str(self.uid),
'thread_id' : '',
'author_email' : '',
'coordinates' : '',
'timestamp' : timestamp,
'timestamp_absolute' : 'Today',
'timestamp_relative' : str(date.hour) + ":" + str(date.minute).zfill(2),
'timestamp_time_passed' : '0',
'is_unread' : False,
'is_cleared' : False,
'is_forward' : False,
'is_filtered_content' : False,
'is_spoof_warning' : False,
'source' : 'source:chat:web',
'source_tags[0]' : 'source:chat',
'status' : '0',
'offline_threading_id' : messageAndOTID,
'message_id' : messageAndOTID,
'threading_id': generateMessageID(self.client_id),
'manual_retry_cnt' : '0',
'thread_fbid' : threadID,
'log_message_data[name]' : newTitle,
'log_message_type' : 'log:thread-name'
}
r = self._post(SendURL, data)
return r.ok
def on_message_new(self, mid, author_id, message, metadata, recipient_id, thread_type):
"""subclass Client and override this method to add custom behavior on event
This version of on_message recieves recipient_id and thread_type.
For backwards compatability, this data is sent directly to the old on_message.
"""
self.on_message(mid, author_id, None, message, metadata)
def on_message(self, mid, author_id, author_name, message, metadata):
"""subclass Client and override this method to add custom behavior on event"""
self.markAsDelivered(author_id, mid)
self.markAsRead(author_id)
log.info("%s said: %s" % (author_name, message))
def on_friend_request(self, from_id):
"""subclass Client and override this method to add custom behavior on event"""
log.info("Friend request from %s." % from_id)
def on_typing(self, author_id):
"""subclass Client and override this method to add custom behavior on event"""
pass
def on_read(self, author, reader, time):
"""subclass Client and override this method to add custom behavior on event"""
pass
def on_people_added(self, user_ids, actor_id, thread_id):
"""subclass Client and override this method to add custom behavior on event"""
log.info("User(s) {} was added to {} by {}".format(repr(user_ids), thread_id, actor_id))
def on_person_removed(self, user_id, actor_id, thread_id):
"""subclass Client and override this method to add custom behavior on event"""
log.info("User {} was removed from {} by {}".format(user_id, thread_id, actor_id))
def on_inbox(self, viewer, unseen, unread, other_unseen, other_unread, timestamp):
"""subclass Client and override this method to add custom behavior on event"""
pass
def on_message_error(self, exception, message):
"""subclass Client and override this method to add custom behavior on event"""
log.warning("Exception:\n{}".format(exception))
def on_qprimer(self, timestamp):
pass
Update client.py
# -*- coding: UTF-8 -*-
"""
fbchat
~~~~~~
Facebook Chat (Messenger) for Python
:copyright: (c) 2015 by Taehoon Kim.
:copyright: (c) 2015-2016 by PidgeyL.
:license: BSD, see LICENSE for more details.
"""
import requests
import logging
from uuid import uuid1
import warnings
from random import choice
from datetime import datetime
from bs4 import BeautifulSoup as bs
from mimetypes import guess_type
from .utils import *
from .models import *
from .stickers import *
import time
import sys
# Python 3 does not have raw_input, whereas Python 2 has and it's more secure
try:
input = raw_input
except NameError:
pass
# URLs
LoginURL ="https://m.facebook.com/login.php?login_attempt=1"
SearchURL ="https://www.facebook.com/ajax/typeahead/search.php"
SendURL ="https://www.facebook.com/messaging/send/"
ThreadsURL ="https://www.facebook.com/ajax/mercury/threadlist_info.php"
ThreadSyncURL="https://www.facebook.com/ajax/mercury/thread_sync.php"
MessagesURL ="https://www.facebook.com/ajax/mercury/thread_info.php"
ReadStatusURL="https://www.facebook.com/ajax/mercury/change_read_status.php"
DeliveredURL ="https://www.facebook.com/ajax/mercury/delivery_receipts.php"
MarkSeenURL ="https://www.facebook.com/ajax/mercury/mark_seen.php"
BaseURL ="https://www.facebook.com"
MobileURL ="https://m.facebook.com/"
StickyURL ="https://0-edge-chat.facebook.com/pull"
PingURL ="https://0-channel-proxy-06-ash2.facebook.com/active_ping"
UploadURL ="https://upload.facebook.com/ajax/mercury/upload.php"
UserInfoURL ="https://www.facebook.com/chat/user_info/"
ConnectURL ="https://www.facebook.com/ajax/add_friend/action.php?dpr=1"
RemoveUserURL="https://www.facebook.com/chat/remove_participants/"
LogoutURL ="https://www.facebook.com/logout.php"
AllUsersURL ="https://www.facebook.com/chat/user_info_all"
SaveDeviceURL="https://m.facebook.com/login/save-device/cancel/"
CheckpointURL="https://m.facebook.com/login/checkpoint/"
facebookEncoding = 'UTF-8'
# Log settings
log = logging.getLogger("client")
class Client(object):
"""A client for the Facebook Chat (Messenger).
See http://github.com/carpedm20/fbchat for complete
documentation for the API.
"""
def __init__(self, email, password, debug=True, info_log=True, user_agent=None, max_retries=5, do_login=True):
"""A client for the Facebook Chat (Messenger).
:param email: Facebook `email` or `id` or `phone number`
:param password: Facebook account password
import fbchat
chat = fbchat.Client(email, password)
"""
if do_login and not (email and password):
raise Exception("Email and password not found.")
self.is_def_recipient_set = False
self.debug = debug
self.sticky, self.pool = (None, None)
self._session = requests.session()
self.req_counter = 1
self.seq = "0"
self.payloadDefault={}
self.client = 'mercury'
self.listening = False
self.GENDERS = {
0: 'unknown',
1: 'female_singular',
2: 'male_singular',
3: 'female_singular_guess',
4: 'male_singular_guess',
5: 'mixed',
6: 'neuter_singular',
7: 'unknown_singular',
8: 'female_plural',
9: 'male_plural',
10: 'neuter_plural',
11: 'unknown_plural',
}
if not user_agent:
user_agent = choice(USER_AGENTS)
self._header = {
'Content-Type' : 'application/x-www-form-urlencoded',
'Referer' : BaseURL,
'Origin' : BaseURL,
'User-Agent' : user_agent,
'Connection' : 'keep-alive',
}
# Configure the logger differently based on the 'debug' parameter
if debug:
logging_level = logging.DEBUG
elif info_log:
logging_level = logging.INFO
else:
logging_level = logging.WARNING
# Creates the console handler
handler = logging.StreamHandler()
handler.setLevel(logging_level)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
if do_login:
self.login(email, password, max_retries)
self.threads = []
def _console(self, msg):
"""Assumes an INFO level and log it.
This method shouldn't be used anymore.
Use the log itself:
>>> import logging
>>> from fbchat.client import log
>>> log.setLevel(logging.DEBUG)
You can do the same thing by adding the 'debug' argument:
>>> from fbchat import Client
>>> client = Client("...", "...", debug=True)
"""
warnings.warn(
"Client._console shouldn't be used. Use 'log.<level>'",
DeprecationWarning)
log.debug(msg)
def _setttstamp(self):
for i in self.fb_dtsg:
self.ttstamp += str(ord(i))
self.ttstamp += '2'
def _generatePayload(self, query):
"""Adds the following defaults to the payload:
__rev, __user, __a, ttstamp, fb_dtsg, __req
"""
payload = self.payloadDefault.copy()
if query:
payload.update(query)
payload['__req'] = str_base(self.req_counter, 36)
payload['seq'] = self.seq
self.req_counter += 1
return payload
def _get(self, url, query=None, timeout=30):
payload=self._generatePayload(query)
return self._session.get(url, headers=self._header, params=payload, timeout=timeout)
def _post(self, url, query=None, timeout=30):
payload=self._generatePayload(query)
return self._session.post(url, headers=self._header, data=payload, timeout=timeout)
def _cleanGet(self, url, query=None, timeout=30):
return self._session.get(url, headers=self._header, params=query, timeout=timeout)
def _cleanPost(self, url, query=None, timeout=30):
self.req_counter += 1
return self._session.post(url, headers=self._header, data=query, timeout=timeout)
def _postFile(self, url, files=None, timeout=30):
payload=self._generatePayload(None)
return self._session.post(url, data=payload, timeout=timeout, files=files)
def _post_login(self):
self.payloadDefault = {}
self.client_id = hex(int(random()*2147483648))[2:]
self.start_time = now()
self.uid = int(self._session.cookies['c_user'])
self.user_channel = "p_" + str(self.uid)
self.ttstamp = ''
r = self._get(BaseURL)
soup = bs(r.text, "lxml")
log.debug(r.text)
log.debug(r.url)
self.fb_dtsg = soup.find("input", {'name':'fb_dtsg'})['value']
self.fb_h = soup.find("input", {'name':'h'})['value']
self._setttstamp()
# Set default payload
self.payloadDefault['__rev'] = int(r.text.split('"revision":',1)[1].split(",",1)[0])
self.payloadDefault['__user'] = self.uid
self.payloadDefault['__a'] = '1'
self.payloadDefault['ttstamp'] = self.ttstamp
self.payloadDefault['fb_dtsg'] = self.fb_dtsg
self.form = {
'channel' : self.user_channel,
'partition' : '-2',
'clientid' : self.client_id,
'viewer_uid' : self.uid,
'uid' : self.uid,
'state' : 'active',
'format' : 'json',
'idle' : 0,
'cap' : '8'
}
self.prev = now()
self.tmp_prev = now()
self.last_sync = now()
def _login(self):
if not (self.email and self.password):
raise Exception("Email and password not found.")
soup = bs(self._get(MobileURL).text, "lxml")
data = dict((elem['name'], elem['value']) for elem in soup.findAll("input") if elem.has_attr('value') and elem.has_attr('name'))
data['email'] = self.email
data['pass'] = self.password
data['login'] = 'Log In'
r = self._cleanPost(LoginURL, data)
# Usually, 'Checkpoint' will refer to 2FA
if 'checkpoint' in r.url and 'Enter Security Code to Continue' in r.text:
r = self._2FA(r)
# Sometimes Facebook tries to show the user a "Save Device" dialog
if 'save-device' in r.url:
r = self._cleanGet(SaveDeviceURL)
if 'home' in r.url:
self._post_login()
return True
else:
return False
def _2FA(self, r):
soup = bs(r.text, "lxml")
data = dict()
s = input('Please enter your 2FA code --> ')
data['approvals_code'] = s
data['fb_dtsg'] = soup.find("input", {'name':'fb_dtsg'})['value']
data['nh'] = soup.find("input", {'name':'nh'})['value']
data['submit[Submit Code]'] = 'Submit Code'
data['codes_submitted'] = 0
log.info('Submitting 2FA code.')
r = self._cleanPost(CheckpointURL, data)
if 'home' in r.url:
return r
del(data['approvals_code'])
del(data['submit[Submit Code]'])
del(data['codes_submitted'])
data['name_action_selected'] = 'save_device'
data['submit[Continue]'] = 'Continue'
log.info('Saving browser.') # At this stage, we have dtsg, nh, name_action_selected, submit[Continue]
r = self._cleanPost(CheckpointURL, data)
if 'home' in r.url:
return r
del(data['name_action_selected'])
log.info('Starting Facebook checkup flow.') # At this stage, we have dtsg, nh, submit[Continue]
r = self._cleanPost(CheckpointURL, data)
if 'home' in r.url:
return r
del(data['submit[Continue]'])
data['submit[This was me]'] = 'This Was Me'
log.info('Verifying login attempt.') # At this stage, we have dtsg, nh, submit[This was me]
r = self._cleanPost(CheckpointURL, data)
if 'home' in r.url:
return r
del(data['submit[This was me]'])
data['submit[Continue]'] = 'Continue'
data['name_action_selected'] = 'save_device'
log.info('Saving device again.') # At this stage, we have dtsg, nh, submit[Continue], name_action_selected
r = self._cleanPost(CheckpointURL, data)
return r
def saveSession(self, sessionfile):
"""Dumps the session cookies to (sessionfile).
WILL OVERWRITE ANY EXISTING FILE
:param sessionfile: location of saved session file
"""
log.info('Saving session')
with open(sessionfile, 'w') as f:
# Grab cookies from current session, and save them as JSON
f.write(json.dumps(self._session.cookies.get_dict(), ensure_ascii=False))
def loadSession(self, sessionfile):
"""Loads session cookies from (sessionfile)
:param sessionfile: location of saved session file
"""
log.info('Loading session')
with open(sessionfile, 'r') as f:
try:
j = json.load(f)
if not j or 'c_user' not in j:
return False
# Load cookies into current session
self._session.cookies = requests.cookies.merge_cookies(self._session.cookies, j)
self._post_login()
return True
except Exception as e:
raise Exception('Invalid json in {}, or bad merging of cookies'.format(sessionfile))
def login(self, email, password, max_retries=5):
    """Log in to Facebook, retrying on failure.

    :param email: Facebook `email` or `id` or `phone number`
    :param password: Facebook account password
    :param max_retries: maximum number of login attempts (default 5)
    :raises Exception: if every attempt fails
    """
    log.info("Logging in...")
    self.email = email
    self.password = password
    for i in range(1, max_retries + 1):
        if self._login():
            log.info("Login successful.")
            break
        # Bug fix: the retry suffix used a hard-coded `i < 5` instead
        # of respecting the max_retries argument.
        suffix = ', retrying' if i < max_retries else ''
        log.warning("Attempt #{} failed{}".format(i, suffix))
        time.sleep(1)
    else:
        raise Exception("Login failed. Check email/password.")
def logout(self, timeout=30):
data = {
'ref': "mb",
'h': self.fb_h
}
payload=self._generatePayload(data)
r = self._session.get(LogoutURL, headers=self._header, params=payload, timeout=timeout)
# reset value
self.payloadDefault={}
self._session = requests.session()
self.req_counter = 1
self.seq = "0"
return r
def setDefaultRecipient(self, recipient_id, is_user=True):
"""Sets default recipient to send messages and images to.
:param recipient_id: the user id or thread id that you want to send a message to
:param is_user: determines if the recipient_id is for user or thread
"""
self.def_recipient_id = recipient_id
self.def_is_user = is_user
self.is_def_recipient_set = True
def _adapt_user_in_chat_to_user_model(self, user_in_chat):
""" Adapts user info from chat to User model acceptable initial dict
:param user_in_chat: user info from chat
'dir': None,
'mThumbSrcSmall': None,
'is_friend': False,
'is_nonfriend_messenger_contact': True,
'alternateName': '',
'i18nGender': 16777216,
'vanity': '',
'type': 'friend',
'searchTokens': ['Voznesenskij', 'Sergej'],
'thumbSrc': 'https://fb-s-b-a.akamaihd.net/h-ak-xfa1/v/t1.0-1/c9.0.32.32/p32x32/10354686_10150004552801856_220367501106153455_n.jpg?oh=71a87d76d4e4d17615a20c43fb8dbb47&oe=59118CE4&__gda__=1493753268_ae75cef40e9785398e744259ccffd7ff',
'mThumbSrcLarge': None,
'firstName': 'Sergej',
'name': 'Sergej Voznesenskij',
'uri': 'https://www.facebook.com/profile.php?id=100014812758264',
'id': '100014812758264',
'gender': 2
"""
return {
'type': 'user',
'uid': user_in_chat['id'],
'photo': user_in_chat['thumbSrc'],
'path': user_in_chat['uri'],
'text': user_in_chat['name'],
'score': '',
'data': user_in_chat,
}
def getAllUsers(self):
""" Gets all users from chat with info included """
data = {
'viewer': self.uid,
}
r = self._post(AllUsersURL, query=data)
if not r.ok or len(r.text) == 0:
return None
j = get_json(r.text)
if not j['payload']:
return None
payload = j['payload']
users = []
for k in payload.keys():
try:
user = self._adapt_user_in_chat_to_user_model(payload[k])
except KeyError:
continue
users.append(User(user))
return users
def getUsers(self, name):
"""Find and get user by his/her name
:param name: name of a person
"""
payload = {
'value' : name.lower(),
'viewer' : self.uid,
'rsp' : "search",
'context' : "search",
'path' : "/home.php",
'request_id' : str(uuid1()),
}
r = self._get(SearchURL, payload)
self.j = j = get_json(r.text)
users = []
for entry in j['payload']['entries']:
if entry['type'] == 'user':
users.append(User(entry))
return users # have bug TypeError: __repr__ returned non-string (type bytes)
def send(self, recipient_id=None, message=None, is_user=True, like=None, image_id=None, add_user_ids=None):
"""Send a message with given thread id
:param recipient_id: the user id or thread id that you want to send a message to
:param message: a text that you want to send
:param is_user: determines if the recipient_id is for user or thread
:param like: size of the like sticker you want to send
:param image_id: id for the image to send, gotten from the UploadURL
:param add_user_ids: a list of user ids to add to a chat
"""
if self.is_def_recipient_set:
recipient_id = self.def_recipient_id
is_user = self.def_is_user
elif recipient_id is None:
raise Exception('Recipient ID is not set.')
messageAndOTID = generateOfflineThreadingID()
timestamp = now()
date = datetime.now()
data = {
'client': self.client,
'author' : 'fbid:' + str(self.uid),
'timestamp' : timestamp,
'timestamp_absolute' : 'Today',
'timestamp_relative' : str(date.hour) + ":" + str(date.minute).zfill(2),
'timestamp_time_passed' : '0',
'is_unread' : False,
'is_cleared' : False,
'is_forward' : False,
'is_filtered_content' : False,
'is_filtered_content_bh': False,
'is_filtered_content_account': False,
'is_filtered_content_quasar': False,
'is_filtered_content_invalid_app': False,
'is_spoof_warning' : False,
'source' : 'source:chat:web',
'source_tags[0]' : 'source:chat',
'html_body' : False,
'ui_push_phase' : 'V3',
'status' : '0',
'offline_threading_id':messageAndOTID,
'message_id' : messageAndOTID,
'threading_id':generateMessageID(self.client_id),
'ephemeral_ttl_mode:': '0',
'manual_retry_cnt' : '0',
'signatureID' : getSignatureID()
}
if is_user:
data["other_user_fbid"] = recipient_id
else:
data["thread_fbid"] = recipient_id
if add_user_ids:
data['action_type'] = 'ma-type:log-message'
# It's possible to add multiple users
for i, add_user_id in enumerate(add_user_ids):
data['log_message_data[added_participants][' + str(i) + ']'] = "fbid:" + str(add_user_id)
data['log_message_type'] = 'log:subscribe'
else:
data['action_type'] = 'ma-type:user-generated-message'
data['body'] = message
data['has_attachment'] = image_id is not None
data['specific_to_list[0]'] = 'fbid:' + str(recipient_id)
data['specific_to_list[1]'] = 'fbid:' + str(self.uid)
if image_id:
data['image_ids[0]'] = image_id
if like:
try:
sticker = LIKES[like.lower()]
except KeyError:
# if user doesn't enter l or m or s, then use the large one
sticker = LIKES['l']
data["sticker_id"] = sticker
r = self._post(SendURL, data)
if r.ok:
log.info('Message sent.')
else:
log.info('Message not sent.')
if isinstance(r._content, str) is False:
r._content = r._content.decode(facebookEncoding)
j = get_json(r._content)
if 'error' in j:
# 'errorDescription' is in the users own language!
log.warning('Error #{} when sending message: {}'.format(j['error'], j['errorDescription']))
return False
log.debug("Sending {}".format(r))
log.debug("With data {}".format(data))
return True
def sendRemoteImage(self, recipient_id=None, message=None, is_user=True, image=''):
"""Send an image from a URL
:param recipient_id: the user id or thread id that you want to send a message to
:param message: a text that you want to send
:param is_user: determines if the recipient_id is for user or thread
:param image: URL for an image to download and send
"""
mimetype = guess_type(image)[0]
remote_image = requests.get(image).content
image_id = self.uploadImage({'file': (image, remote_image, mimetype)})
return self.send(recipient_id, message, is_user, None, image_id)
def sendLocalImage(self, recipient_id=None, message=None, is_user=True, image=''):
"""Send an image from a file path
:param recipient_id: the user id or thread id that you want to send a message to
:param message: a text that you want to send
:param is_user: determines if the recipient_id is for user or thread
:param image: path to a local image to send
"""
mimetype = guess_type(image)[0]
image_id = self.uploadImage({'file': (image, open(image, 'rb'), mimetype)})
return self.send(recipient_id, message, is_user, None, image_id)
def uploadImage(self, image):
"""Upload an image and get the image_id for sending in a message
:param image: a tuple of (file name, data, mime type) to upload to facebook
"""
r = self._postFile(UploadURL, image)
if isinstance(r._content, str) is False:
r._content = r._content.decode(facebookEncoding)
# Strip the start and parse out the returned image_id
return json.loads(r._content[9:])['payload']['metadata'][0]['image_id']
def getThreadInfo(self, userID, last_n=20, start=None, is_user=True):
"""Get the info of one Thread
:param userID: ID of the user you want the messages from
:param last_n: (optional) number of retrieved messages from start
:param start: (optional) the start index of a thread (Deprecated)
:param is_user: (optional) determines if the userID is for user or thread
"""
assert last_n > 0, 'length must be positive integer, got %d' % last_n
assert start is None, '`start` is deprecated, always 0 offset querry is returned'
if is_user:
key = 'user_ids'
else:
key = 'thread_fbids'
# deprecated
# `start` doesn't matter, always returns from the last
# data['messages[{}][{}][offset]'.format(key, userID)] = start
data = {'messages[{}][{}][offset]'.format(key, userID): 0,
'messages[{}][{}][limit]'.format(key, userID): last_n - 1,
'messages[{}][{}][timestamp]'.format(key, userID): now()}
r = self._post(MessagesURL, query=data)
if not r.ok or len(r.text) == 0:
return None
j = get_json(r.text)
if not j['payload']:
return None
messages = []
for message in j['payload']['actions']:
messages.append(Message(**message))
return list(reversed(messages))
def getThreadList(self, start, length=20):
    """Get thread list of your facebook account.

    :param start: the start index of a thread
    :param length: (optional) the length of a thread (max. 20)
    :return: the accumulated list of Thread objects on this client,
        or None if the request failed
    """
    assert length < 21, '`length` is deprecated, max. last 20 threads are returned'
    data = {
        'client' : self.client,
        'inbox[offset]' : start,
        'inbox[limit]' : length,
    }
    r = self._post(ThreadsURL, data)
    if not r.ok or len(r.text) == 0:
        return None
    j = get_json(r.text)
    # Map participant fbids to display names.
    participants = {}
    try:
        for participant in j['payload']['participants']:
            participants[participant["fbid"]] = participant["name"]
    except (KeyError, TypeError):
        # Unexpected payload shape; log it for diagnosis.
        log.warning(str(j))
    # Prevent duplicates in self.threads
    threadIDs = {t.thread_id for t in self.threads}
    for thread in j['payload']['threads']:
        if thread["thread_id"] not in threadIDs:
            try:
                thread["other_user_name"] = participants[int(thread["other_user_fbid"])]
            except (KeyError, TypeError, ValueError):
                # Group threads have no single other_user_fbid; the
                # old bare `except:` also hid KeyboardInterrupt etc.
                thread["other_user_name"] = ""
            self.threads.append(Thread(**thread))
    return self.threads
def getUnread(self):
form = {
'client': 'mercury_sync',
'folders[0]': 'inbox',
'last_action_timestamp': now() - 60*1000
# 'last_action_timestamp': 0
}
r = self._post(ThreadSyncURL, form)
if not r.ok or len(r.text) == 0:
return None
j = get_json(r.text)
result = {
"message_counts": j['payload']['message_counts'],
"unseen_threads": j['payload']['unseen_thread_ids']
}
return result
def markAsDelivered(self, userID, threadID):
    """Report the given message/thread as delivered for a user.

    :param userID: id of the user the thread belongs to
    :param threadID: id of the delivered thread/message
    :return: True if the request succeeded
    """
    form = {}
    form["message_ids[0]"] = threadID
    form["thread_ids[%s][0]" % userID] = threadID
    response = self._post(DeliveredURL, form)
    return response.ok
def markAsRead(self, userID):
data = {
"watermarkTimestamp": now(),
"shouldSendReadReceipt": True,
"ids[%s]" % userID: True
}
r = self._post(ReadStatusURL, data)
return r.ok
def markAsSeen(self):
r = self._post(MarkSeenURL, {"seen_timestamp": 0})
return r.ok
def friend_connect(self, friend_id):
data = {
"to_friend": friend_id,
"action": "confirm"
}
r = self._post(ConnectURL, data)
return r.ok
def ping(self, sticky):
data = {
'channel': self.user_channel,
'clientid': self.client_id,
'partition': -2,
'cap': 0,
'uid': self.uid,
'sticky': sticky,
'viewer_uid': self.uid
}
r = self._get(PingURL, data)
return r.ok
def _getSticky(self):
"""Call pull api to get sticky and pool parameter,
newer api needs these parameter to work.
"""
data = {
"msgs_recv": 0,
"channel": self.user_channel,
"clientid": self.client_id
}
r = self._get(StickyURL, data)
j = get_json(r.text)
if 'lb_info' not in j:
raise Exception('Get sticky pool error')
sticky = j['lb_info']['sticky']
pool = j['lb_info']['pool']
return sticky, pool
def _pullMessage(self, sticky, pool):
"""Call pull api with seq value to get message data."""
data = {
"msgs_recv": 0,
"sticky_token": sticky,
"sticky_pool": pool,
"clientid": self.client_id,
}
r = self._get(StickyURL, data)
r.encoding = facebookEncoding
j = get_json(r.text)
self.seq = j.get('seq', '0')
return j
def _parseMessage(self, content):
"""Get message and author name from content.
May contains multiple messages in the content.
"""
if 'ms' not in content: return
log.debug("Received {}".format(content["ms"]))
for m in content['ms']:
try:
if m['type'] in ['m_messaging', 'messaging']:
if m['event'] in ['deliver']:
mid = m['message']['mid']
message = m['message']['body']
fbid = m['message']['sender_fbid']
name = m['message']['sender_name']
self.on_message(mid, fbid, name, message, m)
elif m['type'] in ['typ']:
self.on_typing(m.get("from"))
elif m['type'] in ['m_read_receipt']:
self.on_read(m.get('realtime_viewer_fbid'), m.get('reader'), m.get('time'))
elif m['type'] in ['inbox']:
viewer = m.get('realtime_viewer_fbid')
unseen = m.get('unseen')
unread = m.get('unread')
other_unseen = m.get('other_unseen')
other_unread = m.get('other_unread')
timestamp = m.get('seen_timestamp')
self.on_inbox(viewer, unseen, unread, other_unseen, other_unread, timestamp)
elif m['type'] in ['qprimer']:
self.on_qprimer(m.get('made'))
elif m['type'] in ['delta']:
if 'leftParticipantFbId' in m['delta']:
user_id = m['delta']['leftParticipantFbId']
actor_id = m['delta']['messageMetadata']['actorFbId']
thread_id = m['delta']['messageMetadata']['threadKey']['threadFbId']
self.on_person_removed(user_id, actor_id, thread_id)
elif 'addedParticipants' in m['delta']:
user_ids = [x['userFbId'] for x in m['delta']['addedParticipants']]
actor_id = m['delta']['messageMetadata']['actorFbId']
thread_id = m['delta']['messageMetadata']['threadKey']['threadFbId']
self.on_people_added(user_ids, actor_id, thread_id)
elif 'messageMetadata' in m['delta']:
recipient_id = 0
thread_type = None
if 'threadKey' in m['delta']['messageMetadata']:
if 'threadFbId' in m['delta']['messageMetadata']['threadKey']:
recipient_id = m['delta']['messageMetadata']['threadKey']['threadFbId']
thread_type = 'group'
elif 'otherUserFbId' in m['delta']['messageMetadata']['threadKey']:
recipient_id = m['delta']['messageMetadata']['threadKey']['otherUserFbId']
thread_type = 'user'
mid = m['delta']['messageMetadata']['messageId']
message = m['delta'].get('body','')
fbid = m['delta']['messageMetadata']['actorFbId']
self.on_message_new(mid, fbid, message, m, recipient_id, thread_type)
elif m['type'] in ['jewel_requests_add']:
from_id = m['from']
self.on_friend_request(from_id)
else:
log.debug("Unknown type {}".format(m))
except Exception as e:
# ex_type, ex, tb = sys.exc_info()
self.on_message_error(sys.exc_info(), m)
def start_listening(self):
"""Start listening from an external event loop."""
self.listening = True
self.sticky, self.pool = self._getSticky()
def do_one_listen(self, markAlive=True):
    """Does one cycle of the listening loop.

    This method is only useful if you want to control fbchat from an
    external event loop.

    :param markAlive: ping the chat server so this client shows as
        active (default True)
    """
    try:
        if markAlive: self.ping(self.sticky)
        try:
            content = self._pullMessage(self.sticky, self.pool)
            if content: self._parseMessage(content)
        except requests.exceptions.RequestException as e:
            # Transient network problem (Timeout is a subclass of
            # RequestException, so it lands here too). Keep the loop
            # alive, but don't hide the failure completely.
            log.debug("Pull request failed: {}".format(e))
    except KeyboardInterrupt:
        self.listening = False
    except requests.exceptions.Timeout:
        # Only reachable for timeouts raised by self.ping(); pull
        # timeouts are already handled by the inner clause above.
        pass
def stop_listening(self):
"""Cleans up the variables from start_listening."""
self.listening = False
self.sticky, self.pool = (None, None)
def listen(self, markAlive=True):
self.start_listening()
log.info("Listening...")
while self.listening:
self.do_one_listen(markAlive)
self.stop_listening()
def getUserInfo(self, *user_ids):
    """Get user info from id. Unordered.

    :param user_ids: one or more user id(s) to query; each may be an
        int, a numeric string, or a string prefixed with 'fbid:'
    :return: a dict of profile details, or a list of such dicts when
        more than one id was queried
    """
    def fbidStrip(_fbid):
        # Strip the `fbid:` prefix used in author_id values.
        if isinstance(_fbid, int):
            return _fbid
        if isinstance(_fbid, str):
            if _fbid.startswith('fbid:'):
                return int(_fbid[5:])
            # Plain numeric string: previously fell off the end and
            # silently became None, producing a bogus query.
            return int(_fbid)
        raise ValueError("Unsupported user id: {!r}".format(_fbid))
    user_ids = [fbidStrip(uid) for uid in user_ids]
    data = {"ids[{}]".format(i): uid for i, uid in enumerate(user_ids)}
    r = self._post(UserInfoURL, data)
    info = get_json(r.text)
    full_data = [details for profile, details in info['payload']['profiles'].items()]
    if len(full_data) == 1:
        full_data = full_data[0]
    return full_data
def remove_user_from_chat(self, threadID, userID):
"""Remove user (userID) from group chat (threadID)
:param threadID: group chat id
:param userID: user id to remove from chat
"""
data = {
"uid" : userID,
"tid" : threadID
}
r = self._post(RemoveUserURL, data)
return r.ok
def add_users_to_chat(self, threadID, userID):
    """Add one or more users to a group chat.

    :param threadID: group chat id
    :param userID: a single user id (backwards compatible) or a
        list/tuple/set of user ids to add
    :return: True if the request succeeded
    """
    # The method name is plural but it previously only accepted a
    # single id; `send` already supports multiple add_user_ids.
    if isinstance(userID, (list, tuple, set)):
        user_ids = list(userID)
    else:
        user_ids = [userID]
    return self.send(threadID, is_user=False, add_user_ids=user_ids)
def changeThreadTitle(self, threadID, newTitle):
"""Change title of a group conversation
:param threadID: group chat id
:param newTitle: new group chat title
"""
messageAndOTID = generateOfflineThreadingID()
timestamp = now()
date = datetime.now()
data = {
'client' : self.client,
'action_type' : 'ma-type:log-message',
'author' : 'fbid:' + str(self.uid),
'thread_id' : '',
'author_email' : '',
'coordinates' : '',
'timestamp' : timestamp,
'timestamp_absolute' : 'Today',
'timestamp_relative' : str(date.hour) + ":" + str(date.minute).zfill(2),
'timestamp_time_passed' : '0',
'is_unread' : False,
'is_cleared' : False,
'is_forward' : False,
'is_filtered_content' : False,
'is_spoof_warning' : False,
'source' : 'source:chat:web',
'source_tags[0]' : 'source:chat',
'status' : '0',
'offline_threading_id' : messageAndOTID,
'message_id' : messageAndOTID,
'threading_id': generateMessageID(self.client_id),
'manual_retry_cnt' : '0',
'thread_fbid' : threadID,
'log_message_data[name]' : newTitle,
'log_message_type' : 'log:thread-name'
}
r = self._post(SendURL, data)
return r.ok
def on_message_new(self, mid, author_id, message, metadata, recipient_id, thread_type):
"""subclass Client and override this method to add custom behavior on event
This version of on_message recieves recipient_id and thread_type.
For backwards compatability, this data is sent directly to the old on_message.
"""
self.on_message(mid, author_id, None, message, metadata)
def on_message(self, mid, author_id, author_name, message, metadata):
"""subclass Client and override this method to add custom behavior on event"""
self.markAsDelivered(author_id, mid)
self.markAsRead(author_id)
log.info("%s said: %s" % (author_name, message))
def on_friend_request(self, from_id):
"""subclass Client and override this method to add custom behavior on event"""
log.info("Friend request from %s." % from_id)
def on_typing(self, author_id):
"""subclass Client and override this method to add custom behavior on event"""
pass
def on_read(self, author, reader, time):
"""subclass Client and override this method to add custom behavior on event"""
pass
def on_people_added(self, user_ids, actor_id, thread_id):
"""subclass Client and override this method to add custom behavior on event"""
log.info("User(s) {} was added to {} by {}".format(repr(user_ids), thread_id, actor_id))
def on_person_removed(self, user_id, actor_id, thread_id):
"""subclass Client and override this method to add custom behavior on event"""
log.info("User {} was removed from {} by {}".format(user_id, thread_id, actor_id))
def on_inbox(self, viewer, unseen, unread, other_unseen, other_unread, timestamp):
"""subclass Client and override this method to add custom behavior on event"""
pass
def on_message_error(self, exception, message):
"""subclass Client and override this method to add custom behavior on event"""
log.warning("Exception:\n{}".format(exception))
def on_qprimer(self, timestamp):
pass
|
from django.http import Http404
try:
from django.views.generic.edit import UpdateView
except ImportError:
from cbv.views.edit import UpdateView
from .admin import TranslatableModelAdminMixin
from .forms import translatable_modelform_factory, TranslatableModelForm
from .utils import collect_context_modifiers
class TranslatableBaseView(UpdateView, TranslatableModelAdminMixin):
form_class = TranslatableModelForm
def filter_kwargs(self):
    """
    ORM lookup kwargs derived from the URL pattern.

    Returns ``{self.slug_field: <slug>}`` when the URL captured a
    'slug' kwarg, otherwise the default ``{'pk': <object_id>}``.
    """
    url_kwargs = self.kwargs
    if "slug" in url_kwargs:
        return {self.slug_field: url_kwargs["slug"]}
    return {'pk': url_kwargs['object_id']}
def get_form_class(self):
language = self._language(self.request)
return translatable_modelform_factory(language, self.model, form=self.form_class)
def get_queryset(self):
if self.queryset is None:
if self.model:
language = self._language(self.request)
return self.model._default_manager.language(language)
def _get_object(self, queryset=None):
if not queryset:
queryset = self.get_queryset()
model = self.model
try:
obj = queryset.get(**self.filter_kwargs())
except self.model.DoesNotExist:
obj = None
if obj:
return obj
queryset = self.model.objects.untranslated()
try:
obj = queryset.get(**self.filter_kwargs())
except model.DoesNotExist:
return None
new_translation = model._meta.translations_model()
new_translation.language_code = self._language(self.request)
new_translation.master = obj
setattr(obj, model._meta.translations_cache, new_translation)
return obj
def context_modifier_languages_available(self, **kwargs):
context = {
'language_tabs': self.get_language_tabs(self.request, self.get_available_languages(self.object))
}
return context
def get_context_data(self, **kwargs):
context = super(TranslatableBaseView, self).get_context_data(**kwargs)
context.update(collect_context_modifiers(self, extra_kwargs=kwargs))
return context
class TranslatableCreateView(TranslatableBaseView, TranslatableModelAdminMixin):
    """Generic create view for translatable models.

    Untested, use with caution - or write tests if you see this :-)
    """
class TranslatableUpdateView(TranslatableBaseView, TranslatableModelAdminMixin):
    """A generic class based update view for translatable models."""

    def get_object(self, queryset=None):
        """Return the object for this view, raising Http404 when missing."""
        obj = self._get_object(queryset)
        if not obj:
            details = (self.model, self.filter_kwargs())
            raise Http404("%s instance with arguments %s does not exist" % details)
        return obj
Evaluating a queryset in a boolean context causes it to generate a query
from django.http import Http404
try:
from django.views.generic.edit import UpdateView
except ImportError:
from cbv.views.edit import UpdateView
from .admin import TranslatableModelAdminMixin
from .forms import translatable_modelform_factory, TranslatableModelForm
from .utils import collect_context_modifiers
class TranslatableBaseView(UpdateView, TranslatableModelAdminMixin):
    """Base class-based view for editing translatable (django-hvad) models.

    Combines Django's ``UpdateView`` with the hvad admin mixin so the form
    and queryset are always bound to the language of the current request.
    """

    form_class = TranslatableModelForm

    def filter_kwargs(self):
        """
        ORM Lookup kwargs from URL pattern
        Default {'pk': 'object_id'}
        Syntax:
        - {'model_attr': 'url_block_name'}
        """
        if "slug" in self.kwargs:
            return {self.slug_field: self.kwargs["slug"]}
        return {'pk': self.kwargs['object_id']}

    def get_form_class(self):
        # Build a model form bound to the request's active language.
        language = self._language(self.request)
        return translatable_modelform_factory(language, self.model, form=self.form_class)

    def get_queryset(self):
        # NOTE(review): returns None when neither ``queryset`` nor ``model``
        # is set, and ignores an explicitly set ``queryset`` attribute --
        # confirm that is intended.
        if self.queryset is None:
            if self.model:
                language = self._language(self.request)
                return self.model._default_manager.language(language)

    def _get_object(self, queryset=None):
        """Fetch the object, falling back to the untranslated queryset.

        If no translation exists for the active language, attach a fresh
        (unsaved) translation to the master instance so the form can edit
        it.  Returns ``None`` when the object does not exist at all.
        """
        if queryset is None:
            queryset = self.get_queryset()
        model = self.model
        # Consistency fix: both lookups now use the local ``model`` alias in
        # their except clauses (previously one used ``self.model`` and the
        # other ``model``, which are the same object but read differently).
        try:
            obj = queryset.get(**self.filter_kwargs())
        except model.DoesNotExist:
            obj = None
        if obj:
            return obj
        queryset = model.objects.untranslated()
        try:
            obj = queryset.get(**self.filter_kwargs())
        except model.DoesNotExist:
            return None
        new_translation = model._meta.translations_model()
        new_translation.language_code = self._language(self.request)
        new_translation.master = obj
        setattr(obj, model._meta.translations_cache, new_translation)
        return obj

    def context_modifier_languages_available(self, **kwargs):
        # Expose admin-style language tab data to the template.
        context = {
            'language_tabs': self.get_language_tabs(self.request, self.get_available_languages(self.object))
        }
        return context

    def get_context_data(self, **kwargs):
        context = super(TranslatableBaseView, self).get_context_data(**kwargs)
        context.update(collect_context_modifiers(self, extra_kwargs=kwargs))
        return context
class TranslatableCreateView(TranslatableBaseView, TranslatableModelAdminMixin):
    """Generic create view for translatable models.

    Untested, use with caution - or write tests if you see this :-)
    """
class TranslatableUpdateView(TranslatableBaseView, TranslatableModelAdminMixin):
    """A generic class based update view for translatable models."""

    def get_object(self, queryset=None):
        """Return the object for this view, raising Http404 when missing."""
        obj = self._get_object(queryset)
        if not obj:
            details = (self.model, self.filter_kwargs())
            raise Http404("%s instance with arguments %s does not exist" % details)
        return obj
|
#!/bin/python
class Phrase:
    """A phrase translation specific for a sentence.

    Attributes:
        s: start position of the covered source span (1-based).
        t: end position of the covered source span (1-based).
        e: target-side tokens of the translation.
        score: model score for this translation option.
    """

    def __init__(self, s, t, e, score):
        self.s, self.t = s, t
        self.e = e
        self.score = score
class TranslationModel:
    """A phrase translation model: wraps the phrase table, the language
    model and the decoding hyper-parameters."""

    def __init__(self, phrase_table, lm, dist_limit, dist_penalty):
        # Plain value container; no behavior of its own.
        for name, value in (("phrase_table", phrase_table), ("lm", lm),
                            ("dist_limit", dist_limit), ("dist_penalty", dist_penalty)):
            setattr(self, name, value)
class PhraseTable:
    """Stores the phrase table (the lexicon).

    Maps a tuple of source tokens to a list of ``(target_tokens, score)``
    candidates; the score is a weighted sum of -log features plus a
    per-target-word bonus.
    """

    def __init__(self, filename):
        """Load the table from a gzipped Moses-style phrase-table file."""
        self.table = dict()  # of source -> list(translation,score)
        self.min_score = 0   # lowest pre-bonus score seen; reused for copied words
        self.read_file(filename)

    def read_file(self, filename):
        """Read the phrase table from the file.

        Each line has five ``|||``-separated fields: source phrase, target
        phrase, (unused), (unused), and five probability features.
        """
        import gzip
        import math
        # Feature weights for the five -log(prob) features (order: P(f|e),
        # lex(f|e), P(e|f), lex(e|f), bias).
        weights = (-0.33, -0.25, -1.0, -0.35, -0.4)
        num_vals = 0
        scores = []  # all final scores, for the summary statistics
        with gzip.open(filename, 'rt') as f:
            for line in f:
                parts = line.strip().split("|||")
                assert len(parts) == 5
                # Bug fix: the file is opened in text mode ('rt'), so lines
                # are already str; the previous .decode("utf-8") calls raise
                # AttributeError on Python 3.
                src = parts[0].strip().split(" ")
                trg = parts[1].strip().split(" ")
                feats = parts[4].strip().split(" ")
                assert len(feats) == 5
                # Bug fix: materialize the floats (a lazy map() object is not
                # subscriptable on Python 3), and stop reassigning the outer
                # ``vals`` accumulator, which the old code clobbered each
                # iteration and then appended scores to, corrupting the
                # printed statistics.
                vals = [float(x) for x in feats]
                score = sum(-math.log(v) * w for v, w in zip(vals, weights))
                # min_score tracks the score *before* the word bonus.
                if score < self.min_score:
                    self.min_score = score
                # wordBonus 2.0
                score += (len(trg) * 2.0)
                key = tuple(src)
                if key not in self.table:
                    self.table[key] = []
                num_vals += 1
                scores.append(score)
                self.table[key].append((trg, score))
        print("Phrase table read: {} keys, {} vals".format(len(self.table), num_vals))
        if scores:  # robustness: avoid min()/max() on an empty file
            print("Score dist, min: {}, max: {}, avg: {}".format(
                min(scores), max(scores), sum(scores) / len(scores)))

    def check_phrase(self, phrase):
        """Check whether a foreign phrase exists in the table.

        Returns a tuple containing whether the translations were found and
        the list of the translations, i.e {(e,score)}.
        """
        tp = tuple(phrase)
        if tp in self.table:
            return (True, self.table[tp])
        else:
            return (False, None)

    def phrases(self, source_sent):
        """Return the set of valid phrases for the sentence."""
        P = []
        n = len(source_sent)
        # Bug fix: xrange() does not exist on Python 3; use range().
        for s in range(n):
            # include word dropping
            # p = Phrase(s+1,s+1,[],self.min_score)
            # P.append(p)
            for t in range(s, n):
                phrase = source_sent[s:t + 1]
                (present, trans) = self.check_phrase(phrase)
                if present:
                    for tt in trans:
                        P.append(Phrase(s + 1, t + 1, tt[0], tt[1]))
                elif t - s == 0:
                    # Word copying for unknown single words.  Bug fix: wrap
                    # the copied token in a list so Phrase.e is always a
                    # token list, like table translations.
                    P.append(Phrase(s + 1, t + 1, [source_sent[s]], self.min_score + 2.0))
        return P
if __name__ == "__main__":
pt = PhraseTable("data/phrasetable.txt.gz")
pt.phrases(["je", "m", "\'", "appelle"])
#pt.phrases(["ne", "vous", "en", "faites", "pas"])
Fix a bug in word copying: wrap the copied source token in a list so it matches the shape of table translations
#!/bin/python
class Phrase:
    """A phrase translation specific for a sentence.

    Attributes:
        s: start position of the covered source span (1-based).
        t: end position of the covered source span (1-based).
        e: target-side tokens of the translation.
        score: model score for this translation option.
    """

    def __init__(self, s, t, e, score):
        self.s, self.t = s, t
        self.e = e
        self.score = score
class TranslationModel:
    """A phrase translation model: wraps the phrase table, the language
    model and the decoding hyper-parameters."""

    def __init__(self, phrase_table, lm, dist_limit, dist_penalty):
        # Plain value container; no behavior of its own.
        for name, value in (("phrase_table", phrase_table), ("lm", lm),
                            ("dist_limit", dist_limit), ("dist_penalty", dist_penalty)):
            setattr(self, name, value)
class PhraseTable:
    """Stores the phrase table (the lexicon).

    Maps a tuple of source tokens to a list of ``(target_tokens, score)``
    candidates; the score is a weighted sum of -log features plus a
    per-target-word bonus.
    """

    def __init__(self, filename):
        """Load the table from a gzipped Moses-style phrase-table file."""
        self.table = dict()  # of source -> list(translation,score)
        self.min_score = 0   # lowest pre-bonus score seen; reused for copied words
        self.read_file(filename)

    def read_file(self, filename):
        """Read the phrase table from the file.

        Each line has five ``|||``-separated fields: source phrase, target
        phrase, (unused), (unused), and five probability features.
        """
        import gzip
        import math
        # Feature weights for the five -log(prob) features (order: P(f|e),
        # lex(f|e), P(e|f), lex(e|f), bias).
        weights = (-0.33, -0.25, -1.0, -0.35, -0.4)
        num_vals = 0
        scores = []  # all final scores, for the summary statistics
        with gzip.open(filename, 'rt') as f:
            for line in f:
                parts = line.strip().split("|||")
                assert len(parts) == 5
                # Bug fix: the file is opened in text mode ('rt'), so lines
                # are already str; the previous .decode("utf-8") calls raise
                # AttributeError on Python 3.
                src = parts[0].strip().split(" ")
                trg = parts[1].strip().split(" ")
                feats = parts[4].strip().split(" ")
                assert len(feats) == 5
                # Bug fix: materialize the floats (a lazy map() object is not
                # subscriptable on Python 3), and stop reassigning the outer
                # ``vals`` accumulator, which the old code clobbered each
                # iteration and then appended scores to, corrupting the
                # printed statistics.
                vals = [float(x) for x in feats]
                score = sum(-math.log(v) * w for v, w in zip(vals, weights))
                # min_score tracks the score *before* the word bonus.
                if score < self.min_score:
                    self.min_score = score
                # wordBonus 2.0
                score += (len(trg) * 2.0)
                key = tuple(src)
                if key not in self.table:
                    self.table[key] = []
                num_vals += 1
                scores.append(score)
                self.table[key].append((trg, score))
        print("Phrase table read: {} keys, {} vals".format(len(self.table), num_vals))
        if scores:  # robustness: avoid min()/max() on an empty file
            print("Score dist, min: {}, max: {}, avg: {}".format(
                min(scores), max(scores), sum(scores) / len(scores)))

    def check_phrase(self, phrase):
        """Check whether a foreign phrase exists in the table.

        Returns a tuple containing whether the translations were found and
        the list of the translations, i.e {(e,score)}.
        """
        tp = tuple(phrase)
        if tp in self.table:
            return (True, self.table[tp])
        else:
            return (False, None)

    def phrases(self, source_sent):
        """Return the set of valid phrases for the sentence."""
        P = []
        n = len(source_sent)
        # Bug fix: xrange() does not exist on Python 3; use range().
        for s in range(n):
            # include word dropping
            # p = Phrase(s+1,s+1,[],self.min_score)
            # P.append(p)
            for t in range(s, n):
                phrase = source_sent[s:t + 1]
                (present, trans) = self.check_phrase(phrase)
                if present:
                    for tt in trans:
                        P.append(Phrase(s + 1, t + 1, tt[0], tt[1]))
                elif t - s == 0:
                    # include word copying
                    P.append(Phrase(s + 1, t + 1, [source_sent[s]], self.min_score + 2.0))
        return P
if __name__ == "__main__":
pt = PhraseTable("data/phrasetable.txt.gz")
pt.phrases(["je", "m", "\'", "appelle"])
#pt.phrases(["ne", "vous", "en", "faites", "pas"])
|
__all__ = ['camera']

from os import getenv

from flask import Flask

app = Flask(__name__)

# Configuration is loaded from under the bot's root directory.
# NOTE(review): getenv() returns None when GBOT_ROOT_DIR is unset, which
# makes the concatenation below raise TypeError -- confirm the variable is
# guaranteed by the deployment environment.
config_file = getenv('GBOT_ROOT_DIR') + '/config/base.py'
app.config.from_pyfile(config_file)

# Imported last, for its side effect of registering routes on ``app``.
from feed.routes import *
Update __init__.py: define the /feed route inline instead of importing it from feed.routes
__all__ = ['camera']

from os import getenv

# NOTE(review): render_template is imported but not used in the visible
# code of this module -- confirm it is needed.
from flask import Flask, render_template, Response

app = Flask(__name__)

# Configuration is loaded from under the bot's root directory.
# NOTE(review): getenv() returns None when GBOT_ROOT_DIR is unset, which
# makes the concatenation below raise TypeError -- confirm the variable is
# guaranteed by the deployment environment.
config_file = getenv('GBOT_ROOT_DIR') + '/config/base.py'
app.config.from_pyfile(config_file)
@app.route('/feed')
def feed():
    """Serve the camera stream as an MJPEG response, or an empty 200 body
    when the camera is disabled in the configuration."""
    if not app.config['CAMERA_ENABLED']:
        return str(), 200
    # Imported lazily so camera hardware is only touched when enabled.
    from feed.camera import generate_feed, Camera
    return Response(generate_feed(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
|
"""Tests for sho1d.py"""
from sympy import *
from sympy.physics.quantum import *
from sympy.physics.quantum.qexpr import *
from sympy.physics.quantum.cartesian import *
from sympy.physics.quantum.sho1d import *
# Operators, states and symbols shared by the tests below.
ap = RaisingOp('a')    # raising (creation) operator
am = LoweringOp('a')   # lowering (annihilation) operator
k = SHOKet('k')        # generic number-state ket |k>
b = SHOBra('b')        # generic bra <b|
H = Hamiltonian('H')
N = NumberOp('N')
w = Symbol('omega')    # oscillator frequency
m = Symbol('m')        # mass
def test_ap():
    """Raising operator: adjoint, commutators, ket action, x/p rewrite."""
    assert adjoint(ap) == am
    assert Commutator(ap, am).doit() == Integer(-1)
    assert Commutator(ap, N).doit() == Integer(-1)*ap
    # a^dagger |k> = sqrt(k+1) |k+1>
    assert qapply(ap*k) == sqrt(k.n + 1)*SHOKet(k.n + 1)
    assert ap().rewrite('xp').doit() == \
        (Integer(1)/sqrt(Integer(2)*hbar*m*w))*(Integer(-1)*I*Px + m*w*X)
def test_am():
    """Lowering operator: adjoint, commutators, ket action, x/p rewrite."""
    assert adjoint(am) == ap
    assert Commutator(am, ap).doit() == Integer(1)
    assert Commutator(am, N).doit() == am
    # a |k> = sqrt(k) |k-1>
    assert qapply(am*k) == sqrt(k.n)*SHOKet(k.n-Integer(1))
    assert am().rewrite('xp').doit() == \
        (Integer(1)/sqrt(Integer(2)*hbar*m*w))*(I*Px + m*w*X)
def test_k():
    """Kets and bras are mutual duals; <b|k> reduces to a Kronecker delta."""
    assert SHOKet('k').dual_class() == SHOBra
    assert SHOBra('b').dual_class() == SHOKet
    assert InnerProduct(b,k).doit() == KroneckerDelta(k.n, b.n)
def test_N():
    """Number operator: ladder commutators, ket action, a/H rewrites."""
    assert Commutator(N, ap).doit() == ap
    assert Commutator(N, am).doit() == Integer(-1)*am
    assert Commutator(N, H).doit() == Integer(0)
    # N |k> = k |k>
    assert qapply(N*k) == k.n*k
    assert N().rewrite('a').doit() == ap*am
    assert N().rewrite('H').doit() == H/(hbar*w) - Integer(1)/Integer(2)
def test_H():
    """Hamiltonian: commutes with N; ladder, x/p and N rewrites."""
    assert Commutator(H, N).doit() == Integer(0)
    # H |k> = hbar*w*(k + 1/2) |k>
    assert qapply(H*k) == (hbar*w*(k.n + Integer(1)/Integer(2)))*k
    assert H().rewrite('a').doit() == hbar*w*(ap*am + Integer(1)/Integer(2))
    assert H().rewrite('am').doit() == hbar*w*(am*ap - Integer(1)/Integer(2))
    assert H().rewrite('xp').doit() == \
        (Integer(1)/(Integer(2)*m))*(Px**2 + (m*w*X)**2)
    assert H().rewrite('n').doit() == hbar*w*(N + Integer(1)/Integer(2))
Added .expand() to the qapply assertions
"""Tests for sho1d.py"""
from sympy import *
from sympy.physics.quantum import *
from sympy.physics.quantum.qexpr import *
from sympy.physics.quantum.cartesian import *
from sympy.physics.quantum.sho1d import *
# Operators, states and symbols shared by the tests below.
ad = RaisingOp('a')      # raising (creation) operator
a = LoweringOp('a')      # lowering (annihilation) operator
k = SHOKet('k')          # generic number-state ket |k>
b = SHOBra('b')          # generic bra <b|
H = Hamiltonian('H')
N = NumberOp('N')
omega = Symbol('omega')  # oscillator frequency
m = Symbol('m')          # mass
def test_ad():
    """Raising operator: adjoint, commutators, ket action, x/p rewrite."""
    assert adjoint(ad) == a
    assert Commutator(ad, a).doit() == Integer(-1)
    assert Commutator(ad, N).doit() == Integer(-1)*ad
    # a^dagger |k> = sqrt(k+1) |k+1>; compared in expanded form.
    assert qapply(ad*k) == (sqrt(k.n + 1)*SHOKet(k.n + 1)).expand()
    assert ad().rewrite('xp').doit() == \
        (Integer(1)/sqrt(Integer(2)*hbar*m*omega))*(Integer(-1)*I*Px + m*omega*X)
def test_a():
    """Lowering operator: adjoint, commutators, ket action, x/p rewrite."""
    assert adjoint(a) == ad
    assert Commutator(a, ad).doit() == Integer(1)
    assert Commutator(a, N).doit() == a
    # a |k> = sqrt(k) |k-1>; compared in expanded form.
    assert qapply(a*k) == (sqrt(k.n)*SHOKet(k.n-Integer(1))).expand()
    assert a().rewrite('xp').doit() == \
        (Integer(1)/sqrt(Integer(2)*hbar*m*omega))*(I*Px + m*omega*X)
def test_k():
    """Kets and bras are mutual duals; <b|k> reduces to a Kronecker delta."""
    assert SHOKet('k').dual_class() == SHOBra
    assert SHOBra('b').dual_class() == SHOKet
    assert InnerProduct(b,k).doit() == KroneckerDelta(k.n, b.n)
def test_N():
    """Number operator: ladder commutators, ket action, a/H rewrites."""
    assert Commutator(N, ad).doit() == ad
    assert Commutator(N, a).doit() == Integer(-1)*a
    assert Commutator(N, H).doit() == Integer(0)
    # N |k> = k |k>; compared in expanded form.
    assert qapply(N*k) == (k.n*k).expand()
    assert N().rewrite('a').doit() == ad*a
    assert N().rewrite('H').doit() == H/(hbar*omega) - Integer(1)/Integer(2)
def test_H():
    """Hamiltonian: commutes with N; ladder, x/p and N rewrites."""
    assert Commutator(H, N).doit() == Integer(0)
    # H |k> = hbar*omega*(k + 1/2) |k>; compared in expanded form.
    assert qapply(H*k) == ((hbar*omega*(k.n + Integer(1)/Integer(2)))*k).expand()
    assert H().rewrite('a').doit() == hbar*omega*(ad*a + Integer(1)/Integer(2))
    assert H().rewrite('xp').doit() == \
        (Integer(1)/(Integer(2)*m))*(Px**2 + (m*omega*X)**2)
    assert H().rewrite('n').doit() == hbar*omega*(N + Integer(1)/Integer(2))
|
#!/usr/bin/env python
"""
Import diagnostics. Run bin/diagnose_imports.py --help for details.
"""
from __future__ import print_function
if __name__ == "__main__":
import sys
import inspect
from sympy.core.compatibility import builtins
import optparse
from os.path import abspath, dirname, join, normpath
this_file = abspath(__file__)
sympy_dir = join(dirname(this_file), '..', '..', '..')
sympy_dir = normpath(sympy_dir)
sys.path.insert(0, sympy_dir)
option_parser = optparse.OptionParser(
usage=
"Usage: %prog option [options]\n"
"\n"
"Import analysis for imports between SymPy modules.")
option_group = optparse.OptionGroup(
option_parser,
'Analysis options',
'Options that define what to do. Exactly one of these must be given.')
option_group.add_option(
'--problems',
help=
'Print all import problems, that is: '
'If an import pulls in a package instead of a module '
'(e.g. sympy.core instead of sympy.core.add); ' # see ##PACKAGE##
'if it imports a symbol that is already present; ' # see ##DUPLICATE##
'if it imports a symbol '
'from somewhere other than the defining module.', # see ##ORIGIN##
action='count')
option_group.add_option(
'--origins',
help=
'For each imported symbol in each module, '
'print the module that defined it. '
'(This is useful for import refactoring.)',
action='count')
option_parser.add_option_group(option_group)
option_group = optparse.OptionGroup(
option_parser,
'Sort options',
'These options define the sort order for output lines. '
'At most one of these options is allowed. '
'Unsorted output will reflect the order in which imports happened.')
option_group.add_option(
'--by-importer',
help='Sort output lines by name of importing module.',
action='count')
option_group.add_option(
'--by-origin',
help='Sort output lines by name of imported module.',
action='count')
option_parser.add_option_group(option_group)
(options, args) = option_parser.parse_args()
if args:
option_parser.error(
'Unexpected arguments %s (try %s --help)' % (args, sys.argv[0]))
if options.problems > 1:
option_parser.error('--problems must not be given more than once.')
if options.origins > 1:
option_parser.error('--origins must not be given more than once.')
if options.by_importer > 1:
option_parser.error('--by-importer must not be given more than once.')
if options.by_origin > 1:
option_parser.error('--by-origin must not be given more than once.')
options.problems = options.problems == 1
options.origins = options.origins == 1
options.by_importer = options.by_importer == 1
options.by_origin = options.by_origin == 1
if not options.problems and not options.origins:
option_parser.error(
'At least one of --problems and --origins is required')
if options.problems and options.origins:
option_parser.error(
'At most one of --problems and --origins is allowed')
if options.by_importer and options.by_origin:
option_parser.error(
'At most one of --by-importer and --by-origin is allowed')
options.by_process = not options.by_importer and not options.by_origin
builtin_import = builtins.__import__
class Definition(object):
    """Information about a symbol's definition.

    Hashing uses only the name; equality uses name and value, so instances
    can key the ``symbol_definers`` mapping below.
    """

    def __init__(self, name, value, definer):
        self.name = name
        self.value = value
        self.definer = definer

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return self.name == other.name and self.value == other.value

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'Definition(%r, ..., %r)' % (self.name, self.definer)

symbol_definers = {}  # Definition -> name of the module that first provided it
def in_module(a, b):
    """Is a the same module as or a submodule of b?"""
    # Idiom fix: compare against None with ``is not`` rather than ``!=``.
    return a == b or (a is not None and b is not None and a.startswith(b + '.'))
def relevant(module):
    """Is module relevant for import checking?

    Only imports between relevant modules will be checked: anything inside
    sympy except the bundled sympy.mpmath.
    """
    if not in_module(module, 'sympy'):
        return False
    return not in_module(module, 'sympy.mpmath')

sorted_messages = []  # queued output lines when a sort order was requested
def msg(msg, *args):
    """Emit a message immediately (process order) or queue it for sorted
    output at the end of the run."""
    global options, sorted_messages
    text = msg % args
    if options.by_process:
        print(text)
    else:
        sorted_messages.append(text)
def tracking_import(module, globals=globals(), locals=[], fromlist=None, level=-1):
    """__import__ wrapper - does not change imports at all, but tracks them.

    Default order is implemented by doing output directly.
    All other orders are implemented by collecting output information into
    a sorted list that will be emitted after all imports are processed.

    Indirect imports can only occur after the requested symbol has been
    imported directly (because the indirect import would not have a module
    to pick the symbol up from).
    So this code detects indirect imports by checking whether the symbol in
    question was already imported.

    Keeps the semantics of __import__ unchanged.
    """
    # NOTE(review): the mutable default ``locals=[]`` and ``level=-1`` are
    # kept for interface compatibility; level=-1 is only valid on Python 2.
    global options, symbol_definers
    caller_frame = inspect.getframeinfo(sys._getframe(1))
    importer_filename = caller_frame.filename
    importer_module = globals['__name__']
    if importer_filename == caller_frame.filename:
        importer_reference = '%s line %s' % (
            importer_filename, str(caller_frame.lineno))
    else:
        importer_reference = importer_filename
    result = builtin_import(module, globals, locals, fromlist, level)
    importee_module = result.__name__
    # We're only interested if importer and importee are in SymPy
    if relevant(importer_module) and relevant(importee_module):
        # Record the defining module of each symbol the first time it is seen.
        # Bug fix: dict.iterkeys() and dict.has_key() were removed in
        # Python 3; iterate the dict directly and use the ``in`` operator.
        for symbol in result.__dict__:
            definition = Definition(
                symbol, result.__dict__[symbol], importer_module)
            if definition not in symbol_definers:
                symbol_definers[definition] = importee_module
        if hasattr(result, '__path__'):
            ##PACKAGE##
            # The existence of __path__ is documented in the tutorial on modules.
            # Python 3.3 documents this in http://docs.python.org/3.3/reference/import.html
            if options.by_origin:
                msg('Error: %s (a package) is imported by %s',
                    module, importer_reference)
            else:
                msg('Error: %s contains package import %s',
                    importer_reference, module)
        if fromlist is not None:
            symbol_list = fromlist
            if '*' in symbol_list:
                if (importer_filename.endswith('__init__.py')
                        or importer_filename.endswith('__init__.pyc')
                        or importer_filename.endswith('__init__.pyo')):
                    # We do not check starred imports inside __init__
                    # That's the normal "please copy over its imports to my namespace"
                    symbol_list = []
                else:
                    symbol_list = result.__dict__.keys()
            for symbol in symbol_list:
                if symbol not in result.__dict__:
                    if options.by_origin:
                        msg('Error: %s.%s is not defined (yet), but %s tries to import it',
                            importee_module, symbol, importer_reference)
                    else:
                        msg('Error: %s tries to import %s.%s, which did not define it (yet)',
                            importer_reference, importee_module, symbol)
                else:
                    definition = Definition(
                        symbol, result.__dict__[symbol], importer_module)
                    symbol_definer = symbol_definers[definition]
                    if symbol_definer == importee_module:
                        ##DUPLICATE##
                        if options.by_origin:
                            msg('Error: %s.%s is imported again into %s',
                                importee_module, symbol, importer_reference)
                        else:
                            msg('Error: %s imports %s.%s again',
                                importer_reference, importee_module, symbol)
                    else:
                        ##ORIGIN##
                        if options.by_origin:
                            msg('Error: %s.%s is imported by %s, which should import %s.%s instead',
                                importee_module, symbol, importer_reference, symbol_definer, symbol)
                        else:
                            msg('Error: %s imports %s.%s but should import %s.%s instead',
                                importer_reference, importee_module, symbol, symbol_definer, symbol)
    return result
# Install the tracking wrapper, import SymPy (which routes every internal
# import through the wrapper), then emit any queued, sorted messages.
builtins.__import__ = tracking_import
__import__('sympy')

sorted_messages.sort()
for message in sorted_messages:
    print(message)
Change dict.has_key(key) to key in dict
#!/usr/bin/env python
"""
Import diagnostics. Run bin/diagnose_imports.py --help for details.
"""
from __future__ import print_function
if __name__ == "__main__":
import sys
import inspect
from sympy.core.compatibility import builtins
import optparse
from os.path import abspath, dirname, join, normpath
this_file = abspath(__file__)
sympy_dir = join(dirname(this_file), '..', '..', '..')
sympy_dir = normpath(sympy_dir)
sys.path.insert(0, sympy_dir)
option_parser = optparse.OptionParser(
usage=
"Usage: %prog option [options]\n"
"\n"
"Import analysis for imports between SymPy modules.")
option_group = optparse.OptionGroup(
option_parser,
'Analysis options',
'Options that define what to do. Exactly one of these must be given.')
option_group.add_option(
'--problems',
help=
'Print all import problems, that is: '
'If an import pulls in a package instead of a module '
'(e.g. sympy.core instead of sympy.core.add); ' # see ##PACKAGE##
'if it imports a symbol that is already present; ' # see ##DUPLICATE##
'if it imports a symbol '
'from somewhere other than the defining module.', # see ##ORIGIN##
action='count')
option_group.add_option(
'--origins',
help=
'For each imported symbol in each module, '
'print the module that defined it. '
'(This is useful for import refactoring.)',
action='count')
option_parser.add_option_group(option_group)
option_group = optparse.OptionGroup(
option_parser,
'Sort options',
'These options define the sort order for output lines. '
'At most one of these options is allowed. '
'Unsorted output will reflect the order in which imports happened.')
option_group.add_option(
'--by-importer',
help='Sort output lines by name of importing module.',
action='count')
option_group.add_option(
'--by-origin',
help='Sort output lines by name of imported module.',
action='count')
option_parser.add_option_group(option_group)
(options, args) = option_parser.parse_args()
if args:
option_parser.error(
'Unexpected arguments %s (try %s --help)' % (args, sys.argv[0]))
if options.problems > 1:
option_parser.error('--problems must not be given more than once.')
if options.origins > 1:
option_parser.error('--origins must not be given more than once.')
if options.by_importer > 1:
option_parser.error('--by-importer must not be given more than once.')
if options.by_origin > 1:
option_parser.error('--by-origin must not be given more than once.')
options.problems = options.problems == 1
options.origins = options.origins == 1
options.by_importer = options.by_importer == 1
options.by_origin = options.by_origin == 1
if not options.problems and not options.origins:
option_parser.error(
'At least one of --problems and --origins is required')
if options.problems and options.origins:
option_parser.error(
'At most one of --problems and --origins is allowed')
if options.by_importer and options.by_origin:
option_parser.error(
'At most one of --by-importer and --by-origin is allowed')
options.by_process = not options.by_importer and not options.by_origin
builtin_import = builtins.__import__
class Definition(object):
    """Information about a symbol's definition.

    Hashing uses only the name; equality uses name and value, so instances
    can key the ``symbol_definers`` mapping below.
    """

    def __init__(self, name, value, definer):
        self.name = name
        self.value = value
        self.definer = definer

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return self.name == other.name and self.value == other.value

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'Definition(%r, ..., %r)' % (self.name, self.definer)

symbol_definers = {}  # Definition -> name of the module that first provided it
def in_module(a, b):
    """Is a the same module as or a submodule of b?"""
    # Idiom fix: compare against None with ``is not`` rather than ``!=``.
    return a == b or (a is not None and b is not None and a.startswith(b + '.'))
def relevant(module):
    """Is module relevant for import checking?

    Only imports between relevant modules will be checked: anything inside
    sympy except the bundled sympy.mpmath.
    """
    if not in_module(module, 'sympy'):
        return False
    return not in_module(module, 'sympy.mpmath')

sorted_messages = []  # queued output lines when a sort order was requested
def msg(msg, *args):
    """Emit a message immediately (process order) or queue it for sorted
    output at the end of the run."""
    global options, sorted_messages
    text = msg % args
    if options.by_process:
        print(text)
    else:
        sorted_messages.append(text)
def tracking_import(module, globals=globals(), locals=[], fromlist=None, level=-1):
    """__import__ wrapper - does not change imports at all, but tracks them.

    Default order is implemented by doing output directly.
    All other orders are implemented by collecting output information into
    a sorted list that will be emitted after all imports are processed.

    Indirect imports can only occur after the requested symbol has been
    imported directly (because the indirect import would not have a module
    to pick the symbol up from).
    So this code detects indirect imports by checking whether the symbol in
    question was already imported.

    Keeps the semantics of __import__ unchanged.
    """
    # NOTE(review): the mutable default ``locals=[]`` and ``level=-1`` are
    # kept for interface compatibility; level=-1 is only valid on Python 2.
    global options, symbol_definers
    caller_frame = inspect.getframeinfo(sys._getframe(1))
    importer_filename = caller_frame.filename
    importer_module = globals['__name__']
    if importer_filename == caller_frame.filename:
        importer_reference = '%s line %s' % (
            importer_filename, str(caller_frame.lineno))
    else:
        importer_reference = importer_filename
    result = builtin_import(module, globals, locals, fromlist, level)
    importee_module = result.__name__
    # We're only interested if importer and importee are in SymPy
    if relevant(importer_module) and relevant(importee_module):
        # Record the defining module of each symbol the first time it is seen.
        # Bug fix: dict.iterkeys() was removed in Python 3; iterate the dict.
        for symbol in result.__dict__:
            definition = Definition(
                symbol, result.__dict__[symbol], importer_module)
            if definition not in symbol_definers:
                symbol_definers[definition] = importee_module
        if hasattr(result, '__path__'):
            ##PACKAGE##
            # The existence of __path__ is documented in the tutorial on modules.
            # Python 3.3 documents this in http://docs.python.org/3.3/reference/import.html
            if options.by_origin:
                msg('Error: %s (a package) is imported by %s',
                    module, importer_reference)
            else:
                msg('Error: %s contains package import %s',
                    importer_reference, module)
        if fromlist is not None:
            symbol_list = fromlist
            if '*' in symbol_list:
                if (importer_filename.endswith('__init__.py')
                        or importer_filename.endswith('__init__.pyc')
                        or importer_filename.endswith('__init__.pyo')):
                    # We do not check starred imports inside __init__
                    # That's the normal "please copy over its imports to my namespace"
                    symbol_list = []
                else:
                    symbol_list = result.__dict__.keys()
            for symbol in symbol_list:
                if symbol not in result.__dict__:
                    if options.by_origin:
                        msg('Error: %s.%s is not defined (yet), but %s tries to import it',
                            importee_module, symbol, importer_reference)
                    else:
                        msg('Error: %s tries to import %s.%s, which did not define it (yet)',
                            importer_reference, importee_module, symbol)
                else:
                    definition = Definition(
                        symbol, result.__dict__[symbol], importer_module)
                    symbol_definer = symbol_definers[definition]
                    if symbol_definer == importee_module:
                        ##DUPLICATE##
                        if options.by_origin:
                            msg('Error: %s.%s is imported again into %s',
                                importee_module, symbol, importer_reference)
                        else:
                            msg('Error: %s imports %s.%s again',
                                importer_reference, importee_module, symbol)
                    else:
                        ##ORIGIN##
                        if options.by_origin:
                            msg('Error: %s.%s is imported by %s, which should import %s.%s instead',
                                importee_module, symbol, importer_reference, symbol_definer, symbol)
                        else:
                            msg('Error: %s imports %s.%s but should import %s.%s instead',
                                importer_reference, importee_module, symbol, symbol_definer, symbol)
    return result
# Install the tracking wrapper, import SymPy (which routes every internal
# import through the wrapper), then emit any queued, sorted messages.
builtins.__import__ = tracking_import
__import__('sympy')

sorted_messages.sort()
for message in sorted_messages:
    print(message)
|
#!/usr/bin/env python
"""dp2ppgen
Usage:
dp2ppgen [options] <infile> [<outfile>]
dp2ppgen -h | --help
dp2ppgen --version
Translates pgdp.org formatted text files into ppgen syntax.
Examples:
dp2ppgen book.txt
dp2ppgen book.txt book-src.txt
Options:
-c, --chapters Convert chapter headings into ppgen style chapter headings.
-d, --dryrun Run through conversions but do not write out result.
-e, --sections Convert section headings into ppgen style section headings.
-f, --footnotes Convert footnotes into ppgen format.
--fndest=<fndest> Where to relocate footnotes (paragraphend, chapterend, bookend, inline).
--fixup Perform guiguts style fixup operations.
--force Ignore markup errors and force operation.
-j, --joinspanned Join hypenations (-* *-) and formatting markup (/* */ /# #/) that spans page breaks
-k, --keeporiginal On any conversion keep original text as a comment.
-p, --pages Convert page breaks into ppgen // 001.png style, add .pn statements and comment out [Blank Page] lines.
-q, --quiet Print less text.
-v, --verbose Print more text.
-h, --help Show help.
--utf8 Convert characters to UTF8
--version Show version.
"""
from docopt import docopt
import glob
import re
import os
import sys
import logging
VERSION="0.1.0" # MAJOR.MINOR.PATCH | http://semver.org
# Limited check for syntax errors in dp markup of input file
def validateDpMarkup( inBuf ):
    """Scan inBuf (list of lines) for unbalanced pgdp markup and return the error count.

    Checks balance of out-of-line formatting (/* */ and /# #/), square
    brackets and inline <i></i>-style tags using a stack, plus a few
    specific single-line [Footnote] problems.  Only logs; never modifies
    the caller's buffer.
    """

    # TODO, someone must have written a more thorough version of this already.. use that instead
    logging.info("-- Checking input file for markup errors")

    inBuf = removeTrailingSpaces(inBuf)

    # Stack of currently open markup; each entry is {'ln': 1-based line, 'v': opening token}
    formattingStack = []
    lineNum = 0
    errorCount = 0
    while lineNum < len(inBuf):

        # Detect unbalanced out-of-line formatting markup /# #/ /* */
        m = re.match(r"^\/(\*|\#)", inBuf[lineNum])
        if m:
            d = ({'ln':lineNum+1,'v':"/{}".format(m.group(1))})
            formattingStack.append(d)

        m = re.match(r"^(\*|\#)\/", inBuf[lineNum])
        if m:
            v = m.group(1)
            # A closer must match the most recently opened block
            if len(formattingStack) == 0 or formattingStack[-1]['v'] != "/{}".format(v):
                errorCount += 1
                if len(formattingStack) == 0:
                    logging.error("Line {}: Unexpected {}/".format(lineNum+1,v))
                else:
                    logging.error("Line {}: Unexpected {}/, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
            else:
                formattingStack.pop()

        # # Check balance of [], {}, (), <i></i>
        # m = re.findall(r"(\[|\]|\{|\}|\(|\)|<\/?\w+>)", inBuf[lineNum])
        # # Check balance of [], {}, <i></i>
        # m = re.findall(r"(\[|\]|\{|\}|<\/?\w+>)", inBuf[lineNum])
        # Check balance of [], <i></i>
        m = re.findall(r"(\[|\]|<\/?\w+>)", inBuf[lineNum])
        for v in m:
            if v == "<tb>": # ignore
                pass
            elif v == "]": # closing markup
                if len(formattingStack) == 0 or formattingStack[-1]['v'] != "[":
                    errorCount += 1
                    if len(formattingStack) == 0:
                        logging.error("Line {}: Unexpected {}".format(lineNum+1,v))
                    else:
                        logging.error("Line {}: Unexpected {}, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
                else:
                    formattingStack.pop()
            # elif v == "}": # closing markup
            # 	if len(formattingStack) == 0 or formattingStack[-1]['v'] != "{":
            # 		errorCount += 1
            # 		if len(formattingStack) == 0:
            # 			logging.error("Line {}: Unexpected {}".format(lineNum+1,v))
            # 		else:
            # 			logging.error("Line {}: Unexpected {}, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
            # 		logging.debug("{}".format(formattingStack))
            # 	else:
            # 		formattingStack.pop()
            # Disabled as this will get false positives from diacritic markup [)x] and won't affect conversion anyways
            # 	if len(formattingStack) == 0 or formattingStack[-1]['v'] != "(":
            # 		errorCount += 1
            # 		if len(formattingStack) == 0:
            # 			logging.error("Line {}: Unexpected {}".format(lineNum+1,v))
            # 		else:
            # 			logging.error("Line {}: Unexpected {}, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
            # 		logging.debug("{}".format(formattingStack))
            # 	else:
            # 		formattingStack.pop()
            elif "/" in v: # closing markup
                v2 = re.sub("/","",v)
                if len(formattingStack) == 0 or formattingStack[-1]['v'] != v2:
                    errorCount += 1
                    if len(formattingStack) == 0:
                        logging.error("Line {}: Unexpected {}".format(lineNum+1,v))
                    else:
                        logging.error("Line {}: Unexpected {}, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
                else:
                    formattingStack.pop()
            else:
                # opening markup ("[" or an opening inline tag)
                d = ({'ln':lineNum+1,'v':v})
                formattingStack.append(d)

        # Check for specific issues that have caused conversion issues in the past

        # Single line [Footnote] does not end at closing ]
        # ex. [Footnote 1: Duine, <i>Saints de Domnonée</i>, pp. 5-12].
        if re.match(r"\*?\[Footnote(.*)\]\*?.*$", inBuf[lineNum]):
            if inBuf[lineNum].count('[') - inBuf[lineNum].count(']') == 0: # ignore multiline footnotes with proofer notes or some other [] markup within them
                if not (inBuf[lineNum][-1] == ']' or inBuf[lineNum][-2:] == ']*'):
                    errorCount += 1
                    logging.error("Line {}: Extra characters found after closing ']' in [Footnote]\n {}".format(lineNum+1,inBuf[lineNum]))

        # Extra text after out-of-line formatting markup
        # ex. /*[**new stanza?]
        if re.match(r"^(\/\*|\/\#|\*\/|\#\/).+", inBuf[lineNum]):
            errorCount += 1
            logging.error("Line {}: Extra text after out-of-line formatting markup\n {}".format(lineNum+1,inBuf[lineNum]))

        lineNum += 1

    # Chapters
    # Sections

    # Look for unresolved <i></i>, [], {}
    if len(formattingStack) > 0:
        errorCount += 1
        logging.error("Reached end of file with unresolved formatting markup, (probably due to previous markup error(s))")
        # Only dump the full stack when this is the sole error; the individual
        # mismatches were already logged above otherwise
        if errorCount == 1:
            logging.error("Unresolved markup:")
            s = "Line {}: '{}'".format(formattingStack[0]['ln'],formattingStack[0]['v'])
            for v in formattingStack[1:]:
                s += ", Line {}: '{}'".format(v['ln'],v['v'])
            logging.error(s)
        else:
            logging.debug(formattingStack)

    if errorCount > 0:
        logging.info("-- Found {} markup errors".format(errorCount) )

    return errorCount
# Format helper function, truncate to width and indicate truncation occurred with ...
def truncate( string, width ):
    """Truncate string to at most width characters, marking truncation with '...'.

    BUG FIX: for width < 3 the original computed string[:width-3] with a
    negative index, producing output LONGER than width (e.g. width=2 on
    'abcd' gave 'abc...').  Widths too small to hold an ellipsis now fall
    back to a plain hard cut.
    """
    if len(string) > width:
        if width >= 3:
            # room for the ellipsis marker
            string = string[:width-3] + '...'
        else:
            string = string[:max(width, 0)]
    return string
# Removes trailing spaces and tabs from an array of strings
def removeTrailingSpaces( inBuf ):
    """Return a new list with trailing spaces/tabs stripped from every line."""
    return [line.rstrip(" \t") for line in inBuf]
# Replace : [Blank Page]
# with    : // [Blank Page]
def processBlankPages( inBuf, keepOriginal ):
    """Comment out proofer [Blank Page] markers as ppgen // comments.

    When keepOriginal is true the original line is preserved above the
    replacement as a DP2PPGEN ORIGINAL comment.
    """
    result = []
    converted = 0
    logging.info("-- Processing blank pages")
    for idx, line in enumerate(inBuf):
        if re.match(r"^\[Blank Page]", line):
            if keepOriginal:
                result.append("// *** DP2PPGEN ORIGINAL: {}".format(line))
            result.append("// [Blank Page]")
            logging.debug("{:>{:d}}: '{}' to '{}'".format(str(idx+1),len(str(len(inBuf))),line,result[-1]))
            converted += 1
        else:
            result.append(line)
    logging.info("-- Processed {} blank pages".format(converted))
    return result
# Replace : -----File: 001.png---\sparkleshine\swankypup\Kipling\SeaRose\Scholar\------
# with    : // 001.png
def processPageNumbers( inBuf, keepOriginal ):
    """Convert -----File: nnn.ext--- page separators into ppgen .bn/.pn commands.

    BUG FIX: the original pattern r"-----File: (\\d+\\.png|jpg|jpeg).*"
    grouped the alternation incorrectly, so only .png separators were
    recognized; .jpg/.jpeg pages fell through unconverted.  Now uses the
    same grouping as parseScanPage().
    """
    outBuf = []
    lineNum = 0
    count = 0
    logging.info("-- Processing page numbers")
    while lineNum < len(inBuf):
        m = re.match(r"-----File: (\d+\.(png|jpg|jpeg)).*", inBuf[lineNum])
        if m:
            if keepOriginal:
                outBuf.append("// *** DP2PPGEN ORIGINAL: {}".format(inBuf[lineNum]))
            # outBuf.append("// {}".format(m.group(1)))
            outBuf.append(".bn {}".format(m.group(1)))
            outBuf.append(".pn +1")
            logging.debug("{:>{:d}}: '{}' to '{}, {}'".format(str(lineNum+1),len(str(len(inBuf))),inBuf[lineNum],outBuf[-2],outBuf[-1]))
            lineNum += 1
            count += 1
        else:
            outBuf.append(inBuf[lineNum])
            lineNum += 1
    logging.info("-- Processed {} page numbers".format(count))
    return outBuf
def isLineBlank( line ):
    """Match object when the line is empty or whitespace-only, else None."""
    blankPattern = r"^\s*$"
    return re.match(blankPattern, line)

def isLineComment( line ):
    """Match object only for a bare '//' comment line (optional trailing spaces), else None."""
    commentPattern = r"^\/\/ *$"
    return re.match(commentPattern, line)
def isLinePageBreak( line ):
    """True when line is a page break in any of the recognized forms.

    BUG FIX: the original returned (parseScanPage != None), which compares
    the FUNCTION OBJECT to None and is therefore always True — every line
    was treated as a page break.  The function must be called on the line.
    """
    return parseScanPage(line) is not None

def parseScanPage( line ):
    """Return the scan page image name (e.g. '001.png') if line is a page
    break in raw (-----File:), comment (// nnn.ext) or ppgen (.bn nnn.ext)
    form; otherwise None.  Later forms take precedence when several match.
    """
    scanPageNum = None
    for pattern in (r"-----File: (\d+\.(png|jpg|jpeg)).*",
                    r"\/\/ (\d+\.(png|jpg|jpeg))",
                    r"\.bn (\d+\.(png|jpg|jpeg))"):
        m = re.match(pattern, line)
        if m:
            scanPageNum = m.group(1)
    return scanPageNum
def formatAsID( s ):
    """Build a ppgen-style id from a heading: strip inline tags, spaces to
    underscores, drop punctuation, lowercase."""
    noMarkup = re.sub(r"<\/?\w+>", "", s)        # Remove inline markup
    underscored = noMarkup.replace(" ", "_")     # Replace spaces with underscore
    cleaned = re.sub(r"[^\w\s]", "", underscored) # Strip everything but alphanumeric and _
    return cleaned.lower()
def findNextEmptyLine( buf, startLine ):
    """Scan forward from startLine for the next blank line; stops at len(buf)-1."""
    cursor = startLine
    while cursor < len(buf)-1 and not isLineBlank(buf[cursor]):
        cursor += 1
    return cursor
def findPreviousEmptyLine( buf, startLine ):
    """Scan backward from startLine for the nearest blank line; -1 when none."""
    cursor = startLine
    while cursor >= 0 and not isLineBlank(buf[cursor]):
        cursor -= 1
    return cursor
def findNextNonEmptyLine( buf, startLine ):
    """Scan forward from startLine past blank lines; stops at len(buf)-1."""
    cursor = startLine
    while cursor < len(buf)-1 and isLineBlank(buf[cursor]):
        cursor += 1
    return cursor
def findPreviousNonEmptyLine( buf, startLine ):
    """Scan backward from startLine past blank lines; -1 when all preceding lines are blank."""
    cursor = startLine
    while cursor >= 0 and isLineBlank(buf[cursor]):
        cursor -= 1
    return cursor
# find previous line that contains original book text (ignore ppgen markup, proofing markup, blank lines)
def findPreviousLineOfText( buf, startLine ):
    """Backward scan for the previous line of book text, skipping blanks and
    lines starting with ppgen/proofing markup characters (. * # / [)."""
    cursor = findPreviousNonEmptyLine(buf, startLine)
    while cursor > 0 and re.match(r"[\.\*\#\/\[]", buf[cursor]):
        cursor = findPreviousNonEmptyLine(buf, cursor-1)
    return cursor
# find next line that contains original book text (ignore ppgen markup, proofing markup, blank lines)
def findNextLineOfText( buf, startLine ):
    """Forward scan for the next line of book text, skipping blanks, ppgen
    dot commands, out-of-line formatting markers, proofer notes and // comments."""
    cursor = findNextNonEmptyLine(buf, startLine)
    while cursor < len(buf)-1 and re.match(r"(\.[a-z0-9]{2} |[\*\#]\/|\/[\*\#]|\[\w+|\/\/)", buf[cursor]):
        cursor = findNextNonEmptyLine(buf, cursor+1)
    return cursor
def findNextChapter( buf, startLine ):
    """Index of the next '.h2' chapter line at/after startLine; stops at len(buf)-1."""
    idx = startLine
    while idx < len(buf)-1:
        if re.match(r"\.h2", buf[idx]):
            break
        idx += 1
    return idx
def processHeadings( inBuf, doChapterHeadings, doSectionHeadings, keepOriginal ):
    """Convert chapter/section heading blocks to ppgen .h2/.h3 markup.

    A chapter heading is a non-blank line preceded by 4 blank lines and
    terminated by 2 consecutive blank lines (or a page break); a section
    heading is preceded by 2 blank lines and ends at the first blank line.
    Heading lines are joined with '|' for the ppgen heading text.  When
    keepOriginal is true the original block is kept inside .ig/.ig-.
    Returns the transformed buffer.
    """
    outBuf = []
    lineNum = 0
    consecutiveEmptyLineCount = 0
    rewrapLevel = 0          # >0 while inside /* */ or /# #/ blocks (headings ignored there)
    foundChapterHeadingStart = False   # NOTE(review): never read below — dead variable?
    chapterCount = 0
    sectionCount = 0

    if doChapterHeadings and doSectionHeadings:
        logging.info("-- Processing chapter and section headings")
    if doChapterHeadings:
        logging.info("-- Processing chapter headings")
    if doSectionHeadings:
        logging.info("-- Processing section headings")

    while lineNum < len(inBuf):

        # Chapter heading blocks are in the form:
        # (4 empty lines)
        # chapter name
        # can span more than one line
        # (1 empty line)
        # chapter description, opening quote, etc., 1 empty line separating each
        # ...
        # (2 empty lines)

        # Section heading blocks are in the form
        # (2 empty lines)
        # section name
        # can span more than one line
        # (1 empty line)

        # Detect when inside out-of-line formatting block /# #/ /* */
        if re.match(r"^\/\*", inBuf[lineNum]) or re.match(r"^\/\#", inBuf[lineNum]):
            rewrapLevel += 1
        elif re.match(r"^\*\/", inBuf[lineNum]) or re.match(r"^\#\/", inBuf[lineNum]):
            rewrapLevel -= 1

        # Chapter heading
        if doChapterHeadings and consecutiveEmptyLineCount == 4 and not isLineBlank(inBuf[lineNum]) and rewrapLevel == 0:
            inBlock = []
            outBlock = []
            foundChapterHeadingEnd = False;
            consecutiveEmptyLineCount = 0;

            # Copy chapter heading block to inBlock
            while lineNum < len(inBuf) and not foundChapterHeadingEnd:
                if isLineBlank(inBuf[lineNum]):
                    consecutiveEmptyLineCount += 1
                    if consecutiveEmptyLineCount == 2:
                        foundChapterHeadingEnd = True
                        consecutiveEmptyLineCount = 0
                else:
                    consecutiveEmptyLineCount = 0

                # chapters don't span pages
                if isLinePageBreak(inBuf[lineNum]):
                    foundChapterHeadingEnd = True

                if foundChapterHeadingEnd:
                    # Remove empty lines from end of chapter heading block
                    # NOTE(review): assumes inBlock is non-empty here — confirm a page
                    # break cannot occur on the heading's first line
                    while isLineBlank(inBlock[-1]):
                        inBlock = inBlock[:-1]
                    # Rewind parser (to handle back to back chapter headings)
                    lineNum = findPreviousNonEmptyLine(inBuf, lineNum) + 1
                else:
                    inBlock.append(inBuf[lineNum])
                    lineNum += 1

            # Remove the four consecutive blank lines that precede chapter heading
            outBuf = outBuf[:-4]

            # Emit:
            # .sp 4
            # .h2 id=chapter_vi
            # CHAPTER VI.||chapter description etc..
            # .sp 2
            chapterID = formatAsID(inBlock[0])
            chapterLine = ""
            for line in inBlock:
                chapterLine += line
                chapterLine += "|"
            chapterLine = chapterLine[:-1]

            outBlock.append("")
            outBlock.append("// ******** DP2PPGEN GENERATED ****************************************")
            outBlock.append(".sp 4")
            outBlock.append(".h2 id={}".format(chapterID))
            outBlock.append(chapterLine)
            outBlock.append(".sp 2")

            if keepOriginal:
                # Write out original as a comment
                outBlock.append(".ig // *** DP2PPGEN BEGIN ORIGINAL ***********************************")
                outBlock.append("")
                outBlock.append("")
                outBlock.append("")
                for line in inBlock:
                    outBlock.append(line)
                outBlock.append(".ig- // *** END *****************************************************")

            # Write out chapter heading block
            for line in outBlock:
                outBuf.append(line)

            # Log action
            logging.info("--- .h2 {}".format(chapterLine))
            chapterCount += 1

        # Section heading
        elif doSectionHeadings and consecutiveEmptyLineCount == 2 and not isLineBlank(inBuf[lineNum]) and rewrapLevel == 0:
            inBlock = []
            outBlock = []
            foundSectionHeadingEnd = False;
            consecutiveEmptyLineCount = 0;

            # Copy section heading block to inBlock (ends at first blank line)
            while lineNum < len(inBuf) and not foundSectionHeadingEnd:
                if isLineBlank(inBuf[lineNum]):
                    foundSectionHeadingEnd = True
                else:
                    inBlock.append(inBuf[lineNum])
                    lineNum += 1

            # Remove two consecutive blank lines that precede section heading
            outBuf = outBuf[:-2]

            # Emit:
            # .sp 2
            # .h3 id=section_i
            # Section I.
            # .sp 1
            sectionID = formatAsID(inBlock[0])
            sectionLine = ""
            for line in inBlock:
                sectionLine += line
                sectionLine += "|"
            sectionLine = sectionLine[:-1]

            outBlock.append("// ******** DP2PPGEN GENERATED ****************************************")
            outBlock.append(".sp 2")
            outBlock.append(".h3 id={}".format(sectionID))
            outBlock.append(sectionLine)
            outBlock.append(".sp 1")

            if keepOriginal:
                # Write out original as a comment
                outBlock.append(".ig // *** DP2PPGEN BEGIN ORIGINAL ***********************************")
                outBlock.append("")
                outBlock.append("")
                for line in inBlock:
                    outBlock.append(line)
                outBlock.append(".ig- // *** END *****************************************************")

            # Write out chapter heading block
            for line in outBlock:
                outBuf.append(line)

            # Log action
            logging.info("--- .h3 {}".format(sectionID))
            sectionCount += 1

        else:
            # ordinary line: track blank-line run and pass it through
            if isLineBlank(inBuf[lineNum]):
                consecutiveEmptyLineCount += 1
            else:
                consecutiveEmptyLineCount = 0
            outBuf.append(inBuf[lineNum])
            lineNum += 1

    if doChapterHeadings:
        logging.info("-- Processed {} chapters".format(chapterCount))
    if doSectionHeadings:
        logging.info("-- Processed {} sections".format(sectionCount))

    return outBuf;
def loadFile(fn):
    """Load a text file and return it as a list of right-stripped lines.

    Tries to decode as ASCII first, then UTF-8 (stripping a BOM from the
    first line), then Latin-1; exits the process when the file is missing.
    """
    inBuf = []
    encoding = ""

    if not os.path.isfile(fn):
        logging.critical("specified file {} not found".format(fn))
        exit(1)

    if encoding == "":
        try:
            wbuf = open(fn, "r", encoding='ascii').read()
            encoding = "ASCII" # we consider ASCII as a subset of Latin-1 for DP purposes
            inBuf = wbuf.split("\n")
        except Exception as e:
            pass

    if encoding == "":
        try:
            # NOTE(review): "rU" mode is deprecated and removed in Python 3.11 —
            # confirm the target runtime, plain "r" already does universal newlines
            wbuf = open(fn, "rU", encoding='UTF-8').read()
            encoding = "utf_8"
            inBuf = wbuf.split("\n")
            # remove BOM on first line if present
            t = ":".join("{0:x}".format(ord(c)) for c in inBuf[0])
            if t[0:4] == 'feff':
                inBuf[0] = inBuf[0][1:]
        except:
            pass

    if encoding == "":
        try:
            wbuf = open(fn, "r", encoding='latin_1').read()
            encoding = "latin_1"
            inBuf = wbuf.split("\n")
        except Exception as e:
            pass

    if encoding == "":
        # NOTE(review): fatal() is not defined in this chunk — presumably a
        # helper elsewhere in the file; verify it exists
        fatal("Cannot determine input file decoding")
    else:
        # self.info("input file is: {}".format(encoding))
        if encoding == "ASCII":
            encoding = "latin_1" # handle ASCII as Latin-1 for DP purposes

    # strip trailing whitespace from every line
    for i in range(len(inBuf)):
        inBuf[i] = inBuf[i].rstrip()

    return inBuf;
def createOutputFileName( infile ):
    """Derive the default output file name by inserting '-out' before the extension.

    BUG FIX: the original used infile.split('.')[0], which truncated file
    names (and directory paths) containing extra dots, e.g. 'my.book.txt'
    became 'my-out.txt'.  os.path.splitext strips only the final extension.
    """
    # TODO make this smart.. is infile raw or ppgen source? maybe two functions needed
    root = os.path.splitext(infile)[0]
    return "{}-out.txt".format(root)
def stripFootnoteMarkup( inBuf ):
    """Return inBuf with every [Footnote ...] block removed (single or multi-line,
    including the '*[Footnote' / ']*' continuation variants)."""
    keptLines = []
    idx = 0
    total = len(inBuf)
    while idx < total:
        # copy inBuf to outBuf throwing away all footnote markup [Footnote...]
        if re.match(r"[\*]*\[Footnote", inBuf[idx]):
            # advance to the line that closes this block ("]" or "]*"), then step past it
            while idx < total and not re.search(r"][\*]*$", inBuf[idx]):
                idx += 1
            idx += 1
        else:
            keptLines.append(inBuf[idx])
            idx += 1
    return keptLines
def parseFootnotes( inBuf ):
    """Parse all [Footnote ...] blocks in inBuf and return a list of dicts,
    merging footnotes that continue across a page break."""
    # parse footnotes into a list of dictionaries with the following properties for each entry
    # startLine - line number of [Footnote start
    # endLine - line number of last line of [Footnote] block
    # fnBlock - list of lines containing full [Footnote:]
    # fnText - list of lines containing footnote text
    # paragraphEnd - line number of the blank line following the paragraph this footnote is located in
    # chapterEnd - line number of the blank line following the last paragraph in the chapter this footnote is located in
    # scanPageNumber - scan page this footnote is located on
    footnotes = []
    lineNum = 0
    currentScanPage = 0;

    logging.info("--- Parsing footnotes")
    while lineNum < len(inBuf):
        foundFootnote = False

        # Keep track of active scanpage
        if isLinePageBreak(inBuf[lineNum]):
            currentScanPage = parseScanPage(inBuf[lineNum])
            # logging.debug("Processing page "+currentScanPage)

        if re.match(r"\*?\[Footnote", inBuf[lineNum]):
            foundFootnote = True

        if foundFootnote:
            startLine = lineNum

            # Copy footnote block (advance until a line ending in "]" or "]*")
            fnBlock = []
            fnBlock.append(inBuf[lineNum])
            while lineNum < len(inBuf)-1 and not re.search(r"][\*]*$", inBuf[lineNum]):
                lineNum += 1
                fnBlock.append(inBuf[lineNum])

            endLine = lineNum

            # Is footnote part of a multipage footnote?
            # "*[Footnote" continues from a previous page; trailing "]*" continues onto the next
            needsJoining = False
            if re.match(r"\*\[Footnote", fnBlock[0]) or re.search(r"\]\*$", fnBlock[-1]):
                logging.debug("Footnote requires joining at line {}: {}".format(lineNum+1,inBuf[lineNum]))
                needsJoining = True
                foundFootnote = True

            # Find end of paragraph
            paragraphEnd = -1 # This must be done during footnote anchor processing as paragraph end is relative to anchor and not [Footnote] markup

            # Find end of chapter (line after last line of last paragraph)
            chapterEnd = -1 # This must be done during footnote anchor processing as chapter end is relative to anchor and not [Footnote] markup

            # Extract footnote ID
            # NOTE(review): when this regex fails, fnID keeps its value from the
            # previous footnote (or is unbound for the very first one, raising
            # NameError at the append below) — confirm intended
            m = re.search(r"^\[Footnote (\w{1,2}):", fnBlock[0])
            if m:
                fnID = m.group(1);

            # Extract footnote text from [Footnote] block
            fnText = []
            for line in fnBlock:
                line = re.sub(r"^\*\[Footnote: ", "", line)
                line = re.sub(r"^\[Footnote [A-Z]: ", "", line)
                line = re.sub(r"^\[Footnote \d+: ", "", line)
                line = re.sub(r"][\*]*$", "", line)
                fnText.append(line)

            # Add entry
            footnotes.append({'fnBlock':fnBlock, 'fnText':fnText, 'fnID':fnID, 'startLine':startLine, 'endLine':endLine, 'paragraphEnd':paragraphEnd, 'chapterEnd':chapterEnd, 'needsJoining':needsJoining, 'scanPageNum':currentScanPage})

        lineNum += 1

    logging.info("--- Parsed {} footnotes".format(len(footnotes)))
    # print(footnotes)

    # Join footnotes marked above during parsing
    joinCount = 0
    i = 0
    while i < len(footnotes):
        if footnotes[i]['needsJoining']:
            if joinCount == 0:
                logging.info("--- Joining footnotes")

            # debug message
            logging.debug("Merging footnote [{}]".format(i+1))
            if len(footnotes[i]['fnBlock']) > 1:
                logging.debug(" ScanPg {}: {} ... {} ".format(footnotes[i]['scanPageNum'], footnotes[i]['fnBlock'][0], footnotes[i]['fnBlock'][-1]))
            else:
                logging.debug(" ScanPg {}: {}".format(footnotes[i]['scanPageNum'], footnotes[i]['fnBlock'][0]))
            if len(footnotes[i+1]['fnBlock']) > 1:
                logging.debug(" ScanPg {}: {} ... {} ".format(footnotes[i+1]['scanPageNum'], footnotes[i+1]['fnBlock'][0], footnotes[i+1]['fnBlock'][-1]))
            else:
                logging.debug(" ScanPg {}: {}".format(footnotes[i+1]['scanPageNum'], footnotes[i+1]['fnBlock'][0]))

            # TODO: can footnotes span more than two pages?
            if not footnotes[i+1]['needsJoining']:
                logging.error("Attempt to join footnote failed!")
                logging.error("ScanPg {} Footnote {} ({}): {}".format(footnotes[i]['scanPageNum'], i,footnotes[i]['startLine']+1,footnotes[i]['fnBlock'][0]))
                logging.error("ScanPg {} Footnote {} ({}): {}".format(footnotes[i+1]['scanPageNum'], i+1,footnotes[i+1]['startLine']+1,footnotes[i+1]['fnBlock'][0]))
            else:
                # merge fnBlock and fnText from second into first
                footnotes[i]['fnBlock'].extend(footnotes[i+1]['fnBlock'])
                footnotes[i]['fnText'].extend(footnotes[i+1]['fnText'])
                footnotes[i]['needsJoining'] = False
                del footnotes[i+1]
                joinCount += 1

        i += 1

    if joinCount > 0:
        logging.info("--- Merged {} broken footnote(s)".format(joinCount))
        logging.info("--- {} total footnotes after joining".format(len(footnotes)))

    return footnotes;
def processFootnoteAnchors( inBuf, footnotes ):
    """Renumber footnote anchors ([A], [1], ...) sequentially across the book.

    Returns (outBuf, fnAnchorCount).  Edits anchors in the buffer in place
    and records 'paragraphEnd'/'chapterEnd' on each footnote dict, which
    generatePpgenFootnoteMarkup() later uses for insertion points.
    """
    outBuf = inBuf

    # process footnote anchors
    fnAnchorCount = 0
    lineNum = 0
    currentScanPage = 0
    currentScanPageLabel = ""
    fnIDs = []
    # r = []
    logging.info("--- Processing footnote anchors")
    while lineNum < len(outBuf):

        # Keep track of active scanpage
        if isLinePageBreak(outBuf[lineNum]):
            # NOTE(review): anchorsThisPage is only initialized here; an anchor
            # appearing before the first page break would raise NameError —
            # confirm input always starts with a page break line
            anchorsThisPage = []
            currentScanPage = parseScanPage(inBuf[lineNum])
            currentScanPageLabel = re.sub(r"\/\/ ","", outBuf[lineNum])
            # logging.debug("--- Processing page "+currentScanPage)

            # Make list of footnotes found on this page
            fnIDs = []
            for fn in footnotes:
                if fn['scanPageNum'] == currentScanPage:
                    fnIDs.append(fn['fnID'])

            # Build regex for footnote anchors that can be found on this scanpage
            # if len(fnIDs) > 0:
            # 	r = "|".join(fnIDs)
            # 	r = r"\[({})\]".format(r)

        # print("{}: {}".format(lineNum,outBuf[lineNum]))
        m = re.findall("\[([A-Za-z]|[0-9]{1,2})\]", outBuf[lineNum])
        for anchor in m:
            # Check that anchor found belongs to a footnote on this page
            if not anchor in fnIDs:
                logging.error("No matching footnote for anchor [{}] on scan page {} (line {} in output file):\n {}".format(anchor,currentScanPage,lineNum+1,outBuf[lineNum]))
                logging.debug(fnIDs)
            else:
                # replace [1] or [A] with [n]
                curAnchor = "\[{}\]".format(anchor)
                logging.debug("curAnchor={} anchorsThisPage={}".format(curAnchor,anchorsThisPage))
                # only advance the running count the first time this anchor appears on the page
                if not curAnchor in anchorsThisPage:
                    fnAnchorCount += 1
                    anchorsThisPage.append(curAnchor)

                newAnchor = "[{}]".format(fnAnchorCount)
                #TODO: add option to use ppgen autonumber? [#].. unsure if good reason to do this, would hide footnote mismatch errors and increase ppgen project compile times
                logging.debug("{:>5s}: ({}|{}) ... {} ...".format(newAnchor,lineNum+1,currentScanPageLabel,outBuf[lineNum]))
                for line in footnotes[fnAnchorCount-1]['fnText']:
                    logging.debug(" {}".format(line))

                # sanity check (anchor and footnote should be on same scan page)
                if currentScanPage != footnotes[fnAnchorCount-1]['scanPageNum']:
                    logging.fatal("Anchor found on different scan page, anchor({}) and footnotes({}) may be out of sync".format(currentScanPage,footnotes[fnAnchorCount-1]['scanPageNum']))
                    exit(1)

                # replace anchor
                outBuf[lineNum] = re.sub(curAnchor, newAnchor, outBuf[lineNum])

                # update paragraphEnd and chapterEnd so they are relative to anchor and not [Footnote
                # Find end of paragraph
                paragraphEnd = findNextEmptyLine(outBuf, lineNum)
                footnotes[fnAnchorCount-1]['paragraphEnd'] = paragraphEnd

                # Find end of chapter (line after last line of last paragraph)
                chapterEnd = findNextChapter(outBuf, lineNum)
                chapterEnd = findPreviousLineOfText(outBuf, chapterEnd) + 1
                footnotes[fnAnchorCount-1]['chapterEnd'] = chapterEnd

        lineNum += 1

    logging.info("--- Processed {} footnote anchors".format(fnAnchorCount))

    return outBuf, fnAnchorCount
def processFootnotes( inBuf, footnoteDestination, keepOriginal ):
    """Top-level footnote pipeline: normalize, parse, strip markup, renumber
    anchors and re-emit footnotes at footnoteDestination.

    footnoteDestination is passed through to generatePpgenFootnoteMarkup()
    ("bookend", "chapterend" or "paragraphend").  keepOriginal is accepted
    for interface consistency with the other process* functions but unused here.
    """
    outBuf = []

    logging.info("-- Processing footnotes")

    # strip empty lines before [Footnotes], *[Footnote
    lineNum = 0
    logging.info("--- Remove blank lines before [Footnotes]")
    while lineNum < len(inBuf):
        if re.match(r"\[Footnote", inBuf[lineNum]) or re.match(r"\*\[Footnote", inBuf[lineNum]):
            # delete previous blank line(s)
            while isLineBlank(outBuf[-1]):
                del outBuf[-1]

        outBuf.append(inBuf[lineNum])
        lineNum += 1
    inBuf = outBuf

    # parse footnotes into list of dictionaries
    footnotes = parseFootnotes(outBuf)

    # strip [Footnote markup
    outBuf = stripFootnoteMarkup(outBuf)

    # find and markup footnote anchors
    outBuf, fnAnchorCount = processFootnoteAnchors(outBuf, footnotes)

    if len(footnotes) != fnAnchorCount:
        logging.error("Footnote anchor count does not match footnote count")

    if len(footnotes) > 0:
        outBuf = generatePpgenFootnoteMarkup(outBuf, footnotes, footnoteDestination)

    return outBuf
# Generate ppgen footnote markup
def generatePpgenFootnoteMarkup( inBuf, footnotes, footnoteDestination ):
    """Insert ppgen .fn blocks for the parsed footnotes into the buffer.

    footnoteDestination selects placement: "bookend" appends one FOOTNOTES
    section at the end, "chapterend"/"paragraphend" insert groups at the
    'chapterEnd'/'paragraphEnd' indexes recorded by processFootnoteAnchors()
    (working backwards so earlier insertion points stay valid).  Any other
    value returns the buffer unchanged.
    """
    outBuf = inBuf

    if footnoteDestination == "bookend":
        logging.info("--- Adding ppgen style footnotes to end of book")
        fnMarkup = []
        fnMarkup.append(".pb")
        # text-build header
        fnMarkup.append(".if t")
        fnMarkup.append(".sp 4")
        fnMarkup.append(".ce")
        fnMarkup.append("FOOTNOTES:")
        fnMarkup.append(".sp 2")
        fnMarkup.append(".if-")
        # html-build header (boxed div)
        fnMarkup.append(".if h")
        fnMarkup.append(".de div.footnotes { border: dashed 1px #aaaaaa; padding: 1.5em; }")
        fnMarkup.append(".li")
        fnMarkup.append('<div class="footnotes">')
        fnMarkup.append(".li-")
        fnMarkup.append(".ce")
        fnMarkup.append("<xl>FOOTNOTES:</xl>")
        fnMarkup.append(".sp 2") #TODO: current ppgen doesn't add space (pvs not applied to .fn I bet)
        fnMarkup.append(".if-")

        for i, fn in enumerate(footnotes):
            fnMarkup.append(".fn {}".format(i+1))
            for line in fn['fnText']:
                fnMarkup.append(line)
            fnMarkup.append(".fn-")

        # close the html wrapper div
        fnMarkup.append(".if h")
        fnMarkup.append(".li")
        fnMarkup.append('</div>')
        fnMarkup.append(".li-")
        fnMarkup.append(".if-")

        outBuf.extend(fnMarkup)

    elif footnoteDestination == "chapterend":
        logging.info("--- Adding ppgen style footnotes to end of chapters")
        curChapterEnd = footnotes[-1]['chapterEnd']
        fnMarkup = []
        # iterate backwards so insertions don't shift indexes yet to be processed
        for i, fn in reversed(list(enumerate(footnotes))):

            if curChapterEnd != fn['chapterEnd']:
                # finish off last group
                outBuf.insert(curChapterEnd, ".fm")
                curChapterEnd = fn['chapterEnd']

            # build markup for this footnote
            # print("{} {}".format(fn['chapterEnd'],fn['fnText'][0]))
            fnMarkup.append(".fn {}".format(i+1))
            for line in fn['fnText']:
                fnMarkup.append(line)
            fnMarkup.append(".fn-")

            # insert it
            outBuf[curChapterEnd:curChapterEnd] = fnMarkup
            fnMarkup = []

        # finish off last group
        outBuf.insert(curChapterEnd, ".fm")

    elif footnoteDestination == "paragraphend":
        logging.info("--- Adding ppgen style footnotes to end of paragraphs")
        curParagraphEnd = footnotes[-1]['paragraphEnd']
        fnMarkup = []
        # iterate backwards so insertions don't shift indexes yet to be processed
        for i, fn in reversed(list(enumerate(footnotes))):

            if curParagraphEnd != fn['paragraphEnd']:
                # finish off last group
                outBuf.insert(curParagraphEnd, ".fm")
                curParagraphEnd = fn['paragraphEnd']

            # build markup for this footnote
            # print("{} {}".format(fn['paragraphEnd'],fn['fnText'][0]))
            fnMarkup.append(".fn {}".format(i+1))
            for line in fn['fnText']:
                fnMarkup.append(line)
            fnMarkup.append(".fn-")

            # insert it
            outBuf[curParagraphEnd:curParagraphEnd] = fnMarkup
            fnMarkup = []

        # finish off last group
        outBuf.insert(curParagraphEnd, ".fm")

    return outBuf
def joinSpannedFormatting( inBuf, keepOriginal ):
    """Join out-of-line formatting blocks (/* */ and /# #/) split by a page break.

    BUG FIXES: the skip-loop condition mixed 'and'/'or' without parentheses
    ('a and b or c or d'), so the re.match() alternatives were evaluated even
    when ln was out of range (possible IndexError), and the same missing
    bounds guard applied to the joinEndLineRegex check; the '.pn' pattern
    also had an unescaped '.' that matched any character.  keepOriginal is
    accepted for interface consistency and unused.
    """
    outBuf = []
    logging.info("-- Joining spanned out-of-line formatting markup")

    # Find:
    #   1: */
    #   2: // 010.png
    #   3:
    #   4: /*
    # Replace with:
    #   2: // 010.png
    #   3:
    lineNum = 0
    joinCount = 0
    while lineNum < len(inBuf):
        joinWasMade = False

        m = re.match(r"^(\*\/|\#\/)$", inBuf[lineNum])
        if m:
            outBlock = []
            ln = lineNum + 1
            # the opener that would resume the same block type, e.g. ^/\*$
            joinEndLineRegex = r"^\/\{}$".format(m.group(1)[0])

            # collect blank lines after the closing markup
            while ln < len(inBuf) and isLineBlank(inBuf[ln]):
                outBlock.append(inBuf[ln])
                ln += 1

            # keep an intervening page break line
            if ln < len(inBuf) and isLinePageBreak(inBuf[ln]):
                outBlock.append(inBuf[ln])
                ln += 1

            # collect blanks, .pn statements and // comments that follow the page break
            while ln < len(inBuf) and (isLineBlank(inBuf[ln]) or re.match(r"\.pn", inBuf[ln]) or re.match(r"\/\/", inBuf[ln])):
                outBlock.append(inBuf[ln])
                ln += 1

            # if the next line reopens the same block, drop both markers and join
            if ln < len(inBuf) and re.match(joinEndLineRegex, inBuf[ln]):
                for line in outBlock:
                    outBuf.append(line)
                joinWasMade = True
                joinCount += 1
                logging.debug("Lines {}, {}: Joined spanned markup /{} {}/".format(lineNum,ln,m.group(1)[0],m.group(1)[0]))
                lineNum = ln + 1

        if not joinWasMade:
            outBuf.append(inBuf[lineNum])
            lineNum += 1

    logging.info("-- Joined {} instances of spanned out-of-line formatting markup".format(joinCount))
    return outBuf
def joinSpannedHyphenations( inBuf, keepOriginal ):
    """Join words hyphenated across a page break (DP '-*' / '*' markers).

    BUG FIXES: a '-*' on the very last line used to index inBuf[lineNum+1]
    out of range; a one-word continuation line made split(' ',1)[1] raise
    IndexError; removed the never-read joinWasMade local.  keepOriginal is
    accepted for interface consistency and unused.
    """
    outBuf = []
    logging.info("-- Joining spanned hyphenations")

    # Find:
    #   1: the last word on this line is cont-*
    #   2: // 010.png
    #   3: *-inued. on the line below
    # Replace with:
    #   1: the last word on this line is cont-**-inued.
    #   2: // 010.png
    #   3: on the line below
    lineNum = 0
    joinCount = 0
    while lineNum < len(inBuf):
        if lineNum + 1 < len(inBuf) and re.search(r"\-\*$", inBuf[lineNum]) and isLinePageBreak(inBuf[lineNum+1]):
            ln = findNextLineOfText(inBuf,lineNum+1)
            if inBuf[ln].startswith('*'):
                # Remove first word from the continuation line and append it to the first line
                # secondPart = (inBuf[ln].split(' ',1)[0])[1:] # strip first word with leading * removed
                parts = inBuf[ln].split(' ',1)
                secondPart = parts[0]
                inBuf[ln] = parts[1] if len(parts) > 1 else ""
                inBuf[lineNum] = inBuf[lineNum] + secondPart
                logging.debug("Line {}: Resolved hyphenation, ... '{}'".format(lineNum+1,inBuf[lineNum][-30:]))
                # logging.info("Line {}: Resolved hyphenation\n '{}'".format(lineNum+1,inBuf[lineNum]))
                joinCount += 1
            else:
                logging.error("Line {}: Unresolved hyphenation\n {}\n {}".format(lineNum+1,inBuf[lineNum],inBuf[ln]))

        outBuf.append(inBuf[lineNum])
        lineNum += 1

    logging.info("-- Joined {} instances of spanned hyphenations".format(joinCount))
    return outBuf
def tabsToSpaces( inBuf, tabSize ):
    """Return a new list with every tab replaced by tabSize spaces."""
    padding = " " * tabSize
    return [line.replace("\t", padding) for line in inBuf]
def convertUTF8( inBuf ):
    """Convert DP ASCII conventions to their UTF-8 characters.

    '--' becomes an em dash, '----' becomes two em dashes, [oe]/[OE]
    become the œ/Œ ligatures.  Page break lines are left untouched so the
    scan page file names survive.

    BUG FIX: logging.warn() is a deprecated alias — use logging.warning().
    """
    outBuf = []
    for line in inBuf:
        if not isLinePageBreak(line):
            # -- becomes a unicode mdash, ---- becomes 2 unicode mdashes
            line = re.sub(r"(?<!-)-{2}(?!-)","—", line)
            line = re.sub(r"(?<!-)-{4}(?!-)","——", line)
            if "--" in line:
                logging.warning("Unconverted dashes: {}".format(line))

            # [oe] becomes œ
            # [OE] becomes Œ
            line = line.replace("[oe]", "œ")
            line = line.replace("[OE]", "Œ")

        outBuf.append(line)

    # Fractions?

    return outBuf
def convertThoughtBreaks( inBuf ):
    """Convert proofer <tb> thought-break lines to ppgen .tb commands.

    Only lines consisting of exactly '<tb>' are converted.
    """
    return [re.sub(r"^<tb>$", ".tb", line) for line in inBuf]
def removeBlankLinesAtPageEnds( inBuf ):
    """Drop any run of blank lines immediately preceding a page break line."""
    result = []
    for line in inBuf:
        if isLinePageBreak(line):
            # discard the blank run we just emitted before this page break
            while result and isLineBlank(result[-1]):
                del result[-1]
        result.append(line)
    return result
# TODO: Make this a tool in itself?
def fixup( inBuf, keepOriginal ):
    """Run the safe guiguts-style cleanup passes over the buffer.

    Currently applied, in order: tabs -> 4 spaces, strip trailing
    whitespace, <tb> -> .tb, drop blank lines before page breaks.
    The remaining guiguts fixes (space squeezing around hyphens and
    punctuation, brackets, angle/double quotes, ellipsis spacing,
    thought-break normalization, multiple-space collapse, l<->1 repair,
    optional /* */ skipping) are listed in the removeExtraSpaces notes
    below but not implemented.  keepOriginal is accepted for interface
    consistency and unused.
    """
    cleaned = tabsToSpaces(inBuf, 4)
    cleaned = removeTrailingSpaces(cleaned)
    cleaned = convertThoughtBreaks(cleaned)
    cleaned = removeBlankLinesAtPageEnds(cleaned)
    # cleaned = removeExtraSpaces(cleaned)  # disabled; see commented-out draft below
    return cleaned
#TODO: Full guiguts fixit seems error prone.. maybe only do safe defaults or break off into separate tool with each setting configurable, does gutsweeper do this already?
#def removeExtraSpaces( inBuf ):
# • Remove spaces on either side of hyphens.
# • Remove space before periods.
# • Remove space before exclamation points.
# • Remove space before question marks.
# • Remove space before commas.
# • Remove space before semicolons.
# • Remove space before colons.
# • Remove space after opening and before closing brackets. () [] {}
# • Remove space after open angle quote and before close angle quote.
# • Remove space after beginning and before ending double quote.
# • Ensure space before ellipses except after period.
# rewrapLevel = 0
# for line in inBuf:
# # Detect when inside out-of-line formatting block /# #/ /* */
# if re.match(r"^\/[\*\#]", inBuf[lineNum]):
# rewrapLevel += 1
# elif re.match(r"^[\*\#]\/", inBuf[lineNum]):
# rewrapLevel -= 1
#
# if rewrapLevel == 0:
# # Remove multiple spaces
# # $line =~ s/(?<=\S)\s\s+(?=\S)/
# line = re.sub(r"(?<=\S)\s\s+(?=\S)","", line)
#
# # Remove spaces on either side of hyphens.
# # Remove spaces before hyphen (only if hyphen isn't first on line, like poetry)
# # $line =~ s/(\S) +-/$1-/g;
# line = re.sub(r"(\S) +-","\1-", line)
#
# # Remove space after hyphen
# # $line =~ s/- /-/g;
# line = re.sub(r"- ","-", line)
#
# # Except leave a space after a string of three or more hyphens
# # $line =~ s/(?<![-])([-]*---)(?=[^\s\\"F-])/$1 /g
# line = re.sub(r'(?<!-)(-*---)(?=[^\s\\"F-])',"\1", line)
#
# outBuf.append(line)
#
# return outBuf
#
# $edited++ if $line =~ s/- /-/g; # Remove space after hyphen
# $edited++
# if $line =~ s/(?<![-])([-]*---)(?=[^\s\\"F-])/$1 /g
# ; # Except leave a space after a string of three or more hyphens
#
#
#
# if ( ${ $::lglobal{fixopt} }[1] ) {
# ; # Remove spaces before hyphen (only if hyphen isn't first on line, like poetry)
# $edited++ if $line =~ s/(\S) +-/$1-/g;
# $edited++ if $line =~ s/- /-/g; # Remove space after hyphen
# $edited++
# if $line =~ s/(?<![-])([-]*---)(?=[^\s\\"F-])/$1 /g
# ; # Except leave a space after a string of three or more hyphens
# }
# if ( ${ $::lglobal{fixopt} }[3] ) {
# ; # Remove space before periods (only if not first on line, like poetry's ellipses)
# $edited++ if $line =~ s/(\S) +\.(?=\D)/$1\./g;
# }
# ; # Get rid of space before periods
# if ( ${ $::lglobal{fixopt} }[4] ) {
# $edited++
# if $line =~ s/ +!/!/g;
# }
# ; # Get rid of space before exclamation points
# if ( ${ $::lglobal{fixopt} }[5] ) {
# $edited++
# if $line =~ s/ +\?/\?/g;
# }
# ; # Get rid of space before question marks
# if ( ${ $::lglobal{fixopt} }[6] ) {
# $edited++
# if $line =~ s/ +\;/\;/g;
# }
# ; # Get rid of space before semicolons
# if ( ${ $::lglobal{fixopt} }[7] ) {
# $edited++
# if $line =~ s/ +:/:/g;
# }
# ; # Get rid of space before colons
# if ( ${ $::lglobal{fixopt} }[8] ) {
# $edited++
# if $line =~ s/ +,/,/g;
# }
# ; # Get rid of space before commas
# # FIXME way to go on managing quotes
# if ( ${ $::lglobal{fixopt} }[9] ) {
# $edited++
# if $line =~ s/^\" +/\"/
# ; # Remove space after doublequote if it is the first character on a line
# $edited++
# if $line =~ s/ +\"$/\"/
# ; # Remove space before doublequote if it is the last character on a line
# }
# if ( ${ $::lglobal{fixopt} }[10] ) {
# $edited++
# if $line =~ s/(?<=(\(|\{|\[)) //g
# ; # Get rid of space after opening brackets
# $edited++
# if $line =~ s/ (?=(\)|\}|\]))//g
# ; # Get rid of space before closing brackets
# }
# ; # FIXME format to standard thought breaks - changed to <tb>
# if ( ${ $::lglobal{fixopt} }[11] ) {
# $edited++
#
# # if $line =~
# # s/^\s*(\*\s*){5}$/ \* \* \* \* \*\n/;
# if $line =~ s/^\s*(\*\s*){4,}$/<tb>\n/;
# }
# $edited++ if ( $line =~ s/ +$// );
# ; # Fix llth, lst
# if ( ${ $::lglobal{fixopt} }[12] ) {
# $edited++ if $line =~ s/llth/11th/g;
# $edited++ if $line =~ s/(?<=\d)lst/1st/g;
# $edited++ if $line =~ s/(?<=\s)lst/1st/g;
# $edited++ if $line =~ s/^lst/1st/;
# }
# ; # format ellipses correctly
# if ( ${ $::lglobal{fixopt} }[13] ) {
# $edited++ if $line =~ s/(?<![\.\!\?])\.{3}(?!\.)/ \.\.\./g;
# $edited++ if $line =~ s/^ \./\./;
# }
# ; # format guillemets correctly
# ; # french guillemets
# if ( ${ $::lglobal{fixopt} }[14] and ${ $::lglobal{fixopt} }[15] ) {
# $edited++ if $line =~ s/«\s+/«/g;
# $edited++ if $line =~ s/\s+»/»/g;
# }
# ; # german guillemets
# if ( ${ $::lglobal{fixopt} }[14] and !${ $::lglobal{fixopt} }[15] )
# {
# $edited++ if $line =~ s/\s+«/«/g;
# $edited++ if $line =~ s/»\s+/»/g;
# }
# $update++ if ( ( $index % 250 ) == 0 );
# $textwindow->see($index) if ( $edited || $update );
# if ($edited) {
# $textwindow->replacewith( $lastindex, $index, $line );
# }
# }
# $textwindow->markSet( 'insert', $index ) if $update;
# $textwindow->update if ( $edited || $update );
# ::update_indicators() if ( $edited || $update );
# $edited = 0;
# $update = 0;
# $lastindex = $index;
# $index++;
# $index .= '.0';
# if ( $index > $end ) { $index = $end }
# if ($::operationinterrupt) { $::operationinterrupt = 0; return }
# }
# $textwindow->markSet( 'insert', 'end' );
# $textwindow->see('end');
# ::update_indicators();
#}
def doStandardConversions( inBuf, keepOriginal ):
    """Run the conversions applied on every invocation: strip trailing
    whitespace, then convert <tb> thought breaks.  keepOriginal is
    unused here but kept for interface parity with the other passes."""
    return convertThoughtBreaks(removeTrailingSpaces(inBuf))
def main():
    """Command-line entry point.

    Parses arguments with docopt, loads the input file, validates the DP
    markup, runs the selected conversion passes in a fixed order, and
    writes the result unless --dryrun was given.
    """
    args = docopt(__doc__, version="dp2ppgen v{}".format(VERSION))

    # Process required command line arguments
    outfile = createOutputFileName(args['<infile>'])
    if args['<outfile>']:
        outfile = args['<outfile>']

    infile = args['<infile>']

    # Open source file and represent as an array of lines
    inBuf = loadFile(infile)

    # Configure logging
    logLevel = logging.INFO  # default
    if args['--verbose']:
        logLevel = logging.DEBUG
    elif args['--quiet']:
        logLevel = logging.ERROR

    logging.basicConfig(format='%(levelname)s: %(message)s', level=logLevel)
    logging.debug(args)

    # Processing options
    doChapterHeadings = args['--chapters']
    doSectionHeadings = args['--sections']
    doFootnotes = args['--footnotes']
    doPages = args['--pages']
    doJoinSpanned = args['--joinspanned']
    doFixup = args['--fixup']
    doUTF8 = args['--utf8']

    # Use default options if no processing options are set
    anyOptionSet = (doChapterHeadings or doSectionHeadings or doFootnotes or
                    doPages or doFixup or doUTF8 or doJoinSpanned)
    if not anyOptionSet:
        # NOTE: --fixup is deliberately NOT part of the defaults (it is error
        # prone); the old message wrongly advertised it as enabled.
        logging.info("No processing options were given, using default set of options -pcfj --utf8\n Run 'dp2ppgen -h' for a full list of options")
        doPages = True
        doChapterHeadings = True
        doFootnotes = True
        doFixup = False  # guiguts-style fixup is too error prone to enable by default
        doUTF8 = True
        doJoinSpanned = True

    # Process source document
    logging.info("Processing '{}'".format(infile))
    outBuf = inBuf
    errorCount = validateDpMarkup(inBuf)
    if errorCount > 0 and not args['--force']:
        logging.critical("Correct markup issues then re-run operation, or use --force to ignore markup errors")
    else:
        outBuf = doStandardConversions(outBuf, args['--keeporiginal'])

        if doPages:
            outBuf = processBlankPages(outBuf, args['--keeporiginal'])
            outBuf = processPageNumbers(outBuf, args['--keeporiginal'])
        if doFixup:
            outBuf = fixup(outBuf, args['--keeporiginal'])
        if doUTF8:
            outBuf = convertUTF8(outBuf)
        if doJoinSpanned:
            outBuf = joinSpannedFormatting(outBuf, args['--keeporiginal'])
            outBuf = joinSpannedHyphenations(outBuf, args['--keeporiginal'])
        if doChapterHeadings or doSectionHeadings:
            outBuf = processHeadings(outBuf, doChapterHeadings, doSectionHeadings, args['--keeporiginal'])
        if doFootnotes:
            footnoteDestination = args['--fndest'] or "bookend"
            outBuf = processFootnotes(outBuf, footnoteDestination, args['--keeporiginal'])

        if not args['--dryrun']:
            logging.info("Saving output to '{}'".format(outfile))
            # Context manager guarantees the file is closed even on error
            # (the old code left the handle open if a write raised).
            with open(outfile, 'w') as f:
                for line in outBuf:
                    f.write(line + '\n')
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
# Fix bug in isLinePageBreak()
#!/usr/bin/env python
"""dp2ppgen
Usage:
dp2ppgen [options] <infile> [<outfile>]
dp2ppgen -h | --help
dp2ppgen --version
Translates pgdp.org formatted text files into ppgen syntax.
Examples:
dp2ppgen book.txt
dp2ppgen book.txt book-src.txt
Options:
-c, --chapters Convert chapter headings into ppgen style chapter headings.
-d, --dryrun Run through conversions but do not write out result.
-e, --sections Convert section headings into ppgen style section headings.
-f, --footnotes Convert footnotes into ppgen format.
--fndest=<fndest> Where to relocate footnotes (paragraphend, chapterend, bookend, inline).
--fixup Perform guiguts style fixup operations.
--force Ignore markup errors and force operation.
-j, --joinspanned Join hyphenations (-* *-) and formatting markup (/* */ /# #/) that spans page breaks
-k, --keeporiginal On any conversion keep original text as a comment.
-p, --pages Convert page breaks into ppgen // 001.png style, add .pn statements and comment out [Blank Page] lines.
-q, --quiet Print less text.
-v, --verbose Print more text.
-h, --help Show help.
--utf8 Convert characters to UTF8
--version Show version.
"""
from docopt import docopt
import glob
import re
import os
import sys
import logging
# Tool version reported by --version; bump per semantic versioning.
VERSION="0.1.0" # MAJOR.MINOR.PATCH | http://semver.org
# Limited check for syntax errors in dp markup of input file
def validateDpMarkup( inBuf ):
    """Scan inBuf for unbalanced DP markup and report problems via logging.

    Checks out-of-line formatting blocks (/* */ and /# #/), square
    brackets [], and inline tags such as <i></i> using a single stack of
    open markers.  Also flags two historically troublesome patterns:
    text after the closing ']' of a one-line [Footnote], and extra text
    on an out-of-line formatting marker line.  Returns the error count
    (0 means clean).
    """

    # TODO, someone must have written a more thorough version of this already.. use that instead

    logging.info("-- Checking input file for markup errors")

    inBuf = removeTrailingSpaces(inBuf)

    formattingStack = []  # open markers: {'ln': 1-based line number, 'v': token}
    lineNum = 0
    errorCount = 0
    while lineNum < len(inBuf):

        # Detect unbalanced out-of-line formatting markup /# #/ /* */
        m = re.match(r"^\/(\*|\#)", inBuf[lineNum])
        if m:
            d = ({'ln':lineNum+1,'v':"/{}".format(m.group(1))})
            formattingStack.append(d)

        m = re.match(r"^(\*|\#)\/", inBuf[lineNum])
        if m:
            v = m.group(1)
            # a closer must match the most recently opened block
            if len(formattingStack) == 0 or formattingStack[-1]['v'] != "/{}".format(v):
                errorCount += 1
                if len(formattingStack) == 0:
                    logging.error("Line {}: Unexpected {}/".format(lineNum+1,v))
                else:
                    logging.error("Line {}: Unexpected {}/, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
            else:
                formattingStack.pop()

        # Check balance of [] and inline tags like <i></i>.
        # ({} and () checking was tried and disabled: diacritic markup such
        # as [)x] produces false positives and does not affect conversion.
        m = re.findall(r"(\[|\]|<\/?\w+>)", inBuf[lineNum])
        for v in m:
            if v == "<tb>": # ignore
                pass
            elif v == "]": # closing markup
                if len(formattingStack) == 0 or formattingStack[-1]['v'] != "[":
                    errorCount += 1
                    if len(formattingStack) == 0:
                        logging.error("Line {}: Unexpected {}".format(lineNum+1,v))
                    else:
                        logging.error("Line {}: Unexpected {}, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
                else:
                    formattingStack.pop()
            elif "/" in v: # closing markup, e.g. </i>
                v2 = re.sub("/","",v)
                if len(formattingStack) == 0 or formattingStack[-1]['v'] != v2:
                    errorCount += 1
                    if len(formattingStack) == 0:
                        logging.error("Line {}: Unexpected {}".format(lineNum+1,v))
                    else:
                        logging.error("Line {}: Unexpected {}, previous ({}:{})".format(lineNum+1,v,formattingStack[-1]['ln'],formattingStack[-1]['v']))
                else:
                    formattingStack.pop()
            else:
                # opening markup ('[' or an opening tag such as <i>)
                d = ({'ln':lineNum+1,'v':v})
                formattingStack.append(d)

        # Check for specific issues that have caused conversion issues in the past

        # Single line [Footnote] does not end at closing ]
        # ex. [Footnote 1: Duine, <i>Saints de Domnonée</i>, pp. 5-12].
        if re.match(r"\*?\[Footnote(.*)\]\*?.*$", inBuf[lineNum]):
            if inBuf[lineNum].count('[') - inBuf[lineNum].count(']') == 0: # ignore multiline footnotes with proofer notes or some other [] markup within them
                if not (inBuf[lineNum][-1] == ']' or inBuf[lineNum][-2:] == ']*'):
                    errorCount += 1
                    logging.error("Line {}: Extra characters found after closing ']' in [Footnote]\n {}".format(lineNum+1,inBuf[lineNum]))

        # Extra text after out-of-line formatting markup
        # ex. /*[**new stanza?]
        if re.match(r"^(\/\*|\/\#|\*\/|\#\/).+", inBuf[lineNum]):
            errorCount += 1
            logging.error("Line {}: Extra text after out-of-line formatting markup\n {}".format(lineNum+1,inBuf[lineNum]))

        lineNum += 1

    # Chapters
    # Sections

    # Look for unresolved <i></i>, [], {}
    if len(formattingStack) > 0:
        errorCount += 1
        logging.error("Reached end of file with unresolved formatting markup, (probably due to previous markup error(s))")
        # only dump the whole stack when this is the sole error; otherwise it
        # is most likely fallout from the errors already reported above
        if errorCount == 1:
            logging.error("Unresolved markup:")
            s = "Line {}: '{}'".format(formattingStack[0]['ln'],formattingStack[0]['v'])
            for v in formattingStack[1:]:
                s += ", Line {}: '{}'".format(v['ln'],v['v'])
            logging.error(s)
        else:
            logging.debug(formattingStack)

    if errorCount > 0:
        logging.info("-- Found {} markup errors".format(errorCount) )

    return errorCount
# Format helper function, truncate to width and indicate truncation occurred with ...
def truncate( string, width ):
    """Return string unchanged if it fits in width, otherwise cut it and
    append '...' so the result is exactly width characters long."""
    if len(string) <= width:
        return string
    return string[:width-3] + '...'
# Removes trailing spaces and tabs from an array of strings
def removeTrailingSpaces( inBuf ):
    """Return a new list with trailing spaces/tabs stripped from every line."""
    return [line.rstrip(" \t") for line in inBuf]
# Replace : [Blank Page]
# with    : // [Blank Page]
def processBlankPages( inBuf, keepOriginal ):
    """Comment out [Blank Page] lines; optionally keep the original as a comment."""
    outBuf = []
    count = 0

    logging.info("-- Processing blank pages")
    for lineNum, line in enumerate(inBuf):
        if re.match(r"^\[Blank Page]", line):
            if keepOriginal:
                outBuf.append("// *** DP2PPGEN ORIGINAL: {}".format(line))
            outBuf.append("// [Blank Page]")
            logging.debug("{:>{:d}}: '{}' to '{}'".format(str(lineNum+1),len(str(len(inBuf))),line,outBuf[-1]))
            count += 1
        else:
            outBuf.append(line)

    logging.info("-- Processed {} blank pages".format(count))
    return outBuf
# Replace : -----File: 001.png---\sparkleshine\swankypup\Kipling\SeaRose\Scholar\------
# with    : .bn 001.png / .pn +1
def processPageNumbers( inBuf, keepOriginal ):
    """Convert DP page-separator lines into ppgen .bn/.pn directives.

    Each matched separator becomes a ".bn <scan>" line plus ".pn +1";
    with keepOriginal the source line is preserved in a comment first.
    """
    outBuf = []
    lineNum = 0
    count = 0

    logging.info("-- Processing page numbers")
    while lineNum < len(inBuf):
        # BUG FIX: the alternation used to be (\d+\.png|jpg|jpeg), which only
        # recognised .png separators (or a bare literal "jpg"/"jpeg"); group
        # the extension so .jpg/.jpeg pages match too (cf. parseScanPage()).
        m = re.match(r"-----File: (\d+\.(?:png|jpg|jpeg)).*", inBuf[lineNum])
        if m:
            if keepOriginal:
                outBuf.append("// *** DP2PPGEN ORIGINAL: {}".format(inBuf[lineNum]))
            outBuf.append(".bn {}".format(m.group(1)))
            outBuf.append(".pn +1")
            logging.debug("{:>{:d}}: '{}' to '{}, {}'".format(str(lineNum+1),len(str(len(inBuf))),inBuf[lineNum],outBuf[-2],outBuf[-1]))
            lineNum += 1
            count += 1
        else:
            outBuf.append(inBuf[lineNum])
            lineNum += 1

    logging.info("-- Processed {} page numbers".format(count))
    return outBuf
def isLineBlank( line ):
    """Truthy (a match object) when the line is empty or whitespace-only."""
    return re.fullmatch(r"\s*", line)
def isLineComment( line ):
    """Truthy (a match object) for a bare '//' comment line (optionally space-padded)."""
    return re.fullmatch(r"// *", line)
def isLinePageBreak( line ):
    """True when the line marks a scan-page boundary in any form parseScanPage() knows."""
    return parseScanPage(line) is not None
def parseScanPage( line ):
    """Extract the scan file name (e.g. '001.png') from any of the three
    page-break forms (-----File:, // n.png, .bn n.png); None if no match."""
    patterns = (
        r"-----File: (\d+\.(png|jpg|jpeg)).*",
        r"\/\/ (\d+\.(png|jpg|jpeg))",
        r"\.bn (\d+\.(png|jpg|jpeg))",
    )
    scanPageNum = None
    for pat in patterns:
        m = re.match(pat, line)
        if m:
            scanPageNum = m.group(1)
    return scanPageNum
def formatAsID( s ):
    """Build a ppgen id from a heading: markup stripped, spaces to
    underscores, punctuation dropped, lowercased."""
    cleaned = re.sub(r"<\/?\w+>", "", s)        # remove inline markup like <i>...</i>
    underscored = cleaned.replace(" ", "_")     # spaces become underscores
    stripped = re.sub(r"[^\w\s]", "", underscored)  # keep word chars and whitespace only
    return stripped.lower()
def findNextEmptyLine( buf, startLine ):
    """Index of the next blank line at or after startLine (stops at len(buf)-1)."""
    idx = startLine
    while idx < len(buf) - 1:
        if isLineBlank(buf[idx]):
            break
        idx += 1
    return idx
def findPreviousEmptyLine( buf, startLine ):
    """Index of the nearest blank line at or before startLine (can return -1)."""
    idx = startLine
    while idx >= 0:
        if isLineBlank(buf[idx]):
            break
        idx -= 1
    return idx
def findNextNonEmptyLine( buf, startLine ):
    """Index of the next non-blank line at or after startLine (stops at len(buf)-1)."""
    idx = startLine
    while idx < len(buf) - 1:
        if not isLineBlank(buf[idx]):
            break
        idx += 1
    return idx
def findPreviousNonEmptyLine( buf, startLine ):
    """Index of the nearest non-blank line at or before startLine (can return -1)."""
    idx = startLine
    while idx >= 0:
        if not isLineBlank(buf[idx]):
            break
        idx -= 1
    return idx
# find previous line that contains original book text (ignore ppgen markup, proofing markup, blank lines)
def findPreviousLineOfText( buf, startLine ):
    """Walk backwards to the nearest line of real book text, skipping blanks
    and lines that start with ppgen/proofing markup (. * # / [)."""
    idx = findPreviousNonEmptyLine(buf, startLine)
    while idx > 0 and re.match(r"[\.\*\#\/\[]", buf[idx]):
        idx = findPreviousNonEmptyLine(buf, idx-1)
    return idx
# find next line that contains original book text (ignore ppgen markup, proofing markup, blank lines)
def findNextLineOfText( buf, startLine ):
    """Walk forwards to the nearest line of real book text, skipping blanks,
    ppgen dot-directives, out-of-line markup, proofer notes and comments."""
    idx = findNextNonEmptyLine(buf, startLine)
    while idx < len(buf)-1 and re.match(r"(\.[a-z0-9]{2} |[\*\#]\/|\/[\*\#]|\[\w+|\/\/)", buf[idx]):
        idx = findNextNonEmptyLine(buf, idx+1)
    return idx
def findNextChapter( buf, startLine ):
    """Index of the next '.h2' chapter heading at or after startLine (stops at len(buf)-1)."""
    idx = startLine
    while idx < len(buf) - 1:
        if re.match(r"\.h2", buf[idx]):
            break
        idx += 1
    return idx
def processHeadings( inBuf, doChapterHeadings, doSectionHeadings, keepOriginal ):
    """Detect chapter/section heading blocks (by preceding blank-line count)
    and rewrite them as ppgen .h2/.h3 markup.

    Chapters are preceded by 4 blank lines, sections by 2; headings are
    skipped inside out-of-line formatting blocks (/* */ and /# #/).
    With keepOriginal, the source heading text is preserved in an .ig
    comment block.  Returns the converted buffer.
    """
    outBuf = []
    lineNum = 0
    consecutiveEmptyLineCount = 0  # blank lines seen immediately before the current line
    rewrapLevel = 0                # >0 while inside /* */ or /# #/ blocks
    foundChapterHeadingStart = False
    chapterCount = 0
    sectionCount = 0

    if doChapterHeadings and doSectionHeadings:
        logging.info("-- Processing chapter and section headings")
    if doChapterHeadings:
        logging.info("-- Processing chapter headings")
    if doSectionHeadings:
        logging.info("-- Processing section headings")

    while lineNum < len(inBuf):
        # Chapter heading blocks are in the form:
        #   (4 empty lines)
        #   chapter name
        #   can span more than one line
        #   (1 empty line)
        #   chapter description, opening quote, etc., 1 empty line separating each
        #   ...
        #   (2 empty lines)

        # Section heading blocks are in the form
        #   (2 empty lines)
        #   section name
        #   can span more than one line
        #   (1 empty line)

        # Detect when inside out-of-line formatting block /# #/ /* */
        if re.match(r"^\/\*", inBuf[lineNum]) or re.match(r"^\/\#", inBuf[lineNum]):
            rewrapLevel += 1
        elif re.match(r"^\*\/", inBuf[lineNum]) or re.match(r"^\#\/", inBuf[lineNum]):
            rewrapLevel -= 1

        # Chapter heading
        if doChapterHeadings and consecutiveEmptyLineCount == 4 and not isLineBlank(inBuf[lineNum]) and rewrapLevel == 0:
            inBlock = []
            outBlock = []
            foundChapterHeadingEnd = False;
            consecutiveEmptyLineCount = 0;

            # Copy chapter heading block to inBlock
            while lineNum < len(inBuf) and not foundChapterHeadingEnd:
                if isLineBlank(inBuf[lineNum]):
                    consecutiveEmptyLineCount += 1
                    # two consecutive blanks end the heading block
                    if consecutiveEmptyLineCount == 2:
                        foundChapterHeadingEnd = True
                        consecutiveEmptyLineCount = 0
                else:
                    consecutiveEmptyLineCount = 0

                # chapters don't span pages
                if isLinePageBreak(inBuf[lineNum]):
                    foundChapterHeadingEnd = True

                if foundChapterHeadingEnd:
                    # Remove empty lines from end of chapter heading block
                    while isLineBlank(inBlock[-1]):
                        inBlock = inBlock[:-1]
                    # Rewind parser (to handle back to back chapter headings)
                    lineNum = findPreviousNonEmptyLine(inBuf, lineNum) + 1
                else:
                    inBlock.append(inBuf[lineNum])
                    lineNum += 1

            # Remove the four consecutive blank lines that precede the chapter heading
            outBuf = outBuf[:-4]

            # Generated form:
            #   .sp 4
            #   .h2 id=chapter_vi
            #   CHAPTER VI.||chapter description etc..
            #   .sp 2
            chapterID = formatAsID(inBlock[0])
            chapterLine = ""
            for line in inBlock:
                chapterLine += line
                chapterLine += "|"
            chapterLine = chapterLine[:-1]

            outBlock.append("")
            outBlock.append("// ******** DP2PPGEN GENERATED ****************************************")
            outBlock.append(".sp 4")
            outBlock.append(".h2 id={}".format(chapterID))
            outBlock.append(chapterLine)
            outBlock.append(".sp 2")

            if keepOriginal:
                # Write out original as a comment
                outBlock.append(".ig  // *** DP2PPGEN BEGIN ORIGINAL ***********************************")
                outBlock.append("")
                outBlock.append("")
                outBlock.append("")
                for line in inBlock:
                    outBlock.append(line)
                outBlock.append(".ig- // *** END *****************************************************")

            # Write out chapter heading block
            for line in outBlock:
                outBuf.append(line)

            # Log action
            logging.info("--- .h2 {}".format(chapterLine))
            chapterCount += 1

        # Section heading
        elif doSectionHeadings and consecutiveEmptyLineCount == 2 and not isLineBlank(inBuf[lineNum]) and rewrapLevel == 0:
            inBlock = []
            outBlock = []
            foundSectionHeadingEnd = False;
            consecutiveEmptyLineCount = 0;

            # Copy section heading block to inBlock (first blank line ends it)
            while lineNum < len(inBuf) and not foundSectionHeadingEnd:
                if isLineBlank(inBuf[lineNum]):
                    foundSectionHeadingEnd = True
                else:
                    inBlock.append(inBuf[lineNum])
                    lineNum += 1

            # Remove two consecutive blank lines that precede the section heading
            outBuf = outBuf[:-2]

            # Generated form:
            #   .sp 2
            #   .h3 id=section_i
            #   Section I.
            #   .sp 1
            sectionID = formatAsID(inBlock[0])
            sectionLine = ""
            for line in inBlock:
                sectionLine += line
                sectionLine += "|"
            sectionLine = sectionLine[:-1]

            outBlock.append("// ******** DP2PPGEN GENERATED ****************************************")
            outBlock.append(".sp 2")
            outBlock.append(".h3 id={}".format(sectionID))
            outBlock.append(sectionLine)
            outBlock.append(".sp 1")

            if keepOriginal:
                # Write out original as a comment
                outBlock.append(".ig  // *** DP2PPGEN BEGIN ORIGINAL ***********************************")
                outBlock.append("")
                outBlock.append("")
                for line in inBlock:
                    outBlock.append(line)
                outBlock.append(".ig- // *** END *****************************************************")

            # Write out section heading block
            for line in outBlock:
                outBuf.append(line)

            # Log action
            logging.info("--- .h3 {}".format(sectionID))
            sectionCount += 1
        else:
            # ordinary line: maintain the blank-line counter and copy through
            if isLineBlank(inBuf[lineNum]):
                consecutiveEmptyLineCount += 1
            else:
                consecutiveEmptyLineCount = 0
            outBuf.append(inBuf[lineNum])
            lineNum += 1

    if doChapterHeadings:
        logging.info("-- Processed {} chapters".format(chapterCount))
    if doSectionHeadings:
        logging.info("-- Processed {} sections".format(sectionCount))

    return outBuf;
def loadFile(fn):
    """Load a text file, returning it as a list of right-stripped lines.

    Decoding is attempted as ASCII first, then UTF-8 (removing a leading
    BOM), then Latin-1.  ASCII input is treated as Latin-1 for DP
    purposes.  Exits the program if the file does not exist; calls
    fatal() if no decoding succeeds.
    """
    inBuf = []
    encoding = ""

    if not os.path.isfile(fn):
        logging.critical("specified file {} not found".format(fn))
        sys.exit(1)

    if encoding == "":
        try:
            # context manager closes the handle (the old code leaked it)
            with open(fn, "r", encoding='ascii') as f:
                wbuf = f.read()
            encoding = "ASCII" # we consider ASCII as a subset of Latin-1 for DP purposes
            inBuf = wbuf.split("\n")
        except Exception:
            pass

    if encoding == "":
        try:
            # BUG FIX: mode "rU" was deprecated and removed in Python 3.11;
            # plain "r" already performs universal newline translation.
            with open(fn, "r", encoding='UTF-8') as f:
                wbuf = f.read()
            encoding = "utf_8"
            inBuf = wbuf.split("\n")
            # remove BOM on first line if present
            t = ":".join("{0:x}".format(ord(c)) for c in inBuf[0])
            if t[0:4] == 'feff':
                inBuf[0] = inBuf[0][1:]
        except Exception:
            # was a bare except:; narrowed so KeyboardInterrupt etc. propagate
            pass

    if encoding == "":
        try:
            with open(fn, "r", encoding='latin_1') as f:
                wbuf = f.read()
            encoding = "latin_1"
            inBuf = wbuf.split("\n")
        except Exception:
            pass

    if encoding == "":
        fatal("Cannot determine input file decoding")
    else:
        # self.info("input file is: {}".format(encoding))
        if encoding == "ASCII":
            encoding = "latin_1" # handle ASCII as Latin-1 for DP purposes

    # strip trailing whitespace from every line
    inBuf = [line.rstrip() for line in inBuf]
    return inBuf
def createOutputFileName( infile ):
    """Derive the default output file name: '<root>-out.txt'.

    BUG FIX: the old infile.split('.')[0] truncated at the FIRST dot, so
    names (or directories) containing extra dots were mangled, e.g.
    'dir.v2/book.txt' -> 'dir-out.txt'.  os.path.splitext removes only
    the final extension.
    """
    # TODO make this smart.. is infile raw or ppgen source? maybe two functions needed
    root, _ = os.path.splitext(infile)
    return "{}-out.txt".format(root)
def stripFootnoteMarkup( inBuf ):
    """Copy inBuf, dropping every [Footnote ...] block (including *[Footnote
    continuations); a block ends on the line whose last chars are ] or ]*."""
    outBuf = []
    i = 0
    total = len(inBuf)
    while i < total:
        if re.match(r"[\*]*\[Footnote", inBuf[i]):
            # advance to the closing ] (optionally followed by *), then past it
            while i < total and not re.search(r"][\*]*$", inBuf[i]):
                i += 1
            i += 1
        else:
            outBuf.append(inBuf[i])
            i += 1
    return outBuf
def parseFootnotes( inBuf ):
    """Collect all [Footnote ...] blocks into a list of dicts, then merge
    blocks that continue across page breaks (*[Footnote / ]* markers)."""
    # parse footnotes into a list of dictionaries with the following properties for each entry
    #  startLine - line number of [Footnote start
    #  endLine - line number of last line of [Footnote] block
    #  fnBlock - list of lines containing full [Footnote:]
    #  fnText - list of lines containing footnote text
    #  paragraphEnd - line number of the blank line following the paragraph this footnote is located in
    #  chapterEnd - line number of the blank line following the last paragraph in the chapter this footnote is located in
    #  scanPageNumber - scan page this footnote is located on
    footnotes = []
    lineNum = 0
    currentScanPage = 0;

    logging.info("--- Parsing footnotes")
    while lineNum < len(inBuf):
        foundFootnote = False

        # Keep track of active scanpage
        if isLinePageBreak(inBuf[lineNum]):
            currentScanPage = parseScanPage(inBuf[lineNum])
            # logging.debug("Processing page "+currentScanPage)

        if re.match(r"\*?\[Footnote", inBuf[lineNum]):
            foundFootnote = True

        if foundFootnote:
            startLine = lineNum

            # Copy footnote block (runs until a line ending in ] or ]*)
            fnBlock = []
            fnBlock.append(inBuf[lineNum])
            while lineNum < len(inBuf)-1 and not re.search(r"][\*]*$", inBuf[lineNum]):
                lineNum += 1
                fnBlock.append(inBuf[lineNum])

            endLine = lineNum

            # Is footnote part of a multipage footnote?
            # (leading *[Footnote or trailing ]* marks a continuation)
            needsJoining = False
            if re.match(r"\*\[Footnote", fnBlock[0]) or re.search(r"\]\*$", fnBlock[-1]):
                logging.debug("Footnote requires joining at line {}: {}".format(lineNum+1,inBuf[lineNum]))
                needsJoining = True
                foundFootnote = True

            # Find end of paragraph
            paragraphEnd = -1 # This must be done during footnote anchor processing as paragraph end is relative to anchor and not [Footnote] markup

            # Find end of chapter (line after last line of last paragraph)
            chapterEnd = -1 # This must be done during footnote anchor processing as chapter end is relative to anchor and not [Footnote] markup

            # Extract footnote ID
            # NOTE(review): fnID is only assigned when this regex matches; a
            # non-matching block silently reuses the previous footnote's fnID
            # (and the very first one would raise NameError) — confirm intended.
            m = re.search(r"^\[Footnote (\w{1,2}):", fnBlock[0])
            if m:
                fnID = m.group(1);

            # Extract footnote text from [Footnote] block
            fnText = []
            for line in fnBlock:
                line = re.sub(r"^\*\[Footnote: ", "", line)
                line = re.sub(r"^\[Footnote [A-Z]: ", "", line)
                line = re.sub(r"^\[Footnote \d+: ", "", line)
                line = re.sub(r"][\*]*$", "", line)
                fnText.append(line)

            # Add entry
            footnotes.append({'fnBlock':fnBlock, 'fnText':fnText, 'fnID':fnID, 'startLine':startLine, 'endLine':endLine, 'paragraphEnd':paragraphEnd, 'chapterEnd':chapterEnd, 'needsJoining':needsJoining, 'scanPageNum':currentScanPage})

        lineNum += 1

    logging.info("--- Parsed {} footnotes".format(len(footnotes)))
    # print(footnotes)

    # Join footnotes marked above during parsing
    joinCount = 0
    i = 0
    while i < len(footnotes):
        if footnotes[i]['needsJoining']:
            if joinCount == 0:
                logging.info("--- Joining footnotes")

            # debug message
            logging.debug("Merging footnote [{}]".format(i+1))
            if len(footnotes[i]['fnBlock']) > 1:
                logging.debug(" ScanPg {}: {} ... {} ".format(footnotes[i]['scanPageNum'], footnotes[i]['fnBlock'][0], footnotes[i]['fnBlock'][-1]))
            else:
                logging.debug(" ScanPg {}: {}".format(footnotes[i]['scanPageNum'], footnotes[i]['fnBlock'][0]))
            if len(footnotes[i+1]['fnBlock']) > 1:
                logging.debug(" ScanPg {}: {} ... {} ".format(footnotes[i+1]['scanPageNum'], footnotes[i+1]['fnBlock'][0], footnotes[i+1]['fnBlock'][-1]))
            else:
                logging.debug(" ScanPg {}: {}".format(footnotes[i+1]['scanPageNum'], footnotes[i+1]['fnBlock'][0]))

            # TODO: can footnotes span more than two pages?
            # NOTE(review): footnotes[i+1] raises IndexError if the last
            # footnote in the book is marked needsJoining — confirm handled upstream.
            if not footnotes[i+1]['needsJoining']:
                logging.error("Attempt to join footnote failed!")
                logging.error("ScanPg {} Footnote {} ({}): {}".format(footnotes[i]['scanPageNum'], i,footnotes[i]['startLine']+1,footnotes[i]['fnBlock'][0]))
                logging.error("ScanPg {} Footnote {} ({}): {}".format(footnotes[i+1]['scanPageNum'], i+1,footnotes[i+1]['startLine']+1,footnotes[i+1]['fnBlock'][0]))
            else:
                # merge fnBlock and fnText from second into first
                footnotes[i]['fnBlock'].extend(footnotes[i+1]['fnBlock'])
                footnotes[i]['fnText'].extend(footnotes[i+1]['fnText'])
                footnotes[i]['needsJoining'] = False
                del footnotes[i+1]
                joinCount += 1

        i += 1

    if joinCount > 0:
        logging.info("--- Merged {} broken footnote(s)".format(joinCount))
        logging.info("--- {} total footnotes after joining".format(len(footnotes)))

    return footnotes;
def processFootnoteAnchors( inBuf, footnotes ):
    """Renumber footnote anchors ([A], [1]...) sequentially across the book.

    Anchors are validated against the footnotes parsed for the current
    scan page; each match is rewritten to [n] where n is the running
    anchor count.  Also back-fills each footnote's paragraphEnd and
    chapterEnd (which are anchor-relative).  Returns (outBuf, anchor count).
    Note: outBuf aliases inBuf and is modified in place.
    """
    outBuf = inBuf

    # process footnote anchors
    fnAnchorCount = 0
    lineNum = 0
    currentScanPage = 0
    currentScanPageLabel = ""
    fnIDs = []
    # r = []
    logging.info("--- Processing footnote anchors")
    while lineNum < len(outBuf):

        # Keep track of active scanpage
        if isLinePageBreak(outBuf[lineNum]):
            # NOTE(review): anchorsThisPage is first assigned here; an anchor
            # appearing before any page break would raise NameError — confirm
            # input always starts with a page-break line.
            anchorsThisPage = []
            currentScanPage = parseScanPage(inBuf[lineNum])
            currentScanPageLabel = re.sub(r"\/\/ ","", outBuf[lineNum])
            # logging.debug("--- Processing page "+currentScanPage)

            # Make list of footnotes found on this page
            fnIDs = []
            for fn in footnotes:
                if fn['scanPageNum'] == currentScanPage:
                    fnIDs.append(fn['fnID'])

            # Build regex for footnote anchors that can be found on this scanpage
            # if len(fnIDs) > 0:
            #     r = "|".join(fnIDs)
            #     r = r"\[({})\]".format(r)

        # print("{}: {}".format(lineNum,outBuf[lineNum]))
        m = re.findall("\[([A-Za-z]|[0-9]{1,2})\]", outBuf[lineNum])
        for anchor in m:
            # Check that anchor found belongs to a footnote on this page
            if not anchor in fnIDs:
                logging.error("No matching footnote for anchor [{}] on scan page {} (line {} in output file):\n {}".format(anchor,currentScanPage,lineNum+1,outBuf[lineNum]))
                logging.debug(fnIDs)
            else:
                # replace [1] or [A] with [n]
                curAnchor = "\[{}\]".format(anchor)
                logging.debug("curAnchor={} anchorsThisPage={}".format(curAnchor,anchorsThisPage))
                # only bump the running count the first time this anchor
                # appears on the current page (repeats reuse the same number)
                if not curAnchor in anchorsThisPage:
                    fnAnchorCount += 1
                    anchorsThisPage.append(curAnchor)
                newAnchor = "[{}]".format(fnAnchorCount)
                #TODO: add option to use ppgen autonumber? [#].. unsure if good reason to do this, would hide footnote mismatch errors and increase ppgen project compile times
                logging.debug("{:>5s}: ({}|{}) ... {} ...".format(newAnchor,lineNum+1,currentScanPageLabel,outBuf[lineNum]))
                for line in footnotes[fnAnchorCount-1]['fnText']:
                    logging.debug(" {}".format(line))

                # sanity check (anchor and footnote should be on same scan page)
                if currentScanPage != footnotes[fnAnchorCount-1]['scanPageNum']:
                    logging.fatal("Anchor found on different scan page, anchor({}) and footnotes({}) may be out of sync".format(currentScanPage,footnotes[fnAnchorCount-1]['scanPageNum']))
                    exit(1)

                # replace anchor
                outBuf[lineNum] = re.sub(curAnchor, newAnchor, outBuf[lineNum])

                # update paragraphEnd and chapterEnd so they are relative to anchor and not [Footnote
                # Find end of paragraph
                paragraphEnd = findNextEmptyLine(outBuf, lineNum)
                footnotes[fnAnchorCount-1]['paragraphEnd'] = paragraphEnd

                # Find end of chapter (line after last line of last paragraph)
                chapterEnd = findNextChapter(outBuf, lineNum)
                chapterEnd = findPreviousLineOfText(outBuf, chapterEnd) + 1
                footnotes[fnAnchorCount-1]['chapterEnd'] = chapterEnd

        lineNum += 1

    logging.info("--- Processed {} footnote anchors".format(fnAnchorCount))

    return outBuf, fnAnchorCount
def processFootnotes( inBuf, footnoteDestination, keepOriginal ):
    """Full footnote pipeline: normalise spacing, parse the [Footnote]
    blocks, strip their markup, renumber anchors, and emit ppgen footnote
    markup at the requested destination (bookend/chapterend/paragraphend)."""
    logging.info("-- Processing footnotes")

    # strip empty lines before [Footnotes], *[Footnote
    logging.info("--- Remove blank lines before [Footnotes]")
    outBuf = []
    for line in inBuf:
        if re.match(r"\*?\[Footnote", line):
            # drop the blank run that precedes this footnote block
            while isLineBlank(outBuf[-1]):
                del outBuf[-1]
        outBuf.append(line)

    # parse footnotes into list of dictionaries
    footnotes = parseFootnotes(outBuf)

    # strip [Footnote markup
    outBuf = stripFootnoteMarkup(outBuf)

    # find and markup footnote anchors
    outBuf, fnAnchorCount = processFootnoteAnchors(outBuf, footnotes)

    if len(footnotes) != fnAnchorCount:
        logging.error("Footnote anchor count does not match footnote count")

    if footnotes:
        outBuf = generatePpgenFootnoteMarkup(outBuf, footnotes, footnoteDestination)

    return outBuf
# Generate ppgen footnote markup
def generatePpgenFootnoteMarkup( inBuf, footnotes, footnoteDestination ):
    """Insert ppgen .fn/.fn- footnote markup into the book text.

    inBuf: list of book lines (the [Footnote ...] blocks already stripped).
    footnotes: list of dicts built by parseFootnotes(); each entry's 'fnText'
        is the footnote body, and 'chapterEnd'/'paragraphEnd' are line
        indexes into inBuf (kept current by processFootnoteAnchors()).
    footnoteDestination: 'bookend', 'chapterend' or 'paragraphend'.

    Returns inBuf with the footnote markup inserted (the input list object
    is modified and returned).
    """
    outBuf = inBuf
    if footnoteDestination == "bookend":
        logging.info("--- Adding ppgen style footnotes to end of book")
        fnMarkup = []
        # Text-output header: page break plus a centered FOOTNOTES: heading.
        fnMarkup.append(".pb")
        fnMarkup.append(".if t")
        fnMarkup.append(".sp 4")
        fnMarkup.append(".ce")
        fnMarkup.append("FOOTNOTES:")
        fnMarkup.append(".sp 2")
        fnMarkup.append(".if-")
        # HTML-output header: footnotes wrapped in a bordered <div>.
        fnMarkup.append(".if h")
        fnMarkup.append(".de div.footnotes { border: dashed 1px #aaaaaa; padding: 1.5em; }")
        fnMarkup.append(".li")
        fnMarkup.append('<div class="footnotes">')
        fnMarkup.append(".li-")
        fnMarkup.append(".ce")
        fnMarkup.append("<xl>FOOTNOTES:</xl>")
        fnMarkup.append(".sp 2") #TODO: current ppgen doesn't add space (pvs not applied to .fn I bet)
        fnMarkup.append(".if-")
        # Footnotes are numbered 1..N in document order.
        for i, fn in enumerate(footnotes):
            fnMarkup.append(".fn {}".format(i+1))
            for line in fn['fnText']:
                fnMarkup.append(line)
            fnMarkup.append(".fn-")
        # Close the HTML <div> opened above.
        fnMarkup.append(".if h")
        fnMarkup.append(".li")
        fnMarkup.append('</div>')
        fnMarkup.append(".li-")
        fnMarkup.append(".if-")
        outBuf.extend(fnMarkup)
    elif footnoteDestination == "chapterend":
        logging.info("--- Adding ppgen style footnotes to end of chapters")
        # Iterate in reverse so that inserting markup at a (larger) line
        # index never shifts the indexes of footnotes yet to be processed.
        curChapterEnd = footnotes[-1]['chapterEnd']
        fnMarkup = []
        for i, fn in reversed(list(enumerate(footnotes))):
            if curChapterEnd != fn['chapterEnd']:
                # finish off last group
                outBuf.insert(curChapterEnd, ".fm")
                curChapterEnd = fn['chapterEnd']
            # build markup for this footnote
            # print("{} {}".format(fn['chapterEnd'],fn['fnText'][0]))
            fnMarkup.append(".fn {}".format(i+1))
            for line in fn['fnText']:
                fnMarkup.append(line)
            fnMarkup.append(".fn-")
            # insert it
            outBuf[curChapterEnd:curChapterEnd] = fnMarkup
            fnMarkup = []
        # finish off last group
        outBuf.insert(curChapterEnd, ".fm")
    elif footnoteDestination == "paragraphend":
        logging.info("--- Adding ppgen style footnotes to end of paragraphs")
        # Same reverse-insertion scheme as 'chapterend', keyed on
        # paragraphEnd instead of chapterEnd.
        curParagraphEnd = footnotes[-1]['paragraphEnd']
        fnMarkup = []
        for i, fn in reversed(list(enumerate(footnotes))):
            if curParagraphEnd != fn['paragraphEnd']:
                # finish off last group
                outBuf.insert(curParagraphEnd, ".fm")
                curParagraphEnd = fn['paragraphEnd']
            # build markup for this footnote
            # print("{} {}".format(fn['paragraphEnd'],fn['fnText'][0]))
            fnMarkup.append(".fn {}".format(i+1))
            for line in fn['fnText']:
                fnMarkup.append(line)
            fnMarkup.append(".fn-")
            # insert it
            outBuf[curParagraphEnd:curParagraphEnd] = fnMarkup
            fnMarkup = []
        # finish off last group
        outBuf.insert(curParagraphEnd, ".fm")
    return outBuf
def joinSpannedFormatting( inBuf, keepOriginal ):
    """Join out-of-line formatting blocks (/* */ or /# #/) split by a page break.

    Finds:
      1: */
      2: // 010.png
      3:
      4: /*
    and replaces it with:
      2: // 010.png
      3:
    so the formatting block continues across the page break.

    inBuf: list of book lines.
    keepOriginal: unused here; kept for interface consistency with the
        other processing passes.

    Returns a new list of lines with spanned markup joined.
    """
    outBuf = []

    logging.info("-- Joining spanned out-of-line formatting markup")

    lineNum = 0
    joinCount = 0
    while lineNum < len(inBuf):
        joinWasMade = False

        m = re.match(r"^(\*\/|\#\/)$", inBuf[lineNum])
        if m:
            outBlock = []
            ln = lineNum + 1
            # Matching re-open marker for this close marker (/* or /#).
            joinEndLineRegex = r"^\/\{}$".format(m.group(1)[0])

            # Collect blank lines following the close marker.
            while ln < len(inBuf) and isLineBlank(inBuf[ln]):
                outBlock.append(inBuf[ln])
                ln += 1

            if ln < len(inBuf) and isLinePageBreak(inBuf[ln]):
                outBlock.append(inBuf[ln])
                ln += 1
                # Collect blank lines, .pn page-number lines and // comment
                # lines after the page break.
                # BUGFIX: the original condition mixed 'and'/'or' without
                # parentheses, so the bounds check did not guard the
                # re.match calls (possible IndexError past end of buffer);
                # the '.pn' pattern also had an unescaped '.' that matched
                # any character.
                while ln < len(inBuf) - 1 and (isLineBlank(inBuf[ln])
                                               or re.match(r"\.pn", inBuf[ln])
                                               or re.match(r"\/\/", inBuf[ln])):
                    outBlock.append(inBuf[ln])
                    ln += 1

                if re.match(joinEndLineRegex, inBuf[ln]):
                    # Drop the close/open pair; keep only the lines between.
                    for line in outBlock:
                        outBuf.append(line)
                    joinWasMade = True
                    joinCount += 1
                    logging.debug("Lines {}, {}: Joined spanned markup /{} {}/".format(lineNum,ln,m.group(1)[0],m.group(1)[0]))
                    lineNum = ln + 1

        if not joinWasMade:
            outBuf.append(inBuf[lineNum])
            lineNum += 1

    logging.info("-- Joined {} instances of spanned out-of-line formatting markup".format(joinCount))
    return outBuf
def joinSpannedHyphenations( inBuf, keepOriginal ):
    """Join words hyphenated across a page break.

    Finds:
      1: the last word on this line is cont-*
      2: // 010.png
      3: *-inued. on the line below
    and produces:
      1: the last word on this line is cont-**inued.
      2: // 010.png
      3: on the line below

    inBuf: list of book lines (modified in place for the joined lines).
    keepOriginal: unused here; kept for interface consistency with the
        other processing passes.

    Returns the list of processed lines.
    """
    outBuf = []

    logging.info("-- Joining spanned hyphenations")

    lineNum = 0
    joinCount = 0
    while lineNum < len(inBuf):
        # BUGFIX: bound-check lineNum+1 so a trailing '-*' on the very last
        # line no longer raises IndexError.
        if lineNum + 1 < len(inBuf) and re.search(r"\-\*$", inBuf[lineNum]) and isLinePageBreak(inBuf[lineNum+1]):
            ln = findNextLineOfText(inBuf, lineNum+1)
            if inBuf[ln].startswith('*'):
                # Remove the first word from the continuation line and append
                # it to the current line (the '-**' marker is left in place).
                # BUGFIX: handle a continuation line holding only one word;
                # split(' ',1)[1] used to raise IndexError in that case.
                parts = inBuf[ln].split(' ', 1)
                secondPart = parts[0]
                inBuf[ln] = parts[1] if len(parts) > 1 else ''
                inBuf[lineNum] = inBuf[lineNum] + secondPart
                logging.debug("Line {}: Resolved hyphenation, ... '{}'".format(lineNum+1,inBuf[lineNum][-30:]))
                joinCount += 1
            else:
                logging.error("Line {}: Unresolved hyphenation\n {}\n {}".format(lineNum+1,inBuf[lineNum],inBuf[ln]))

        outBuf.append(inBuf[lineNum])
        lineNum += 1

    logging.info("-- Joined {} instances of spanned hyphenations".format(joinCount))
    return outBuf
def tabsToSpaces( inBuf, tabSize ):
    """Expand each tab character to `tabSize` spaces.

    Note: this is a straight character replacement, not tab-stop alignment.

    inBuf: list of lines.
    tabSize: number of spaces per tab.

    Returns a new list of lines.
    """
    # Hoisted out of the loop: the replacement string is loop-invariant.
    spaces = " " * tabSize
    return [line.replace("\t", spaces) for line in inBuf]
def convertUTF8( inBuf ):
    """Convert DP ASCII conventions to their UTF-8 equivalents.

    '--' becomes an em dash, '----' becomes two em dashes, and the [oe]/[OE]
    ligature markers become the corresponding single characters.  Page-break
    lines are left untouched by the dash conversion (their '//' and file
    names must survive intact).

    Returns a new list of lines.
    """
    outBuf = []

    for line in inBuf:
        if not isLinePageBreak(line):
            # -- becomes a unicode mdash, ---- becomes 2 unicode mdashes
            line = re.sub(r"(?<!-)-{2}(?!-)","—", line)
            line = re.sub(r"(?<!-)-{4}(?!-)","——", line)

            if "--" in line:
                # BUGFIX: logging.warn is a deprecated alias; use warning().
                logging.warning("Unconverted dashes: {}".format(line))

        # [oe] becomes œ
        # [OE] becomes Œ
        line = line.replace("[oe]", "œ")
        line = line.replace("[OE]", "Œ")

        outBuf.append(line)

    # Fractions?

    return outBuf
def convertThoughtBreaks( inBuf ):
    """Translate DP thought-break markers into the ppgen .tb command.

    A line consisting of exactly '<tb>' becomes '.tb'; all other lines pass
    through unchanged.
    """
    return [re.sub(r"^<tb>$", ".tb", text) for text in inBuf]
def removeBlankLinesAtPageEnds( inBuf ):
    """Drop any run of blank lines that sits immediately before a page-break
    line, so pages end on text."""
    result = []
    for text in inBuf:
        if isLinePageBreak(text):
            # Trim trailing blanks accumulated so far before the break.
            while result and isLineBlank(result[-1]):
                del result[-1]
        result.append(text)
    return result
# TODO: Make this a tool in itself?
def fixup( inBuf, keepOriginal ):
    """Apply the safe subset of guiguts-style cleanup.

    Currently enabled passes:
      - expand tabs to 4 spaces
      - remove trailing spaces
      - convert <tb> thought breaks to .tb
      - remove blank lines at page ends

    The remaining guiguts fixes (space/punctuation normalization, l<->1
    repair, quote/bracket spacing, etc.) are intentionally not enabled; see
    the commented-out removeExtraSpaces() below.

    keepOriginal: unused here; kept for interface consistency with the
        other processing passes.
    """
    buf = tabsToSpaces(inBuf, 4)
    buf = removeTrailingSpaces(buf)
    buf = convertThoughtBreaks(buf)
    buf = removeBlankLinesAtPageEnds(buf)
    # buf = removeExtraSpaces(buf)
    return buf
#TODO: Full guiguts fixit seems error prone.. maybe only do safe defaults or break off into seperate tool with each setting configurable, does gutsweeper do this already?
#def removeExtraSpaces( inBuf ):
# • Remove spaces on either side of hyphens.
# • Remove space before periods.
# • Remove space before exclamation points.
# • Remove space before question marks.
# • Remove space before commas.
# • Remove space before semicolons.
# • Remove space before colons.
# • Remove space after opening and before closing brackets. () [] {}
# • Remove space after open angle quote and before close angle quote.
# • Remove space after beginning and before ending double quote.
# • Ensure space before ellipses except after period.
# rewrapLevel = 0
# for line in inBuf:
# # Detect when inside out-of-line formatting block /# #/ /* */
# if re.match(r"^\/[\*\#]", inBuf[lineNum]):
# rewrapLevel += 1
# elif re.match(r"^[\*\#]\/", inBuf[lineNum]):
# rewrapLevel -= 1
#
# if rewrapLevel == 0:
# # Remove multiple spaces
# # $line =~ s/(?<=\S)\s\s+(?=\S)/
# line = re.sub(r"(?<=\S)\s\s+(?=\S)","", line)
#
# # Remove spaces on either side of hyphens.
# # Remove spaces before hyphen (only if hyphen isn't first on line, like poetry)
# # $line =~ s/(\S) +-/$1-/g;
# line = re.sub(r"(\S) +-","\1-", line)
#
# # Remove space after hyphen
# # $line =~ s/- /-/g;
# line = re.sub(r"- ","-", line)
#
# # Except leave a space after a string of three or more hyphens
# # $line =~ s/(?<![-])([-]*---)(?=[^\s\\"F-])/$1 /g
# line = re.sub(r'(?<!-)(-*---)(?=[^\s\\"F-])',"\1", line)
#
# outBuf.append(line)
#
# return outBuf
#
# $edited++ if $line =~ s/- /-/g; # Remove space after hyphen
# $edited++
# if $line =~ s/(?<![-])([-]*---)(?=[^\s\\"F-])/$1 /g
# ; # Except leave a space after a string of three or more hyphens
#
#
#
# if ( ${ $::lglobal{fixopt} }[1] ) {
# ; # Remove spaces before hyphen (only if hyphen isn't first on line, like poetry)
# $edited++ if $line =~ s/(\S) +-/$1-/g;
# $edited++ if $line =~ s/- /-/g; # Remove space after hyphen
# $edited++
# if $line =~ s/(?<![-])([-]*---)(?=[^\s\\"F-])/$1 /g
# ; # Except leave a space after a string of three or more hyphens
# }
# if ( ${ $::lglobal{fixopt} }[3] ) {
# ; # Remove space before periods (only if not first on line, like poetry's ellipses)
# $edited++ if $line =~ s/(\S) +\.(?=\D)/$1\./g;
# }
# ; # Get rid of space before periods
# if ( ${ $::lglobal{fixopt} }[4] ) {
# $edited++
# if $line =~ s/ +!/!/g;
# }
# ; # Get rid of space before exclamation points
# if ( ${ $::lglobal{fixopt} }[5] ) {
# $edited++
# if $line =~ s/ +\?/\?/g;
# }
# ; # Get rid of space before question marks
# if ( ${ $::lglobal{fixopt} }[6] ) {
# $edited++
# if $line =~ s/ +\;/\;/g;
# }
# ; # Get rid of space before semicolons
# if ( ${ $::lglobal{fixopt} }[7] ) {
# $edited++
# if $line =~ s/ +:/:/g;
# }
# ; # Get rid of space before colons
# if ( ${ $::lglobal{fixopt} }[8] ) {
# $edited++
# if $line =~ s/ +,/,/g;
# }
# ; # Get rid of space before commas
# # FIXME way to go on managing quotes
# if ( ${ $::lglobal{fixopt} }[9] ) {
# $edited++
# if $line =~ s/^\" +/\"/
# ; # Remove space after doublequote if it is the first character on a line
# $edited++
# if $line =~ s/ +\"$/\"/
# ; # Remove space before doublequote if it is the last character on a line
# }
# if ( ${ $::lglobal{fixopt} }[10] ) {
# $edited++
# if $line =~ s/(?<=(\(|\{|\[)) //g
# ; # Get rid of space after opening brackets
# $edited++
# if $line =~ s/ (?=(\)|\}|\]))//g
# ; # Get rid of space before closing brackets
# }
# ; # FIXME format to standard thought breaks - changed to <tb>
# if ( ${ $::lglobal{fixopt} }[11] ) {
# $edited++
#
# # if $line =~
# # s/^\s*(\*\s*){5}$/ \* \* \* \* \*\n/;
# if $line =~ s/^\s*(\*\s*){4,}$/<tb>\n/;
# }
# $edited++ if ( $line =~ s/ +$// );
# ; # Fix llth, lst
# if ( ${ $::lglobal{fixopt} }[12] ) {
# $edited++ if $line =~ s/llth/11th/g;
# $edited++ if $line =~ s/(?<=\d)lst/1st/g;
# $edited++ if $line =~ s/(?<=\s)lst/1st/g;
# $edited++ if $line =~ s/^lst/1st/;
# }
# ; # format ellipses correctly
# if ( ${ $::lglobal{fixopt} }[13] ) {
# $edited++ if $line =~ s/(?<![\.\!\?])\.{3}(?!\.)/ \.\.\./g;
# $edited++ if $line =~ s/^ \./\./;
# }
# ; # format guillemets correctly
# ; # french guillemets
# if ( ${ $::lglobal{fixopt} }[14] and ${ $::lglobal{fixopt} }[15] ) {
# $edited++ if $line =~ s/«\s+/«/g;
# $edited++ if $line =~ s/\s+»/»/g;
# }
# ; # german guillemets
# if ( ${ $::lglobal{fixopt} }[14] and !${ $::lglobal{fixopt} }[15] )
# {
# $edited++ if $line =~ s/\s+«/«/g;
# $edited++ if $line =~ s/»\s+/»/g;
# }
# $update++ if ( ( $index % 250 ) == 0 );
# $textwindow->see($index) if ( $edited || $update );
# if ($edited) {
# $textwindow->replacewith( $lastindex, $index, $line );
# }
# }
# $textwindow->markSet( 'insert', $index ) if $update;
# $textwindow->update if ( $edited || $update );
# ::update_indicators() if ( $edited || $update );
# $edited = 0;
# $update = 0;
# $lastindex = $index;
# $index++;
# $index .= '.0';
# if ( $index > $end ) { $index = $end }
# if ($::operationinterrupt) { $::operationinterrupt = 0; return }
# }
# $textwindow->markSet( 'insert', 'end' );
# $textwindow->see('end');
# ::update_indicators();
#}
def doStandardConversions( inBuf, keepOriginal ):
    """Conversions applied on every run regardless of the selected options:
    trailing-space removal followed by <tb> -> .tb conversion.

    keepOriginal: unused here; kept for interface consistency with the
        other processing passes.
    """
    return convertThoughtBreaks(removeTrailingSpaces(inBuf))
def main():
    """Command-line entry point: parse arguments, run the selected
    processing passes over the input file, and write the result."""
    args = docopt(__doc__, version="dp2ppgen v{}".format(VERSION))

    # Process required command line arguments
    outfile = createOutputFileName(args['<infile>'])
    if args['<outfile>']:
        outfile = args['<outfile>']

    infile = args['<infile>']

    # Open source file and represent as an array of lines
    inBuf = loadFile(infile)

    # Configure logging
    logLevel = logging.INFO  # default
    if args['--verbose']:
        logLevel = logging.DEBUG
    elif args['--quiet']:
        logLevel = logging.ERROR
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logLevel)
    logging.debug(args)

    # Processing options (stray trailing semicolons removed)
    doChapterHeadings = args['--chapters']
    doSectionHeadings = args['--sections']
    doFootnotes = args['--footnotes']
    doPages = args['--pages']
    doJoinSpanned = args['--joinspanned']
    doFixup = args['--fixup']
    doUTF8 = args['--utf8']

    # Use default options if no processing options are set
    if not (doChapterHeadings or doSectionHeadings or doFootnotes or doPages
            or doFixup or doUTF8 or doJoinSpanned):
        logging.info("No processing options were given, using default set of options -pcfj --fixup --utf8\n Run 'dp2ppgen -h' for a full list of options")
        doPages = True
        doChapterHeadings = True
        doFootnotes = True
        # NOTE(review): the message above claims --fixup is in the default
        # set, but fixup is deliberately left off here -- confirm which of
        # the two is intended before changing either.
        doFixup = False
        doUTF8 = True
        doJoinSpanned = True

    # Process source document
    logging.info("Processing '{}'".format(infile))
    outBuf = inBuf
    errorCount = validateDpMarkup(inBuf)
    if errorCount > 0 and not args['--force']:
        logging.critical("Correct markup issues then re-run operation, or use --force to ignore markup errors")
    else:
        outBuf = doStandardConversions(outBuf, args['--keeporiginal'])

        if doPages:
            outBuf = processBlankPages(outBuf, args['--keeporiginal'])
            outBuf = processPageNumbers(outBuf, args['--keeporiginal'])
        if doFixup:
            outBuf = fixup(outBuf, args['--keeporiginal'])
        if doUTF8:
            outBuf = convertUTF8(outBuf)
        if doJoinSpanned:
            outBuf = joinSpannedFormatting(outBuf, args['--keeporiginal'])
            outBuf = joinSpannedHyphenations(outBuf, args['--keeporiginal'])
        if doChapterHeadings or doSectionHeadings:
            outBuf = processHeadings(outBuf, doChapterHeadings, doSectionHeadings, args['--keeporiginal'])
        if doFootnotes:
            footnoteDestination = "bookend"
            if args['--fndest']:
                footnoteDestination = args['--fndest']
            outBuf = processFootnotes(outBuf, footnoteDestination, args['--keeporiginal'])

    if not args['--dryrun']:
        logging.info("Saving output to '{}'".format(outfile))
        # BUGFIX: use a context manager so the output file is closed even if
        # a write raises (was open()/close() with no try/finally).
        with open(outfile, 'w') as f:
            for line in outBuf:
                f.write(line + '\n')

    return
# Script entry point.
if __name__ == "__main__":
    main()
|
import config
import file_utils
def _generate_module_listeners(models, classname):
    """Generate the C++ statements the ElaboratorListener template splices
    in to clone a definition's children into an elaborated instance.

    Walks the model named `classname` and its whole 'extends' chain; for
    each child relation (class/obj_ref/class_ref/group_ref) it emits C++
    code that deep-clones the child from the definition (`defMod`) or from
    the instance (`inst`), depending on the relation.

    models: mapping of model-name -> model; each model supports .allitems()
        and subscripting with 'extends' (project-specific type).
    classname: starting model name, e.g. 'module' or 'interface'.

    Returns the generated C++ source as a list of lines.
    """
    listeners = []
    while classname:
        model = models[classname]
        for key, value in model.allitems():
            # Only child-object relations produce clone code; scalar
            # properties are ignored.
            if key in ['class', 'obj_ref', 'class_ref', 'group_ref']:
                name = value.get('name')
                type = value.get('type')
                card = value.get('card')
                # group_refs are heterogeneous, so they go through 'any'.
                cast = 'any' if key == 'group_ref' else type
                Cast = cast[:1].upper() + cast[1:]
                # C++ accessor name: capitalized; vector accessors
                # (card == 'any') are pluralized.
                method = name[:1].upper() + name[1:]
                if (card == 'any') and not method.endswith('s'):
                    method += 's'
                if card == '1':
                    # Scalar child: clone from the definition into the instance.
                    listeners.append(f'if (auto obj = defMod->{method}()) {{')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, defMod);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(f' inst->{method}(stmt);')
                    listeners.append('}')
                elif method in ['Task_funcs']:
                    # We want to deep clone existing instance tasks and funcs
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' enterTask_func(obj, nullptr);')
                    listeners.append(' auto* tf = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' ComponentMap& funcMap = std::get<2>(instStack_.at(instStack_.size()-2).second);')
                    listeners.append(' funcMap.erase(tf->VpiName());')
                    listeners.append(' funcMap.insert(std::make_pair(tf->VpiName(), tf));')
                    listeners.append(' leaveTask_func(obj, nullptr);')
                    listeners.append(' tf->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(tf);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Cont_assigns', 'Gen_scope_arrays']:
                    # We want to deep clone existing instance cont assign to perform binding
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                    # We also want to clone the module cont assign
                    # (appended to the instance's vector, created on demand).
                    listeners.append(f'if (auto vec = defMod->{method}()) {{')
                    listeners.append(f' if (inst->{method}() == nullptr) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' }')
                    listeners.append(f' auto clone_vec = inst->{method}();')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Typespecs']:
                    # Typespecs come from the definition; each one is deep
                    # cloned only when uniquifyTypespec() is true, otherwise
                    # the original typespec object is shared.
                    listeners.append(f'if (auto vec = defMod->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' if (uniquifyTypespec()) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' } else {')
                    listeners.append(' auto* stmt = obj;')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append(' }')
                    listeners.append('}')
                elif method not in ['Ports', 'Nets', 'Parameters', 'Param_assigns', 'Interface_arrays', 'Module_arrays']:
                    # We don't want to override the elaborated instance ports by the module def ports, same for nets, params and param_assigns
                    listeners.append(f'if (auto vec = defMod->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Ports']:
                    # Ports are cloned from the elaborated instance itself,
                    # not from the definition.
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
        # Walk up the inheritance chain; 'extends' is falsy at the root.
        classname = models[classname]['extends']
    return listeners
def _generate_class_listeners(models):
    """Generate the C++ statements that clone a class_defn's children.

    Only the 'class_defn' obj_def model (and its 'extends' chain) is
    processed; all other models are skipped.

    models: mapping of model-name -> model (project-specific type).

    Returns the generated C++ source as a list of lines.
    """
    listeners = []
    for model in models.values():
        modeltype = model.get('type')
        if modeltype != 'obj_def':
            continue
        classname = model.get('name')
        if classname != 'class_defn':
            continue
        while classname:
            model = models[classname]
            for key, value in model.allitems():
                # Only child-object relations produce clone code.
                if key in ['class', 'obj_ref', 'class_ref', 'group_ref']:
                    name = value.get('name')
                    type = value.get('type')
                    card = value.get('card')
                    # group_refs are heterogeneous, so they go through 'any'.
                    cast = 'any' if key == 'group_ref' else type
                    Cast = cast[:1].upper() + cast[1:]
                    # C++ accessor name: capitalized; vector accessors
                    # (card == 'any') are pluralized.
                    method = name[:1].upper() + name[1:]
                    if (card == 'any') and not method.endswith('s'):
                        method += 's'
                    if card == '1':
                        # Scalar child: deep clone onto the class object.
                        listeners.append(f'if (auto obj = cl->{method}()) {{')
                        listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, cl);')
                        listeners.append(' stmt->VpiParent(cl);')
                        listeners.append(f' cl->{method}(stmt);')
                        listeners.append('}')
                    elif method == 'Deriveds':
                        # Don't deep clone (the derived-class references are
                        # copied as-is).
                        listeners.append(f'if (auto vec = cl->{method}()) {{')
                        listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                        listeners.append(f' cl->{method}(clone_vec);')
                        listeners.append(' for (auto obj : *vec) {')
                        listeners.append(' auto* stmt = obj;')
                        listeners.append(' clone_vec->push_back(stmt);')
                        listeners.append(' }')
                        listeners.append('}')
                    else:
                        # Vector child: deep clone every element.
                        listeners.append(f'if (auto vec = cl->{method}()) {{')
                        listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                        listeners.append(f' cl->{method}(clone_vec);')
                        listeners.append(' for (auto obj : *vec) {')
                        listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, cl);')
                        listeners.append(' stmt->VpiParent(cl);')
                        listeners.append(' clone_vec->push_back(stmt);')
                        listeners.append(' }')
                        listeners.append('}')
            # Walk up the inheritance chain; 'extends' is falsy at the root.
            classname = models[classname]['extends']
    return listeners
def generate(models):
    """Fill the ElaboratorListener.cpp template with the generated listener
    code and write it out (only if the content changed).

    Returns True on success.
    """
    def _indented(lines, width):
        # Prefix every generated line with `width` spaces and join them.
        pad = ' ' * width
        return pad + ('\n' + pad).join(lines)

    module_listeners = _generate_module_listeners(models, 'module')
    interface_listeners = _generate_module_listeners(models, 'interface')
    class_listeners = _generate_class_listeners(models)

    with open(config.get_template_filepath('ElaboratorListener.cpp'), 'rt') as strm:
        content = strm.read()

    content = content.replace('<MODULE_ELABORATOR_LISTENER>', _indented(module_listeners, 10))
    content = content.replace('<INTERFACE_ELABORATOR_LISTENER>', _indented(interface_listeners, 10))
    content = content.replace('<CLASS_ELABORATOR_LISTENER>', _indented(class_listeners, 4))

    file_utils.set_content_if_changed(config.get_output_source_filepath('ElaboratorListener.cpp'), content)
    return True
def _main():
    """Configure paths, load the models and run the generator.

    Returns True on success (generate()'s result).
    """
    import loader

    config.configure()
    return generate(loader.load_models())
# Script entry point: exit status 0 on success, 1 on failure.
if __name__ == '__main__':
    import sys
    sys.exit(0 if _main() else 1)
cont_assign present in Surelog elab tree
import config
import file_utils
def _generate_module_listeners(models, classname):
    """Generate the C++ statements the ElaboratorListener template splices
    in to clone a definition's children into an elaborated instance.

    Walks the model named `classname` and its whole 'extends' chain; for
    each child relation (class/obj_ref/class_ref/group_ref) it emits C++
    code that deep-clones the child from the definition (`defMod`) or from
    the instance (`inst`), depending on the relation.

    models: mapping of model-name -> model; each model supports .allitems()
        and subscripting with 'extends' (project-specific type).
    classname: starting model name, e.g. 'module' or 'interface'.

    Returns the generated C++ source as a list of lines.
    """
    listeners = []
    while classname:
        model = models[classname]
        for key, value in model.allitems():
            # Only child-object relations produce clone code; scalar
            # properties are ignored.
            if key in ['class', 'obj_ref', 'class_ref', 'group_ref']:
                name = value.get('name')
                type = value.get('type')
                card = value.get('card')
                # group_refs are heterogeneous, so they go through 'any'.
                cast = 'any' if key == 'group_ref' else type
                Cast = cast[:1].upper() + cast[1:]
                # C++ accessor name: capitalized; vector accessors
                # (card == 'any') are pluralized.
                method = name[:1].upper() + name[1:]
                if (card == 'any') and not method.endswith('s'):
                    method += 's'
                if card == '1':
                    # Scalar child: clone from the definition into the instance.
                    listeners.append(f'if (auto obj = defMod->{method}()) {{')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, defMod);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(f' inst->{method}(stmt);')
                    listeners.append('}')
                elif method in ['Task_funcs']:
                    # We want to deep clone existing instance tasks and funcs
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' enterTask_func(obj, nullptr);')
                    listeners.append(' auto* tf = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' ComponentMap& funcMap = std::get<2>(instStack_.at(instStack_.size()-2).second);')
                    listeners.append(' funcMap.erase(tf->VpiName());')
                    listeners.append(' funcMap.insert(std::make_pair(tf->VpiName(), tf));')
                    listeners.append(' leaveTask_func(obj, nullptr);')
                    listeners.append(' tf->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(tf);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Cont_assigns']:
                    # We want to deep clone existing instance cont assign to perform binding
                    # (unlike Gen_scope_arrays below, the definition's
                    # cont assigns are NOT cloned here).
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Gen_scope_arrays']:
                    # We want to deep clone existing instance cont assign to perform binding
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                    # We also want to clone the module cont assign
                    # (appended to the instance's vector, created on demand).
                    listeners.append(f'if (auto vec = defMod->{method}()) {{')
                    listeners.append(f' if (inst->{method}() == nullptr) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' }')
                    listeners.append(f' auto clone_vec = inst->{method}();')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Typespecs']:
                    # Typespecs come from the definition; each one is deep
                    # cloned only when uniquifyTypespec() is true, otherwise
                    # the original typespec object is shared.
                    listeners.append(f'if (auto vec = defMod->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' if (uniquifyTypespec()) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' } else {')
                    listeners.append(' auto* stmt = obj;')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append(' }')
                    listeners.append('}')
                elif method not in ['Ports', 'Nets', 'Parameters', 'Param_assigns', 'Interface_arrays', 'Module_arrays']:
                    # We don't want to override the elaborated instance ports by the module def ports, same for nets, params and param_assigns
                    listeners.append(f'if (auto vec = defMod->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
                elif method in ['Ports']:
                    # Ports are cloned from the elaborated instance itself,
                    # not from the definition.
                    listeners.append(f'if (auto vec = inst->{method}()) {{')
                    listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                    listeners.append(f' inst->{method}(clone_vec);')
                    listeners.append(' for (auto obj : *vec) {')
                    listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, inst);')
                    listeners.append(' stmt->VpiParent(inst);')
                    listeners.append(' clone_vec->push_back(stmt);')
                    listeners.append(' }')
                    listeners.append('}')
        # Walk up the inheritance chain; 'extends' is falsy at the root.
        classname = models[classname]['extends']
    return listeners
def _generate_class_listeners(models):
    """Generate the C++ statements that clone a class_defn's children.

    Only the 'class_defn' obj_def model (and its 'extends' chain) is
    processed; all other models are skipped.

    models: mapping of model-name -> model (project-specific type).

    Returns the generated C++ source as a list of lines.
    """
    listeners = []
    for model in models.values():
        modeltype = model.get('type')
        if modeltype != 'obj_def':
            continue
        classname = model.get('name')
        if classname != 'class_defn':
            continue
        while classname:
            model = models[classname]
            for key, value in model.allitems():
                # Only child-object relations produce clone code.
                if key in ['class', 'obj_ref', 'class_ref', 'group_ref']:
                    name = value.get('name')
                    type = value.get('type')
                    card = value.get('card')
                    # group_refs are heterogeneous, so they go through 'any'.
                    cast = 'any' if key == 'group_ref' else type
                    Cast = cast[:1].upper() + cast[1:]
                    # C++ accessor name: capitalized; vector accessors
                    # (card == 'any') are pluralized.
                    method = name[:1].upper() + name[1:]
                    if (card == 'any') and not method.endswith('s'):
                        method += 's'
                    if card == '1':
                        # Scalar child: deep clone onto the class object.
                        listeners.append(f'if (auto obj = cl->{method}()) {{')
                        listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, cl);')
                        listeners.append(' stmt->VpiParent(cl);')
                        listeners.append(f' cl->{method}(stmt);')
                        listeners.append('}')
                    elif method == 'Deriveds':
                        # Don't deep clone (the derived-class references are
                        # copied as-is).
                        listeners.append(f'if (auto vec = cl->{method}()) {{')
                        listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                        listeners.append(f' cl->{method}(clone_vec);')
                        listeners.append(' for (auto obj : *vec) {')
                        listeners.append(' auto* stmt = obj;')
                        listeners.append(' clone_vec->push_back(stmt);')
                        listeners.append(' }')
                        listeners.append('}')
                    else:
                        # Vector child: deep clone every element.
                        listeners.append(f'if (auto vec = cl->{method}()) {{')
                        listeners.append(f' auto clone_vec = serializer_->Make{Cast}Vec();')
                        listeners.append(f' cl->{method}(clone_vec);')
                        listeners.append(' for (auto obj : *vec) {')
                        listeners.append(' auto* stmt = obj->DeepClone(serializer_, this, cl);')
                        listeners.append(' stmt->VpiParent(cl);')
                        listeners.append(' clone_vec->push_back(stmt);')
                        listeners.append(' }')
                        listeners.append('}')
            # Walk up the inheritance chain; 'extends' is falsy at the root.
            classname = models[classname]['extends']
    return listeners
def generate(models):
    """Fill the ElaboratorListener.cpp template with the generated listener
    code and write it out (only if the content changed).

    Returns True on success.
    """
    def _indented(lines, width):
        # Prefix every generated line with `width` spaces and join them.
        pad = ' ' * width
        return pad + ('\n' + pad).join(lines)

    module_listeners = _generate_module_listeners(models, 'module')
    interface_listeners = _generate_module_listeners(models, 'interface')
    class_listeners = _generate_class_listeners(models)

    with open(config.get_template_filepath('ElaboratorListener.cpp'), 'rt') as strm:
        content = strm.read()

    content = content.replace('<MODULE_ELABORATOR_LISTENER>', _indented(module_listeners, 10))
    content = content.replace('<INTERFACE_ELABORATOR_LISTENER>', _indented(interface_listeners, 10))
    content = content.replace('<CLASS_ELABORATOR_LISTENER>', _indented(class_listeners, 4))

    file_utils.set_content_if_changed(config.get_output_source_filepath('ElaboratorListener.cpp'), content)
    return True
def _main():
    """Configure paths, load the models and run the generator.

    Returns True on success (generate()'s result).
    """
    import loader

    config.configure()
    return generate(loader.load_models())
# Script entry point: exit status 0 on success, 1 on failure.
if __name__ == '__main__':
    import sys
    sys.exit(0 if _main() else 1)
|
#!/usr/bin/env python
import argparse
import getpass
import girder_client
import os
import json
import sys
import subprocess
def getArguments():
    """Build the argument parser and return the parsed command line."""
    ap = argparse.ArgumentParser(description='''Populate a girder installation
with the public library (datasets and projects) for Resonant Laboratory.
This script will delete the existing Resonant Laboratory Library collection,
so running it multiple times won't result in duplicate installations, files,
or databases (any changes to the existing library will be lost)''')
    ap.add_argument('-u', dest='username', default='admin',
                    help='The administrator username (default: "admin")')
    ap.add_argument('-a', dest='apiUrl',
                    default='http://localhost:8080/api/v1',
                    help='The url of the Girder instance\'s API endpoint.')
    ap.add_argument('-t', dest='databaseThreshold', default='131072',
                    help='If a file exceeds this threshold (in bytes), upload it as a '
                         'mongo database collection instead of a flat file.')
    ap.add_argument('-n', dest='dbName',
                    default='resonantLaboratoryLibrary',
                    help='The name of the database to use for larger files.')
    ap.add_argument('-d', dest='dbAssetstoreId', default=None,
                    help='The Id of the database assetstore. If not specified,'
                         'this script will use the first database assetstore that it '
                         'discovers.')
    return ap.parse_args()
def printMessage(message):
    # Print `message` followed by an '='-underline as wide as its longest
    # line, then a blank line.
    # NOTE: uses Python 2 print statements -- this script targets Python 2.
    longestLine = max([len(line) for line in message.split('\n')])
    print message
    print ''.join(['=' for x in xrange(longestLine)]) # underline
    print
def authenticate(args):
print 'Enter the password for Girder user "' + args.username + '":'
password = getpass.getpass()
gc = girder_client.GirderClient(apiUrl=args.apiUrl)
try:
gc.authenticate(args.username, password)
except girder_client.AuthenticationError:
print 'Incorrect username/password'
sys.exit(1)
printMessage('\nLogged in successfully')
return gc
def getCurrentAssetstoreId(gc):
    """Return (id, name) of the current assetstore, or None if none is current.

    NOTE(review): the None-vs-tuple return shape is inconsistent; callers that
    unpack the result must check for None first - confirm against call sites.
    """
    current = [s for s in gc.sendRestRequest('GET', 'assetstore/', {})
               if s['current'] is True]
    if not current:
        return None
    return current[0]['_id'], current[0]['name']
def useAssetstore(gc, assetstoreId, assetstoreName):
    """Mark the given assetstore as Girder's current assetstore."""
    params = {'current': True, 'name': assetstoreName}
    gc.sendRestRequest('PUT', 'assetstore/' + assetstoreId, params)
def getDBassetstore(gc, specificId):
# Get the database assetstore Id and hostname+port
if specificId is None:
assetstores = gc.sendRestRequest('GET', 'assetstore/', {})
assetstores = filter(lambda x: x['type'] == 'database', assetstores)
if len(assetstores) == 0:
print 'Could not find a database assetstore'
sys.exit(1)
assetstore = assetstores[0]
else:
assetstore = gc.sendRestRequest('GET', 'assetstore/' + specificId, {})
if assetstore['database']['dbtype'] != 'mongo':
print 'For now, only mongo-based database assetstores are supported'
sys.exit(1)
assetstoreId = assetstore['_id']
assetstoreHost = assetstore['database']['uri'].split('/')[2]
message = 'Using database assetstore ' + assetstoreId
message += '\nhost: ' + assetstoreHost
printMessage(message)
return assetstoreId, assetstoreHost
def getFSassetstore(gc):
# Get the Filesystem assetstore Id, and make sure it is current
assetstores = gc.sendRestRequest('GET', 'assetstore/', {})
assetstores = filter(lambda x: x['current'] is True, assetstores)
if len(assetstores) == 0 or assetstores[0]['type'] != 0:
print 'There must be a current Filesystem assetstore to upload flat files'
sys.exit(1)
assetstore = assetstores[0]
assetstoreId = assetstore['_id']
message = 'Using Filesystem assetstore ' + assetstoreId
printMessage(message)
return assetstoreId
def getLibraryCollection(gc):
    """Recreate the 'Resonant Laboratory Library' collection and return its id.

    Any existing collection of that name is deleted first, so repeated runs
    never accumulate duplicates.
    """
    existing = gc.sendRestRequest('GET', 'collection',
                                  {'text': 'Resonant Laboratory Library'})
    if existing:
        gc.sendRestRequest('DELETE', 'collection/' + existing[0]['_id'])
        message = 'Deleted "Resonant Laboratory Library" collection.\n'
    else:
        message = 'No "Resonant Laboratory Library" collection to clean.\n'
    # Create a fresh, public collection.
    created = gc.sendRestRequest('POST', 'collection',
                                 {'name': 'Resonant Laboratory Library',
                                  'description': 'The public library for' +
                                                 ' the Resonant ' +
                                                 'Laboratory Application',
                                  'public': True})
    collectionId = created['_id']
    printMessage(message + 'Created collection ' + collectionId)
    return collectionId
def getDatasets(dataDir='./library/Data'):
    """Crawl dataDir to determine which datasets exist and how to upload them.

    Returns a dict mapping dataset (directory) name to a spec with:
      'files'       - small files to upload as flat Girder files
      'collections' - large files to import as mongo collections
      'metadata'    - parsed metadata.json contents, or None

    NOTE: reads the module-level `args` for the size threshold.
    """
    datasets = {}
    for item in os.listdir(dataDir):
        itemDir = os.path.join(dataDir, item)
        if not os.path.isdir(itemDir):
            continue
        datasets[item] = {
            'files': {},
            'collections': {},
            'metadata': None
        }
        for fileName in os.listdir(itemDir):
            filePath = os.path.join(dataDir, item, fileName)
            if fileName == 'metadata.json':
                # metadata.json is special; attach it as the item's
                # metadata instead of uploading it as a file.
                # Context manager guarantees the file handle is closed.
                with open(filePath, 'rb') as handle:
                    datasets[item]['metadata'] = json.loads(handle.read())
            elif os.stat(filePath).st_size > int(args.databaseThreshold):
                # Large files become mongo collections instead of flat files.
                datasets[item]['collections'][fileName] = filePath
            else:
                # The original special-cased '.json' here, but both branches
                # were identical: all small files are uploaded flat.
                datasets[item]['files'][fileName] = filePath
    printMessage('Identified %i datasets' % len(datasets))
    return datasets
def createMongoCollections(args, host, datasets, dbId, parentId):
collectionNames = set()
# Create/update all the mongo collections at once
for datasetName, spec in datasets.iteritems():
for fileName, filePath in spec['collections'].iteritems():
# TODO: do this with pymongo, not mongo-import
# ... or be even more general, and inspect / use
# whatever kind of database is powering the assetstore
parts = os.path.splitext(fileName)
collectionName = parts[0]
i = 1
while collectionName in collectionNames:
collectionName = parts[0] + i
i += 1
collectionNames.add(collectionName)
command = ['mongoimport',
'--host', host,
'--db', args.dbName,
'--collection', collectionName,
'--drop',
'--file', filePath]
if parts[1].lower() == '.csv':
command.extend(['--type', 'csv',
'--headerline'])
else:
command.append('--jsonArray')
print subprocess.check_output(command, stderr=subprocess.STDOUT)
# Hit the /database_assetstore/{id}/import endpoint to load all these
# collections as files in girder
collectionNames = [{'database': args.dbName, 'name': n, 'table': n} for n in collectionNames]
parameters = {
'parentId': parentId,
'table': json.dumps(collectionNames),
'parentType': 'collection'
}
gc.sendRestRequest('PUT', 'database_assetstore/' + dbId + '/import', parameters)
# This will create a folder named args.dbName inside the
# Resonant Laboratory library collection; we want to rename that
# folder to "Data"
parameters = {
'parentType': 'collection',
'parentId': parentId,
'name': args.dbName
}
dataFolder = gc.sendRestRequest('GET', 'folder', parameters)
gc.sendRestRequest('PUT', 'folder/' + dataFolder[0]['_id'], {'name': 'Data'})
# Now we want to get the Ids of all the items that we just created
itemList = gc.sendRestRequest('GET', 'item', {'folderId': dataFolder[0]['_id']})
# Create a lookup table to find Ids by name
lookupTable = dict(zip([x['name'] for x in itemList],
[x['_id'] for x in itemList]))
printMessage('Uploaded %i datasets as mongodb collections' % len(collectionNames))
return dataFolder[0]['_id'], lookupTable
def uploadFlatFiles(dataFolderId, datasets):
fileCount = 0
lookupTable = {}
for datasetName, spec in datasets.iteritems():
itemSpec = gc.load_or_create_item(datasetName, dataFolderId)
lookupTable[datasetName] = itemSpec['_id']
for fileName, filePath in spec['files'].iteritems():
print 'Uploading ' + filePath
gc.uploadFileToItem(itemSpec['_id'], filePath)
fileCount += 1
printMessage('Uploaded %i datasets as flat files\n' % fileCount)
return lookupTable
def parseProjectMetadata(datasetIdLookup):
    """Read ./library/Projects/*/metadata.json and resolve dataset references.

    Every entry in a project's 'datasets' list must name an itemId present in
    datasetIdLookup; projects referring to unknown datasets are skipped and
    counted (a warning is printed). Returns {project name: resolved metadata}.
    """
    projects = {}
    badProjects = 0
    for item in os.listdir('./library/Projects'):
        metaPath = './library/Projects/' + item + '/metadata.json'
        if not os.path.isdir('./library/Projects/' + item) or \
                not os.path.isfile(metaPath):
            continue
        # Context manager ensures the file closes even if json.loads raises.
        with open(metaPath, 'rb') as handle:
            metadata = json.loads(handle.read())
        for i, d in enumerate(metadata['datasets']):
            if d['itemId'] not in datasetIdLookup:
                # The dataset that this project refers to doesn't exist.
                metadata = None
                break
            else:
                metadata['datasets'][i]['dataset'] = datasetIdLookup[d['itemId']]
        if metadata is None:
            badProjects += 1
        else:
            projects[item] = metadata
    message = 'Identified %i project metadata files' % len(projects)
    if badProjects > 0:
        message += '\nWARNING: could not find the datasets\n' + \
                   'corresponding to %i projects!' % badProjects
    printMessage(message)
    return projects
def createProjects(collectionId, projects):
    """Create one Girder item per project under a 'Projects' folder.

    Returns a dict mapping project name to its item id.
    NOTE: uses the module-level `gc` client.
    """
    folder = gc.load_or_create_folder('Projects', collectionId, 'collection')
    itemIds = {}
    for name in projects:
        itemIds[name] = gc.load_or_create_item(name, folder['_id'])['_id']
    printMessage('Created %i project items' % len(projects))
    return itemIds
def attachMetadata(datasets, datasetIdLookup, projects, projectIdLookup):
    """Mark each item as a Resonant Lab dataset/project and attach any custom
    'rlab' metadata.

    NOTE: uses the module-level `gc` client.
    """
    def mergeRlab(response, extra):
        # Merge `extra` into the item's 'rlab' metadata namespace,
        # preserving any keys already present.
        meta = response.get('meta', {})
        rlab = meta.get('rlab', {})
        rlab.update(extra)
        meta['rlab'] = rlab
        return meta

    datasetMetaCount = 0
    projectMetaCount = 0
    for datasetName, datasetId in datasetIdLookup.iteritems():
        # Hit the endpoint that identifies the item as a Resonant Lab dataset.
        response = gc.sendRestRequest('POST', 'item/' + datasetId + '/dataset')
        if datasets[datasetName]['metadata'] is not None:
            gc.addMetadataToItem(
                datasetId, mergeRlab(response, datasets[datasetName]['metadata']))
            datasetMetaCount += 1
    for projectName, projectId in projectIdLookup.iteritems():
        # Hit the endpoint that identifies the item as a Resonant Lab project.
        response = gc.sendRestRequest('POST', 'item/' + projectId + '/project')
        gc.addMetadataToItem(projectId, mergeRlab(response, projects[projectName]))
        projectMetaCount += 1
    printMessage('Attached metadata to %i datasets and %i projects' %
                 (datasetMetaCount, projectMetaCount))
if __name__ == '__main__':
    # NOTE: `args` and `gc` are read as module-level globals by several of
    # the helper functions above, so they must be assigned before the helpers
    # that use them are called.
    args = getArguments()
    gc = authenticate(args)
    collectionId = getLibraryCollection(gc)
    # Set up the datasets
    datasets = getDatasets()
    datasetIdLookup = {}
    # Mongo datasets first
    dbAssetstoreId, dbAssetstoreHost = getDBassetstore(gc, args.dbAssetstoreId)
    dataFolderId, datasetIdLookup = createMongoCollections(args,
                                                           dbAssetstoreHost,
                                                           datasets,
                                                           dbAssetstoreId,
                                                           collectionId)
    # Now for the regular files
    fsAssetstoreId = getFSassetstore(gc)
    datasetIdLookup.update(uploadFlatFiles(dataFolderId, datasets))
    # Set up the projects
    projects = parseProjectMetadata(datasetIdLookup)
    projectIdLookup = createProjects(collectionId, projects)
    # Hit the appropriate endpoints and attach metadata where it exists
    attachMetadata(datasets, datasetIdLookup, projects, projectIdLookup)
    print 'Done!'
Add a -p (password) option to populateGirder.py for use in Ansible provisioning.
#!/usr/bin/env python
import argparse
import getpass
import girder_client
import os
import json
import sys
import subprocess
def getArguments():
    """Build the argparse parser for this script and parse sys.argv.

    Returns the parsed argument namespace. databaseThreshold stays a string
    and is converted with int() at its point of use; password may be None,
    in which case authenticate() prompts interactively.
    """
    parser = argparse.ArgumentParser(description='''Populate a girder installation
    with the public library (datasets and projects) for Resonant Laboratory.
    This script will delete the existing Resonant Laboratory Library collection,
    so running it multiple times won't result in duplicate installations, files,
    or databases (any changes to the existing library will be lost)''')
    parser.add_argument('-u', dest='username', default='admin',
                        help='The administrator username (default: "admin")')
    parser.add_argument('-p', dest='password', default=None,
                        help='The administrator password; if not given, will be prompted at runtime (optional)')
    parser.add_argument('-a', dest='apiUrl',
                        default='http://localhost:8080/api/v1',
                        help='The url of the Girder instance\'s API endpoint.')
    parser.add_argument('-t', dest='databaseThreshold', default='131072',
                        help='If a file exceeds this threshold (in bytes), upload it as a ' +
                        'mongo database collection instead of a flat file.')
    parser.add_argument('-n', dest='dbName',
                        default='resonantLaboratoryLibrary',
                        help='The name of the database to use for larger files.')
    parser.add_argument('-d', dest='dbAssetstoreId', default=None,
                        help='The Id of the database assetstore. If not specified,' +
                        'this script will use the first database assetstore that it ' +
                        'discovers.')
    return parser.parse_args()
def printMessage(message):
longestLine = max([len(line) for line in message.split('\n')])
print message
print ''.join(['=' for x in xrange(longestLine)]) # underline
print
def authenticate(args):
if args.password is None:
print 'Enter the password for Girder user "' + args.username + '":'
password = getpass.getpass()
else:
password = args.password
gc = girder_client.GirderClient(apiUrl=args.apiUrl)
try:
gc.authenticate(args.username, password)
except girder_client.AuthenticationError:
print 'Incorrect username/password'
sys.exit(1)
printMessage('\nLogged in successfully')
return gc
def getCurrentAssetstoreId(gc):
    """Return (id, name) of the current assetstore, or None if none is current.

    NOTE(review): the None-vs-tuple return shape is inconsistent; callers that
    unpack the result must check for None first - confirm against call sites.
    """
    current = [s for s in gc.sendRestRequest('GET', 'assetstore/', {})
               if s['current'] is True]
    if not current:
        return None
    return current[0]['_id'], current[0]['name']
def useAssetstore(gc, assetstoreId, assetstoreName):
    """Mark the given assetstore as Girder's current assetstore."""
    params = {'current': True, 'name': assetstoreName}
    gc.sendRestRequest('PUT', 'assetstore/' + assetstoreId, params)
def getDBassetstore(gc, specificId):
# Get the database assetstore Id and hostname+port
if specificId is None:
assetstores = gc.sendRestRequest('GET', 'assetstore/', {})
assetstores = filter(lambda x: x['type'] == 'database', assetstores)
if len(assetstores) == 0:
print 'Could not find a database assetstore'
sys.exit(1)
assetstore = assetstores[0]
else:
assetstore = gc.sendRestRequest('GET', 'assetstore/' + specificId, {})
if assetstore['database']['dbtype'] != 'mongo':
print 'For now, only mongo-based database assetstores are supported'
sys.exit(1)
assetstoreId = assetstore['_id']
assetstoreHost = assetstore['database']['uri'].split('/')[2]
message = 'Using database assetstore ' + assetstoreId
message += '\nhost: ' + assetstoreHost
printMessage(message)
return assetstoreId, assetstoreHost
def getFSassetstore(gc):
# Get the Filesystem assetstore Id, and make sure it is current
assetstores = gc.sendRestRequest('GET', 'assetstore/', {})
assetstores = filter(lambda x: x['current'] is True, assetstores)
if len(assetstores) == 0 or assetstores[0]['type'] != 0:
print 'There must be a current Filesystem assetstore to upload flat files'
sys.exit(1)
assetstore = assetstores[0]
assetstoreId = assetstore['_id']
message = 'Using Filesystem assetstore ' + assetstoreId
printMessage(message)
return assetstoreId
def getLibraryCollection(gc):
    """Recreate the 'Resonant Laboratory Library' collection and return its id.

    Any existing collection of that name is deleted first, so repeated runs
    never accumulate duplicates.
    """
    existing = gc.sendRestRequest('GET', 'collection',
                                  {'text': 'Resonant Laboratory Library'})
    if existing:
        gc.sendRestRequest('DELETE', 'collection/' + existing[0]['_id'])
        message = 'Deleted "Resonant Laboratory Library" collection.\n'
    else:
        message = 'No "Resonant Laboratory Library" collection to clean.\n'
    # Create a fresh, public collection.
    created = gc.sendRestRequest('POST', 'collection',
                                 {'name': 'Resonant Laboratory Library',
                                  'description': 'The public library for' +
                                                 ' the Resonant ' +
                                                 'Laboratory Application',
                                  'public': True})
    collectionId = created['_id']
    printMessage(message + 'Created collection ' + collectionId)
    return collectionId
def getDatasets(dataDir='./library/Data'):
    """Crawl dataDir to determine which datasets exist and how to upload them.

    Returns a dict mapping dataset (directory) name to a spec with:
      'files'       - small files to upload as flat Girder files
      'collections' - large files to import as mongo collections
      'metadata'    - parsed metadata.json contents, or None

    NOTE: reads the module-level `args` for the size threshold.
    """
    datasets = {}
    for item in os.listdir(dataDir):
        itemDir = os.path.join(dataDir, item)
        if not os.path.isdir(itemDir):
            continue
        datasets[item] = {
            'files': {},
            'collections': {},
            'metadata': None
        }
        for fileName in os.listdir(itemDir):
            filePath = os.path.join(dataDir, item, fileName)
            if fileName == 'metadata.json':
                # metadata.json is special; attach it as the item's
                # metadata instead of uploading it as a file.
                # Context manager guarantees the file handle is closed.
                with open(filePath, 'rb') as handle:
                    datasets[item]['metadata'] = json.loads(handle.read())
            elif os.stat(filePath).st_size > int(args.databaseThreshold):
                # Large files become mongo collections instead of flat files.
                datasets[item]['collections'][fileName] = filePath
            else:
                # The original special-cased '.json' here, but both branches
                # were identical: all small files are uploaded flat.
                datasets[item]['files'][fileName] = filePath
    printMessage('Identified %i datasets' % len(datasets))
    return datasets
def createMongoCollections(args, host, datasets, dbId, parentId):
collectionNames = set()
# Create/update all the mongo collections at once
for datasetName, spec in datasets.iteritems():
for fileName, filePath in spec['collections'].iteritems():
# TODO: do this with pymongo, not mongo-import
# ... or be even more general, and inspect / use
# whatever kind of database is powering the assetstore
parts = os.path.splitext(fileName)
collectionName = parts[0]
i = 1
while collectionName in collectionNames:
collectionName = parts[0] + i
i += 1
collectionNames.add(collectionName)
command = ['mongoimport',
'--host', host,
'--db', args.dbName,
'--collection', collectionName,
'--drop',
'--file', filePath]
if parts[1].lower() == '.csv':
command.extend(['--type', 'csv',
'--headerline'])
else:
command.append('--jsonArray')
print subprocess.check_output(command, stderr=subprocess.STDOUT)
# Hit the /database_assetstore/{id}/import endpoint to load all these
# collections as files in girder
collectionNames = [{'database': args.dbName, 'name': n, 'table': n} for n in collectionNames]
parameters = {
'parentId': parentId,
'table': json.dumps(collectionNames),
'parentType': 'collection'
}
gc.sendRestRequest('PUT', 'database_assetstore/' + dbId + '/import', parameters)
# This will create a folder named args.dbName inside the
# Resonant Laboratory library collection; we want to rename that
# folder to "Data"
parameters = {
'parentType': 'collection',
'parentId': parentId,
'name': args.dbName
}
dataFolder = gc.sendRestRequest('GET', 'folder', parameters)
gc.sendRestRequest('PUT', 'folder/' + dataFolder[0]['_id'], {'name': 'Data'})
# Now we want to get the Ids of all the items that we just created
itemList = gc.sendRestRequest('GET', 'item', {'folderId': dataFolder[0]['_id']})
# Create a lookup table to find Ids by name
lookupTable = dict(zip([x['name'] for x in itemList],
[x['_id'] for x in itemList]))
printMessage('Uploaded %i datasets as mongodb collections' % len(collectionNames))
return dataFolder[0]['_id'], lookupTable
def uploadFlatFiles(dataFolderId, datasets):
fileCount = 0
lookupTable = {}
for datasetName, spec in datasets.iteritems():
itemSpec = gc.load_or_create_item(datasetName, dataFolderId)
lookupTable[datasetName] = itemSpec['_id']
for fileName, filePath in spec['files'].iteritems():
print 'Uploading ' + filePath
gc.uploadFileToItem(itemSpec['_id'], filePath)
fileCount += 1
printMessage('Uploaded %i datasets as flat files\n' % fileCount)
return lookupTable
def parseProjectMetadata(datasetIdLookup):
    """Read ./library/Projects/*/metadata.json and resolve dataset references.

    Every entry in a project's 'datasets' list must name an itemId present in
    datasetIdLookup; projects referring to unknown datasets are skipped and
    counted (a warning is printed). Returns {project name: resolved metadata}.
    """
    projects = {}
    badProjects = 0
    for item in os.listdir('./library/Projects'):
        metaPath = './library/Projects/' + item + '/metadata.json'
        if not os.path.isdir('./library/Projects/' + item) or \
                not os.path.isfile(metaPath):
            continue
        # Context manager ensures the file closes even if json.loads raises.
        with open(metaPath, 'rb') as handle:
            metadata = json.loads(handle.read())
        for i, d in enumerate(metadata['datasets']):
            if d['itemId'] not in datasetIdLookup:
                # The dataset that this project refers to doesn't exist.
                metadata = None
                break
            else:
                metadata['datasets'][i]['dataset'] = datasetIdLookup[d['itemId']]
        if metadata is None:
            badProjects += 1
        else:
            projects[item] = metadata
    message = 'Identified %i project metadata files' % len(projects)
    if badProjects > 0:
        message += '\nWARNING: could not find the datasets\n' + \
                   'corresponding to %i projects!' % badProjects
    printMessage(message)
    return projects
def createProjects(collectionId, projects):
    """Create one Girder item per project under a 'Projects' folder.

    Returns a dict mapping project name to its item id.
    NOTE: uses the module-level `gc` client.
    """
    folder = gc.load_or_create_folder('Projects', collectionId, 'collection')
    itemIds = {}
    for name in projects:
        itemIds[name] = gc.load_or_create_item(name, folder['_id'])['_id']
    printMessage('Created %i project items' % len(projects))
    return itemIds
def attachMetadata(datasets, datasetIdLookup, projects, projectIdLookup):
    """Mark each item as a Resonant Lab dataset/project and attach any custom
    'rlab' metadata.

    NOTE: uses the module-level `gc` client.
    """
    def mergeRlab(response, extra):
        # Merge `extra` into the item's 'rlab' metadata namespace,
        # preserving any keys already present.
        meta = response.get('meta', {})
        rlab = meta.get('rlab', {})
        rlab.update(extra)
        meta['rlab'] = rlab
        return meta

    datasetMetaCount = 0
    projectMetaCount = 0
    for datasetName, datasetId in datasetIdLookup.iteritems():
        # Hit the endpoint that identifies the item as a Resonant Lab dataset.
        response = gc.sendRestRequest('POST', 'item/' + datasetId + '/dataset')
        if datasets[datasetName]['metadata'] is not None:
            gc.addMetadataToItem(
                datasetId, mergeRlab(response, datasets[datasetName]['metadata']))
            datasetMetaCount += 1
    for projectName, projectId in projectIdLookup.iteritems():
        # Hit the endpoint that identifies the item as a Resonant Lab project.
        response = gc.sendRestRequest('POST', 'item/' + projectId + '/project')
        gc.addMetadataToItem(projectId, mergeRlab(response, projects[projectName]))
        projectMetaCount += 1
    printMessage('Attached metadata to %i datasets and %i projects' %
                 (datasetMetaCount, projectMetaCount))
if __name__ == '__main__':
    # NOTE: `args` and `gc` are read as module-level globals by several of
    # the helper functions above, so they must be assigned before the helpers
    # that use them are called.
    args = getArguments()
    gc = authenticate(args)
    collectionId = getLibraryCollection(gc)
    # Set up the datasets
    datasets = getDatasets()
    datasetIdLookup = {}
    # Mongo datasets first
    dbAssetstoreId, dbAssetstoreHost = getDBassetstore(gc, args.dbAssetstoreId)
    dataFolderId, datasetIdLookup = createMongoCollections(args,
                                                           dbAssetstoreHost,
                                                           datasets,
                                                           dbAssetstoreId,
                                                           collectionId)
    # Now for the regular files
    fsAssetstoreId = getFSassetstore(gc)
    datasetIdLookup.update(uploadFlatFiles(dataFolderId, datasets))
    # Set up the projects
    projects = parseProjectMetadata(datasetIdLookup)
    projectIdLookup = createProjects(collectionId, projects)
    # Hit the appropriate endpoints and attach metadata where it exists
    attachMetadata(datasets, datasetIdLookup, projects, projectIdLookup)
    print 'Done!'
|
# -*- coding: utf-8 -*-
from werkzeug.exceptions import BadRequest
class DataStore(object):
    """
    Define a source of data. Can be anything from a database to another
    API, files and so on.
    """
    def __init__(self, data, model, **options):
        """
        Set the resource datastore.

        `model` is a model *class*; it is instantiated here. `options`
        may include 'paginate_by' to enable pagination.
        """
        self.data = data
        self.options = options
        self.model = model()

    def get(self, identifier):
        """
        Should return a dictionary representing the resource matching the
        identifier or raise a NotFound exception.
        .. note::
           Not implemented by base DataStore class
        """
        # BUG FIX: `raise NotImplemented` raised a TypeError because
        # NotImplemented is not an exception; NotImplementedError is correct.
        raise NotImplementedError

    def create(self, data):
        """
        `data` is a dict containing the representation of the resource.
        This method should call :meth:`~.DataStore.validate`, create the
        data in the datastore and return the resource identifier.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    def update(self, obj, data):
        """
        Should be able to call :meth:`~.DataStore.get` to retrieve the
        object to be updated, :meth:`~.DataStore.validate_fields` and
        return the updated object.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    def delete(self, identifier):
        """
        Should be able to validate the existence of the object in the
        resource and remove it from the datastore.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    def get_list(self, **kwargs):
        """
        This method is called each time you want a set of data.
        Data could be paginated and filtered.
        Should call :meth:`~.DataStore.filter`
        and return :meth:`~.DataStore.paginate`.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    def filter(self, **kwargs):
        """
        Should return a way to filter the resource according to kwargs.
        It is not mandatory to actually retrieve the resources, as they
        will be paginated just after the filter call; the point here is to
        prepare the filtering. Look at SQLiteDataStore.filter for an example.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    def paginate(self, data, **kwargs):
        """
        Return all the objects if no pagination options have been set,
        or only a one-page window of the resources otherwise.
        """
        start = 0
        end = self.options.get('paginate_by', None)
        if "start" in kwargs:
            start = int(kwargs['start'])
            end = start + self.options['paginate_by']
        elif "end" in kwargs:
            end = int(kwargs['end'])
            # BUG FIX: the original computed `end - int(kwargs['end'])`,
            # which is always 0. The window should be one page wide,
            # falling back to the old start-of-list behaviour when no
            # 'paginate_by' option is configured.
            start = max(0, end - self.options.get('paginate_by', end))
        return data[start:end]

    def validate(self, data):
        """
        Check that the data sent is valid for object creation: every
        required field must be present and pass its validators.
        Used to create new resources.
        """
        if not isinstance(data, dict):
            raise BadRequest()
        for field in self.model.fields:
            # BUG FIX: a missing required field used to escape as a raw
            # KeyError; report it as a BadRequest like other failures.
            if field.name not in data:
                raise BadRequest()
            for validator in field.validators:
                if not validator.validate(data[field.name]):
                    raise BadRequest()

    def validate_fields(self, data):
        """
        Validate only some fields of the resource.
        Used to update existing objects.
        """
        if not isinstance(data, dict):
            raise BadRequest()
        for k, v in data.iteritems():
            if k not in self.model.get_fields_name():
                raise BadRequest()
            field = self.model.get_field(k)
            for validator in field.validators:
                if not validator.validate(v):
                    raise BadRequest()
Use ABCMeta and abstractmethod for the base DataStore class.
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from werkzeug.exceptions import BadRequest
class DataStore(object):
    """
    Define a source of data. Can be anything from a database to another
    API, files and so on.
    """
    # Python 2 style metaclass declaration: makes the CRUD methods below
    # true abstract methods that subclasses must override.
    __metaclass__ = ABCMeta

    def __init__(self, data, model, **options):
        """
        Set the resource datastore.

        `model` is a model *class*; it is instantiated here. `options`
        may include 'paginate_by' to enable pagination.
        """
        self.data = data
        self.options = options
        self.model = model()

    @abstractmethod
    def get(self, identifier):
        """
        Should return a dictionary representing the resource matching the
        identifier or raise a NotFound exception.
        .. note::
           Not implemented by base DataStore class
        """
        # BUG FIX: `raise NotImplemented` raised a TypeError because
        # NotImplemented is not an exception; NotImplementedError is correct.
        raise NotImplementedError

    @abstractmethod
    def create(self, data):
        """
        `data` is a dict containing the representation of the resource.
        This method should call :meth:`~.DataStore.validate`, create the
        data in the datastore and return the resource identifier.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    @abstractmethod
    def update(self, obj, data):
        """
        Should be able to call :meth:`~.DataStore.get` to retrieve the
        object to be updated, :meth:`~.DataStore.validate_fields` and
        return the updated object.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    @abstractmethod
    def delete(self, identifier):
        """
        Should be able to validate the existence of the object in the
        resource and remove it from the datastore.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    @abstractmethod
    def get_list(self, **kwargs):
        """
        This method is called each time you want a set of data.
        Data could be paginated and filtered.
        Should call :meth:`~.DataStore.filter`
        and return :meth:`~.DataStore.paginate`.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    @abstractmethod
    def filter(self, **kwargs):
        """
        Should return a way to filter the resource according to kwargs.
        It is not mandatory to actually retrieve the resources, as they
        will be paginated just after the filter call; the point here is to
        prepare the filtering. Look at SQLiteDataStore.filter for an example.
        .. note::
           Not implemented by base DataStore class
        """
        raise NotImplementedError

    def paginate(self, data, **kwargs):
        """
        Return all the objects if no pagination options have been set,
        or only a one-page window of the resources otherwise.
        """
        start = 0
        end = self.options.get('paginate_by', None)
        if "start" in kwargs:
            start = int(kwargs['start'])
            end = start + self.options['paginate_by']
        elif "end" in kwargs:
            end = int(kwargs['end'])
            # BUG FIX: the original computed `end - int(kwargs['end'])`,
            # which is always 0. The window should be one page wide,
            # falling back to the old start-of-list behaviour when no
            # 'paginate_by' option is configured.
            start = max(0, end - self.options.get('paginate_by', end))
        return data[start:end]

    def validate(self, data):
        """
        Check that the data sent is valid for object creation: every
        required field must be present and pass its validators.
        Used to create new resources.
        """
        if not isinstance(data, dict):
            raise BadRequest()
        for field in self.model.fields:
            # BUG FIX: a missing required field used to escape as a raw
            # KeyError; report it as a BadRequest like other failures.
            if field.name not in data:
                raise BadRequest()
            for validator in field.validators:
                if not validator.validate(data[field.name]):
                    raise BadRequest()

    def validate_fields(self, data):
        """
        Validate only some fields of the resource.
        Used to update existing objects.
        """
        if not isinstance(data, dict):
            raise BadRequest()
        for k, v in data.iteritems():
            if k not in self.model.get_fields_name():
                raise BadRequest()
            field = self.model.get_field(k)
            for validator in field.validators:
                if not validator.validate(v):
                    raise BadRequest()
|
import httplib2
import logging
import socket
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils import simplejson as json
import oauth2 as oauth
# Module-level logger used to trace raw OAuth responses (see _oauth_response).
logger = logging.getLogger("oauth_consumer")
class ServiceFail(Exception):
    """Raised when the remote OAuth service returns an unusable response."""
    pass
class oAuthConsumer(object):
    """OAuth 1.0a consumer for a named external service.

    The consumer key/secret and the three OAuth endpoint URLs are read from
    the OAUTH_CONSUMER_SETTINGS Django setting, keyed by the service name.
    """
    def __init__(self, service):
        # `service` names the settings subtree consulted by _obtain_setting.
        self.service = service
        self.signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.consumer = oauth.Consumer(self.key, self.secret)
    @property
    def key(self):
        # Consumer key from OAUTH_CONSUMER_SETTINGS[service]['keys']['KEY'].
        return self._obtain_setting("keys", "KEY")
    @property
    def secret(self):
        # Consumer secret from OAUTH_CONSUMER_SETTINGS[service]['keys']['SECRET'].
        return self._obtain_setting("keys", "SECRET")
    @property
    def request_token_url(self):
        # Endpoint used to obtain an unauthorized request token.
        return self._obtain_setting("endpoints", "request_token")
    @property
    def access_token_url(self):
        # Endpoint used to exchange an authorized request token for an access token.
        return self._obtain_setting("endpoints", "access_token")
    @property
    def authorize_url(self):
        # Endpoint the user visits to authorize the request token.
        return self._obtain_setting("endpoints", "authorize")
    def _obtain_setting(self, k1, k2):
        """Look up settings.OAUTH_CONSUMER_SETTINGS[service][k1][k2].

        Raises ImproperlyConfigured with a message pinpointing exactly which
        level of the settings dict is missing.
        """
        name = "OAUTH_CONSUMER_SETTINGS"
        service = self.service
        try:
            return getattr(settings, name)[service][k1][k2]
        except AttributeError:
            raise ImproperlyConfigured("%s must be defined in settings" % (name,))
        except KeyError, e:
            # Inspect which key was missing to produce a precise error message.
            key = e.args[0]
            if key == service:
                raise ImproperlyConfigured("%s must contain '%s'" % (name, service))
            elif key == k1:
                raise ImproperlyConfigured("%s must contain '%s' for '%s'" % (name, k1, service))
            elif key == k2:
                raise ImproperlyConfigured("%s must contain '%s' for '%s' in '%s'" % (name, k2, k1, service))
            else:
                raise
    def unauthorized_token(self):
        # Lazily fetch and cache the request token on the instance.
        if not hasattr(self, "_unauthorized_token"):
            self._unauthorized_token = self.fetch_unauthorized_token()
        return self._unauthorized_token
    def fetch_unauthorized_token(self):
        """Request a new unauthorized request token from the service.

        Raises ServiceFail when the response lacks an oauth_token field.
        """
        # @@@ fixme
        base_url = "http://contacts-import.pinaxproject.com"
        callback_url = reverse("oauth_callback", kwargs={"service": self.service})
        request = oauth.Request.from_consumer_and_token(self.consumer,
            http_url = self.request_token_url,
            http_method = "POST",
            parameters = {
                "oauth_callback": "%s%s" % (base_url, callback_url),
            }
        )
        request.sign_request(self.signature_method, self.consumer, None)
        try:
            return oauth.Token.from_string(self._oauth_response(request))
        except KeyError, e:
            # A missing oauth_token means the service reply was unusable.
            if e.args[0] == "oauth_token":
                raise ServiceFail()
            raise
    def authorized_token(self, token):
        """Exchange an authorized request token for an access token."""
        request = oauth.Request.from_consumer_and_token(self.consumer,
            token = token,
            http_url = self.access_token_url,
            http_method = "POST",
        )
        request.sign_request(self.signature_method, self.consumer, token)
        try:
            return oauth.Token.from_string(self._oauth_response(request))
        except KeyError:
            raise ServiceFail()
    def check_token(self, unauth_token, given_token):
        """Return an access token if given_token matches the stored request
        token's key; otherwise return None."""
        token = oauth.Token.from_string(unauth_token)
        if token.key == given_token:
            return self.authorized_token(token)
        else:
            return None
    def authorization_url(self, token):
        """Return the signed URL the user should visit to authorize `token`."""
        request = oauth.Request.from_consumer_and_token(
            self.consumer,
            token = token,
            http_url = self.authorize_url,
        )
        request.sign_request(self.signature_method, self.consumer, token)
        return request.to_url()
    def make_api_call(self, url, token, http_method="GET", **kwargs):
        """Perform a signed API call and return the JSON-decoded response.

        Raises ServiceFail on an empty or non-JSON response body.
        """
        response = self._oauth_response(
            self._oauth_request(url, token,
                http_method = http_method,
                params = kwargs,
            )
        )
        if not response:
            raise ServiceFail()
        try:
            return json.loads(response)
        except ValueError:
            raise ServiceFail()
    def _oauth_request(self, url, token, http_method="GET", params=None):
        """Build and sign an OAuth request for the given URL and token."""
        request = oauth.Request.from_consumer_and_token(self.consumer,
            token = token,
            http_url = url,
            parameters = params,
            http_method = http_method,
        )
        request.sign_request(self.signature_method, self.consumer, token)
        return request
    def _oauth_response(self, request):
        """Send `request` over HTTP and return the raw response body."""
        # @@@ not sure if this will work everywhere. need to explore more.
        http = httplib2.Http()
        headers = {}
        headers.update(request.to_header())
        if request.http_method == "POST":
            ret = http.request(request.http_url, "POST",
                data = request.to_postdata(),
                headers = headers,
            )
        else:
            ret = http.request(request.to_url(), "GET",
                headers = headers,
            )
        response, content = ret
        # Log the full (response, content) pair for debugging.
        logger.debug(repr(ret))
        return content
Turns out this doesn't improve the situation.
import httplib2
import logging
import socket
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils import simplejson as json
import oauth2 as oauth
logger = logging.getLogger("oauth_consumer")
class ServiceFail(Exception):
pass
class oAuthConsumer(object):
    """OAuth 1.0a consumer for a named external service.

    Per-service keys and endpoint URLs come from the
    OAUTH_CONSUMER_SETTINGS dict in Django settings. Instances drive the
    request-token -> user-authorization -> access-token flow and can then
    make signed JSON API calls with the resulting access token.
    """

    def __init__(self, service):
        self.service = service
        self.signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.consumer = oauth.Consumer(self.key, self.secret)

    @property
    def key(self):
        return self._obtain_setting("keys", "KEY")

    @property
    def secret(self):
        return self._obtain_setting("keys", "SECRET")

    @property
    def request_token_url(self):
        return self._obtain_setting("endpoints", "request_token")

    @property
    def access_token_url(self):
        return self._obtain_setting("endpoints", "access_token")

    @property
    def authorize_url(self):
        return self._obtain_setting("endpoints", "authorize")

    def _obtain_setting(self, k1, k2):
        """Return settings.OAUTH_CONSUMER_SETTINGS[service][k1][k2].

        Raises ImproperlyConfigured naming the exact level of the settings
        dict that is missing.
        """
        name = "OAUTH_CONSUMER_SETTINGS"
        service = self.service
        try:
            return getattr(settings, name)[service][k1][k2]
        except AttributeError:
            raise ImproperlyConfigured("%s must be defined in settings" % (name,))
        # BUG FIX: `except KeyError, e` is Python-2-only syntax; the `as`
        # form works on 2.6+ and 3.x alike.
        except KeyError as e:
            key = e.args[0]
            if key == service:
                raise ImproperlyConfigured("%s must contain '%s'" % (name, service))
            elif key == k1:
                raise ImproperlyConfigured("%s must contain '%s' for '%s'" % (name, k1, service))
            elif key == k2:
                raise ImproperlyConfigured("%s must contain '%s' for '%s' in '%s'" % (name, k2, k1, service))
            else:
                raise

    def unauthorized_token(self):
        """Return an unauthorized request token, fetching it at most once."""
        if not hasattr(self, "_unauthorized_token"):
            self._unauthorized_token = self.fetch_unauthorized_token()
        return self._unauthorized_token

    def fetch_unauthorized_token(self):
        """Request a fresh unauthorized request token from the provider."""
        # @@@ fixme: callback base URL should come from configuration
        base_url = "http://contacts-import.pinaxproject.com"
        callback_url = reverse("oauth_callback", kwargs={"service": self.service})
        request = oauth.Request.from_consumer_and_token(self.consumer,
            http_url=self.request_token_url,
            http_method="POST",
            parameters={
                "oauth_callback": "%s%s" % (base_url, callback_url),
            }
        )
        request.sign_request(self.signature_method, self.consumer, None)
        try:
            return oauth.Token.from_string(self._oauth_response(request))
        except KeyError as e:
            # No oauth_token in the provider's response: treat as an outage.
            if e.args[0] == "oauth_token":
                raise ServiceFail()
            raise

    def authorized_token(self, token):
        """Exchange a user-authorized request token for an access token."""
        request = oauth.Request.from_consumer_and_token(self.consumer,
            token=token,
            http_url=self.access_token_url,
            http_method="POST",
        )
        request.sign_request(self.signature_method, self.consumer, token)
        try:
            return oauth.Token.from_string(self._oauth_response(request))
        except KeyError:
            raise ServiceFail()

    def check_token(self, unauth_token, given_token):
        """Return an access token iff ``given_token`` matches the stored
        request token's key; otherwise None."""
        token = oauth.Token.from_string(unauth_token)
        if token.key == given_token:
            return self.authorized_token(token)
        else:
            return None

    def authorization_url(self, token):
        """Signed URL the user must visit to authorize ``token``."""
        request = oauth.Request.from_consumer_and_token(
            self.consumer,
            token=token,
            http_url=self.authorize_url,
        )
        request.sign_request(self.signature_method, self.consumer, token)
        return request.to_url()

    def make_api_call(self, url, token, http_method="GET", **kwargs):
        """Perform a signed API call and return the decoded JSON payload.

        Raises ServiceFail on an empty or non-JSON response body.
        """
        response = self._oauth_response(
            self._oauth_request(url, token,
                http_method=http_method,
                params=kwargs,
            )
        )
        if not response:
            raise ServiceFail()
        try:
            return json.loads(response)
        except ValueError:
            raise ServiceFail()

    def _oauth_request(self, url, token, http_method="GET", params=None):
        """Build and sign an oauth.Request for ``url``."""
        request = oauth.Request.from_consumer_and_token(self.consumer,
            token=token,
            http_url=url,
            parameters=params,
            http_method=http_method,
        )
        request.sign_request(self.signature_method, self.consumer, token)
        return request

    def _oauth_response(self, request):
        """Execute ``request`` over HTTP and return the raw response body.

        The signed OAuth parameters travel in the POST body
        (request.to_postdata()) or in the GET query string (request.to_url()).
        """
        # @@@ not sure if this will work everywhere. need to explore more.
        http = httplib2.Http()
        if request.http_method == "POST":
            # BUG FIX: httplib2.Http.request() takes the payload as
            # `body=`, not `data=`; the old keyword raised TypeError.
            ret = http.request(request.http_url, "POST",
                body=request.to_postdata(),
            )
        else:
            ret = http.request(request.to_url(), "GET")
        response, content = ret
        logger.debug(repr(ret))
        return content
|
#!/usr/bin/env python
DESC = """EPP script for Quant-iT mesurements to verify standards, calculate
concentrations and load input artifact-udfs and output file-udfs of the process
with concentation values and fluorescence intensity
Reads from:
--Lims fields--
"Saturation threshold of fluorescence intensity" process udf
"Allowed %CV of duplicates" process udf
"Fluorescence intensity 1" udf of input analytes to the process
"Fluorescence intensity 2" udf of input analytes to the process
--files--
"Standards File (.txt)" "shared result file" uploaded by user.
"Quant-iT Result File 1" "shared result file" uploaded by user.
"Quant-iT Result File 2" "shared result file" uploaded by user. (optional)
Writes to:
--Lims fields--
"Intensity check" udf of process artifacts (result file)
"%CV" udf of process artifacts (result file)
"QC" qc-flag of process artifacts (result file)
Logging:
The script outputs a regular log file with regular execution information.
1) compares the udfs "Fluorescence intensity 1" and "Fluorescence intensity 2"
with the Saturation threshold of fluorescence intensity. If either of these two
udfs >= Saturation threshold of fluorescence intensity, assign "Saturated" to
the udf "Intensity check" and assign "Fail" to the sample. Otherwise assign
"OK" to the analyte "Intensity check".
2) For a sample with duplicate measurements, "%CV" is calculated by the formula:
%CV= (SD of "Fluorescence intensity 1" and "Fluorescence intensity 2")/(Mean of
"Fluorescence intensity 1" and ""Fluorescence intensity 2).
Copy the values to the sample analyte "%CV".
3) If "%CV" >= Allowed %CV of duplicates, assign "Fail" to the sample.
4) For a sample with only one measurement, if it passes in step 2, a "Pass" should
be assigned to the QC flag. For a sample with duplicate measurements, if it passes
both step 2 and step 4, a "Pass" should be assigned to the QC flag.
Written by Maya Brandi
"""
import os
import sys
import logging
import numpy as np
from argparse import ArgumentParser
from requests import HTTPError
from genologics.lims import Lims
from genologics.config import BASEURI,USERNAME,PASSWORD
from genologics.entities import Process
from genologics.epp import EppLogger
from genologics.epp import set_field
from genologics.epp import ReadResultFiles
lims = Lims(BASEURI,USERNAME,PASSWORD)
class QunatiT():
    """Collects QC statistics for a Quant-iT process and flags result files."""

    def __init__(self, process):
        self.udfs = dict(process.udf.items())
        self.abstract = []           # summary sentences printed at the end
        self.missing_udfs = []       # names of required process udfs not set
        self.hig_CV_fract = 0        # samples failed on duplicate %CV
        self.saturated = 0           # samples failed on saturated intensity
        self.flour_int_missing = 0   # samples without any intensity reading
        self.no_failed = 0           # total samples flagged FAILED

    def assign_QC_flag(self, result_file, treshold, allowed_dupl):
        """Set "Intensity check", "%CV" and the qc flag on one result file.

        BUG FIX: the original tested the readings with plain truthiness
        (`if flour_int_1 or flour_int_2`), so a legitimate intensity of
        0.0 was silently counted as a missing measurement. Readings are
        now compared against None explicitly.
        """
        analyte_udfs = dict(result_file.udf.items())
        flour_int_1 = analyte_udfs.get("Fluorescence intensity 1")
        flour_int_2 = analyte_udfs.get("Fluorescence intensity 2")
        readings = [f for f in (flour_int_1, flour_int_2) if f is not None]
        if readings:
            if max(readings) >= treshold:
                result_file.udf["Intensity check"] = "Saturated"
                result_file.qc_flag = "FAILED"
                self.saturated += 1
            else:
                result_file.udf["Intensity check"] = "OK"
                result_file.qc_flag = "PASSED"
            if len(readings) == 2:
                # %CV of a duplicate measurement: SD / mean (see DESC).
                procent_CV = np.true_divide(np.std(readings), np.mean(readings))
                result_file.udf["%CV"] = procent_CV
                if procent_CV >= allowed_dupl:
                    result_file.qc_flag = "FAILED"
                    self.hig_CV_fract += 1
            set_field(result_file)
        else:
            self.flour_int_missing += 1
        if result_file.qc_flag == "FAILED":
            self.no_failed += 1
def main(lims, pid, epp_logger):
    """Apply Quant-iT QC flags to every result file of process ``pid`` and
    print a one-line summary to stderr (picked up by the LIMS EPP log)."""
    process = Process(lims, id=pid)
    QiT = QunatiT(process)
    requiered_udfs = set(["Saturation threshold of fluorescence intensity",
                          "Allowed %CV of duplicates"])
    if requiered_udfs.issubset(QiT.udfs.keys()):
        treshold = QiT.udfs["Saturation threshold of fluorescence intensity"]
        allowed_dupl = QiT.udfs["Allowed %CV of duplicates"]
        for result_file in process.result_files():
            QiT.assign_QC_flag(result_file, treshold, allowed_dupl)
    else:
        # BUG FIX: the original appended the whole set object, which later
        # made ', '.join(QiT.missing_udfs) raise TypeError; record the
        # actually-missing udf names as strings instead.
        QiT.missing_udfs.extend(sorted(requiered_udfs - set(QiT.udfs.keys())))
    if QiT.missing_udfs:
        missing_udfs = ', '.join(QiT.missing_udfs)
        QiT.abstract.append("Some of the folowing requiered udfs seems to be missing: {0}.".format(missing_udfs))
    if QiT.flour_int_missing:
        QiT.abstract.append("Fluorescence intensity is missing for {0} samples.".format(QiT.flour_int_missing))
    QiT.abstract.append("{0} out of {1} samples failed QC. ".format(QiT.no_failed, len(process.result_files())))
    if QiT.saturated:
        QiT.abstract.append("{0} samples failed due to saturated fluorescence intensity.".format(QiT.saturated))
    if QiT.hig_CV_fract:
        QiT.abstract.append("{0} samples failed due to high %CV.".format(QiT.hig_CV_fract))
    QiT.abstract = list(set(QiT.abstract))
    # sys.stderr.write works under both Python 2 and 3; `print >> sys.stderr`
    # was Python-2-only syntax.
    sys.stderr.write(' '.join(QiT.abstract) + '\n')
if __name__ == "__main__":
    # Command-line entry point: parse arguments, connect to the LIMS and
    # run main() inside the EPP logging context manager.
    arg_parser = ArgumentParser(description=DESC)
    arg_parser.add_argument('--pid', default=None, dest='pid',
                            help='Lims id for current Process')
    arg_parser.add_argument('--log', dest='log',
                            help=('File name for standard log file, '
                                  'for runtime information and problems.'))
    options = arg_parser.parse_args()
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    lims.check_version()
    with EppLogger(log_file=options.log, lims=lims, prepend=True) as epp_logger:
        main(lims, options.pid, epp_logger)
adding conc
#!/usr/bin/env python
DESC = """EPP script for Quant-iT mesurements to verify standards, calculate
concentrations and load input artifact-udfs and output file-udfs of the process
with concentation values and fluorescence intensity
Reads from:
--Lims fields--
"Saturation threshold of fluorescence intensity" process udf
"Allowed %CV of duplicates" process udf
"Fluorescence intensity 1" udf of input analytes to the process
"Fluorescence intensity 2" udf of input analytes to the process
--files--
"Standards File (.txt)" "shared result file" uploaded by user.
"Quant-iT Result File 1" "shared result file" uploaded by user.
"Quant-iT Result File 2" "shared result file" uploaded by user. (optional)
Writes to:
--Lims fields--
"Intensity check" udf of process artifacts (result file)
"%CV" udf of process artifacts (result file)
"QC" qc-flag of process artifacts (result file)
Logging:
The script outputs a regular log file with regular execution information.
1) compares the udfs "Fluorescence intensity 1" and "Fluorescence intensity 2"
with the Saturation threshold of fluorescence intensity. If either of these two
udfs >= Saturation threshold of fluorescence intensity, assign "Saturated" to
the udf "Intensity check" and assign "Fail" to the sample. Otherwise assign
"OK" to the analyte "Intensity check".
2) For a sample with duplicate measurements, "%CV" is calculated by the formula:
%CV= (SD of "Fluorescence intensity 1" and "Fluorescence intensity 2")/(Mean of
"Fluorescence intensity 1" and ""Fluorescence intensity 2).
Copy the values to the sample analyte "%CV".
3) If "%CV" >= Allowed %CV of duplicates, assign "Fail" to the sample.
4) For a sample with only one measurement, if it passes in step 2, a "Pass" should
be assigned to the QC flag. For a sample with duplicate measurements, if it passes
both step 2 and step 4, a "Pass" should be assigned to the QC flag.
Written by Maya Brandi
"""
import os
import sys
import logging
import numpy as np
from argparse import ArgumentParser
from requests import HTTPError
from genologics.lims import Lims
from genologics.config import BASEURI,USERNAME,PASSWORD
from genologics.entities import Process
from genologics.epp import EppLogger
from genologics.epp import set_field
from genologics.epp import ReadResultFiles
lims = Lims(BASEURI,USERNAME,PASSWORD)
class QunatiT():
    """QC bookkeeping for a Quant-iT process: saturation, duplicate %CV
    and minimum-concentration checks on every result file."""

    def __init__(self, process):
        # Kept so assign_QC_flag() can run without arguments (main() calls
        # it with none).
        self.process = process
        self.udfs = dict(process.udf.items())
        self.requiered_udfs = set(["Allowed %CV of duplicates",
                                   "Saturation threshold of fluorescence intensity",
                                   "Minimum required concentration (ng/ul)"])
        self.abstract = []
        self.missing_udfs = []
        self.hig_CV_fract = 0
        self.saturated = 0
        # BUG FIX: was a bare `self.low_conc` expression, which raised
        # AttributeError on the first instantiation; the counter must be
        # initialised to 0.
        self.low_conc = 0
        self.flour_int_missing = 0
        self.no_failed = 0

    def saturation_QC(self, result_file, udfs):
        """Check intensity readings against the saturation threshold and,
        for duplicates, against the allowed %CV.

        Returns "PASSED"/"FAILED", or None when no reading is present.
        A reading of 0.0 counts as present (explicit None checks instead
        of the original truthiness test).
        """
        treshold = self.udfs["Saturation threshold of fluorescence intensity"]
        allowed_dupl = self.udfs["Allowed %CV of duplicates"]
        # `dict.has_key` is Python-2-only; `in`/`get` work in both 2 and 3.
        fint_1 = udfs.get("Fluorescence intensity 1")
        fint_2 = udfs.get("Fluorescence intensity 2")
        readings = [f for f in (fint_1, fint_2) if f is not None]
        if readings:
            qc_flag = "PASSED"
            if max(readings) >= treshold:
                result_file.udf["Intensity check"] = "Saturated"
                qc_flag = "FAILED"
                self.saturated += 1
            else:
                result_file.udf["Intensity check"] = "OK"
            if len(readings) == 2:
                # BUG FIX: the original line `std = np.std([fint_1, fint_2]`
                # was missing its closing parenthesis (SyntaxError).
                procent_CV = np.true_divide(np.std(readings), np.mean(readings))
                result_file.udf["%CV"] = procent_CV
                if procent_CV >= allowed_dupl:
                    qc_flag = "FAILED"
                    self.hig_CV_fract += 1
            return qc_flag
        else:
            self.flour_int_missing += 1
            return None

    def concentration_QC(self, result_file, result_file_udfs):
        """Fail samples whose concentration is below the required minimum.

        Returns "PASSED"/"FAILED", or None when no Concentration value has
        been set on the result file yet.
        """
        min_conc = self.udfs["Minimum required concentration (ng/ul)"]
        if "Concentration" not in result_file_udfs:
            return None
        if result_file_udfs['Concentration'] < min_conc:
            # BUG FIX: the increment was placed after `return` and was
            # therefore unreachable; count first, then return.
            self.low_conc += 1
            return "FAILED"
        else:
            return "PASSED"

    def assign_QC_flag(self, process=None):
        """Run both QC checks on every result file and store the qc flag.

        ``process`` defaults to the process given at construction time, so
        the existing call site `QiT.assign_QC_flag()` keeps working.
        """
        if process is None:
            process = self.process
        if self.requiered_udfs.issubset(self.udfs.keys()):
            for result_file in process.result_files():
                result_file_udfs = dict(result_file.udf.items())
                conc_QC = self.concentration_QC(result_file, result_file_udfs)
                sat_QC = self.saturation_QC(result_file, result_file_udfs)
                # BUG FIX: the original overwrote the concentration verdict
                # with the saturation verdict; a sample now fails when
                # either check fails.
                verdicts = [qc for qc in (conc_QC, sat_QC) if qc is not None]
                if not verdicts:
                    continue
                QC = "FAILED" if "FAILED" in verdicts else "PASSED"
                if QC == "FAILED":
                    self.no_failed += 1
                # BUG FIX: `qc_flagg` set a misspelled attribute; the real
                # genologics attribute is `qc_flag`.
                result_file.qc_flag = QC
                set_field(result_file)
        else:
            # BUG FIX: this branch referenced the undefined names `QiT` and
            # `requiered_udfs`; record the actually-missing udf names.
            self.missing_udfs.extend(sorted(self.requiered_udfs - set(self.udfs.keys())))
def main(lims, pid, epp_logger):
    """Run Quant-iT QC for process ``pid`` and print a summary to stderr."""
    process = Process(lims, id=pid)
    QiT = QunatiT(process)
    # BUG FIX: assign_QC_flag takes the process as an argument; the
    # original called it with none and raised TypeError.
    QiT.assign_QC_flag(process)
    if QiT.missing_udfs:
        # str() guards against non-string entries ending up in the list.
        missing_udfs = ', '.join(str(udf) for udf in QiT.missing_udfs)
        QiT.abstract.append("Some of the folowing requiered udfs seems to be missing: {0}.".format(missing_udfs))
    if QiT.flour_int_missing:
        QiT.abstract.append("Fluorescence intensity is missing for {0} samples.".format(QiT.flour_int_missing))
    QiT.abstract.append("{0} out of {1} samples failed QC. ".format(QiT.no_failed, len(process.result_files())))
    if QiT.saturated:
        QiT.abstract.append("{0} samples had saturated fluorescence intensity.".format(QiT.saturated))
    if QiT.hig_CV_fract:
        QiT.abstract.append("{0} samples had high %CV.".format(QiT.hig_CV_fract))
    if QiT.low_conc:
        # BUG FIX: message read "had high low concentration".
        QiT.abstract.append("{0} samples had low concentration.".format(QiT.low_conc))
    QiT.abstract = list(set(QiT.abstract))
    # Works under both Python 2 and 3; `print >> sys.stderr` is py2-only.
    sys.stderr.write(' '.join(QiT.abstract) + '\n')
if __name__ == "__main__":
    # CLI entry point: parse --pid/--log, open the LIMS connection and run
    # main() under the EPP logger.
    cli = ArgumentParser(description=DESC)
    cli.add_argument('--pid', default=None, dest='pid',
                     help='Lims id for current Process')
    cli.add_argument('--log', dest='log',
                     help=('File name for standard log file, '
                           'for runtime information and problems.'))
    parsed = cli.parse_args()
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    lims.check_version()
    with EppLogger(log_file=parsed.log, lims=lims, prepend=True) as epp_logger:
        main(lims, parsed.pid, epp_logger)
|
"""
Renderers
"""
import copy
from collections import OrderedDict
import inflection
from django.db.models import Manager, QuerySet
from django.utils import six, encoding
from rest_framework import relations
from rest_framework import renderers
from rest_framework.serializers import BaseSerializer, Serializer, ListSerializer
from rest_framework.settings import api_settings
from . import utils
class JSONRenderer(renderers.JSONRenderer):
"""
Render a JSON response per the JSON API spec:
{
"data": [{
"type": "companies",
"id": 1,
"attributes": {
"name": "Mozilla",
"slug": "mozilla",
"date-created": "2014-03-13 16:33:37"
}
}, {
"type": "companies",
"id": 2,
...
}]
}
"""
media_type = 'application/vnd.api+json'
format = 'vnd.api+json'
@classmethod
def extract_attributes(cls, fields, resource):
    """Collect the plain (non-relational) attributes of ``resource``.

    ``id`` lives at the root of a JSON API resource object and write-only
    fields must never be echoed back, so both are excluded; relational
    fields are rendered by extract_relationships(). Keys are run through
    utils.format_keys for the configured key convention.
    """
    data = OrderedDict()
    relational = (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)
    for field_name, field in six.iteritems(fields):
        if field_name == 'id':
            continue  # already emitted at the resource-object root
        if field.write_only:
            continue  # never serialize write-only input fields
        if isinstance(field, relational):
            continue  # relations are handled separately
        # For an empty serializer (browsable API "Raw data" form) a
        # read-only field has no value at all -- omit it rather than
        # rendering `"foo": null`.
        try:
            resource[field_name]
        except KeyError:
            if field.read_only:
                continue
        data[field_name] = resource.get(field_name)
    return utils.format_keys(data)
@classmethod
def extract_relationships(cls, fields, resource, resource_instance):
    """Build the JSON API ``relationships`` object for one resource.

    ``fields`` is the serializer's field dict, ``resource`` the already
    serialized (primitive) representation and ``resource_instance`` the
    model instance it was produced from. Each relational field is turned
    into a relationship object carrying ``data`` (type/id pointers) and,
    where applicable, ``links`` and a ``meta.count``. Returns an
    OrderedDict run through utils.format_keys, or None when there is no
    instance to inspect.
    """
    # Avoid circular deps
    from rest_framework_json_api.relations import ResourceRelatedField

    data = OrderedDict()

    # Don't try to extract relationships from a non-existent resource
    if resource_instance is None:
        return

    for field_name, field in six.iteritems(fields):
        # Skip URL field
        if field_name == api_settings.URL_FIELD_NAME:
            continue

        if field.write_only:
            continue

        # Skip fields without relations
        if not isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
            continue

        source = field.source
        relation_type = utils.get_related_resource_type(field)

        if isinstance(field, relations.HyperlinkedIdentityField):
            resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
            if not resolved:
                continue
            # special case for HyperlinkedIdentityField
            relation_data = list()

            # Don't try to query an empty relation
            relation_queryset = relation_instance \
                if relation_instance is not None else list()

            # One {type, id} pointer per related object.
            for related_object in relation_queryset:
                relation_data.append(
                    OrderedDict([('type', relation_type), ('id', encoding.force_text(related_object.pk))])
                )

            data.update({field_name: {
                'links': {
                    "related": resource.get(field_name)},
                'data': relation_data,
                'meta': {
                    'count': len(relation_data)
                }
            }})
            continue

        if isinstance(field, ResourceRelatedField):
            resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
            if not resolved:
                continue
            # special case for ResourceRelatedField: the serialized value
            # is already a {type, id} pointer.
            relation_data = {}
            field_data = resource.get(field_name)
            if field_data:
                relation_data['data'] = field_data

            field_links = field.get_links(resource_instance)
            # Only attach 'links' when the field produced any.
            relation_data.update(
                {'links': field_links}
                if field_links else dict()
            )
            data.update({field_name: relation_data})
            continue

        if isinstance(field, (relations.PrimaryKeyRelatedField, relations.HyperlinkedRelatedField)):
            # Read the raw '<source>_id' attribute so the related object
            # itself is never fetched just to learn its pk.
            resolved, relation = utils.get_relation_instance(resource_instance, '%s_id' % source, field.parent)
            if not resolved:
                continue
            relation_id = relation if resource.get(field_name) else None
            relation_data = {
                'data': (
                    OrderedDict([('type', relation_type), ('id', encoding.force_text(relation_id))])
                    if relation_id is not None else None)
            }

            relation_data.update(
                {'links': {'related': resource.get(field_name)}}
                if isinstance(field, relations.HyperlinkedRelatedField) and resource.get(field_name) else dict()
            )
            data.update({field_name: relation_data})
            continue

        if isinstance(field, relations.ManyRelatedField):
            resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
            if not resolved:
                continue

            if isinstance(field.child_relation, ResourceRelatedField):
                # special case for ResourceRelatedField
                relation_data = {
                    'data': resource.get(field_name)
                }

                field_links = field.child_relation.get_links(resource_instance)
                relation_data.update(
                    {'links': field_links}
                    if field_links else dict()
                )
                relation_data.update(
                    {
                        'meta': {
                            'count': len(resource.get(field_name))
                        }
                    }
                )
                data.update({field_name: relation_data})
                continue

            # Generic to-many relation: build {type, id} pointers from the
            # related instances themselves.
            relation_data = list()
            for nested_resource_instance in relation_instance:
                nested_resource_instance_type = (
                    relation_type or
                    utils.get_resource_type_from_instance(nested_resource_instance)
                )
                relation_data.append(OrderedDict([
                    ('type', nested_resource_instance_type),
                    ('id', encoding.force_text(nested_resource_instance.pk))
                ]))
            data.update({
                field_name: {
                    'data': relation_data,
                    'meta': {
                        'count': len(relation_data)
                    }
                }
            })
            continue

        if isinstance(field, ListSerializer):
            resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
            if not resolved:
                continue

            relation_data = list()

            serializer_data = resource.get(field_name)
            resource_instance_queryset = list(relation_instance)
            # Pair each serialized item with its instance by position.
            if isinstance(serializer_data, list):
                for position in range(len(serializer_data)):
                    nested_resource_instance = resource_instance_queryset[position]
                    nested_resource_instance_type = (
                        relation_type or
                        utils.get_resource_type_from_instance(nested_resource_instance)
                    )
                    relation_data.append(OrderedDict([
                        ('type', nested_resource_instance_type),
                        ('id', encoding.force_text(nested_resource_instance.pk))
                    ]))

                data.update({field_name: {'data': relation_data}})
                continue

        if isinstance(field, Serializer):
            resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
            if not resolved:
                continue

            # Nested serializer rendered as a single {type, id} pointer.
            data.update({
                field_name: {
                    'data': (
                        OrderedDict([
                            ('type', relation_type),
                            ('id', encoding.force_text(relation_instance.pk))
                        ]) if resource.get(field_name) else None)
                }
            })
            continue

    return utils.format_keys(data)
@classmethod
def extract_included(cls, fields, resource, resource_instance, included_resources):
    """Collect the compound-document (``included``) entries for one resource.

    Walks the relational fields of ``fields``, keeps only those named in
    ``included_resources`` (dotted paths select nested inclusions), runs
    the configured included serializer over the related instances and
    recurses into each of them. Returns the key-formatted list of
    resource objects, or None for an empty record.

    NOTE(review): the recursive extract_included() call can return None
    (early returns above), which list.extend() would reject -- presumably
    the guarded call sites never hit that path; confirm before relying
    on it.
    """
    # Avoid circular deps
    from rest_framework_json_api.relations import ResourceRelatedField, SerializerMethodResourceRelatedField

    # this function may be called with an empty record (example: Browsable Interface)
    if not resource_instance:
        return

    included_data = list()
    current_serializer = fields.serializer
    context = current_serializer.context
    included_serializers = utils.get_included_serializers(current_serializer)
    # Copy before mutating: remove() below consumes entries per level.
    included_resources = copy.copy(included_resources)
    included_resources = [inflection.underscore(value) for value in included_resources]

    for field_name, field in six.iteritems(fields):
        # Skip URL field
        if field_name == api_settings.URL_FIELD_NAME:
            continue

        # Skip fields without relations or serialized data
        if not isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
            continue

        try:
            included_resources.remove(field_name)
        except ValueError:
            # Skip fields not in requested included resources
            # If no child field, directly continue with the next field
            if field_name not in [node.split('.')[0] for node in included_resources]:
                continue

        # Resolve the related instance(s) for this field.
        try:
            if isinstance(field, SerializerMethodResourceRelatedField):
                serializer_method = getattr(current_serializer, field.source)
                relation_instance = serializer_method(resource_instance)
            elif isinstance(field, ResourceRelatedField):
                resource_instance_source = getattr(resource_instance, field.source)
                if callable(resource_instance_source):
                    relation_instance = resource_instance_source()
                else:
                    relation_instance = resource_instance_source
            else:
                relation_instance = getattr(resource_instance, field_name)
        except AttributeError:
            try:
                # For ManyRelatedFields if `related_name` is not set we need to access `foo_set` from `source`
                relation_instance = getattr(resource_instance, field.child_relation.source)
            except AttributeError:
                continue

        if isinstance(relation_instance, Manager):
            relation_instance = relation_instance.all()

        # Strip this field's prefix from the dotted include paths so the
        # recursion sees only its own children.
        new_included_resources = [key.replace('%s.' % field_name, '', 1)
                                  for key in included_resources
                                  if field_name == key.split('.')[0]]
        serializer_data = resource.get(field_name)

        if isinstance(field, relations.ManyRelatedField):
            serializer_class = included_serializers[field_name]
            field = serializer_class(relation_instance, many=True, context=context)
            serializer_data = field.data

        if isinstance(field, relations.RelatedField):
            if relation_instance is None:
                continue

            many = field._kwargs.get('child_relation', None) is not None
            serializer_class = included_serializers[field_name]
            field = serializer_class(relation_instance, many=many, context=context)
            serializer_data = field.data

        if isinstance(field, ListSerializer):
            serializer = field.child
            relation_type = utils.get_resource_type_from_serializer(serializer)
            relation_queryset = list(relation_instance)

            # Get the serializer fields
            serializer_fields = utils.get_serializer_fields(serializer)
            if serializer_data:
                for position in range(len(serializer_data)):
                    serializer_resource = serializer_data[position]
                    nested_resource_instance = relation_queryset[position]
                    resource_type = (
                        relation_type or
                        utils.get_resource_type_from_instance(nested_resource_instance)
                    )
                    included_data.append(
                        cls.build_json_resource_obj(
                            serializer_fields, serializer_resource, nested_resource_instance, resource_type
                        )
                    )
                    included_data.extend(
                        cls.extract_included(
                            serializer_fields, serializer_resource, nested_resource_instance, new_included_resources
                        )
                    )

        if isinstance(field, Serializer):
            relation_type = utils.get_resource_type_from_serializer(field)

            # Get the serializer fields
            serializer_fields = utils.get_serializer_fields(field)
            if serializer_data:
                included_data.append(
                    cls.build_json_resource_obj(
                        serializer_fields, serializer_data,
                        relation_instance, relation_type)
                )
                included_data.extend(
                    cls.extract_included(
                        serializer_fields, serializer_data, relation_instance, new_included_resources
                    )
                )

    return utils.format_keys(included_data)
@classmethod
def extract_meta(cls, serializer, resource):
    """Pull the values named in the serializer's Meta.meta_fields out of
    ``resource`` (many=True serializers keep Meta on their child)."""
    if hasattr(serializer, 'child'):
        meta = getattr(serializer.child, 'Meta', None)
    else:
        meta = getattr(serializer, 'Meta', None)
    meta_field_names = getattr(meta, 'meta_fields', [])
    return OrderedDict(
        (name, resource.get(name)) for name in meta_field_names
    )
@classmethod
def extract_root_meta(cls, serializer, resource):
    """Ask the serializer for document-level meta via get_root_meta(),
    if that hook is defined; otherwise return an empty dict."""
    is_many = hasattr(serializer, 'child')
    if is_many:
        serializer = serializer.child
    root_meta = {}
    if getattr(serializer, 'get_root_meta', None):
        json_api_meta = serializer.get_root_meta(resource, is_many)
        assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'
        root_meta.update(json_api_meta)
    return root_meta
@classmethod
def build_json_resource_obj(cls, fields, resource, resource_instance, resource_name):
    """Assemble one JSON API resource object:
    type / id / attributes [/ relationships] [/ links.self]."""
    pk = encoding.force_text(resource_instance.pk) if resource_instance else None
    resource_obj = OrderedDict([
        ('type', resource_name),
        ('id', pk),
        ('attributes', cls.extract_attributes(fields, resource)),
    ])
    relationships = cls.extract_relationships(fields, resource, resource_instance)
    if relationships:
        resource_obj['relationships'] = relationships
    # Add 'self' link if field is present and valid
    url_field_name = api_settings.URL_FIELD_NAME
    if url_field_name in resource and \
            isinstance(fields[url_field_name], relations.RelatedField):
        resource_obj['links'] = {'self': resource[url_field_name]}
    return resource_obj
def render_relationship_view(self, data, accepted_media_type=None, renderer_context=None):
    """Render the {'data': ..., 'links': ...} document for a RelationshipView."""
    # Special case for RelationshipView
    view = renderer_context.get("view", None)
    render_data = OrderedDict([
        ('data', data)
    ])
    links = view.get_links()
    if links:
        # FIX: a stray trailing comma previously turned this statement into
        # a throwaway one-element tuple expression.
        render_data.update({'links': links})
    return super(JSONRenderer, self).render(
        render_data, accepted_media_type, renderer_context
    )
def render_errors(self, data, accepted_media_type=None, renderer_context=None):
    """Render ``data`` as a JSON API formatted error document."""
    formatted_errors = utils.format_errors(data)
    return super(JSONRenderer, self).render(
        formatted_errors, accepted_media_type, renderer_context
    )
def render(self, data, accepted_media_type=None, renderer_context=None):
    """Top-level entry point: wrap serializer output in a JSON API document.

    Dispatches error responses and RelationshipViews to their dedicated
    renderers, falls back to plain JSON when no resource name is set,
    then builds data / included / meta / links in spec order.
    """
    view = renderer_context.get("view", None)
    request = renderer_context.get("request", None)

    # Get the resource name.
    resource_name = utils.get_resource_name(renderer_context)

    # If this is an error response, skip the rest.
    if resource_name == 'errors':
        return self.render_errors(data, accepted_media_type, renderer_context)

    # Local import to avoid a circular dependency with the views module.
    from rest_framework_json_api.views import RelationshipView
    if isinstance(view, RelationshipView):
        return self.render_relationship_view(data, accepted_media_type, renderer_context)

    # If `resource_name` is set to None then render default as the dev
    # wants to build the output format manually.
    if resource_name is None or resource_name is False:
        return super(JSONRenderer, self).render(
            data, accepted_media_type, renderer_context
        )

    json_api_data = data
    json_api_included = list()
    # initialize json_api_meta with pagination meta or an empty dict
    json_api_meta = data.get('meta', {}) if isinstance(data, dict) else {}

    # Paginated responses nest the serializer output under 'results'.
    if data and 'results' in data:
        serializer_data = data["results"]
    else:
        serializer_data = data

    serializer = getattr(serializer_data, 'serializer', None)

    included_resources = utils.get_included_resources(request, serializer)

    if serializer is not None:
        # Get the serializer fields
        fields = utils.get_serializer_fields(serializer)

        # Extract root meta for any type of serializer
        json_api_meta.update(self.extract_root_meta(serializer, serializer_data))

        if getattr(serializer, 'many', False):
            json_api_data = list()

            # Pair each serialized item with its instance by position.
            for position in range(len(serializer_data)):
                resource = serializer_data[position]  # Get current resource
                resource_instance = serializer.instance[position]  # Get current instance
                json_resource_obj = self.build_json_resource_obj(fields, resource, resource_instance, resource_name)
                meta = self.extract_meta(serializer, resource)
                if meta:
                    json_resource_obj.update({'meta': utils.format_keys(meta)})
                json_api_data.append(json_resource_obj)

                included = self.extract_included(fields, resource, resource_instance, included_resources)
                if included:
                    json_api_included.extend(included)
        else:
            resource_instance = serializer.instance
            json_api_data = self.build_json_resource_obj(fields, serializer_data, resource_instance, resource_name)

            meta = self.extract_meta(serializer, serializer_data)
            if meta:
                json_api_data.update({'meta': utils.format_keys(meta)})

            included = self.extract_included(fields, serializer_data, resource_instance, included_resources)
            if included:
                json_api_included.extend(included)

    # Make sure we render data in a specific order
    render_data = OrderedDict()

    if isinstance(data, dict) and data.get('links'):
        render_data['links'] = data.get('links')

    # format the api root link list
    if view.__class__ and view.__class__.__name__ == 'APIRoot':
        render_data['data'] = None
        render_data['links'] = json_api_data
    else:
        render_data['data'] = json_api_data

    if len(json_api_included) > 0:
        # Iterate through compound documents to remove duplicates
        seen = set()
        unique_compound_documents = list()
        for included_dict in json_api_included:
            type_tuple = tuple((included_dict['type'], included_dict['id']))
            if type_tuple not in seen:
                seen.add(type_tuple)
                unique_compound_documents.append(included_dict)

        # Sort the items by type then by id
        render_data['included'] = sorted(unique_compound_documents, key=lambda item: (item['type'], item['id']))

    if json_api_meta:
        render_data['meta'] = utils.format_keys(json_api_meta)

    return super(JSONRenderer, self).render(
        render_data, accepted_media_type, renderer_context
    )
Ignore errors on determining a type for many related fields that are
sourced from a function
"""
Renderers
"""
import copy
from collections import OrderedDict
import inflection
from django.db.models import Manager, QuerySet
from django.utils import six, encoding
from rest_framework import relations
from rest_framework import renderers
from rest_framework.serializers import BaseSerializer, Serializer, ListSerializer
from rest_framework.settings import api_settings
from . import utils
class JSONRenderer(renderers.JSONRenderer):
"""
Render a JSON response per the JSON API spec:
{
"data": [{
"type": "companies",
"id": 1,
"attributes": {
"name": "Mozilla",
"slug": "mozilla",
"date-created": "2014-03-13 16:33:37"
}
}, {
"type": "companies",
"id": 2,
...
}]
}
"""
media_type = 'application/vnd.api+json'
format = 'vnd.api+json'
@classmethod
def extract_attributes(cls, fields, resource):
    """Return the non-relational attributes of ``resource``, key-formatted.

    Excludes 'id' (rendered at the resource-object root), write-only
    fields, and relational fields (handled by extract_relationships()).
    """
    attributes = OrderedDict()
    for field_name, field in six.iteritems(fields):
        if field_name == 'id' or field.write_only:
            continue
        if isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
            continue
        # Empty serializer (browsable API "Raw data" form): omit read-only
        # fields that carry no value instead of rendering `"foo": null`.
        try:
            resource[field_name]
        except KeyError:
            if field.read_only:
                continue
        attributes[field_name] = resource.get(field_name)
    return utils.format_keys(attributes)
@classmethod
def extract_relationships(cls, fields, resource, resource_instance):
# Avoid circular deps
from rest_framework_json_api.relations import ResourceRelatedField
data = OrderedDict()
# Don't try to extract relationships from a non-existent resource
if resource_instance is None:
return
for field_name, field in six.iteritems(fields):
# Skip URL field
if field_name == api_settings.URL_FIELD_NAME:
continue
if field.write_only:
continue
# Skip fields without relations
if not isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
continue
source = field.source
# Pretty big hack, we don't actually care about the relation type in the case of ManyRelatedField
try:
relation_type = utils.get_related_resource_type(field)
except:
relation_type = None
if isinstance(field, relations.HyperlinkedIdentityField):
resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
if not resolved:
continue
# special case for HyperlinkedIdentityField
relation_data = list()
# Don't try to query an empty relation
relation_queryset = relation_instance \
if relation_instance is not None else list()
for related_object in relation_queryset:
relation_data.append(
OrderedDict([('type', relation_type), ('id', encoding.force_text(related_object.pk))])
)
data.update({field_name: {
'links': {
"related": resource.get(field_name)},
'data': relation_data,
'meta': {
'count': len(relation_data)
}
}})
continue
if isinstance(field, ResourceRelatedField):
resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
if not resolved:
continue
# special case for ResourceRelatedField
relation_data = {}
field_data = resource.get(field_name)
if field_data:
relation_data['data'] = field_data
field_links = field.get_links(resource_instance)
relation_data.update(
{'links': field_links}
if field_links else dict()
)
data.update({field_name: relation_data})
continue
if isinstance(field, (relations.PrimaryKeyRelatedField, relations.HyperlinkedRelatedField)):
resolved, relation = utils.get_relation_instance(resource_instance, '%s_id' % source, field.parent)
if not resolved:
continue
relation_id = relation if resource.get(field_name) else None
relation_data = {
'data': (
OrderedDict([('type', relation_type), ('id', encoding.force_text(relation_id))])
if relation_id is not None else None)
}
relation_data.update(
{'links': {'related': resource.get(field_name)}}
if isinstance(field, relations.HyperlinkedRelatedField) and resource.get(field_name) else dict()
)
data.update({field_name: relation_data})
continue
if isinstance(field, relations.ManyRelatedField):
resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
if not resolved:
continue
if isinstance(field.child_relation, ResourceRelatedField):
# special case for ResourceRelatedField
relation_data = {
'data': resource.get(field_name)
}
field_links = field.child_relation.get_links(resource_instance)
relation_data.update(
{'links': field_links}
if field_links else dict()
)
relation_data.update(
{
'meta': {
'count': len(resource.get(field_name))
}
}
)
data.update({field_name: relation_data})
continue
relation_data = list()
for nested_resource_instance in relation_instance:
nested_resource_instance_type = (
relation_type or
utils.get_resource_type_from_instance(nested_resource_instance)
)
relation_data.append(OrderedDict([
('type', nested_resource_instance_type),
('id', encoding.force_text(nested_resource_instance.pk))
]))
data.update({
field_name: {
'data': relation_data,
'meta': {
'count': len(relation_data)
}
}
})
continue
if isinstance(field, ListSerializer):
resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
if not resolved:
continue
relation_data = list()
serializer_data = resource.get(field_name)
resource_instance_queryset = list(relation_instance)
if isinstance(serializer_data, list):
for position in range(len(serializer_data)):
nested_resource_instance = resource_instance_queryset[position]
nested_resource_instance_type = (
relation_type or
utils.get_resource_type_from_instance(nested_resource_instance)
)
relation_data.append(OrderedDict([
('type', nested_resource_instance_type),
('id', encoding.force_text(nested_resource_instance.pk))
]))
data.update({field_name: {'data': relation_data}})
continue
if isinstance(field, Serializer):
resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
if not resolved:
continue
data.update({
field_name: {
'data': (
OrderedDict([
('type', relation_type),
('id', encoding.force_text(relation_instance.pk))
]) if resource.get(field_name) else None)
}
})
continue
return utils.format_keys(data)
@classmethod
def extract_included(cls, fields, resource, resource_instance, included_resources):
# Avoid circular deps
from rest_framework_json_api.relations import ResourceRelatedField, SerializerMethodResourceRelatedField
# this function may be called with an empty record (example: Browsable Interface)
if not resource_instance:
return
included_data = list()
current_serializer = fields.serializer
context = current_serializer.context
included_serializers = utils.get_included_serializers(current_serializer)
included_resources = copy.copy(included_resources)
included_resources = [inflection.underscore(value) for value in included_resources]
for field_name, field in six.iteritems(fields):
# Skip URL field
if field_name == api_settings.URL_FIELD_NAME:
continue
# Skip fields without relations or serialized data
if not isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
continue
try:
included_resources.remove(field_name)
except ValueError:
# Skip fields not in requested included resources
# If no child field, directly continue with the next field
if field_name not in [node.split('.')[0] for node in included_resources]:
continue
try:
if isinstance(field, SerializerMethodResourceRelatedField):
serializer_method = getattr(current_serializer, field.source)
relation_instance = serializer_method(resource_instance)
elif isinstance(field, ResourceRelatedField):
resource_instance_source = getattr(resource_instance, field.source)
if callable(resource_instance_source):
relation_instance = resource_instance_source()
else:
relation_instance = resource_instance_source
else:
relation_instance = getattr(resource_instance, field_name)
except AttributeError:
try:
# For ManyRelatedFields if `related_name` is not set we need to access `foo_set` from `source`
relation_instance = getattr(resource_instance, field.child_relation.source)
except AttributeError:
continue
if isinstance(relation_instance, Manager):
relation_instance = relation_instance.all()
new_included_resources = [key.replace('%s.' % field_name, '', 1)
for key in included_resources
if field_name == key.split('.')[0]]
serializer_data = resource.get(field_name)
if isinstance(field, relations.ManyRelatedField):
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=True, context=context)
serializer_data = field.data
if isinstance(field, relations.RelatedField):
if relation_instance is None:
continue
many = field._kwargs.get('child_relation', None) is not None
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=many, context=context)
serializer_data = field.data
if isinstance(field, ListSerializer):
serializer = field.child
relation_type = utils.get_resource_type_from_serializer(serializer)
relation_queryset = list(relation_instance)
# Get the serializer fields
serializer_fields = utils.get_serializer_fields(serializer)
if serializer_data:
for position in range(len(serializer_data)):
serializer_resource = serializer_data[position]
nested_resource_instance = relation_queryset[position]
resource_type = (
relation_type or
utils.get_resource_type_from_instance(nested_resource_instance)
)
included_data.append(
cls.build_json_resource_obj(
serializer_fields, serializer_resource, nested_resource_instance, resource_type
)
)
included_data.extend(
cls.extract_included(
serializer_fields, serializer_resource, nested_resource_instance, new_included_resources
)
)
if isinstance(field, Serializer):
relation_type = utils.get_resource_type_from_serializer(field)
# Get the serializer fields
serializer_fields = utils.get_serializer_fields(field)
if serializer_data:
included_data.append(
cls.build_json_resource_obj(
serializer_fields, serializer_data,
relation_instance, relation_type)
)
included_data.extend(
cls.extract_included(
serializer_fields, serializer_data, relation_instance, new_included_resources
)
)
return utils.format_keys(included_data)
@classmethod
def extract_meta(cls, serializer, resource):
if hasattr(serializer, 'child'):
meta = getattr(serializer.child, 'Meta', None)
else:
meta = getattr(serializer, 'Meta', None)
meta_fields = getattr(meta, 'meta_fields', [])
data = OrderedDict()
for field_name in meta_fields:
data.update({
field_name: resource.get(field_name)
})
return data
@classmethod
def extract_root_meta(cls, serializer, resource):
many = False
if hasattr(serializer, 'child'):
many = True
serializer = serializer.child
data = {}
if getattr(serializer, 'get_root_meta', None):
json_api_meta = serializer.get_root_meta(resource, many)
assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'
data.update(json_api_meta)
return data
@classmethod
def build_json_resource_obj(cls, fields, resource, resource_instance, resource_name):
resource_data = [
('type', resource_name),
('id', encoding.force_text(resource_instance.pk) if resource_instance else None),
('attributes', cls.extract_attributes(fields, resource)),
]
relationships = cls.extract_relationships(fields, resource, resource_instance)
if relationships:
resource_data.append(('relationships', relationships))
# Add 'self' link if field is present and valid
if api_settings.URL_FIELD_NAME in resource and \
isinstance(fields[api_settings.URL_FIELD_NAME], relations.RelatedField):
resource_data.append(('links', {'self': resource[api_settings.URL_FIELD_NAME]}))
return OrderedDict(resource_data)
def render_relationship_view(self, data, accepted_media_type=None, renderer_context=None):
# Special case for RelationshipView
view = renderer_context.get("view", None)
render_data = OrderedDict([
('data', data)
])
links = view.get_links()
if links:
render_data.update({'links': links}),
return super(JSONRenderer, self).render(
render_data, accepted_media_type, renderer_context
)
def render_errors(self, data, accepted_media_type=None, renderer_context=None):
return super(JSONRenderer, self).render(
utils.format_errors(data), accepted_media_type, renderer_context
)
def render(self, data, accepted_media_type=None, renderer_context=None):
view = renderer_context.get("view", None)
request = renderer_context.get("request", None)
# Get the resource name.
resource_name = utils.get_resource_name(renderer_context)
# If this is an error response, skip the rest.
if resource_name == 'errors':
return self.render_errors(data, accepted_media_type, renderer_context)
from rest_framework_json_api.views import RelationshipView
if isinstance(view, RelationshipView):
return self.render_relationship_view(data, accepted_media_type, renderer_context)
# If `resource_name` is set to None then render default as the dev
# wants to build the output format manually.
if resource_name is None or resource_name is False:
return super(JSONRenderer, self).render(
data, accepted_media_type, renderer_context
)
json_api_data = data
json_api_included = list()
# initialize json_api_meta with pagination meta or an empty dict
json_api_meta = data.get('meta', {}) if isinstance(data, dict) else {}
if data and 'results' in data:
serializer_data = data["results"]
else:
serializer_data = data
serializer = getattr(serializer_data, 'serializer', None)
included_resources = utils.get_included_resources(request, serializer)
if serializer is not None:
# Get the serializer fields
fields = utils.get_serializer_fields(serializer)
# Extract root meta for any type of serializer
json_api_meta.update(self.extract_root_meta(serializer, serializer_data))
if getattr(serializer, 'many', False):
json_api_data = list()
for position in range(len(serializer_data)):
resource = serializer_data[position] # Get current resource
resource_instance = serializer.instance[position] # Get current instance
json_resource_obj = self.build_json_resource_obj(fields, resource, resource_instance, resource_name)
meta = self.extract_meta(serializer, resource)
if meta:
json_resource_obj.update({'meta': utils.format_keys(meta)})
json_api_data.append(json_resource_obj)
included = self.extract_included(fields, resource, resource_instance, included_resources)
if included:
json_api_included.extend(included)
else:
resource_instance = serializer.instance
json_api_data = self.build_json_resource_obj(fields, serializer_data, resource_instance, resource_name)
meta = self.extract_meta(serializer, serializer_data)
if meta:
json_api_data.update({'meta': utils.format_keys(meta)})
included = self.extract_included(fields, serializer_data, resource_instance, included_resources)
if included:
json_api_included.extend(included)
# Make sure we render data in a specific order
render_data = OrderedDict()
if isinstance(data, dict) and data.get('links'):
render_data['links'] = data.get('links')
# format the api root link list
if view.__class__ and view.__class__.__name__ == 'APIRoot':
render_data['data'] = None
render_data['links'] = json_api_data
else:
render_data['data'] = json_api_data
if len(json_api_included) > 0:
# Iterate through compound documents to remove duplicates
seen = set()
unique_compound_documents = list()
for included_dict in json_api_included:
type_tuple = tuple((included_dict['type'], included_dict['id']))
if type_tuple not in seen:
seen.add(type_tuple)
unique_compound_documents.append(included_dict)
# Sort the items by type then by id
render_data['included'] = sorted(unique_compound_documents, key=lambda item: (item['type'], item['id']))
if json_api_meta:
render_data['meta'] = utils.format_keys(json_api_meta)
return super(JSONRenderer, self).render(
render_data, accepted_media_type, renderer_context
)
|
# register_brain.py
import numpy as np
import requests
import tifffile as tf
import sys
import os
import ndreg
from ndreg import preprocessor, util
import SimpleITK as sitk
import numpy as np
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import skimage
import argparse
import time
#from ndpull import ndpull
import configparser
from configparser import ConfigParser
# Shared SimpleITK constants for the registration script.
dimension = 3  # all volumes are treated as 3-D
vectorComponentType = sitk.sitkFloat32
vectorType = sitk.sitkVectorFloat32
affine = sitk.AffineTransform(dimension)
identityAffine = list(affine.GetParameters())
# First 9 parameters of an identity affine form the identity direction matrix.
identityDirection = identityAffine[0:9]
zeroOrigin = [0] * dimension
zeroIndex = [0] * dimension
# Boss datatype string -> SimpleITK pixel type.
ndToSitkDataTypes = {'uint8': sitk.sitkUInt8,
                     'uint16': sitk.sitkUInt16,
                     'uint32': sitk.sitkUInt32,
                     'float32': sitk.sitkFloat32,
                     'uint64': sitk.sitkUInt64}
# SimpleITK pixel type -> numpy dtype.
sitkToNpDataTypes = {sitk.sitkUInt8: np.uint8,
                     sitk.sitkUInt16: np.uint16,
                     sitk.sitkUInt32: np.uint32,
                     sitk.sitkInt8: np.int8,
                     sitk.sitkInt16: np.int16,
                     sitk.sitkInt32: np.int32,
                     sitk.sitkFloat32: np.float32,
                     sitk.sitkFloat64: np.float64,
                     }
# Boss Stuff:
def setup_experiment_boss(remote, collection, experiment):
    """
    Get experiment and coordinate frame information from the boss.

    Returns (experiment_resource, coordinate_frame_resource) on success.
    On an HTTP failure the error is printed and None is returned
    (unchanged behaviour from the original).
    """
    exp_setup = ExperimentResource(experiment, collection)
    try:
        exp_actual = remote.get_project(exp_setup)
        coord_setup = CoordinateFrameResource(exp_actual.coord_frame)
        coord_actual = remote.get_project(coord_setup)
        return (exp_setup, coord_actual)
    except requests.HTTPError as e:
        # Fix: `HTTPError` was referenced as a bare name (resolvable only if
        # the star import happened to export it) and `e.message` does not
        # exist on Python 3 exceptions. Use the explicit requests class and
        # print the exception itself.
        print(e)
def setup_channel_boss(
        remote,
        collection,
        experiment,
        channel,
        channel_type='image',
        datatype='uint16'):
    """Fetch a channel plus its experiment and coordinate frame from the Boss.

    Returns (experiment_resource, coordinate_frame, channel_resource) on
    success; on an HTTP failure the error is printed and None is returned
    (unchanged behaviour from the original).
    """
    (exp_setup, coord_actual) = setup_experiment_boss(
        remote, collection, experiment)
    chan_setup = ChannelResource(
        channel,
        collection,
        experiment,
        channel_type,
        datatype=datatype)
    try:
        chan_actual = remote.get_project(chan_setup)
        return (exp_setup, coord_actual, chan_actual)
    except requests.HTTPError as e:
        # Fix: bare `HTTPError` name and Python-2-only `e.message` (see
        # setup_experiment_boss).
        print(e)
# Note: The following functions assume an anisotropic dataset. This is generally a bad assumption. These
# functions are stopgaps until proper coordinate frame at resulution
# support exists in intern.
def get_xyz_extents(rmt, ch_rsc, res=0, iso=True):
    """Look up the x/y/z extents and voxel spacing of a channel at *res*.

    Queries the Boss downsample endpoint directly, since intern does not
    yet expose per-resolution coordinate frames.
    """
    endpoint = 'https://api.boss.neurodata.io/v1/' + \
        '/downsample/{}?iso={}'.format(ch_rsc.get_cutout_route(), iso)
    auth_header = {'Authorization': 'Token ' + rmt.token_project}
    payload = requests.get(endpoint, headers=auth_header).json()
    level = '{}'.format(res)
    extents = payload['extent'][level]
    ranges = [[0, extents[axis]] for axis in range(3)]
    return (ranges[0], ranges[1], ranges[2], payload['voxel_size'][level])
def get_offset_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] start offset of *coord_frame* scaled to *res*.

    x and y starts are always divided by 2**res; z is scaled only when the
    downsampling is isotropic.
    """
    scale = 2. ** res
    x_off = int(coord_frame.x_start / scale)
    y_off = int(coord_frame.y_start / scale)
    z_off = int(coord_frame.z_start / scale) if isotropic else coord_frame.z_start
    return [x_off, y_off, z_off]
def get_image_size_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] stop extent of *coord_frame* scaled to *res*.

    x and y stops are always divided by 2**res; z is scaled only when the
    downsampling is isotropic.
    """
    scale = 2. ** res
    x_stop = int(coord_frame.x_stop / scale)
    y_stop = int(coord_frame.y_stop / scale)
    z_stop = int(coord_frame.z_stop / scale) if isotropic else coord_frame.z_stop
    return [x_stop, y_stop, z_stop]
def imgDownload_boss(
        remote,
        channel_resource,
        coordinate_frame_resource,
        resolution=0,
        size=None,
        start=None,
        isotropic=False):
    """
    Download image with given token from given server at given resolution.
    If channel isn't specified the first channel is downloaded.

    `size`/`start` default to the full coordinate-frame extent/offset at
    `resolution`. Fix: the defaults were mutable lists (`[]`); `None` is the
    sentinel now, and explicitly passing `[]` still behaves the same.
    Returns a SimpleITK image with spacing in mm.
    """
    # TODO: Fix size and start parameters
    voxel_unit = coordinate_frame_resource.voxel_unit
    voxel_units = ('nanometers', 'micrometers', 'millimeters', 'centimeters')
    factor_divide = (1e-6, 1e-3, 1, 10)
    fact_div = factor_divide[voxel_units.index(voxel_unit)]
    spacingBoss = [
        coordinate_frame_resource.x_voxel_size,
        coordinate_frame_resource.y_voxel_size,
        coordinate_frame_resource.z_voxel_size]
    spacing = [x * fact_div for x in spacingBoss]  # Convert spacing to mm
    if isotropic:
        spacing = [x * 2**resolution for x in spacing]
    else:
        spacing[0] = spacing[0] * 2**resolution
        spacing[1] = spacing[1] * 2**resolution
        # z spacing unchanged since not isotropic
    if not size:
        size = get_image_size_boss(
            coordinate_frame_resource, resolution, isotropic)
    if not start:
        start = get_offset_boss(
            coordinate_frame_resource, resolution, isotropic)
    # dataType = metadata['channels'][channel]['datatype']
    dataType = channel_resource.datatype
    # Download all image data from specified channel
    array = remote.get_cutout(
        channel_resource, resolution, [
            start[0], size[0]], [
            start[1], size[1]], [
            start[2], size[2]])
    # Cast downloaded image to server's data type
    # convert numpy array to sitk image
    img = sitk.Cast(sitk.GetImageFromArray(array), ndToSitkDataTypes[dataType])
    img.SetDirection(identityDirection)
    img.SetSpacing(spacing)
    # Convert to 2D if only one slice
    img = util.imgCollapseDimension(img)
    return img
# NOTE(review): duplicate definition — this shadows the identical
# get_offset_boss defined earlier in this module; one copy should be removed.
def get_offset_boss(coord_frame, res=0, isotropic=False):
    # Start offsets scaled by 2**res; z is scaled only when isotropic.
    return [
        int(coord_frame.x_start / (2.**res)),
        int(coord_frame.y_start / (2.**res)),
        int(coord_frame.z_start / (2.**res)) if isotropic else coord_frame.z_start]
def create_channel_resource(rmt, chan_name, coll_name, exp_name, type='image',
                            base_resolution=0, sources=None, datatype='uint16', new_channel=True):
    """Create (or just describe) a Boss channel resource.

    When `new_channel` is True the channel is created on the Boss and the
    created resource is returned; otherwise an unsaved ChannelResource
    handle is returned.

    Fix: `sources` defaulted to a shared mutable list; use None as the
    sentinel and substitute a fresh list per call. (`type` shadows the
    builtin but is kept for caller compatibility.)
    """
    if sources is None:
        sources = []
    channel_resource = ChannelResource(chan_name, coll_name, exp_name, type=type,
                                       base_resolution=base_resolution, sources=sources, datatype=datatype)
    if new_channel:
        new_rsc = rmt.create_project(channel_resource)
        return new_rsc
    return channel_resource
def upload_to_boss(rmt, data, channel_resource, resolution=0):
    """Upload a z/y/x-ordered numpy volume to the Boss in 16-slice z-chunks."""
    z_axis = 0
    depth = data.shape[z_axis]
    x_extent = [0, data.shape[2]]
    y_extent = [0, data.shape[1]]
    for z_begin in range(0, depth, 16):
        z_end = min(z_begin + 16, depth)
        print(resolution, x_extent, y_extent, [z_begin, z_end])
        rmt.create_cutout(channel_resource, resolution,
                          x_extent, y_extent, [z_begin, z_end],
                          np.asarray(data[z_begin:z_end, :, :], order='C'))
def download_ara(rmt, resolution, type='average'):
    """Download an Allen Reference Atlas volume at *resolution* microns.

    Returns None (after printing a message) for unsupported resolutions.
    """
    if resolution not in (10, 25, 50, 100):
        print('Please provide a resolution that is among the following: 10, 25, 50, 100')
        return
    ara_collection = 'ara_2016'
    ara_experiment = 'sagittal_{}um'.format(resolution)
    ara_channel = '{}_{}um'.format(type, resolution)
    return download_image(rmt, ara_collection, ara_experiment, ara_channel,
                          ara_res=resolution)
def download_image(rmt, collection, experiment, channel, res=0, isotropic=True, ara_res=None):
    """Fetch a channel from the Boss and return it as a SimpleITK image.

    `ara_res` is accepted for caller compatibility but is not used here.
    """
    _, coord_rsc, chan_rsc = setup_channel_boss(rmt, collection, experiment, channel)
    return imgDownload_boss(rmt, chan_rsc, coord_rsc, resolution=res, isotropic=isotropic)
#def download_image(config_file, collection, experiment, channel, outdir, res=0, isotropic=False, full_extent=True):
# # conversion factor from mm to um
# um_to_mm = 1e-3
#
# # set up ndpull
# meta = ndpull.BossMeta(collection, experiment, channel, res=res)
# token, boss_url = ndpull.get_boss_config(config_file)
# rmt = ndpull.BossRemote(boss_url, token, meta)
# # get args
# args = ndpull.collect_input_args(collection, experiment, channel, config_file=config_file, outdir=outdir, res=res, iso=isotropic, full_extent=full_extent)
# result, rmt = ndpull.validate_args(args)
# # download slices
# img = ndpull.download_slices(result, rmt, return_numpy=True, threads=8)
# img_sitk = sitk.GetImageFromArray(img)
# coord_frame = rmt.get_coord_frame_metadata()
# scale_factor = 2.0 ** res
# vox_sizes = np.array([coord_frame['x_voxel_size'], coord_frame['y_voxel_size'], coord_frame['z_voxel_size']])
# vox_sizes *= scale_factor * um_to_mm
# img_sitk.SetSpacing(vox_sizes)
# return img_sitk
#
def main():
    """CLI entry point: register a Boss-hosted brain volume to the Allen
    Reference Atlas (ARA) and upload the transformed atlas labels back to
    the Boss as an annotation channel named 'atlas'.

    Pipeline: download image + atlas -> preprocess -> register (ndreg) ->
    apply affine + LDDMM displacement field to the 10um atlas labels ->
    reorient -> upload.
    """
    t_start_overall = time.time()
    parser = argparse.ArgumentParser(description='Register a brain in the BOSS and upload it back in a new experiment.')
    parser.add_argument('--collection', help='Name of collection to upload tif stack to', type=str)
    parser.add_argument('--experiment', help='Name of experiment to upload tif stack to', type=str)
    parser.add_argument('--channel', help='Name of channel to upload tif stack to. Default is new channel will be created unless otherwise specified. See --new_channel', type=str)
    parser.add_argument('--image_orientation', help='Orientation of brain image. 3-letter orientation of brain. For example can be PIR: Posterior, Inferior, Right.', type=str)
    parser.add_argument('--modality', help='The imaging modality the data were collected with. The options are either "colm" or "lavision"', type=str)
    parser.add_argument('--outdir', help='set the directory in which you want to save the intermediates. default is ./{collection}_{experiment}_{channel}_reg', type=str, default=None)
    parser.add_argument('--config', help='Path to configuration file with Boss API token. Default: ~/.intern/intern.cfg', default=os.path.expanduser('~/.intern/intern.cfg'))
    args = parser.parse_args()
    # mm to um conversion factor
    mm_to_um = 1000.0
    # download image
    rmt = BossRemote(cfg_file_or_dict=args.config)
    # resolution level from 0-6
    # COLM data is pulled anisotropically at level 5; anything else
    # (e.g. LaVision) isotropically at level 3.
    if args.modality.lower() == 'colm':
        resolution_image = 5
        image_isotropic = False
    else:
        resolution_image = 3
        image_isotropic = True
    # resolution in microns
    resolution_atlas = 50
    # ensure outdir is default value if None
    if args.outdir is None:
        outdir = '{}_{}_{}_reg/'.format(args.collection, args.experiment, args.channel)
        util.dir_make(outdir)
    else: outdir = args.outdir
    # downloading image
    print('downloading experiment: {}, channel: {}...'.format(args.experiment, args.channel))
    t1 = time.time()
    img = download_image(rmt, args.collection, args.experiment, args.channel, res=resolution_image, isotropic=image_isotropic)
    print("time to download image at res {} um: {} seconds".format(img.GetSpacing()[0] * mm_to_um, time.time()-t1))
    # download atlas
    print('downloading atlas...')
    t1 = time.time()
    atlas = download_ara(rmt, resolution_atlas, type='average')
    print("time to download atlas at {} um: {} seconds".format(resolution_atlas, time.time()-t1))
    print("preprocessing image")
    img_p = preprocessor.preprocess_brain(img, atlas.GetSpacing(), args.modality, args.image_orientation)
    img_p.SetOrigin((0.0,0.0,0.0))
    print("preprocessing done!")
    print("running registration")
    # Registration requires image and atlas to share origin/direction/spacing.
    assert(img_p.GetOrigin() == atlas.GetOrigin())
    assert(img_p.GetDirection() == atlas.GetDirection())
    assert(img_p.GetSpacing() == atlas.GetSpacing())
    atlas_registered = ndreg.register_brain(atlas, img_p, outdir=outdir)
    print("registration done")
    end_time = time.time()
    print("Overall time taken through all steps: {} seconds ({} minutes)".format(end_time - t_start_overall, (end_time - t_start_overall)/60.0))
    # print("uploading annotations to the BOSS")
    anno_channel = 'atlas'
    source_channel = args.channel
    ch_rsc_og = create_channel_resource(rmt, args.channel, args.collection, args.experiment, new_channel=False)
    # # set up ndpull
    # meta = ndpull.BossMeta(collection, experiment, channel)
    # token, boss_url = ndpull.get_boss_config(config_file)
    # rmt = ndpull.BossRemote(boss_url, token, meta)
    # # end set up ndpull
    print("loading atlas labels")
    # NOTE(review): assumes ./ara_annotation_10um.tif is present in the
    # working directory — confirm before running.
    anno_10 = tf.imread('./ara_annotation_10um.tif')
    anno_10 = sitk.GetImageFromArray(anno_10.astype('uint32'))
    anno_10.SetSpacing((0.01, 0.01, 0.01))
    # Transforms written to outdir by ndreg.register_brain above.
    trans = sitk.ReadTransform('{}/atlas_to_observed_affine.txt'.format(outdir))
    field = util.imgRead('{}/lddmm/field.vtk'.format(outdir))
    meta = get_xyz_extents(rmt, ch_rsc_og)
    spacing = np.array(meta[-1])/mm_to_um
    x_size = meta[0][1]
    y_size = meta[1][1]
    z_size = meta[2][1]
    size = (x_size, y_size, z_size)
    # need to reorient size to match atlas
    # i am hard coding the size assuming
    # the image is oriented LPS
    size_r = (y_size, z_size, x_size)
    print("applying affine transformation to atlas labels")
    img_affine = ndreg.imgApplyAffine(anno_10, trans, spacing=spacing.tolist(), useNearest=True)
    print(img_affine.GetSize())
    print(img_affine.GetSpacing())
    print("applying displacement field transformation to atlas labels")
    # useNearest keeps the integer label values intact during resampling.
    img_lddmm = ndreg.imgApplyField(img_affine, field, spacing=spacing.tolist(),
                                    size=size_r, useNearest=True)
    print(img_lddmm.GetSize())
    print(img_lddmm.GetSpacing())
    # reorient annotations to match image
    print("reorienting the labels to match the image")
    img_lddmm_r = preprocessor.imgReorient(img_lddmm, 'pir', args.image_orientation)
    # coll_reg = 'cell_detection'
    ch_rsc_anno = create_channel_resource(rmt, anno_channel, args.collection, args.experiment, sources=source_channel, datatype='uint64', type='annotation')
    print("uploading atlas labels to the BOSS")
    anno = sitk.GetArrayFromImage(img_lddmm_r)
    # Boss annotation channels require uint64 voxel data.
    if anno.dtype != 'uint64':
        anno = anno.astype('uint64')
    upload_to_boss(rmt, anno, ch_rsc_anno)
# Script entry point: run the full registration pipeline.
if __name__ == "__main__":
    main()
Updated registration script.
# register_brain.py
import numpy as np
import requests
import tifffile as tf
import sys
import os
import ndreg
from ndreg import preprocessor, util
import SimpleITK as sitk
import numpy as np
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import skimage
import argparse
import time
#from ndpull import ndpull
import configparser
from configparser import ConfigParser
# Shared SimpleITK constants for the registration script.
dimension = 3  # all volumes are treated as 3-D
vectorComponentType = sitk.sitkFloat32
vectorType = sitk.sitkVectorFloat32
affine = sitk.AffineTransform(dimension)
identityAffine = list(affine.GetParameters())
# First 9 parameters of an identity affine form the identity direction matrix.
identityDirection = identityAffine[0:9]
zeroOrigin = [0] * dimension
zeroIndex = [0] * dimension
# Boss datatype string -> SimpleITK pixel type.
ndToSitkDataTypes = {'uint8': sitk.sitkUInt8,
                     'uint16': sitk.sitkUInt16,
                     'uint32': sitk.sitkUInt32,
                     'float32': sitk.sitkFloat32,
                     'uint64': sitk.sitkUInt64}
# SimpleITK pixel type -> numpy dtype.
sitkToNpDataTypes = {sitk.sitkUInt8: np.uint8,
                     sitk.sitkUInt16: np.uint16,
                     sitk.sitkUInt32: np.uint32,
                     sitk.sitkInt8: np.int8,
                     sitk.sitkInt16: np.int16,
                     sitk.sitkInt32: np.int32,
                     sitk.sitkFloat32: np.float32,
                     sitk.sitkFloat64: np.float64,
                     }
# Boss Stuff:
def setup_experiment_boss(remote, collection, experiment):
    """
    Get experiment and coordinate frame information from the boss.

    Returns (experiment_resource, coordinate_frame_resource) on success.
    On an HTTP failure the error is printed and None is returned
    (unchanged behaviour from the original).
    """
    exp_setup = ExperimentResource(experiment, collection)
    try:
        exp_actual = remote.get_project(exp_setup)
        coord_setup = CoordinateFrameResource(exp_actual.coord_frame)
        coord_actual = remote.get_project(coord_setup)
        return (exp_setup, coord_actual)
    except requests.HTTPError as e:
        # Fix: `HTTPError` was referenced as a bare name (resolvable only if
        # the star import happened to export it) and `e.message` does not
        # exist on Python 3 exceptions. Use the explicit requests class and
        # print the exception itself.
        print(e)
def setup_channel_boss(
        remote,
        collection,
        experiment,
        channel,
        channel_type='image',
        datatype='uint16'):
    """Fetch a channel plus its experiment and coordinate frame from the Boss.

    Returns (experiment_resource, coordinate_frame, channel_resource) on
    success; on an HTTP failure the error is printed and None is returned
    (unchanged behaviour from the original).
    """
    (exp_setup, coord_actual) = setup_experiment_boss(
        remote, collection, experiment)
    chan_setup = ChannelResource(
        channel,
        collection,
        experiment,
        channel_type,
        datatype=datatype)
    try:
        chan_actual = remote.get_project(chan_setup)
        return (exp_setup, coord_actual, chan_actual)
    except requests.HTTPError as e:
        # Fix: bare `HTTPError` name and Python-2-only `e.message` (see
        # setup_experiment_boss).
        print(e)
# Note: The following functions assume an anisotropic dataset. This is generally a bad assumption. These
# functions are stopgaps until proper coordinate frame at resulution
# support exists in intern.
def get_xyz_extents(rmt, ch_rsc, res=0, iso=True):
    """Look up the x/y/z extents and voxel spacing of a channel at *res*.

    Queries the Boss downsample endpoint directly, since intern does not
    yet expose per-resolution coordinate frames.
    """
    endpoint = 'https://api.boss.neurodata.io/v1/' + \
        '/downsample/{}?iso={}'.format(ch_rsc.get_cutout_route(), iso)
    auth_header = {'Authorization': 'Token ' + rmt.token_project}
    payload = requests.get(endpoint, headers=auth_header).json()
    level = '{}'.format(res)
    extents = payload['extent'][level]
    ranges = [[0, extents[axis]] for axis in range(3)]
    return (ranges[0], ranges[1], ranges[2], payload['voxel_size'][level])
def get_offset_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] start offset of *coord_frame* scaled to *res*.

    x and y starts are always divided by 2**res; z is scaled only when the
    downsampling is isotropic.
    """
    scale = 2. ** res
    x_off = int(coord_frame.x_start / scale)
    y_off = int(coord_frame.y_start / scale)
    z_off = int(coord_frame.z_start / scale) if isotropic else coord_frame.z_start
    return [x_off, y_off, z_off]
def get_image_size_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] stop extent of *coord_frame* scaled to *res*.

    x and y stops are always divided by 2**res; z is scaled only when the
    downsampling is isotropic.
    """
    scale = 2. ** res
    x_stop = int(coord_frame.x_stop / scale)
    y_stop = int(coord_frame.y_stop / scale)
    z_stop = int(coord_frame.z_stop / scale) if isotropic else coord_frame.z_stop
    return [x_stop, y_stop, z_stop]
def imgDownload_boss(
        remote,
        channel_resource,
        coordinate_frame_resource,
        resolution=0,
        size=None,
        start=None,
        isotropic=False):
    """
    Download image with given token from given server at given resolution.
    If channel isn't specified the first channel is downloaded.

    `size`/`start` default to the full coordinate-frame extent/offset at
    `resolution`. Fix: the defaults were mutable lists (`[]`); `None` is the
    sentinel now, and explicitly passing `[]` still behaves the same.
    Returns a SimpleITK image with spacing in mm.
    """
    # TODO: Fix size and start parameters
    voxel_unit = coordinate_frame_resource.voxel_unit
    voxel_units = ('nanometers', 'micrometers', 'millimeters', 'centimeters')
    factor_divide = (1e-6, 1e-3, 1, 10)
    fact_div = factor_divide[voxel_units.index(voxel_unit)]
    spacingBoss = [
        coordinate_frame_resource.x_voxel_size,
        coordinate_frame_resource.y_voxel_size,
        coordinate_frame_resource.z_voxel_size]
    spacing = [x * fact_div for x in spacingBoss]  # Convert spacing to mm
    if isotropic:
        spacing = [x * 2**resolution for x in spacing]
    else:
        spacing[0] = spacing[0] * 2**resolution
        spacing[1] = spacing[1] * 2**resolution
        # z spacing unchanged since not isotropic
    if not size:
        size = get_image_size_boss(
            coordinate_frame_resource, resolution, isotropic)
    if not start:
        start = get_offset_boss(
            coordinate_frame_resource, resolution, isotropic)
    # dataType = metadata['channels'][channel]['datatype']
    dataType = channel_resource.datatype
    # Download all image data from specified channel
    array = remote.get_cutout(
        channel_resource, resolution, [
            start[0], size[0]], [
            start[1], size[1]], [
            start[2], size[2]])
    # Cast downloaded image to server's data type
    # convert numpy array to sitk image
    img = sitk.Cast(sitk.GetImageFromArray(array), ndToSitkDataTypes[dataType])
    img.SetDirection(identityDirection)
    img.SetSpacing(spacing)
    # Convert to 2D if only one slice
    img = util.imgCollapseDimension(img)
    return img
def get_offset_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] start offset of *coord_frame* at level *res*.

    x and y starts are divided by 2**res; z is scaled only when
    *isotropic* is true, otherwise it is returned untouched.
    """
    scale = 2.0 ** res
    offset = [int(coord_frame.x_start / scale),
              int(coord_frame.y_start / scale)]
    if isotropic:
        offset.append(int(coord_frame.z_start / scale))
    else:
        offset.append(coord_frame.z_start)
    return offset
def create_channel_resource(rmt, chan_name, coll_name, exp_name, type='image',
                            base_resolution=0, sources=None, datatype='uint16', new_channel=True):
    """Build a ChannelResource and, by default, create it on the BOSS.

    Parameters mirror the intern ChannelResource constructor; when
    ``new_channel`` is true the resource is registered via
    ``rmt.create_project`` and the created resource is returned,
    otherwise the unregistered resource object is returned.
    """
    # Mutable default replaced with None so the list is never shared
    # between calls; callers passing a list (or string source) see the
    # same behavior as before.
    if sources is None:
        sources = []
    channel_resource = ChannelResource(chan_name, coll_name, exp_name, type=type,
                                       base_resolution=base_resolution, sources=sources, datatype=datatype)
    if new_channel:
        new_rsc = rmt.create_project(channel_resource)
        return new_rsc
    return channel_resource
def upload_to_boss(rmt, data, channel_resource, resolution=0):
    """Upload a (z, y, x) array to the BOSS in 16-slice z chunks."""
    depth, height, width = data.shape[0], data.shape[1], data.shape[2]
    for z0 in range(0, depth, 16):
        # Final chunk may be shorter than 16 slices.
        z1 = min(z0 + 16, depth)
        print(resolution, [0, width], [0, height], [z0, z1])
        chunk = np.asarray(data[z0:z1, :, :], order='C')
        rmt.create_cutout(channel_resource, resolution,
                          [0, width], [0, height], [z0, z1],
                          chunk)
def download_ara(rmt, resolution, type='average'):
    """Download the Allen Reference Atlas hosted on the BOSS.

    *resolution* is in microns and must be one of the hosted levels;
    *type* selects the channel (e.g. 'average'). Prints a message and
    returns None for an unsupported resolution.
    """
    if resolution not in (10, 25, 50, 100):
        print('Please provide a resolution that is among the following: 10, 25, 50, 100')
        return
    collection = 'ara_2016'
    experiment = 'sagittal_{}um'.format(resolution)
    coordinate_frame = 'ara_2016_{}um'.format(resolution)  # built but unused, kept as-is
    channel = '{}_{}um'.format(type, resolution)
    return download_image(rmt, collection, experiment, channel, ara_res=resolution)
def download_image(rmt, collection, experiment, channel, res=0, isotropic=True, ara_res=None):
    """Fetch *channel* of *experiment* as a SimpleITK image at level *res*."""
    resources = setup_channel_boss(rmt, collection, experiment, channel)
    _, coord_resource, channel_resource = resources
    img = imgDownload_boss(rmt, channel_resource, coord_resource,
                           resolution=res, isotropic=isotropic)
    return img
#def download_image(config_file, collection, experiment, channel, outdir, res=0, isotropic=False, full_extent=True):
# # conversion factor from mm to um
# um_to_mm = 1e-3
#
# # set up ndpull
# meta = ndpull.BossMeta(collection, experiment, channel, res=res)
# token, boss_url = ndpull.get_boss_config(config_file)
# rmt = ndpull.BossRemote(boss_url, token, meta)
# # get args
# args = ndpull.collect_input_args(collection, experiment, channel, config_file=config_file, outdir=outdir, res=res, iso=isotropic, full_extent=full_extent)
# result, rmt = ndpull.validate_args(args)
# # download slices
# img = ndpull.download_slices(result, rmt, return_numpy=True, threads=8)
# img_sitk = sitk.GetImageFromArray(img)
# coord_frame = rmt.get_coord_frame_metadata()
# scale_factor = 2.0 ** res
# vox_sizes = np.array([coord_frame['x_voxel_size'], coord_frame['y_voxel_size'], coord_frame['z_voxel_size']])
# vox_sizes *= scale_factor * um_to_mm
# img_sitk.SetSpacing(vox_sizes)
# return img_sitk
#
def main():
    """CLI entry point: download a brain volume and the Allen atlas from
    the BOSS, apply previously computed registration transforms to the
    atlas labels, and upload them back as an annotation channel.

    NOTE(review): the preprocessing/registration steps are commented out
    below -- the affine and LDDMM field files are expected to already
    exist under *outdir*.
    """
    t_start_overall = time.time()
    parser = argparse.ArgumentParser(description='Register a brain in the BOSS and upload it back in a new experiment.')
    parser.add_argument('--collection', help='Name of collection to upload tif stack to', type=str)
    parser.add_argument('--experiment', help='Name of experiment to upload tif stack to', type=str)
    parser.add_argument('--channel', help='Name of channel to upload tif stack to. Default is new channel will be created unless otherwise specified. See --new_channel', type=str)
    parser.add_argument('--image_orientation', help='Orientation of brain image. 3-letter orientation of brain. For example can be PIR: Posterior, Inferior, Right.', type=str)
    parser.add_argument('--modality', help='The imaging modality the data were collected with. The options are either "colm" or "lavision"', type=str)
    parser.add_argument('--outdir', help='set the directory in which you want to save the intermediates. default is ./{collection}_{experiment}_{channel}_reg', type=str, default=None)
    parser.add_argument('--config', help='Path to configuration file with Boss API token. Default: ~/.intern/intern.cfg', default=os.path.expanduser('~/.intern/intern.cfg'))
    args = parser.parse_args()
    # mm to um conversion factor
    mm_to_um = 1000.0
    # download image
    rmt = BossRemote(cfg_file_or_dict=args.config)
    # resolution level from 0-6
    # NOTE(review): both branches use level 3; only isotropy differs.
    if args.modality.lower() == 'colm':
        resolution_image = 3
        image_isotropic = False
    else:
        resolution_image = 3
        image_isotropic = True
    # resolution in microns
    resolution_atlas = 50
    # ensure outdir is default value if None
    if args.outdir is None:
        outdir = '{}_{}_{}_reg/'.format(args.collection, args.experiment, args.channel)
        util.dir_make(outdir)
    else: outdir = args.outdir
    # downloading image
    print('downloading experiment: {}, channel: {}...'.format(args.experiment, args.channel))
    t1 = time.time()
    img = download_image(rmt, args.collection, args.experiment, args.channel, res=resolution_image, isotropic=image_isotropic)
    print("time to download image at res {} um: {} seconds".format(img.GetSpacing()[0] * mm_to_um, time.time()-t1))
    # download atlas
    print('downloading atlas...')
    t1 = time.time()
    atlas = download_ara(rmt, resolution_atlas, type='average')
    print("time to download atlas at {} um: {} seconds".format(resolution_atlas, time.time()-t1))
    # print("preprocessing image")
    # img_p = preprocessor.preprocess_brain(img, atlas.GetSpacing(), args.modality, args.image_orientation)
    # # z-axis param in correcting grid is hardcoded assuming z_axis = 2 (3rd axis given original image is IPL)
    ## if args.modality.lower() == 'colm': img_p = preprocessor.remove_grid_artifact(img_p, z_axis=2,)
    # img_p.SetOrigin((0.0,0.0,0.0))
    # print("preprocessing done!")
    # print("running registration")
    # assert(img_p.GetOrigin() == atlas.GetOrigin())
    # assert(img_p.GetDirection() == atlas.GetDirection())
    # assert(img_p.GetSpacing() == atlas.GetSpacing())
    #
    # atlas_registered = ndreg.register_brain(atlas, img_p, outdir=outdir)
    # print("registration done")
    #
    # end_time = time.time()
    #
    # print("Overall time taken through all steps: {} seconds ({} minutes)".format(end_time - t_start_overall, (end_time - t_start_overall)/60.0))
    # print("uploading annotations to the BOSS")
    anno_channel = 'atlas'
    source_channel = args.channel
    # Existing image channel; used only to read extents/spacing below.
    ch_rsc_og = create_channel_resource(rmt, args.channel, args.collection, args.experiment, new_channel=False)
    # # set up ndpull
    # meta = ndpull.BossMeta(collection, experiment, channel)
    # token, boss_url = ndpull.get_boss_config(config_file)
    # rmt = ndpull.BossRemote(boss_url, token, meta)
    # # end set up ndpull
    print("loading atlas labels")
    # Precomputed 10 um ARA annotation volume; spacing set in mm.
    anno_10 = tf.imread('./ara_annotation_10um.tif')
    anno_10 = sitk.GetImageFromArray(anno_10.astype('uint32'))
    anno_10.SetSpacing((0.01, 0.01, 0.01))
    # Transforms produced by an earlier registration run in *outdir*.
    trans = sitk.ReadTransform('{}/atlas_to_observed_affine.txt'.format(outdir))
    field = util.imgRead('{}/lddmm/field.vtk'.format(outdir))
    meta = get_xyz_extents(rmt, ch_rsc_og)
    # meta[-1] appears to be spacing in um, converted here to mm -- TODO confirm.
    spacing = np.array(meta[-1])/mm_to_um
    x_size = meta[0][1]
    y_size = meta[1][1]
    z_size = meta[2][1]
    size = (x_size, y_size, z_size)
    # need to reorient size to match atlas
    # i am hard coding the size assuming
    # the image is oriented LPS
    # size_r = (y_size, z_size, x_size)
    # this size is hardocoded assuming
    # input image is IPL (atlas is PIR)
    size_r = (y_size, x_size, z_size)
    print("applying affine transformation to atlas labels")
    # Nearest-neighbor keeps integer label values intact.
    img_affine = ndreg.imgApplyAffine(anno_10, trans, spacing=spacing.tolist(), useNearest=True)
    print(img_affine.GetSize())
    print(img_affine.GetSpacing())
    print("applying displacement field transformation to atlas labels")
    img_lddmm = ndreg.imgApplyField(img_affine, field, spacing=spacing.tolist(),
                                    size=size_r, useNearest=True)
    print(img_lddmm.GetSize())
    print(img_lddmm.GetSpacing())
    # reorient annotations to match image
    print("reorienting the labels to match the image")
    img_lddmm_r = preprocessor.imgReorient(img_lddmm, 'pir', args.image_orientation)
    # coll_reg = 'cell_detection'
    # NOTE(review): sources is passed a plain string here -- verify the
    # ChannelResource API accepts a string as well as a list.
    ch_rsc_anno = create_channel_resource(rmt, anno_channel, args.collection, args.experiment, sources=source_channel, datatype='uint64', type='annotation')
    print("uploading atlas labels to the BOSS")
    anno = sitk.GetArrayFromImage(img_lddmm_r)
    if anno.dtype != 'uint64':
        anno = anno.astype('uint64')
    upload_to_boss(rmt, anno, ch_rsc_anno)


if __name__ == "__main__":
    main()
|
from shapely.geometry import Point, box, Polygon
from toolz.dicttoolz import valmap
from functools import reduce
import math
class CottleWrapper:
    """Facade over Cottle exposing row-shaped summaries for export."""

    def __init__(self, stage):
        self.cottle = Cottle(stage)

    def row_for_stage(self):
        """Stage-level metrics: relatedness/dominance totals and groups."""
        c = self.cottle
        return [
            c.relatedness_all(),
            c.dominance_all(),
            c.relatedness_group(),
            c.dominance_group(),
        ]

    def row_for_element(self, element):
        """Per-element metrics: [relatedness, dominance] for *element*."""
        per_metric = (self.cottle.relatedness_each(),
                      self.cottle.dominance_each())
        return [metric[element] for metric in per_metric]
class Cottle:
    """Cottle circles-test metrics computed from the shapes on *stage*.

    Each element's shape is compared with every other shape to score
    per-element 'dominance' and 'relatedness'; a second pairwise pass
    computes the stage-level relatedness without counting a shared
    border twice.
    """

    def __init__(self, stage):
        extractor = ShapeExtractor()
        results = extractor.shapes_for(stage)  # {element: {'point': shape}}
        elements = stage.stage_elements()
        tolerance = 0.1  # 10% slack for area ratios / near-contact tests
        self.stage = stage
        self.elements = elements
        for e1 in elements:
            results[e1]['dominance'] = 0
            results[e1]['relatedness'] = 0
            p1 = results[e1]['point']
            for e2 in elements:
                if e1 == e2:
                    continue
                p2 = results[e2]['point']
                # +2 dominance when e1 is noticeably (>10%) larger than e2.
                if p1.area/p2.area > 1 + tolerance:
                    results[e1]['dominance'] += 2
                intersection = p1.intersection(p2)
                # Relatedness ladder: full containment (6) > substantial
                # overlap (4) > near contact relative to e1's area (2).
                if intersection.area == p1.area or intersection.area == p2.area:
                    results[e1]['relatedness'] += 6
                elif intersection.area/p1.area > tolerance:
                    results[e1]['relatedness'] += 4
                elif (p1.distance(p2)**2)/p1.area < tolerance:
                    results[e1]['relatedness'] += 2
        # don't count twice any border
        relatedness = 0
        for i in range(len(elements)):
            e1 = elements[i]
            p1 = results[e1]['point']
            for j in range(i+1, len(elements)):
                e2 = elements[j]
                p2 = results[e2]['point']
                intersection = p1.intersection(p2)
                if intersection.area == p1.area or intersection.area == p2.area:
                    relatedness += 6
                elif intersection.area/p1.area > tolerance:
                    relatedness += 4
                elif (p1.distance(p2)**2)/p1.area < tolerance:
                    relatedness += 2
        self._relatedness = relatedness
        self.results = results

    def relatedness_each(self):
        # atomistic, contiguous, integrated_projected
        # Raw per-element relatedness score keyed by element.
        return valmap(lambda x: x['relatedness'], self.results)

    def dominance_each(self):
        # 0 - abscence, 2 - secondary, 4 - dominance
        return valmap(lambda x: x['dominance'], self.results)

    def relatedness_all(self):
        # Stage-level relatedness from the deduplicated pairwise pass.
        return self._relatedness

    def dominance_all(self):
        # Sum of every element's dominance score.
        return sum(self.dominance_each().values())

    def relatedness_group(self):
        """Classify the stage; thresholds scale with the element count."""
        value = self.relatedness_all()
        if value < 2 * (len(self.elements) - 1):
            return "atomistic"
        elif value < 4 * (len(self.elements) - 1):
            return "contiguous"
        else:
            return "integrated_projected"

    def dominance_group(self):
        """Classify overall dominance: absence / secondary / dominance."""
        value = self.dominance_all()
        if value == 0:
            return "absence"
        elif value < len(self.elements) * (len(self.elements)-1):
            return "secondary"
        else:
            return "dominance"
class ShapeExtractor:
    """Builds a geometry per stage element.

    Dispatched through stage.visit(self): one case_* method per stage
    layout, each returning {element: {'point': shape}}. y coordinates
    are flipped into a bottom-left origin on a 500-unit-high canvas.
    """

    def shapes_for(self, stage):
        self.elements = stage.stage_elements()
        return stage.visit(self)

    def _collect(self, stage, make_shape):
        # Shared visitor body: one shape per element from its data dict.
        shapes = {}
        for name in self.elements:
            shapes[name] = {'point': make_shape(stage.element_data(name))}
        return shapes

    def case_present_past_future(self, stage):
        # Circles centered on each element with a per-element radius.
        return self._collect(
            stage,
            lambda d: Point(d['center_x'], 500 - d['center_y']).buffer(d['radius']))

    def case_seasons_of_year(self, stage):
        # Axis-aligned rectangles sized per element.
        return self._collect(
            stage,
            lambda d: box(d['center_x'] - d['size_x']/2,
                          500 - d['center_y'] - d['size_y']/2,
                          d['center_x'] + d['size_x']/2,
                          500 - d['center_y'] + d['size_y']/2))

    def case_days_of_week(self, stage):
        # Rectangles with a fixed 50-unit width, variable height.
        return self._collect(
            stage,
            lambda d: box(d['center_x'] - 50/2,
                          500 - d['center_y'] - d['size_y']/2,
                          d['center_x'] + 50/2,
                          500 - d['center_y'] + d['size_y']/2))

    def case_parts_of_day(self, stage):
        # Circular sectors on a fixed radius-200 circle.
        return self._collect(
            stage,
            lambda d: CircularAdapter(d['rotation'], d['size'], 200))

    @staticmethod
    def intersection(one, two):
        # Area of the overlap between two shapes.
        return one.intersection(two).area

    @staticmethod
    def distance(one, two):
        # Minimum distance between two shapes.
        return one.distance(two)
class CircularAdapter:
    """Circular sector (wedge) exposing the shape API Cottle relies on:
    area, intersection, union, distance.

    rotation -- center angle of the sector in degrees.
    size -- angular width of the sector in degrees.
    radius -- sector radius.
    """

    def __init__(self, rotation, size, radius):
        self._rotation = rotation
        self._size = size
        self._radius = radius
        # Approximate the arc with at least 2 segments, ~16 per full turn.
        segments = max(2, math.ceil(self._size * 16 / 360))
        start = math.radians(self._rotation - self._size/2)
        sweep = math.radians(self._size)
        vertices = [(0, 0)]
        for step in range(segments + 1):
            vertices.append((
                math.cos(start + step/segments * sweep) * self._radius,
                math.sin(start + step/segments * sweep) * self._radius))
        self._shape = Polygon(vertices)

    @property
    def area(self):
        return self._shape.area

    def intersection(self, other):
        return CircularWrapper(self._shape.intersection(other._shape))

    def union(self, other):
        return CircularWrapper(self._shape.union(other._shape))

    def distance(self, other):
        # Angular gap (degrees) between the sectors, measured the short
        # way around, minus both half-widths; clamped at 0 on overlap.
        clockwise = (self._rotation - other._rotation) % 360
        counter = (other._rotation - self._rotation) % 360
        gap = min(clockwise, counter) - (self._size + other._size) / 2
        return max(0, gap)
class CircularWrapper:
    """Wraps a raw geometry so intersection/union results keep the same
    area/intersection/union interface as CircularAdapter."""

    def __init__(self, shape):
        self._shape = shape

    @property
    def area(self):
        return self._shape.area

    def intersection(self, other):
        return CircularWrapper(self._shape.intersection(other._shape))

    def union(self, other):
        return CircularWrapper(self._shape.union(other._shape))
Make Cottle metric linear to the number of figures
from shapely.geometry import Point, box, Polygon
from toolz.dicttoolz import valmap
from functools import reduce
import math
class CottleWrapper:
    """Adapts a Cottle instance to flat rows for tabular output."""

    def __init__(self, stage):
        self.cottle = Cottle(stage)

    def row_for_stage(self):
        """One row of stage-wide values and group labels."""
        return [self.cottle.relatedness_all(),
                self.cottle.dominance_all(),
                self.cottle.relatedness_group(),
                self.cottle.dominance_group()]

    def row_for_element(self, element):
        """One row of per-element values: relatedness then dominance."""
        relatedness = self.cottle.relatedness_each()[element]
        dominance = self.cottle.dominance_each()[element]
        return [relatedness, dominance]
class Cottle:
    """Cottle circles-test metrics, normalized so values are comparable
    across stages with different numbers of elements.

    Raw per-element 'dominance'/'relatedness' scores are accumulated by
    comparing each shape against every other; a second pairwise pass
    totals relatedness without counting a shared border twice.
    """

    def __init__(self, stage):
        extractor = ShapeExtractor()
        results = extractor.shapes_for(stage)  # {element: {'point': shape}}
        elements = stage.stage_elements()
        tolerance = 0.1  # 10% slack for area ratios / near-contact tests
        self.stage = stage
        self.elements = elements
        for e1 in elements:
            results[e1]['dominance'] = 0
            results[e1]['relatedness'] = 0
            p1 = results[e1]['point']
            for e2 in elements:
                if e1 == e2:
                    continue
                p2 = results[e2]['point']
                # +2 dominance when e1 is noticeably (>10%) larger than e2.
                if p1.area/p2.area > 1 + tolerance:
                    results[e1]['dominance'] += 2
                intersection = p1.intersection(p2)
                # Relatedness ladder: full containment (6) > substantial
                # overlap (4) > near contact relative to e1's area (2).
                if intersection.area == p1.area or intersection.area == p2.area:
                    results[e1]['relatedness'] += 6
                elif intersection.area/p1.area > tolerance:
                    results[e1]['relatedness'] += 4
                elif (p1.distance(p2)**2)/p1.area < tolerance:
                    results[e1]['relatedness'] += 2
        # don't count twice any border
        relatedness = 0
        for i in range(len(elements)):
            e1 = elements[i]
            p1 = results[e1]['point']
            for j in range(i+1, len(elements)):
                e2 = elements[j]
                p2 = results[e2]['point']
                intersection = p1.intersection(p2)
                if intersection.area == p1.area or intersection.area == p2.area:
                    relatedness += 6
                elif intersection.area/p1.area > tolerance:
                    relatedness += 4
                elif (p1.distance(p2)**2)/p1.area < tolerance:
                    relatedness += 2
        self._relatedness = relatedness
        # Raw dominance total cached for the normalized accessors below.
        self._dominance = sum(x['dominance'] for x in results.values())
        self.results = results

    def relatedness_each(self):
        # atomistic, contiguous, integrated_projected
        # Normalized by the element's maximum possible score:
        # containment (6) against each of the other n-1 elements.
        # NOTE(review): assumes Python 3 true division.
        norm_factor = 6 * (len(self.elements) - 1)
        return valmap(lambda x: x['relatedness'] / norm_factor, self.results)

    def dominance_each(self):
        # 0 - abscence, 2 - secondary, 4 - dominance
        # Normalized by the maximum: +2 against each of the other n-1.
        norm_factor = 2 * (len(self.elements) - 1)
        return valmap(lambda x: x['dominance'] / norm_factor, self.results)

    def relatedness_all(self):
        # Pairwise total normalized by its maximum (6 per pair over
        # n*(n-1)/2 pairs); sqrt compresses the 0..1 scale.
        norm_factor = 6 * len(self.elements) * (len(self.elements) - 1) / 2
        return math.sqrt(self._relatedness / norm_factor)

    def dominance_all(self):
        norm_factor = 2 * len(self.elements) * (len(self.elements) - 1) / 2
        return math.sqrt(self._dominance / norm_factor)

    def relatedness_group(self):
        #value = self.relatedness_all()
        # Linear normalization against 6*(n-1); thirds split the classes.
        value = self._relatedness / (6 * (len(self.elements) - 1))
        if value < 1/3:
            return "atomistic"
        elif value < 2/3:
            return "contiguous"
        else:
            return "integrated_projected"

    def dominance_group(self):
        #value = self.dominance_all()
        value = self._dominance / (2 * (len(self.elements) - 1))
        if value < 1/3:
            return "absence"
        elif value < 2/3:
            return "secondary"
        else:
            return "dominance"
class ShapeExtractor:
    """Turns stage element data into geometries.

    stage.visit(self) dispatches to one case_* method per stage layout;
    each returns {element: {'point': shape}}. y is flipped into a
    bottom-left origin on a 500-unit-high canvas.
    """

    def shapes_for(self, stage):
        self.elements = stage.stage_elements()
        return stage.visit(self)

    def case_present_past_future(self, stage):
        # Per-element circles: center point buffered by the radius.
        def circle(d):
            return Point(d['center_x'], 500 - d['center_y']).buffer(d['radius'])
        return {e: {'point': circle(stage.element_data(e))}
                for e in self.elements}

    def case_seasons_of_year(self, stage):
        # Per-element rectangles sized by size_x / size_y.
        def rect(d):
            cx, cy = d['center_x'], 500 - d['center_y']
            half_w, half_h = d['size_x']/2, d['size_y']/2
            return box(cx - half_w, cy - half_h, cx + half_w, cy + half_h)
        return {e: {'point': rect(stage.element_data(e))}
                for e in self.elements}

    def case_days_of_week(self, stage):
        # Fixed 50-unit-wide rectangles, variable height.
        def rect(d):
            cx, cy = d['center_x'], 500 - d['center_y']
            half_w, half_h = 50/2, d['size_y']/2
            return box(cx - half_w, cy - half_h, cx + half_w, cy + half_h)
        return {e: {'point': rect(stage.element_data(e))}
                for e in self.elements}

    def case_parts_of_day(self, stage):
        # Circular sectors on a fixed radius-200 circle.
        def sector(d):
            return CircularAdapter(d['rotation'], d['size'], 200)
        return {e: {'point': sector(stage.element_data(e))}
                for e in self.elements}

    @staticmethod
    def intersection(one, two):
        # Area of overlap between two shapes.
        return one.intersection(two).area

    @staticmethod
    def distance(one, two):
        # Minimum distance between two shapes.
        return one.distance(two)
class CircularAdapter:
    """A wedge of a circle exposing area/intersection/union/distance.

    rotation -- center angle in degrees; size -- angular width in
    degrees; radius -- sector radius.
    """

    def __init__(self, rotation, size, radius):
        self._rotation = rotation
        self._size = size
        self._radius = radius
        # Arc approximated by at least 2 segments, ~16 per full circle.
        segment_count = max(2, math.ceil(self._size * 16 / 360))
        first_angle = math.radians(self._rotation - self._size/2)
        arc = math.radians(self._size)
        rim = [
            (math.cos(first_angle + k/segment_count * arc) * self._radius,
             math.sin(first_angle + k/segment_count * arc) * self._radius)
            for k in range(segment_count + 1)
        ]
        self._shape = Polygon([(0, 0)] + rim)

    @property
    def area(self):
        return self._shape.area

    def intersection(self, other):
        return CircularWrapper(self._shape.intersection(other._shape))

    def union(self, other):
        return CircularWrapper(self._shape.union(other._shape))

    def distance(self, other):
        # Shortest angular gap between the sectors minus both
        # half-widths, never negative (overlap -> 0).
        forward = (self._rotation - other._rotation) % 360
        backward = (other._rotation - self._rotation) % 360
        gap = min(forward, backward) - (self._size + other._size) / 2
        return max(0, gap)
class CircularWrapper:
    """Keeps derived geometries (results of intersection/union) behind
    the same minimal shape interface used by CircularAdapter."""

    def __init__(self, shape):
        self._shape = shape

    @property
    def area(self):
        # Delegate straight to the wrapped geometry.
        return self._shape.area

    def intersection(self, other):
        combined = self._shape.intersection(other._shape)
        return CircularWrapper(combined)

    def union(self, other):
        combined = self._shape.union(other._shape)
        return CircularWrapper(combined)
|
import pathlib
import diffhtml
import flask
from flask import request
from markupsafe import Markup
# Flask application; the template folder is resolved relative to this
# file so the demo works from any working directory.
app = flask.Flask(
    'Diff-HTML Demo',
    template_folder=pathlib.Path(__file__).parent.joinpath('templates'),
)

# Default "before" text pre-filling the diff form.
DEFAULT_A = """
I am the very model of a modern Major-General,
I've information vegetable, animal, and mineral,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical.
"""

# Default "after" text pre-filling the diff form.
DEFAULT_B = """
I am the very model of a cartoon individual,
My animation's comical, unusual, and whimsical,
I'm quite adept at funny gags, comedic theory I have read,
From wicked puns and stupid jokes to anvils that drop on your head.
"""
@app.route('/ndiff', methods=['GET', 'POST'])
def ndiff():
    """Render the diff form; on POST, show an HTML ndiff of the inputs."""
    text_a = request.form.get('a', DEFAULT_A)
    text_b = request.form.get('b', DEFAULT_B)
    try:
        cutoff = float(request.form.get('cutoff', 0.6))
    except ValueError:
        cutoff = 0.6  # fall back to the default on unparsable input
    result = None
    if request.method == 'POST':
        diff_lines = diffhtml.ndiff(
            text_a.splitlines(), text_b.splitlines(), cutoff=cutoff,
        )
        result = Markup('<br>').join(diff_lines)
    context = {
        'result': result,
        'cutoff': cutoff,
        'input': {'a': text_a, 'b': text_b},
    }
    return flask.render_template('ndiff.html', **context)
@app.route('/')
def home():
    """Redirect the bare root URL to the ndiff demo page."""
    target = flask.url_for('ndiff')
    return flask.redirect(target)
# Start Flask's built-in development server when run as a script.
if __name__ == '__main__':
    app.run()
The correct™ lines
import pathlib
import diffhtml
import flask
from flask import request
from markupsafe import Markup
# Flask application; templates live next to this file so the demo can
# be launched from any working directory.
app = flask.Flask(
    'Diff-HTML Demo',
    template_folder=pathlib.Path(__file__).parent.joinpath('templates'),
)

# Default "before" text shown in the diff form.
DEFAULT_A = """
I am the very model of a modern Major-General,
I've information vegetable, animal, and mineral,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical.
"""

# Default "after" text shown in the diff form.
DEFAULT_B = """
I am the very model of an anime individual,
I've information on comical, unusual, and moe girl,
I know the girls from galgames, and I quote the lines all chuunibyo,
From Neo Eva to SAO, down to the very last detail.
"""
@app.route('/ndiff', methods=['GET', 'POST'])
def ndiff():
    """Show the diff form; on POST render an HTML ndiff of both texts."""
    form = request.form
    a = form.get('a', DEFAULT_A)
    b = form.get('b', DEFAULT_B)
    raw_cutoff = form.get('cutoff', 0.6)
    try:
        cutoff = float(raw_cutoff)
    except ValueError:
        cutoff = 0.6  # ignore unparsable cutoff values
    context = {'result': None, 'cutoff': cutoff, 'input': {'a': a, 'b': b}}
    if request.method == 'POST':
        pieces = diffhtml.ndiff(a.splitlines(), b.splitlines(), cutoff=cutoff)
        context['result'] = Markup('<br>').join(pieces)
    return flask.render_template('ndiff.html', **context)
@app.route('/')
def home():
    """Send visitors from the root URL to the ndiff page."""
    destination = flask.url_for('ndiff')
    return flask.redirect(destination)
# Launch the development server when executed directly.
if __name__ == '__main__':
    app.run()
|
# $Id$
"""Border Gateway Protocol."""
import dpkt
import struct
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5

# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15

# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2

# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4

# Reserved Communities Types
# NOTE: 0x........L values use Python 2 long-literal syntax.
NO_EXPORT = 0xffffff01L
NO_ADVERTISE = 0xffffff02L
NO_EXPORT_SUBCONFED = 0xffffff03L
NO_PEER = 0xffffff04L

# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2

# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3

# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2

# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2

# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6

# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3

# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7

# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11

# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
    """BGP-4 message (RFC 4271): common header plus a type-specific body.

    NOTE(review): RFC 4271 specifies the header marker as sixteen 0xff
    octets; the default here is sixteen 0x01 octets -- confirm before
    relying on the default when crafting packets.
    """
    __hdr__ = (
        ('marker', '16s', '\x01' * 16),
        ('len', 'H', 0),
        ('type', 'B', OPEN)
    )

    def unpack(self, buf):
        """Parse the common header, trim the payload to the declared
        length, and dispatch to the per-type message class."""
        dpkt.Packet.unpack(self, buf)
        self.data = self.data[:self.len - self.__hdr_len__]
        if self.type == OPEN:
            self.data = self.open = self.Open(self.data)
        elif self.type == UPDATE:
            self.data = self.update = self.Update(self.data)
        elif self.type == NOTIFICATION:
            # Fix: store under the correctly spelled attribute while
            # keeping the historical 'notifiation' misspelling so any
            # existing callers continue to work.
            self.data = self.notification = self.Notification(self.data)
            self.notifiation = self.notification
        elif self.type == KEEPALIVE:
            self.data = self.keepalive = self.Keepalive(self.data)
        elif self.type == ROUTE_REFRESH:
            self.data = self.route_refresh = self.RouteRefresh(self.data)
    class Open(dpkt.Packet):
        """OPEN message: version, AS number, hold time, BGP identifier,
        then param_len bytes of optional parameters."""
        __hdr__ = (
            ('v', 'B', 4),
            ('as', 'H', 0),
            ('holdtime', 'H', 0),
            ('identifier', 'I', 0),
            ('param_len', 'B', 0)
        )

        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            l = []
            plen = self.param_len
            # Consume optional parameters until the declared byte count
            # is used up.
            while plen > 0:
                param = self.Parameter(self.data)
                self.data = self.data[len(param):]
                plen -= len(param)
                l.append(param)
            self.data = self.parameters = l

        def __len__(self):
            return self.__hdr_len__ + \
                sum(map(len, self.parameters))

        def __str__(self):
            # Python 2 str serialization: header plus parameters.
            return self.pack_hdr() + \
                ''.join(map(str, self.parameters))

        class Parameter(dpkt.Packet):
            """Optional parameter TLV; the value is parsed as
            Authentication or Capability depending on its type."""
            __hdr__ = (
                ('type', 'B', 0),
                ('len', 'B', 0)
            )

            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                self.data = self.data[:self.len]
                if self.type == AUTHENTICATION:
                    self.data = self.authentication = self.Authentication(self.data)
                elif self.type == CAPABILITY:
                    self.data = self.capability = self.Capability(self.data)

            class Authentication(dpkt.Packet):
                # Deprecated authentication parameter.
                __hdr__ = (
                    ('code', 'B', 0),
                )

            class Capability(dpkt.Packet):
                """Capability TLV (RFC 3392): code, length, value."""
                __hdr__ = (
                    ('code', 'B', 0),
                    ('len', 'B', 0)
                )

                def unpack(self, buf):
                    dpkt.Packet.unpack(self, buf)
                    self.data = self.data[:self.len]
    class Update(dpkt.Packet):
        """UPDATE message: withdrawn routes, path attributes, announced
        routes; the first two sections carry 2-byte *byte* counts."""

        def unpack(self, buf):
            self.data = buf
            # Withdrawn Routes
            wlen = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
            l = []
            while wlen > 0:
                route = RouteIPV4(self.data)
                self.data = self.data[len(route):]
                wlen -= len(route)
                l.append(route)
            self.withdrawn = l
            # Path Attributes
            plen = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
            l = []
            while plen > 0:
                attr = self.Attribute(self.data)
                self.data = self.data[len(attr):]
                plen -= len(attr)
                l.append(attr)
            self.attributes = l
            # Announced Routes: everything remaining in the message.
            l = []
            while self.data:
                route = RouteIPV4(self.data)
                self.data = self.data[len(route):]
                l.append(route)
            self.announced = l

        def __len__(self):
            # 2-byte withdrawn length + routes, 2-byte attribute length
            # + attributes, then the announced routes.
            return 2 + sum(map(len, self.withdrawn)) + \
                2 + sum(map(len, self.attributes)) + \
                sum(map(len, self.announced))

        def __str__(self):
            return struct.pack('>H', sum(map(len, self.withdrawn))) + \
                ''.join(map(str, self.withdrawn)) + \
                struct.pack('>H', sum(map(len, self.attributes))) + \
                ''.join(map(str, self.attributes)) + \
                ''.join(map(str, self.announced))
        class Attribute(dpkt.Packet):
            """Path attribute: flags byte, type byte, then a 1- or
            2-byte length depending on the extended-length flag."""
            __hdr__ = (
                ('flags', 'B', 0),
                ('type', 'B', 0)
            )

            # Flag-bit accessors: optional (bit 7), transitive (bit 6),
            # partial (bit 5), extended_length (bit 4).
            def _get_o(self):
                return (self.flags >> 7) & 0x1

            def _set_o(self, o):
                self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
            optional = property(_get_o, _set_o)

            def _get_t(self):
                return (self.flags >> 6) & 0x1

            def _set_t(self, t):
                self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
            transitive = property(_get_t, _set_t)

            def _get_p(self):
                return (self.flags >> 5) & 0x1

            def _set_p(self, p):
                self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
            partial = property(_get_p, _set_p)

            def _get_e(self):
                return (self.flags >> 4) & 0x1

            def _set_e(self, e):
                self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
            extended_length = property(_get_e, _set_e)

            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                # The length field is 2 bytes when extended_length is set.
                if self.extended_length:
                    self.len = struct.unpack('>H', self.data[:2])[0]
                    self.data = self.data[2:]
                else:
                    self.len = struct.unpack('B', self.data[:1])[0]
                    self.data = self.data[1:]
                self.data = self.data[:self.len]
                # Dispatch the value to the per-type attribute class.
                if self.type == ORIGIN:
                    self.data = self.origin = self.Origin(self.data)
                elif self.type == AS_PATH:
                    self.data = self.as_path = self.ASPath(self.data)
                elif self.type == NEXT_HOP:
                    self.data = self.next_hop = self.NextHop(self.data)
                elif self.type == MULTI_EXIT_DISC:
                    self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
                elif self.type == LOCAL_PREF:
                    self.data = self.local_pref = self.LocalPref(self.data)
                elif self.type == ATOMIC_AGGREGATE:
                    self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
                elif self.type == AGGREGATOR:
                    self.data = self.aggregator = self.Aggregator(self.data)
                elif self.type == COMMUNITIES:
                    self.data = self.communities = self.Communities(self.data)
                elif self.type == ORIGINATOR_ID:
                    self.data = self.originator_id = self.OriginatorID(self.data)
                elif self.type == CLUSTER_LIST:
                    self.data = self.cluster_list = self.ClusterList(self.data)
                elif self.type == MP_REACH_NLRI:
                    self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
                elif self.type == MP_UNREACH_NLRI:
                    self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)

            def __len__(self):
                # Header + length field (1 or 2 bytes) + value.
                if self.extended_length:
                    attr_len = 2
                else:
                    attr_len = 1
                return self.__hdr_len__ + \
                    attr_len + \
                    len(self.data)

            def __str__(self):
                if self.extended_length:
                    attr_len_str = struct.pack('>H', self.len)
                else:
                    attr_len_str = struct.pack('B', self.len)
                return self.pack_hdr() + \
                    attr_len_str + \
                    str(self.data)
            class Origin(dpkt.Packet):
                # ORIGIN: how the route entered BGP (IGP/EGP/INCOMPLETE).
                __hdr__ = (
                    ('type', 'B', ORIGIN_IGP),
                )

            class ASPath(dpkt.Packet):
                """AS_PATH: a sequence of AS path segments."""

                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        seg = self.ASPathSegment(self.data)
                        self.data = self.data[len(seg):]
                        l.append(seg)
                    self.data = self.segments = l

                def __len__(self):
                    return sum(map(len, self.data))

                def __str__(self):
                    return ''.join(map(str, self.data))

                class ASPathSegment(dpkt.Packet):
                    """One AS_SET/AS_SEQUENCE segment; len counts 2-byte
                    AS numbers, not bytes."""
                    __hdr__ = (
                        ('type', 'B', 0),
                        ('len', 'B', 0)
                    )

                    def unpack(self, buf):
                        dpkt.Packet.unpack(self, buf)
                        l = []
                        for i in range(self.len):
                            AS = struct.unpack('>H', self.data[:2])[0]
                            self.data = self.data[2:]
                            l.append(AS)
                        self.data = self.path = l

                    def __len__(self):
                        return self.__hdr_len__ + \
                            2 * len(self.path)

                    def __str__(self):
                        as_str = ''
                        for AS in self.path:
                            as_str += struct.pack('>H', AS)
                        return self.pack_hdr() + \
                            as_str

            class NextHop(dpkt.Packet):
                # NEXT_HOP: IPv4 address as a 32-bit integer.
                __hdr__ = (
                    ('ip', 'I', 0),
                )

            class MultiExitDisc(dpkt.Packet):
                # MULTI_EXIT_DISC metric.
                __hdr__ = (
                    ('value', 'I', 0),
                )

            class LocalPref(dpkt.Packet):
                # LOCAL_PREF value.
                __hdr__ = (
                    ('value', 'I', 0),
                )

            class AtomicAggregate(dpkt.Packet):
                # Zero-length attribute: its presence alone is the signal.
                def unpack(self, buf):
                    pass

                def __len__(self):
                    return 0

                def __str__(self):
                    return ''

            class Aggregator(dpkt.Packet):
                # AGGREGATOR: AS number and IPv4 address of the aggregator.
                __hdr__ = (
                    ('as', 'H', 0),
                    ('ip', 'I', 0)
                )
            class Communities(dpkt.Packet):
                """COMMUNITIES (RFC 1997): list of 32-bit values; values
                in the reserved ranges get their own wrapper class."""

                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        val = struct.unpack('>I', self.data[:4])[0]
                        # 0x0000xxxx and 0xffffxxxx are reserved ranges.
                        if (val >= 0x00000000L and val <= 0x0000ffffL) or \
                           (val >= 0xffff0000L and val <= 0xffffffffL):
                            comm = self.ReservedCommunity(self.data[:4])
                        else:
                            comm = self.Community(self.data[:4])
                        self.data = self.data[len(comm):]
                        l.append(comm)
                    self.data = self.list = l

                def __len__(self):
                    return sum(map(len, self.data))

                def __str__(self):
                    return ''.join(map(str, self.data))

                class Community(dpkt.Packet):
                    # Ordinary community: AS number plus local value.
                    __hdr__ = (
                        ('as', 'H', 0),
                        ('value', 'H', 0)
                    )

                class ReservedCommunity(dpkt.Packet):
                    # Well-known/reserved community kept as a raw 32-bit value.
                    __hdr__ = (
                        ('value', 'I', 0),
                    )

            class OriginatorID(dpkt.Packet):
                # ORIGINATOR_ID (route reflection, RFC 4456).
                __hdr__ = (
                    ('value', 'I', 0),
                )

            class ClusterList(dpkt.Packet):
                """CLUSTER_LIST: 32-bit cluster IDs (route reflection)."""

                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        id = struct.unpack('>I', self.data[:4])[0]
                        self.data = self.data[4:]
                        l.append(id)
                    self.data = self.list = l

                def __len__(self):
                    return 4 * len(self.list)

                def __str__(self):
                    cluster_str = ''
                    for val in self.list:
                        cluster_str += struct.pack('>I', val)
                    return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.next_hop = self.data[:nlen]
self.data = self.data[nlen:]
# SNPAs
l = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l.append(snpa)
self.snpas = l
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Announced Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.announced = l
def __len__(self):
return self.__hdr_len__ + \
1 + len(self.next_hop) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __str__(self):
return self.pack_hdr() + \
struct.pack('B', len(self.next_hop)) + \
str(self.next_hop) + \
struct.pack('B', len(self.snpas)) + \
''.join(map(str, self.snpas)) + \
''.join(map(str, self.announced))
class SNPA(dpkt.Packet):
    """Subnetwork Point of Attachment entry: a length-prefixed opaque value.

    Fix: the class previously did not inherit from dpkt.Packet, so its
    __hdr__ tuple was never processed by the dpkt metaclass and calling
    dpkt.Packet.unpack() on an instance failed.
    """
    __hdr__ = (
        ('len', 'B', 0),
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # 'len' counts semi-octets; keep (len + 1) / 2 whole bytes.
        self.data = self.data[:(self.len + 1) / 2]
class MPUnreachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Withdrawn Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.withdrawn = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.data))
def __str__(self):
return self.pack_hdr() + \
''.join(map(str, self.data))
class Notification(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('subcode', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.error = self.data
class Keepalive(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class RouteRefresh(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('rsvd', 'B', 0),
('safi', 'B', SAFI_UNICAST)
)
class RouteGeneric(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.prefix = self.data[:(self.len + 7) / 8]
class RouteIPV4(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) / 8]
tmp += (4 - len(tmp)) * '\x00'
self.data = self.prefix = tmp
def __len__(self):
return self.__hdr_len__ + \
(self.len + 7) / 8
def __str__(self):
return self.pack_hdr() + \
self.data[:(self.len + 7) / 8]
class RouteIPV6(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) / 8]
tmp += (16 - len(tmp)) * '\x00'
self.data = self.prefix = tmp
def __len__(self):
return self.__hdr_len__ + \
(self.len + 7) / 8
def __str__(self):
return self.pack_hdr() + \
self.data[:(self.len + 7) / 8]
if __name__ == '__main__':
import unittest
class BGPTestCase(unittest.TestCase):
def testPack(self):
b1 = BGP(self.bgp1)
self.failUnless(self.bgp1 == str(b1))
b2 = BGP(self.bgp2)
self.failUnless(self.bgp2 == str(b2))
b3 = BGP(self.bgp3)
self.failUnless(self.bgp3 == str(b3))
b4 = BGP(self.bgp4)
self.failUnless(self.bgp4 == str(b4))
def testUnpack(self):
b1 = BGP(self.bgp1)
self.failUnless(b1.len == 19)
self.failUnless(b1.type == KEEPALIVE)
self.failUnless(b1.keepalive is not None)
b2 = BGP(self.bgp2)
self.failUnless(b2.type == UPDATE)
self.failUnless(len(b2.update.withdrawn) == 0)
self.failUnless(len(b2.update.announced) == 1)
self.failUnless(len(b2.update.attributes) == 9)
a = b2.update.attributes[1]
self.failUnless(a.type == AS_PATH)
self.failUnless(a.len == 10)
self.failUnless(len(a.as_path.segments) == 2)
s = a.as_path.segments[0]
self.failUnless(s.type == AS_SET)
self.failUnless(s.len == 2)
self.failUnless(len(s.path) == 2)
self.failUnless(s.path[0] == 500)
a = b2.update.attributes[6]
self.failUnless(a.type == COMMUNITIES)
self.failUnless(a.len == 12)
self.failUnless(len(a.communities.list) == 3)
c = a.communities.list[0]
self.failUnless(c.as == 65215)
self.failUnless(c.value == 1)
r = b2.update.announced[0]
self.failUnless(r.len == 22)
self.failUnless(r.prefix == '\xc0\xa8\x04\x00')
b3 = BGP(self.bgp3)
self.failUnless(b3.type == UPDATE)
self.failUnless(len(b3.update.withdrawn) == 0)
self.failUnless(len(b3.update.announced) == 0)
self.failUnless(len(b3.update.attributes) == 6)
a = b3.update.attributes[0]
self.failUnless(a.optional == False)
self.failUnless(a.transitive == True)
self.failUnless(a.partial == False)
self.failUnless(a.extended_length == False)
self.failUnless(a.type == ORIGIN)
self.failUnless(a.len == 1)
o = a.origin
self.failUnless(o.type == ORIGIN_IGP)
a = b3.update.attributes[5]
self.failUnless(a.optional == True)
self.failUnless(a.transitive == False)
self.failUnless(a.partial == False)
self.failUnless(a.extended_length == True)
self.failUnless(a.type == MP_REACH_NLRI)
self.failUnless(a.len == 30)
m = a.mp_reach_nlri
self.failUnless(m.afi == AFI_IPV4)
self.failUnless(len(m.snpas) == 0)
self.failUnless(len(m.announced) == 1)
p = m.announced[0]
self.failUnless(p.len == 96)
b4 = BGP(self.bgp4)
self.failUnless(b4.len == 45)
self.failUnless(b4.type == OPEN)
self.failUnless(b4.open.as == 237)
self.failUnless(b4.open.param_len == 16)
self.failUnless(len(b4.open.parameters) == 3)
p = b4.open.parameters[0]
self.failUnless(p.type == CAPABILITY)
self.failUnless(p.len == 6)
c = p.capability
self.failUnless(c.code == CAP_MULTIPROTOCOL)
self.failUnless(c.len == 4)
self.failUnless(c.data == '\x00\x01\x00\x01')
c = b4.open.parameters[2].capability
self.failUnless(c.code == CAP_ROUTE_REFRESH)
self.failUnless(c.len == 0)
bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
unittest.main()
Rename 'as' to 'asn' because 'as' is a reserved keyword in Python 2.6. This breaks API compatibility with the BGP module and should be noted in the release notes (although users of the affected attribute should already have been seeing the Python-generated deprecation warnings in their applications).
git-svn-id: c24decc6cc9e43397d358c131c6d3542a5bd9019@51 30ee8f51-df1a-0410-9115-614dbba9287e
# $Id$
"""Border Gateway Protocol."""
import dpkt
import struct
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types
NO_EXPORT = 0xffffff01L
NO_ADVERTISE = 0xffffff02L
NO_EXPORT_SUBCONFED = 0xffffff03L
NO_PEER = 0xffffff04L
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
__hdr__ = (
('marker', '16s', '\x01' * 16),
('len', 'H', 0),
('type', 'B', OPEN)
)
def unpack(self, buf):
    """Parse the common BGP header and dispatch to the typed message body.

    Trims self.data to the declared message length, then replaces it with
    the decoded OPEN/UPDATE/NOTIFICATION/KEEPALIVE/ROUTE-REFRESH object,
    also exposed under a per-type attribute.
    """
    dpkt.Packet.unpack(self, buf)
    # 'len' covers the whole message including the fixed header.
    self.data = self.data[:self.len - self.__hdr_len__]
    if self.type == OPEN:
        self.data = self.open = self.Open(self.data)
    elif self.type == UPDATE:
        self.data = self.update = self.Update(self.data)
    elif self.type == NOTIFICATION:
        # Fix: attribute was misspelled 'notifiation'; expose the correct
        # name and keep the old misspelling as an alias for backward
        # compatibility with existing callers.
        self.data = self.notification = self.Notification(self.data)
        self.notifiation = self.notification
    elif self.type == KEEPALIVE:
        self.data = self.keepalive = self.Keepalive(self.data)
    elif self.type == ROUTE_REFRESH:
        self.data = self.route_refresh = self.RouteRefresh(self.data)
class Open(dpkt.Packet):
__hdr__ = (
('v', 'B', 4),
('asn', 'H', 0),
('holdtime', 'H', 0),
('identifier', 'I', 0),
('param_len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
plen = self.param_len
while plen > 0:
param = self.Parameter(self.data)
self.data = self.data[len(param):]
plen -= len(param)
l.append(param)
self.data = self.parameters = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.parameters))
def __str__(self):
return self.pack_hdr() + \
''.join(map(str, self.parameters))
class Parameter(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
if self.type == AUTHENTICATION:
self.data = self.authentication = self.Authentication(self.data)
elif self.type == CAPABILITY:
self.data = self.capability = self.Capability(self.data)
class Authentication(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
)
class Capability(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
class Update(dpkt.Packet):
def unpack(self, buf):
self.data = buf
# Withdrawn Routes
wlen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l = []
while wlen > 0:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
wlen -= len(route)
l.append(route)
self.withdrawn = l
# Path Attributes
plen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l = []
while plen > 0:
attr = self.Attribute(self.data)
self.data = self.data[len(attr):]
plen -= len(attr)
l.append(attr)
self.attributes = l
# Announced Routes
l = []
while self.data:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
l.append(route)
self.announced = l
def __len__(self):
return 2 + sum(map(len, self.withdrawn)) + \
2 + sum(map(len, self.attributes)) + \
sum(map(len, self.announced))
def __str__(self):
return struct.pack('>H', sum(map(len, self.withdrawn))) + \
''.join(map(str, self.withdrawn)) + \
struct.pack('>H', sum(map(len, self.attributes))) + \
''.join(map(str, self.attributes)) + \
''.join(map(str, self.announced))
class Attribute(dpkt.Packet):
__hdr__ = (
('flags', 'B', 0),
('type', 'B', 0)
)
def _get_o(self):
return (self.flags >> 7) & 0x1
def _set_o(self, o):
self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
optional = property(_get_o, _set_o)
def _get_t(self):
return (self.flags >> 6) & 0x1
def _set_t(self, t):
self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
transitive = property(_get_t, _set_t)
def _get_p(self):
return (self.flags >> 5) & 0x1
def _set_p(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
partial = property(_get_p, _set_p)
def _get_e(self):
return (self.flags >> 4) & 0x1
def _set_e(self, e):
self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
extended_length = property(_get_e, _set_e)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.extended_length:
self.len = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
else:
self.len = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.data = self.data[:self.len]
if self.type == ORIGIN:
self.data = self.origin = self.Origin(self.data)
elif self.type == AS_PATH:
self.data = self.as_path = self.ASPath(self.data)
elif self.type == NEXT_HOP:
self.data = self.next_hop = self.NextHop(self.data)
elif self.type == MULTI_EXIT_DISC:
self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
elif self.type == LOCAL_PREF:
self.data = self.local_pref = self.LocalPref(self.data)
elif self.type == ATOMIC_AGGREGATE:
self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
elif self.type == AGGREGATOR:
self.data = self.aggregator = self.Aggregator(self.data)
elif self.type == COMMUNITIES:
self.data = self.communities = self.Communities(self.data)
elif self.type == ORIGINATOR_ID:
self.data = self.originator_id = self.OriginatorID(self.data)
elif self.type == CLUSTER_LIST:
self.data = self.cluster_list = self.ClusterList(self.data)
elif self.type == MP_REACH_NLRI:
self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
elif self.type == MP_UNREACH_NLRI:
self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
def __len__(self):
if self.extended_length:
attr_len = 2
else:
attr_len = 1
return self.__hdr_len__ + \
attr_len + \
len(self.data)
def __str__(self):
if self.extended_length:
attr_len_str = struct.pack('>H', self.len)
else:
attr_len_str = struct.pack('B', self.len)
return self.pack_hdr() + \
attr_len_str + \
str(self.data)
class Origin(dpkt.Packet):
__hdr__ = (
('type', 'B', ORIGIN_IGP),
)
class ASPath(dpkt.Packet):
def unpack(self, buf):
self.data = buf
l = []
while self.data:
seg = self.ASPathSegment(self.data)
self.data = self.data[len(seg):]
l.append(seg)
self.data = self.segments = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class ASPathSegment(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
for i in range(self.len):
AS = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l.append(AS)
self.data = self.path = l
def __len__(self):
return self.__hdr_len__ + \
2 * len(self.path)
def __str__(self):
as_str = ''
for AS in self.path:
as_str += struct.pack('>H', AS)
return self.pack_hdr() + \
as_str
class NextHop(dpkt.Packet):
__hdr__ = (
('ip', 'I', 0),
)
class MultiExitDisc(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class LocalPref(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class AtomicAggregate(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class Aggregator(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('ip', 'I', 0)
)
class Communities(dpkt.Packet):
def unpack(self, buf):
self.data = buf
l = []
while self.data:
val = struct.unpack('>I', self.data[:4])[0]
if (val >= 0x00000000L and val <= 0x0000ffffL) or \
(val >= 0xffff0000L and val <= 0xffffffffL):
comm = self.ReservedCommunity(self.data[:4])
else:
comm = self.Community(self.data[:4])
self.data = self.data[len(comm):]
l.append(comm)
self.data = self.list = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class Community(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('value', 'H', 0)
)
class ReservedCommunity(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class OriginatorID(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class ClusterList(dpkt.Packet):
def unpack(self, buf):
self.data = buf
l = []
while self.data:
id = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l.append(id)
self.data = self.list = l
def __len__(self):
return 4 * len(self.list)
def __str__(self):
cluster_str = ''
for val in self.list:
cluster_str += struct.pack('>I', val)
return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.next_hop = self.data[:nlen]
self.data = self.data[nlen:]
# SNPAs
l = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l.append(snpa)
self.snpas = l
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Announced Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.announced = l
def __len__(self):
return self.__hdr_len__ + \
1 + len(self.next_hop) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __str__(self):
return self.pack_hdr() + \
struct.pack('B', len(self.next_hop)) + \
str(self.next_hop) + \
struct.pack('B', len(self.snpas)) + \
''.join(map(str, self.snpas)) + \
''.join(map(str, self.announced))
class SNPA(dpkt.Packet):
    """Subnetwork Point of Attachment entry: a length-prefixed opaque value.

    Fix: the class previously did not inherit from dpkt.Packet, so its
    __hdr__ tuple was never processed by the dpkt metaclass and calling
    dpkt.Packet.unpack() on an instance failed.
    """
    __hdr__ = (
        ('len', 'B', 0),
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # 'len' counts semi-octets; keep (len + 1) / 2 whole bytes.
        self.data = self.data[:(self.len + 1) / 2]
class MPUnreachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Withdrawn Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.withdrawn = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.data))
def __str__(self):
return self.pack_hdr() + \
''.join(map(str, self.data))
class Notification(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('subcode', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.error = self.data
class Keepalive(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class RouteRefresh(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('rsvd', 'B', 0),
('safi', 'B', SAFI_UNICAST)
)
class RouteGeneric(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.prefix = self.data[:(self.len + 7) / 8]
class RouteIPV4(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) / 8]
tmp += (4 - len(tmp)) * '\x00'
self.data = self.prefix = tmp
def __len__(self):
return self.__hdr_len__ + \
(self.len + 7) / 8
def __str__(self):
return self.pack_hdr() + \
self.data[:(self.len + 7) / 8]
class RouteIPV6(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) / 8]
tmp += (16 - len(tmp)) * '\x00'
self.data = self.prefix = tmp
def __len__(self):
return self.__hdr_len__ + \
(self.len + 7) / 8
def __str__(self):
return self.pack_hdr() + \
self.data[:(self.len + 7) / 8]
if __name__ == '__main__':
import unittest
class BGPTestCase(unittest.TestCase):
def testPack(self):
b1 = BGP(self.bgp1)
self.failUnless(self.bgp1 == str(b1))
b2 = BGP(self.bgp2)
self.failUnless(self.bgp2 == str(b2))
b3 = BGP(self.bgp3)
self.failUnless(self.bgp3 == str(b3))
b4 = BGP(self.bgp4)
self.failUnless(self.bgp4 == str(b4))
def testUnpack(self):
    """Decode the four sample messages and check selected fields.

    Fix: the Community and Open headers were renamed from 'as' to 'asn'
    ('as' became a reserved word in Python 2.6), but these assertions
    still read the old '.as' attribute, which is a syntax error and no
    longer matches the header field name.
    """
    b1 = BGP(self.bgp1)
    self.failUnless(b1.len == 19)
    self.failUnless(b1.type == KEEPALIVE)
    self.failUnless(b1.keepalive is not None)
    b2 = BGP(self.bgp2)
    self.failUnless(b2.type == UPDATE)
    self.failUnless(len(b2.update.withdrawn) == 0)
    self.failUnless(len(b2.update.announced) == 1)
    self.failUnless(len(b2.update.attributes) == 9)
    a = b2.update.attributes[1]
    self.failUnless(a.type == AS_PATH)
    self.failUnless(a.len == 10)
    self.failUnless(len(a.as_path.segments) == 2)
    s = a.as_path.segments[0]
    self.failUnless(s.type == AS_SET)
    self.failUnless(s.len == 2)
    self.failUnless(len(s.path) == 2)
    self.failUnless(s.path[0] == 500)
    a = b2.update.attributes[6]
    self.failUnless(a.type == COMMUNITIES)
    self.failUnless(a.len == 12)
    self.failUnless(len(a.communities.list) == 3)
    c = a.communities.list[0]
    self.failUnless(c.asn == 65215)  # was c.as: field renamed to 'asn'
    self.failUnless(c.value == 1)
    r = b2.update.announced[0]
    self.failUnless(r.len == 22)
    self.failUnless(r.prefix == '\xc0\xa8\x04\x00')
    b3 = BGP(self.bgp3)
    self.failUnless(b3.type == UPDATE)
    self.failUnless(len(b3.update.withdrawn) == 0)
    self.failUnless(len(b3.update.announced) == 0)
    self.failUnless(len(b3.update.attributes) == 6)
    a = b3.update.attributes[0]
    self.failUnless(a.optional == False)
    self.failUnless(a.transitive == True)
    self.failUnless(a.partial == False)
    self.failUnless(a.extended_length == False)
    self.failUnless(a.type == ORIGIN)
    self.failUnless(a.len == 1)
    o = a.origin
    self.failUnless(o.type == ORIGIN_IGP)
    a = b3.update.attributes[5]
    self.failUnless(a.optional == True)
    self.failUnless(a.transitive == False)
    self.failUnless(a.partial == False)
    self.failUnless(a.extended_length == True)
    self.failUnless(a.type == MP_REACH_NLRI)
    self.failUnless(a.len == 30)
    m = a.mp_reach_nlri
    self.failUnless(m.afi == AFI_IPV4)
    self.failUnless(len(m.snpas) == 0)
    self.failUnless(len(m.announced) == 1)
    p = m.announced[0]
    self.failUnless(p.len == 96)
    b4 = BGP(self.bgp4)
    self.failUnless(b4.len == 45)
    self.failUnless(b4.type == OPEN)
    self.failUnless(b4.open.asn == 237)  # was b4.open.as: field renamed to 'asn'
    self.failUnless(b4.open.param_len == 16)
    self.failUnless(len(b4.open.parameters) == 3)
    p = b4.open.parameters[0]
    self.failUnless(p.type == CAPABILITY)
    self.failUnless(p.len == 6)
    c = p.capability
    self.failUnless(c.code == CAP_MULTIPROTOCOL)
    self.failUnless(c.len == 4)
    self.failUnless(c.data == '\x00\x01\x00\x01')
    c = b4.open.parameters[2].capability
    self.failUnless(c.code == CAP_ROUTE_REFRESH)
    self.failUnless(c.len == 0)
bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
unittest.main()
|
from django.conf import settings
from rest_framework import test
from . import fixtures
class BaseTest(test.APITransactionTestCase):
    """Base class for support API tests; activates the support backend."""

    def setUp(self):
        # NOTE(review): a bare class name 'SupportBackend' is stored here --
        # TODO confirm the backend loader accepts a name without a module path.
        settings.WALDUR_SUPPORT['ACTIVE_BACKEND'] = 'SupportBackend'
        self.fixture = fixtures.SupportFixture()
Update the tests' configuration: use the full dotted path to the Atlassian SupportBackend.
from django.conf import settings
from rest_framework import test
from . import fixtures
class BaseTest(test.APITransactionTestCase):
    """Base class for support API tests; activates the Atlassian backend."""

    def setUp(self):
        # Dotted '<module>:<class>' path pointing at the real Atlassian
        # backend implementation.
        support_backend = 'nodeconductor_assembly_waldur.support.backend.atlassian:SupportBackend'
        settings.WALDUR_SUPPORT['ACTIVE_BACKEND'] = support_backend
        self.fixture = fixtures.SupportFixture()
|
#!/usr/bin/env python
""" Listens on the street sign prediction topic and uses the resultant
predictions to obey street signs. """
from geometry_msgs.msg import PoseStamped, Pose, Quaternion, Point
from std_msgs.msg import Header
from std_msgs.msg import String
from time import sleep
import threading
import smach
import smach_ros
import rospy
###### State Machine States ######
class StateMachine(object):
    """Prototype string-keyed state machine for obeying street signs.

    Only 'forward', 'rturn' and the terminal 'stop' state are implemented;
    'lturn'/'uturn' are planned (see the commented-out table entries) and
    fall back to 'forward' until written.
    """

    def __init__(self, publisher, data=None):
        """
        publisher -- callable(x, y, z) used to publish a navigation goal
        data      -- shared dict holding the latest sign prediction; a fresh
                     dict is created when omitted (avoids the shared
                     mutable-default-argument pitfall of ``data={}``)
        """
        self.res = 'forward'
        self.data = data if data is not None else {}
        self.publishGoal = publisher
        # Loop-control flag; external code sets this to True to end run().
        # Fix: this attribute used to shadow the stop-state handler, so
        # transitions['stop'] captured the boolean False and calling it
        # raised TypeError as soon as a stop sign was seen.
        self.stop = False
        # if a function returns [this string], execute [this function]
        self.transitions = {
            'rturn': self.rturn,
            #'lturn': self.lturn,
            #'uturn': self.uturn,
            'forward': self.forward,
            'stop': self.halt,
        }

    def run(self):
        while not self.stop:
            # Fall back to 'forward' for states not implemented yet
            # (forward() may still return 'lturn'/'uturn').
            handler = self.transitions.get(self.res, self.forward)
            self.res = handler()
            # the robot should 'latch' slightly once it's made a decision
            sleep(.5)

    # Each method below implements one state and returns the next state name.
    def forward(self):
        print("The sign says: {}".format(self.data['sign']))
        self.publishGoal(0,0,0)
        if self.data['sign'] and self.data['sign'] == 'rturn':
            return 'rturn'
        elif self.data['sign'] == 'lturn':
            return 'lturn'
        elif self.data['sign'] == 'uturn':
            return 'uturn'
        elif self.data['sign'] == 'stop':
            return 'stop'
        else:
            return 'forward'

    def rturn(self):
        # Latch in the turn for a while before re-reading the sign.
        sleep(2)
        if self.data['sign'] == 'rturn':
            return 'rturn'
        else:
            return 'forward'

    def halt(self):
        # Terminal state: raise the flag so run() exits its loop.
        # Fix: the old stop() handler was 'pass', returning None and causing
        # a KeyError on the next transition lookup.
        self.stop = True
        return 'stop'
###### Node Class ######
class StreetSignFollower(object):
""" Issues goals based on a state machine transitioned by
listening on the street sign topic. """
def __init__(self, sign_topic):
""" Initialize the state machine """
rospy.init_node('sign_follower')
self.pub = rospy.Publisher("/move_base_simple/goal", PoseStamped, queue_size=1)
rospy.Subscriber(sign_topic, String, self.process_sign)
def process_sign(self, msg):
""" Process sign predictions, use them to transition the state machine. """
self.sm.data['sign'] = msg.data
@staticmethod
def publishGoal(x=0.0, y=0.0, z=0.0):
"""point_msg = Point(x, y, z)
quat_msg = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
pose_msg = Pose(position=point_msg, orientation=quat_msg)
header_msg = Header(stamp=rospy.Time.now(),
frame_id='base_link')
pose_stamped = PoseStamped(header=header_msg, pose=pose_msg)
self.pub.Publish(pose_stamped)"""
print("IT WORKS")
def run(self):
""" The main run loop - create a state machine, and set it off """
# Create a state machine
self.sm = StateMachine(self.publishGoal, {'sign': None})
# Execute the machine
# threading required for control-C-ability
# Create a thread to execute the smach container
smach_thread = threading.Thread(target=self.sm.run)
smach_thread.start()
# Wait for ctrl-c
rospy.spin()
# stop the state machine
self.sm.stop = True
# Block until everything is shut down
smach_thread.join()
if __name__ == '__main__':
node = StreetSignFollower("/sign_predictions")
node.run()
State machine is theoretically functional, but untested.
#!/usr/bin/env python
""" Listens on the street sign prediction topic and uses the resultant
predictions to obey street signs. """
from geometry_msgs.msg import PoseStamped, Pose, Quaternion, Point
from std_msgs.msg import Header
from std_msgs.msg import String
from time import sleep
import threading
import smach
import smach_ros
import rospy
###### State Machine States ######
class StateMachine(object):
    """String-keyed state machine for obeying street signs.

    Each state is a method that publishes a navigation goal and returns the
    name of the next state, chosen from the most recent sign prediction
    stored in ``self.data['sign']``.
    """

    def __init__(self, publisher, data=None):
        """
        publisher -- callable(x, y, z) used to publish a navigation goal
        data      -- shared dict holding the latest sign prediction; a fresh
                     dict is created when omitted. Fix: the old default
                     ``data={}`` was a shared mutable default argument, so
                     all instances created without an explicit dict would
                     silently share state.
        """
        self.res = 'forward'
        self.data = data if data is not None else {}
        self.publishGoal = publisher
        # Loop-control flag; external code sets this to True to end run().
        self.end = False
        # if a function returns [this string], execute [this function]
        self.transitions = {
            'rturn': self.rturn,
            'lturn': self.lturn,
            'uturn': self.uturn,
            'forward': self.forward,
            'stop': self.stop,
        }

    def run(self):
        """Main loop: execute the current state until told to end."""
        while not self.end:
            self.res = self.transitions[self.res]()
            # the robot should 'latch' slightly once it's made a decision
            sleep(.5)

    # Each method below implements one state and returns the next state name.
    def forward(self):
        print("The sign says: {}".format(self.data['sign']))
        # publish a goal 3m ahead
        self.publishGoal(3,0,0)
        if self.data['sign'] and self.data['sign'] == 'rturn':
            return 'rturn'
        elif self.data['sign'] == 'lturn':
            return 'lturn'
        elif self.data['sign'] == 'uturn':
            return 'uturn'
        elif self.data['sign'] == 'stop':
            print("forward --> stop")
            return 'stop'
        else:
            return 'forward'

    def rturn(self):
        # publish a goal 1m ahead and 3m to the right
        self.publishGoal(1,-3,0)
        if self.data['sign'] == 'rturn':
            return 'rturn'
        else:
            return 'forward'

    def lturn(self):
        # publish a goal 1m ahead and 3m to the left
        self.publishGoal(1,3,0)
        if self.data['sign'] == 'lturn':
            return 'lturn'
        else:
            return 'forward'

    def uturn(self):
        # publish a goal 3m behind us
        self.publishGoal(-3,0,0)
        if self.data['sign'] == 'uturn':
            return 'uturn'
        else:
            return 'forward'

    def stop(self):
        # stop the robbit
        # publish a goal on top of us
        self.publishGoal(0,0,0)
        # stay stopped forever
        print("stop --> stop")
        return 'stop'
###### Node Class ######
class StreetSignFollower(object):
    """ Issues goals based on a state machine transitioned by
    listening on the street sign topic. """
    def __init__(self, sign_topic):
        """ Initialize the state machine """
        rospy.init_node('sign_follower')
        # Goal publisher for the navigation stack.
        self.pub = rospy.Publisher("/move_base_simple/goal", PoseStamped, queue_size=1)
        # NOTE(review): the subscriber is registered before run() creates
        # self.sm, so an early message could reach process_sign() before the
        # state machine exists -- TODO confirm startup ordering.
        rospy.Subscriber(sign_topic, String, self.process_sign)
    def process_sign(self, msg):
        """ Process sign predictions, use them to transition the state machine. """
        # msg.data is the predicted sign name; the state machine reads it on
        # its next tick.
        self.sm.data['sign'] = msg.data
    @staticmethod
    def publishGoal(x=0.0, y=0.0, z=0.0):
        # Stub: real publishing is commented out below. NOTE(review): the
        # commented code references self.pub from a @staticmethod and calls
        # .Publish (capital P); both would need fixing before re-enabling.
        print("Publishing goal at ({},{},{})".format(x,y,z))
        """point_msg = Point(x, y, z)
        quat_msg = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        pose_msg = Pose(position=point_msg, orientation=quat_msg)
        header_msg = Header(stamp=rospy.Time.now(),
                            frame_id='base_link')
        pose_stamped = PoseStamped(header=header_msg, pose=pose_msg)
        self.pub.Publish(pose_stamped)"""
    def run(self):
        """ The main run loop - create a state machine, and set it off """
        # Create a state machine
        self.sm = StateMachine(self.publishGoal, {'sign': None})
        # Execute the machine
        # threading required for control-C-ability
        # Create a thread to execute the smach container
        smach_thread = threading.Thread(target=self.sm.run)
        smach_thread.start()
        # Wait for ctrl-c
        rospy.spin()
        # stop the state machine
        self.sm.end = True
        # Block until everything is shut down
        smach_thread.join()
if __name__ == '__main__':
node = StreetSignFollower("/sign_predictions")
node.run()
|
#!/usr/bin/env python
from uuid import uuid4
from random import randint
from importd import d
d(INSTALLED_APPS=[
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.admin',
'ajaxinclude',
])
from django.contrib import admin
admin.autodiscover()
d.urlpatterns += d.patterns('',
d.url(r'^admin/', include(admin.site.urls))
)
@d("/random/<int:input>/", name='user_number_output')
def randomised(request, input):
return d.HttpResponse("a number by you: %d" % int(input))
@d("/random/", name='random_output')
def randomised(request):
return d.HttpResponse("a random number for you: %d" % randint(1, 100))
@d("/hello/world/", name='fixed_output')
def hello_world(request):
return d.HttpResponse("hello world")
from ajaxinclude.views import AjaxIncludeProxy
d.add_view('/ajaxinclude/', AjaxIncludeProxy.as_view(), name='ajaxinclude')
@d("/<slug:one>/<slug:two>/", name='user_text_output')
def index(request, one, two):
return d.HttpResponse(u'first parameter was %s, the second was %s' % (one, two))
@d("^$")
def index(request):
ctx = {
'random_number': randint(1, 1000),
'random_text1': str(uuid4()),
'random_text2': str(uuid4()),
}
return d.render_to_response('index.html', ctx, context_instance=d.RequestContext(request))
Emulate some HTTP errors (301, 403, 500), and also add sessions so that the link to the admin works.
#!/usr/bin/env python
"""importd demo project with error-emulating views, admin, and toy views."""
from uuid import uuid4
from random import randint
from importd import d
from django.conf.urls import include
from django.http import HttpResponsePermanentRedirect, HttpResponseServerError, HttpResponseForbidden
# Sessions are required so the admin login works.
d(INSTALLED_APPS=[
    'django.contrib.sessions',
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.admin',
    'ajaxinclude',
])
from django.contrib import admin
admin.autodiscover()
# Mount the Django admin at /admin/.
d.urlpatterns += d.patterns('',
    d.url(r'^admin/', include(admin.site.urls))
)
@d('/bad/location/', name='http_301')
def gone(request):
    """Emulate a 301 permanent redirect."""
    return HttpResponsePermanentRedirect(redirect_to='http://google.com/')
@d('/bad/server/', name='http_500')
def ise(request):
    """Emulate a 500 internal server error."""
    return HttpResponseServerError('oops')
@d('/bad/access/', name='http_403')
def forbidden(request):
    """Emulate a 403 forbidden response."""
    return HttpResponseForbidden('go away!')
@d("/random/<int:input>/", name='user_number_output')
def user_number(request, input):
    """Echo back the integer captured from the URL."""
    # NOTE: the parameter must stay named `input` (shadowing the builtin)
    # because importd binds it to the <int:input> URL segment.
    return d.HttpResponse("a number by you: %d" % int(input))
@d("/random/", name='random_output')
def random_number(request):
    """Return a random number between 1 and 100."""
    # Renamed from `randomised`, which clashed with the view above (F811).
    return d.HttpResponse("a random number for you: %d" % randint(1, 100))
@d("/hello/world/", name='fixed_output')
def hello_world(request):
    """Fixed greeting; useful as a smoke test."""
    return d.HttpResponse("hello world")
from ajaxinclude.views import AjaxIncludeProxy
d.add_view('/ajaxinclude/', AjaxIncludeProxy.as_view(), name='ajaxinclude')
@d("/<slug:one>/<slug:two>/", name='user_text_output')
def slug_echo(request, one, two):
    """Echo the two slug parameters captured from the URL."""
    return d.HttpResponse(u'first parameter was %s, the second was %s' % (one, two))
@d("^$")
def home(request):
    """Render the index page with some random content."""
    # Renamed from `index`, which clashed with the slug view above (F811).
    ctx = {
        'random_number': randint(1, 1000),
        'random_text1': str(uuid4()),
        'random_text2': str(uuid4()),
    }
    return d.render_to_response('index.html', ctx, context_instance=d.RequestContext(request))
|
# coding: utf-8
"""
TODO:
- n1.0073 Halpha is emission
"""
# Standard library
import os
from os import path
# Third-party
import astropy.coordinates as coord
from astropy.time import Time
from astropy.constants import c
import astropy.units as u
from astropy.io import fits
from astropy.table import Table, Column
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('apw-notebook')
import emcee
import corner
import schwimmbad
# Project
from comoving_rv.log import logger
from comoving_rv.longslit import GlobImageFileCollection
from comoving_rv.longslit.fitting import fit_spec_line_GP, gp_to_fit_pars
from comoving_rv.longslit.models import voigt_polynomial
from comoving_rv.velocity import bary_vel_corr, kitt_peak
def log_probability(params, gp, flux_data):
    """Log-posterior for the GP spectral-line model (used by emcee).

    Returns -inf for parameters rejected by the prior or bounds, otherwise
    the GP log-likelihood of ``flux_data`` plus the (modified) log-prior.
    """
    gp.set_parameter_vector(params)
    log_prior = gp.log_prior()
    if not np.isfinite(log_prior):
        return -np.inf
    # HACK: Gaussian prior on log(rho)
    var = 1.
    log_prior += -0.5*(params[1]-1)**2/var - 0.5*np.log(2*np.pi*var)
    # Hard lower bounds on parameters 4 and 5.
    if min(params[4], params[5]) < -10.:
        return -np.inf
    log_like = gp.log_likelihood(flux_data)
    if not np.isfinite(log_like):
        return -np.inf
    return log_like + log_prior
def main(night_path, overwrite=False):
    """Fit H-alpha and [OI] sky lines in every 1D OBJECT spectrum and append
    radial-velocity measurements to ``<root>/velocity.fits``.

    Parameters
    ----------
    night_path : str
        Path to a processed night directory, or to a single 1D spectrum file.
    overwrite : bool, optional
        If True, objects already in the velocity table are re-fit (their old
        rows are removed first).
    """
    night_path = path.realpath(path.expanduser(night_path))
    if not path.exists(night_path):
        raise IOError("Path '{}' doesn't exist".format(night_path))
    # Accept either a directory full of spectra or one specific file.
    if path.isdir(night_path):
        data_file = None
        logger.info("Reading data from path: {}".format(night_path))
    elif path.isfile(night_path):
        data_file = night_path
        base_path, name = path.split(night_path)
        night_path = base_path
        logger.info("Reading file: {}".format(data_file))
    else:
        raise RuntimeError("how?!")
    plot_path = path.join(night_path, 'plots')
    # The output table lives one level above the night directory.
    root_path = path.abspath(path.join(night_path, '..'))
    table_path = path.join(root_path, 'velocity.fits')
    # air wavelength of Halpha -- wavelength calibration from comp lamp is done
    # at air wavelengths, so this is where Halpha should be, right?
    Halpha = 6562.8 * u.angstrom
    # [OI] emission lines -- wavelengths from:
    # http://physics.nist.gov/PhysRefData/ASD/lines_form.html
    sky_lines = [5577.3387, 6300.304, 6363.776]
    if not path.exists(plot_path):
        os.makedirs(plot_path, exist_ok=True)
    if not path.exists(table_path):
        logger.debug('Creating table at {}'.format(table_path))
        # NOTE(review): in 'sky_wave_shift' below, shape=(len(sky_lines,))
        # has the comma inside len(), so shape is the bare int len(sky_lines)
        # rather than the 1-tuple (len(sky_lines),) -- presumably accepted by
        # astropy, but confirm it was intended.
        tbl_init = [Column(name='object_name', dtype='|S30', data=[], length=0),
                    Column(name='group_id', dtype=int, length=0),
                    Column(name='smoh_index', dtype=int, length=0),
                    Column(name='ra', dtype=float, unit=u.degree, length=0),
                    Column(name='dec', dtype=float, unit=u.degree, length=0),
                    Column(name='secz', dtype=float, length=0),
                    Column(name='filename', dtype='|S128', data=[], length=0),
                    Column(name='Ha_centroid', dtype=float, unit=u.angstrom, length=0),
                    Column(name='Ha_centroid_err', dtype=float, unit=u.angstrom, length=0),
                    Column(name='bary_rv_shift', dtype=float, unit=u.km/u.s, length=0),
                    Column(name='sky_shift_flag', dtype=int, length=0),
                    Column(name='sky_wave_shift', dtype=float, unit=u.angstrom, length=0, shape=(len(sky_lines,))),
                    Column(name='rv', dtype=float, unit=u.km/u.s, length=0),
                    Column(name='rv_err', dtype=float, unit=u.km/u.s, length=0)]
        velocity_tbl = Table(tbl_init)
        velocity_tbl.write(table_path, format='fits')
        logger.debug('Table: {}'.format(velocity_tbl.colnames))
    else:
        logger.debug('Table exists, reading ({})'.format(table_path))
        velocity_tbl = Table.read(table_path, format='fits')
    # Build the list of files to process.
    if data_file is None:
        ic = GlobImageFileCollection(night_path, glob_include='1d_*')
        files = ic.files_filtered(imagetyp='OBJECT')
    else:
        files = [data_file]
    for filename in files:
        file_path = path.join(night_path, filename)
        filebase,ext = path.splitext(filename)
        # read FITS header
        hdr = fits.getheader(file_path, 0)
        object_name = hdr['OBJECT']
        # HACK: for testing
        if 'HIP' not in object_name:
            continue
        if object_name in velocity_tbl['object_name']:
            if overwrite:
                logger.debug('Object {} already done - overwriting!'.format(object_name))
                idx, = np.where(velocity_tbl['object_name'] == object_name)
                for i in idx:
                    velocity_tbl.remove_row(i)
            else:
                logger.debug('Object {} already done.'.format(object_name))
                continue
        # read the spectrum data and get wavelength solution
        spec = Table.read(file_path)
        # Define data arrays to be used in fitting below
        near_Ha = (np.isfinite(spec['wavelength']) &
                   (spec['wavelength'] > 6510) & (spec['wavelength'] < 6615))
        flux_data = np.array(spec['source_flux'][near_Ha])
        ivar_data = np.array(spec['source_ivar'][near_Ha])
        wave_data = np.array(spec['wavelength'][near_Ha])
        # sort by wavelength so the GP sees an ordered grid
        _idx = wave_data.argsort()
        wave_data = wave_data[_idx]
        flux_data = flux_data[_idx]
        ivar_data = ivar_data[_idx]
        err_data = 1/np.sqrt(ivar_data)
        # grid of wavelengths for plotting
        wave_grid = np.linspace(wave_data.min(), wave_data.max(), 256)
        # start by doing a maximum likelihood GP fit
        # TODO: figure out if it's emission or absorption...for now just assume
        # absorption
        absorp_emiss = -1.
        gp = fit_spec_line_GP(wave_data, flux_data, ivar_data,
                              absorp_emiss=absorp_emiss,
                              fwhm_L0=4., std_G0=1., n_bg_coef=2)
        if gp.get_parameter_dict()['mean:ln_amp'] < 0.5: # MAGIC NUMBER
            # try again with emission line
            logger.error('absorption line has tiny amplitude! did '
                         'auto-determination of absorption/emission fail?')
            # TODO: what now?
            continue
        fit_pars = gp_to_fit_pars(gp, absorp_emiss)
        # Make the maximum likelihood prediction
        mu, var = gp.predict(flux_data, wave_grid, return_var=True)
        std = np.sqrt(var)
        # ------------------------------------------------------------------------
        # Plot the maximum likelihood model
        fig,ax = plt.subplots()
        # data
        ax.plot(wave_data, flux_data, drawstyle='steps-mid', marker='')
        ax.errorbar(wave_data, flux_data, err_data,
                    marker='', ls='none', ecolor='#666666', zorder=-10)
        # mean model
        ax.plot(wave_grid, voigt_polynomial(wave_grid, **fit_pars),
                marker='', alpha=0.5)
        # full GP model
        gp_color = "#ff7f0e"
        ax.plot(wave_grid, mu, color=gp_color, marker='')
        ax.fill_between(wave_grid, mu+std, mu-std, color=gp_color,
                        alpha=0.3, edgecolor="none")
        ax.set_xlabel(r'wavelength [$\AA$]')
        ax.set_ylabel('flux')
        fig.tight_layout()
        fig.savefig(path.join(plot_path, '{}_maxlike.png'.format(filebase)), dpi=256)
        plt.close(fig)
        # ------------------------------------------------------------------------
        # Run `emcee` instead to sample over GP model parameters:
        # if the Gaussian width collapsed in the ML fit, freeze it
        if fit_pars['std_G'] < 1E-2:
            gp.freeze_parameter('mean:ln_std_G')
        initial = np.array(gp.get_parameter_vector())
        # keep the starting point inside the hard bounds enforced by
        # log_probability (rejects params[4] or params[5] < -10)
        if initial[4] < -10:
            initial[4] = -8.
        if initial[5] < -10:
            initial[5] = -8.
        ndim, nwalkers = len(initial), 64
        with schwimmbad.MultiPool() as pool:
        # with schwimmbad.SerialPool() as pool:
            sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, pool=pool,
                                            args=(gp, flux_data))
            logger.debug("Running burn-in...")
            p0 = initial + 1e-6 * np.random.randn(nwalkers, ndim)
            p0, lp, _ = sampler.run_mcmc(p0, 256)
            logger.debug("Running 2nd burn-in...")
            sampler.reset()
            # restart all walkers in a tight ball around the best burn-in sample
            p0 = p0[lp.argmax()] + 1e-3 * np.random.randn(nwalkers, ndim)
            p0, lp, _ = sampler.run_mcmc(p0, 512)
            logger.debug("Running production...")
            sampler.reset()
            pos, lp, _ = sampler.run_mcmc(p0, 512)
        # --------------------------------------------------------------------
        # plot MCMC traces
        fig,axes = plt.subplots(2,4,figsize=(18,6))
        for i in range(sampler.dim):
            for walker in sampler.chain[...,i]:
                axes.flat[i].plot(walker, marker='', drawstyle='steps-mid', alpha=0.2)
            axes.flat[i].set_title(gp.get_parameter_names()[i], fontsize=12)
        fig.tight_layout()
        fig.savefig(path.join(plot_path, '{}_mcmc_trace.png'.format(filebase)), dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------
        # --------------------------------------------------------------------
        # plot samples
        fig,axes = plt.subplots(3, 1, figsize=(6,9), sharex=True)
        samples = sampler.flatchain
        # overplot 32 random posterior samples
        for s in samples[np.random.randint(len(samples), size=32)]:
            gp.set_parameter_vector(s)
            fit_pars = gp_to_fit_pars(gp, absorp_emiss)
            _mean_model = voigt_polynomial(wave_grid, **fit_pars)
            axes[0].plot(wave_grid, _mean_model,
                         marker='', alpha=0.25, color='#3182bd', zorder=-10)
            mu = gp.predict(flux_data, wave_grid, return_cov=False)
            axes[1].plot(wave_grid, mu-_mean_model, color=gp_color, alpha=0.25, marker='')
            axes[2].plot(wave_grid, mu, color='#756bb1', alpha=0.25, marker='')
        axes[2].plot(wave_data, flux_data, drawstyle='steps-mid', marker='', zorder=-6)
        axes[2].errorbar(wave_data, flux_data, err_data,
                         marker='', ls='none', ecolor='#666666', zorder=-10)
        axes[2].set_ylabel('flux')
        axes[2].set_xlabel(r'wavelength [$\AA$]')
        axes[0].set_title('mean model (voigt + poly.)')
        axes[1].set_title('noise model (GP)')
        axes[2].set_title('full model')
        fig.tight_layout()
        fig.savefig(path.join(plot_path, '{}_mcmc_fits.png'.format(filebase)), dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------
        # --------------------------------------------------------------------
        # corner plot
        fig = corner.corner(sampler.flatchain[::10, :],
                            labels=[x.split(':')[1] for x in gp.get_parameter_names()])
        fig.savefig(path.join(plot_path, '{}_corner.png'.format(filebase)), dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------
        # object naming stuff
        # NOTE(review): group_id stays a string in this branch but is the int
        # 0 in the else branch, while the table column dtype is int --
        # confirm add_row coerces numeric strings as intended.
        if '-' in object_name:
            group_id,smoh_index,*_ = object_name.split('-')
            smoh_index = int(smoh_index)
        else:
            group_id = 0
            smoh_index = 0
        # Now estimate raw radial velocity and precision:
        # flatchain column 3 is the line centroid x0
        x0 = sampler.flatchain[:, 3] * u.angstrom
        MAD = np.median(np.abs(x0 - np.median(x0)))
        centroid = np.median(x0)
        centroid_err = 1.48 * MAD # convert to stddev
        # compute shifts for sky lines, uncertainty, quality flag
        width = 100. # window size in angstroms, centered on line
        absorp_emiss = 1. # all sky lines are emission lines
        wave_shifts = np.full(len(sky_lines), np.nan) * u.angstrom
        for j,sky_line in enumerate(sky_lines):
            mask = (spec['wavelength'] > (sky_line-width/2)) & (spec['wavelength'] < (sky_line+width/2))
            flux_data = spec['background_flux'][mask]
            ivar_data = spec['background_ivar'][mask]
            wave_data = spec['wavelength'][mask]
            _idx = wave_data.argsort()
            wave_data = wave_data[_idx]
            flux_data = flux_data[_idx]
            ivar_data = ivar_data[_idx]
            err_data = 1/np.sqrt(ivar_data)
            gp = fit_spec_line_GP(wave_data, flux_data, ivar_data,
                                  absorp_emiss=absorp_emiss,
                                  fwhm_L0=2., std_G0=1., n_bg_coef=2)
            pars = gp.get_parameter_dict()
            dlam = sky_line - pars['mean:x0']
            # reject fits with implausible widths or amplitudes (empirical
            # thresholds); only good fits contribute a recorded shift
            if ((pars['mean:ln_fwhm_L'] < -0.5 and pars['mean:ln_std_G'] < (-0.5)) or
                pars['mean:ln_amp'] > 10. or pars['mean:ln_amp'] < 3.5):
                title = 'fucked'
            else:
                title = '{:.2f}'.format(pars['mean:ln_amp'])
                wave_shifts[j] = dlam * u.angstrom
            # Make the maximum likelihood prediction
            wave_grid = np.linspace(wave_data.min(), wave_data.max(), 256)
            mu, var = gp.predict(flux_data, wave_grid, return_var=True)
            std = np.sqrt(var)
            # ----------------------------------------------------------------
            # Plot the fit and data
            fig,ax = plt.subplots(1,1)
            # data
            ax.plot(wave_data, flux_data, drawstyle='steps-mid', marker='')
            ax.errorbar(wave_data, flux_data, err_data,
                        marker='', ls='none', ecolor='#666666', zorder=-10)
            # full GP model
            ax.plot(wave_grid, mu, color=gp_color, marker='')
            ax.fill_between(wave_grid, mu+std, mu-std,
                            color=gp_color, alpha=0.3, edgecolor="none")
            ax.set_title(title)
            fig.tight_layout()
            fig.savefig(path.join(plot_path, '{}_maxlike_sky_{:.0f}.png'
                        .format(filebase, sky_line)), dpi=256)
            plt.close(fig)
        # some quality checks on sky velocity shifts
        # shifts larger than 5 Angstroms are treated as unphysical
        filter_ = np.isnan(wave_shifts) | (np.abs(wave_shifts) > (5*u.angstrom))
        wave_shifts[filter_] = np.nan * u.angstrom
        # compute barycenter velocity given coordinates of where the
        # telescope was pointing
        t = Time(hdr['JD'], format='jd', scale='utc')
        sc = coord.SkyCoord(ra=hdr['RA'], dec=hdr['DEC'],
                            unit=(u.hourangle, u.degree))
        vbary = bary_vel_corr(t, sc, location=kitt_peak)
        # sky shift flag is:
        # - 0 if both lines were fit
        # - 1 if only 6300
        # - 2 if neither lines are there
        # NOTE(review): there are three sky lines, so this flag actually
        # counts how many of the three shifts are NaN -- comment above looks
        # stale; confirm intended meaning.
        sky_flag = np.isnan(wave_shifts).sum()
        shift = np.nanmedian(wave_shifts, axis=-1)
        if np.isnan(shift):
            shift = 0. * u.angstrom
        rv = (centroid + shift - Halpha) / Halpha * c.to(u.km/u.s) + vbary
        rv_err = centroid_err / Halpha * c.to(u.km/u.s)
        # add a 10 km/s systematic floor in quadrature
        rv_err = np.sqrt(rv_err**2 + (10.*u.km/u.s)**2)
        # convert ra,dec to quantities
        ra = sc.ra.degree * u.deg
        dec = sc.dec.degree * u.deg
        velocity_tbl.add_row(dict(object_name=object_name, group_id=group_id,
                                  smoh_index=smoh_index,
                                  ra=ra, dec=dec, secz=hdr['AIRMASS'],
                                  filename=file_path,
                                  Ha_centroid=centroid,
                                  Ha_centroid_err=centroid_err,
                                  bary_rv_shift=vbary,
                                  sky_shift_flag=sky_flag,
                                  sky_wave_shift=wave_shifts,
                                  rv=rv,
                                  rv_err=rv_err))
        logger.info('{} [{}]: x0={x0:.3f} σ={err:.3f} rv={rv:.3f}'
                    .format(object_name, filebase, x0=centroid,
                            err=centroid_err, rv=rv))
        # write after every object so partial progress survives a crash
        velocity_tbl.write(table_path, format='fits', overwrite=True)
if __name__ == "__main__":
    import logging
    from argparse import ArgumentParser
    # Build the command-line interface.
    arg_parser = ArgumentParser(description="")
    loudness = arg_parser.add_mutually_exclusive_group()
    loudness.add_argument('-v', '--verbose', action='count',
                          default=0, dest='verbosity')
    loudness.add_argument('-q', '--quiet', action='count',
                          default=0, dest='quietness')
    arg_parser.add_argument('-s', '--seed', dest='seed', default=None,
                            type=int, help='Random number generator seed.')
    arg_parser.add_argument('-o', '--overwrite', action='store_true',
                            dest='overwrite', default=False,
                            help='Destroy everything.')
    arg_parser.add_argument('-p', '--path', dest='night_path', required=True,
                            help='Path to a PROCESSED night or chunk of data to '
                                 'process. Or, path to a specific comp file.')
    parsed = arg_parser.parse_args()
    # Map the -v/-q counts onto a logger level; the default is INFO.
    if parsed.verbosity >= 2:
        logger.setLevel(1)
    elif parsed.verbosity == 1:
        logger.setLevel(logging.DEBUG)
    elif parsed.quietness >= 2:
        logger.setLevel(logging.ERROR)
    elif parsed.quietness == 1:
        logger.setLevel(logging.WARNING)
    else:
        logger.setLevel(logging.INFO)
    if parsed.seed is not None:
        np.random.seed(parsed.seed)
    main(night_path=parsed.night_path, overwrite=parsed.overwrite)
Fix PEP 8 issues and add multiprocessing pool support.
# coding: utf-8
"""
TODO:
- n1.0073 Halpha is emission
"""
# Standard library
import os
from os import path
# Third-party
import astropy.coordinates as coord
from astropy.time import Time
from astropy.constants import c
import astropy.units as u
from astropy.io import fits
from astropy.table import Table, Column
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('apw-notebook')
import emcee
import corner
import schwimmbad
from schwimmbad import choose_pool
# Project
from comoving_rv.log import logger
from comoving_rv.longslit import GlobImageFileCollection
from comoving_rv.longslit.fitting import fit_spec_line_GP, gp_to_fit_pars
from comoving_rv.longslit.models import voigt_polynomial
from comoving_rv.velocity import bary_vel_corr, kitt_peak
def log_probability(params, gp, flux_data):
    """Log-posterior used by emcee: GP log-likelihood plus parameter priors.

    Out-of-bounds or non-finite evaluations return -inf.
    """
    gp.set_parameter_vector(params)
    prior = gp.log_prior()
    if not np.isfinite(prior):
        return -np.inf
    # HACK: Gaussian prior on log(rho)
    var = 1.
    prior = prior - 0.5*(params[1]-1)**2/var - 0.5*np.log(2*np.pi*var)
    # Hard lower bounds on parameters 4 and 5.
    if (params[4] < -10.) or (params[5] < -10.):
        return -np.inf
    likelihood = gp.log_likelihood(flux_data)
    if not np.isfinite(likelihood):
        return -np.inf
    return likelihood + prior
def main(night_path, overwrite=False, pool=None):
    """Fit H-alpha and [OI] sky lines in every 1D OBJECT spectrum and append
    radial-velocity measurements to ``<root>/velocity.fits``.

    Parameters
    ----------
    night_path : str
        Path to a processed night directory, or to a single 1D spectrum file.
    overwrite : bool, optional
        If True, objects already in the velocity table are re-fit (their old
        rows are removed first).
    pool : schwimmbad pool, optional
        Pool passed to emcee; defaults to a SerialPool. The pool is closed
        before this function returns.
    """
    if pool is None:
        pool = schwimmbad.SerialPool()
    night_path = path.realpath(path.expanduser(night_path))
    if not path.exists(night_path):
        raise IOError("Path '{}' doesn't exist".format(night_path))
    # Accept either a directory full of spectra or one specific file.
    if path.isdir(night_path):
        data_file = None
        logger.info("Reading data from path: {}".format(night_path))
    elif path.isfile(night_path):
        data_file = night_path
        base_path, name = path.split(night_path)
        night_path = base_path
        logger.info("Reading file: {}".format(data_file))
    else:
        raise RuntimeError("how?!")
    plot_path = path.join(night_path, 'plots')
    # The output table lives one level above the night directory.
    root_path = path.abspath(path.join(night_path, '..'))
    table_path = path.join(root_path, 'velocity.fits')
    # air wavelength of Halpha -- wavelength calibration from comp lamp is done
    # at air wavelengths, so this is where Halpha should be, right?
    Halpha = 6562.8 * u.angstrom
    # [OI] emission lines -- wavelengths from:
    # http://physics.nist.gov/PhysRefData/ASD/lines_form.html
    sky_lines = [5577.3387, 6300.304, 6363.776]
    if not path.exists(plot_path):
        os.makedirs(plot_path, exist_ok=True)
    if not path.exists(table_path):
        logger.debug('Creating table at {}'.format(table_path))
        # BUGFIX: 'sky_wave_shift' shape was (len(sky_lines,)) -- the comma
        # sat inside len(), producing a bare int instead of a 1-tuple.
        tbl_init = [Column(name='object_name', dtype='|S30', data=[], length=0),
                    Column(name='group_id', dtype=int, length=0),
                    Column(name='smoh_index', dtype=int, length=0),
                    Column(name='ra', dtype=float, unit=u.degree, length=0),
                    Column(name='dec', dtype=float, unit=u.degree, length=0),
                    Column(name='secz', dtype=float, length=0),
                    Column(name='filename', dtype='|S128', data=[], length=0),
                    Column(name='Ha_centroid', dtype=float, unit=u.angstrom, length=0),
                    Column(name='Ha_centroid_err', dtype=float, unit=u.angstrom, length=0),
                    Column(name='bary_rv_shift', dtype=float, unit=u.km/u.s, length=0),
                    Column(name='sky_shift_flag', dtype=int, length=0),
                    Column(name='sky_wave_shift', dtype=float, unit=u.angstrom, length=0, shape=(len(sky_lines),)),
                    Column(name='rv', dtype=float, unit=u.km/u.s, length=0),
                    Column(name='rv_err', dtype=float, unit=u.km/u.s, length=0)]
        velocity_tbl = Table(tbl_init)
        velocity_tbl.write(table_path, format='fits')
        logger.debug('Table: {}'.format(velocity_tbl.colnames))
    else:
        logger.debug('Table exists, reading ({})'.format(table_path))
        velocity_tbl = Table.read(table_path, format='fits')
    # Build the list of files to process.
    if data_file is None:
        ic = GlobImageFileCollection(night_path, glob_include='1d_*')
        files = ic.files_filtered(imagetyp='OBJECT')
    else:
        files = [data_file]
    for filename in files:
        file_path = path.join(night_path, filename)
        filebase,ext = path.splitext(filename)
        # read FITS header
        hdr = fits.getheader(file_path, 0)
        object_name = hdr['OBJECT']
        # HACK: for testing
        if 'HIP' not in object_name:
            continue
        if object_name in velocity_tbl['object_name']:
            if overwrite:
                logger.debug('Object {} already done - overwriting!'
                             .format(object_name))
                idx, = np.where(velocity_tbl['object_name'] == object_name)
                for i in idx:
                    velocity_tbl.remove_row(i)
            else:
                logger.debug('Object {} already done.'.format(object_name))
                continue
        # read the spectrum data and get wavelength solution
        spec = Table.read(file_path)
        # Define data arrays to be used in fitting below
        near_Ha = (np.isfinite(spec['wavelength']) &
                   (spec['wavelength'] > 6510) & (spec['wavelength'] < 6615))
        flux_data = np.array(spec['source_flux'][near_Ha])
        ivar_data = np.array(spec['source_ivar'][near_Ha])
        wave_data = np.array(spec['wavelength'][near_Ha])
        # sort by wavelength so the GP sees an ordered grid
        _idx = wave_data.argsort()
        wave_data = wave_data[_idx]
        flux_data = flux_data[_idx]
        ivar_data = ivar_data[_idx]
        err_data = 1/np.sqrt(ivar_data)
        # grid of wavelengths for plotting
        wave_grid = np.linspace(wave_data.min(), wave_data.max(), 256)
        # start by doing a maximum likelihood GP fit
        # TODO: figure out if it's emission or absorption...for now just assume
        # absorption
        absorp_emiss = -1.
        gp = fit_spec_line_GP(wave_data, flux_data, ivar_data,
                              absorp_emiss=absorp_emiss,
                              fwhm_L0=4., std_G0=1., n_bg_coef=2)
        if gp.get_parameter_dict()['mean:ln_amp'] < 0.5: # MAGIC NUMBER
            # try again with emission line
            logger.error('absorption line has tiny amplitude! did '
                         'auto-determination of absorption/emission fail?')
            # TODO: what now?
            continue
        fit_pars = gp_to_fit_pars(gp, absorp_emiss)
        # Make the maximum likelihood prediction
        mu, var = gp.predict(flux_data, wave_grid, return_var=True)
        std = np.sqrt(var)
        # ------------------------------------------------------------------------
        # Plot the maximum likelihood model
        fig,ax = plt.subplots()
        # data
        ax.plot(wave_data, flux_data, drawstyle='steps-mid', marker='')
        ax.errorbar(wave_data, flux_data, err_data,
                    marker='', ls='none', ecolor='#666666', zorder=-10)
        # mean model
        ax.plot(wave_grid, voigt_polynomial(wave_grid, **fit_pars),
                marker='', alpha=0.5)
        # full GP model
        gp_color = "#ff7f0e"
        ax.plot(wave_grid, mu, color=gp_color, marker='')
        ax.fill_between(wave_grid, mu+std, mu-std, color=gp_color,
                        alpha=0.3, edgecolor="none")
        ax.set_xlabel(r'wavelength [$\AA$]')
        ax.set_ylabel('flux')
        fig.tight_layout()
        fig.savefig(path.join(plot_path, '{}_maxlike.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # ------------------------------------------------------------------------
        # Run `emcee` instead to sample over GP model parameters:
        # if the Gaussian width collapsed in the ML fit, freeze it
        if fit_pars['std_G'] < 1E-2:
            gp.freeze_parameter('mean:ln_std_G')
        initial = np.array(gp.get_parameter_vector())
        # keep the starting point inside the hard bounds enforced by
        # log_probability (rejects params[4] or params[5] < -10)
        if initial[4] < -10:
            initial[4] = -8.
        if initial[5] < -10:
            initial[5] = -8.
        ndim, nwalkers = len(initial), 64
        sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability,
                                        pool=pool, args=(gp, flux_data))
        logger.debug("Running burn-in...")
        p0 = initial + 1e-6 * np.random.randn(nwalkers, ndim)
        p0, lp, _ = sampler.run_mcmc(p0, 256)
        logger.debug("Running 2nd burn-in...")
        sampler.reset()
        # restart all walkers in a tight ball around the best burn-in sample
        p0 = p0[lp.argmax()] + 1e-3 * np.random.randn(nwalkers, ndim)
        p0, lp, _ = sampler.run_mcmc(p0, 512)
        logger.debug("Running production...")
        sampler.reset()
        pos, lp, _ = sampler.run_mcmc(p0, 512)
        # --------------------------------------------------------------------
        # plot MCMC traces
        fig,axes = plt.subplots(2,4,figsize=(18,6))
        for i in range(sampler.dim):
            for walker in sampler.chain[...,i]:
                axes.flat[i].plot(walker, marker='',
                                  drawstyle='steps-mid', alpha=0.2)
            axes.flat[i].set_title(gp.get_parameter_names()[i], fontsize=12)
        fig.tight_layout()
        fig.savefig(path.join(plot_path, '{}_mcmc_trace.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------
        # --------------------------------------------------------------------
        # plot samples
        fig,axes = plt.subplots(3, 1, figsize=(6,9), sharex=True)
        samples = sampler.flatchain
        # overplot 32 random posterior samples
        for s in samples[np.random.randint(len(samples), size=32)]:
            gp.set_parameter_vector(s)
            fit_pars = gp_to_fit_pars(gp, absorp_emiss)
            _mean_model = voigt_polynomial(wave_grid, **fit_pars)
            axes[0].plot(wave_grid, _mean_model,
                         marker='', alpha=0.25, color='#3182bd', zorder=-10)
            mu = gp.predict(flux_data, wave_grid, return_cov=False)
            axes[1].plot(wave_grid, mu-_mean_model, color=gp_color,
                         alpha=0.25, marker='')
            axes[2].plot(wave_grid, mu, color='#756bb1',
                         alpha=0.25, marker='')
        axes[2].plot(wave_data, flux_data, drawstyle='steps-mid',
                     marker='', zorder=-6)
        axes[2].errorbar(wave_data, flux_data, err_data,
                         marker='', ls='none', ecolor='#666666', zorder=-10)
        axes[2].set_ylabel('flux')
        axes[2].set_xlabel(r'wavelength [$\AA$]')
        axes[0].set_title('mean model (voigt + poly.)')
        axes[1].set_title('noise model (GP)')
        axes[2].set_title('full model')
        fig.tight_layout()
        fig.savefig(path.join(plot_path, '{}_mcmc_fits.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------
        # --------------------------------------------------------------------
        # corner plot
        fig = corner.corner(sampler.flatchain[::10, :],
                            labels=[x.split(':')[1]
                                    for x in gp.get_parameter_names()])
        fig.savefig(path.join(plot_path, '{}_corner.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------
        # object naming stuff
        # NOTE(review): group_id stays a string in this branch but is the int
        # 0 in the else branch, while the table column dtype is int --
        # confirm add_row coerces numeric strings as intended.
        if '-' in object_name:
            group_id,smoh_index,*_ = object_name.split('-')
            smoh_index = int(smoh_index)
        else:
            group_id = 0
            smoh_index = 0
        # Now estimate raw radial velocity and precision:
        # flatchain column 3 is the line centroid x0
        x0 = sampler.flatchain[:, 3] * u.angstrom
        MAD = np.median(np.abs(x0 - np.median(x0)))
        centroid = np.median(x0)
        centroid_err = 1.48 * MAD # convert to stddev
        # compute shifts for sky lines, uncertainty, quality flag
        width = 100. # window size in angstroms, centered on line
        absorp_emiss = 1. # all sky lines are emission lines
        wave_shifts = np.full(len(sky_lines), np.nan) * u.angstrom
        for j,sky_line in enumerate(sky_lines):
            mask = ((spec['wavelength'] > (sky_line-width/2)) &
                    (spec['wavelength'] < (sky_line+width/2)))
            flux_data = spec['background_flux'][mask]
            ivar_data = spec['background_ivar'][mask]
            wave_data = spec['wavelength'][mask]
            _idx = wave_data.argsort()
            wave_data = wave_data[_idx]
            flux_data = flux_data[_idx]
            ivar_data = ivar_data[_idx]
            err_data = 1/np.sqrt(ivar_data)
            gp = fit_spec_line_GP(wave_data, flux_data, ivar_data,
                                  absorp_emiss=absorp_emiss,
                                  fwhm_L0=2., std_G0=1., n_bg_coef=2)
            pars = gp.get_parameter_dict()
            dlam = sky_line - pars['mean:x0']
            # reject fits with implausible widths or amplitudes (empirical
            # thresholds); only good fits contribute a recorded shift
            if ((pars['mean:ln_fwhm_L'] < -0.5 and pars['mean:ln_std_G'] < (-0.5)) or
                pars['mean:ln_amp'] > 10. or pars['mean:ln_amp'] < 3.5):
                title = 'fucked'
            else:
                title = '{:.2f}'.format(pars['mean:ln_amp'])
                wave_shifts[j] = dlam * u.angstrom
            # Make the maximum likelihood prediction
            wave_grid = np.linspace(wave_data.min(), wave_data.max(), 256)
            mu, var = gp.predict(flux_data, wave_grid, return_var=True)
            std = np.sqrt(var)
            # ----------------------------------------------------------------
            # Plot the fit and data
            fig,ax = plt.subplots(1,1)
            # data
            ax.plot(wave_data, flux_data, drawstyle='steps-mid', marker='')
            ax.errorbar(wave_data, flux_data, err_data,
                        marker='', ls='none', ecolor='#666666', zorder=-10)
            # full GP model
            ax.plot(wave_grid, mu, color=gp_color, marker='')
            ax.fill_between(wave_grid, mu+std, mu-std,
                            color=gp_color, alpha=0.3, edgecolor="none")
            ax.set_title(title)
            fig.tight_layout()
            fig.savefig(path.join(plot_path, '{}_maxlike_sky_{:.0f}.png'
                        .format(filebase, sky_line)), dpi=256)
            plt.close(fig)
        # some quality checks on sky velocity shifts
        # shifts larger than 5 Angstroms are treated as unphysical
        filter_ = np.isnan(wave_shifts) | (np.abs(wave_shifts) > (5*u.angstrom))
        wave_shifts[filter_] = np.nan * u.angstrom
        # compute barycenter velocity given coordinates of where the
        # telescope was pointing
        t = Time(hdr['JD'], format='jd', scale='utc')
        sc = coord.SkyCoord(ra=hdr['RA'], dec=hdr['DEC'],
                            unit=(u.hourangle, u.degree))
        vbary = bary_vel_corr(t, sc, location=kitt_peak)
        # sky shift flag counts how many of the sky-line shifts are NaN
        # (0 means every line was fit successfully)
        sky_flag = np.isnan(wave_shifts).sum()
        shift = np.nanmedian(wave_shifts, axis=-1)
        if np.isnan(shift):
            shift = 0. * u.angstrom
        rv = (centroid + shift - Halpha) / Halpha * c.to(u.km/u.s) + vbary
        rv_err = centroid_err / Halpha * c.to(u.km/u.s)
        # add a 10 km/s systematic floor in quadrature
        rv_err = np.sqrt(rv_err**2 + (10.*u.km/u.s)**2)
        # convert ra,dec to quantities
        ra = sc.ra.degree * u.deg
        dec = sc.dec.degree * u.deg
        velocity_tbl.add_row(dict(object_name=object_name, group_id=group_id,
                                  smoh_index=smoh_index,
                                  ra=ra, dec=dec, secz=hdr['AIRMASS'],
                                  filename=file_path,
                                  Ha_centroid=centroid,
                                  Ha_centroid_err=centroid_err,
                                  bary_rv_shift=vbary,
                                  sky_shift_flag=sky_flag,
                                  sky_wave_shift=wave_shifts,
                                  rv=rv,
                                  rv_err=rv_err))
        logger.info('{} [{}]: x0={x0:.3f} σ={err:.3f} rv={rv:.3f}'
                    .format(object_name, filebase, x0=centroid,
                            err=centroid_err, rv=rv))
        # write after every object so partial progress survives a crash
        velocity_tbl.write(table_path, format='fits', overwrite=True)
    # BUGFIX: the pool was previously closed inside the per-file loop, so a
    # night with more than one object would hand emcee a closed pool on the
    # second iteration. Close it exactly once, after all files are processed.
    pool.close()
if __name__ == "__main__":
    import logging
    from argparse import ArgumentParser
    # Build the command-line interface.
    arg_parser = ArgumentParser(description="")
    loudness = arg_parser.add_mutually_exclusive_group()
    loudness.add_argument('-v', '--verbose', action='count',
                          default=0, dest='verbosity')
    loudness.add_argument('-q', '--quiet', action='count',
                          default=0, dest='quietness')
    arg_parser.add_argument('-s', '--seed', dest='seed', default=None,
                            type=int, help='Random number generator seed.')
    arg_parser.add_argument('-o', '--overwrite', action='store_true',
                            dest='overwrite', default=False,
                            help='Destroy everything.')
    arg_parser.add_argument('-p', '--path', dest='night_path', required=True,
                            help='Path to a PROCESSED night or chunk of data to '
                                 'process. Or, path to a specific comp file.')
    # multiprocessing options
    pool_group = arg_parser.add_mutually_exclusive_group()
    pool_group.add_argument('--ncores', dest='n_cores', default=1,
                            type=int, help='Number of CPU cores to use.')
    pool_group.add_argument('--mpi', dest='mpi', default=False,
                            action='store_true', help='Run with MPI.')
    parsed = arg_parser.parse_args()
    # Map the -v/-q counts onto a logger level; the default is INFO.
    if parsed.verbosity >= 2:
        logger.setLevel(1)
    elif parsed.verbosity == 1:
        logger.setLevel(logging.DEBUG)
    elif parsed.quietness >= 2:
        logger.setLevel(logging.ERROR)
    elif parsed.quietness == 1:
        logger.setLevel(logging.WARNING)
    else:
        logger.setLevel(logging.INFO)
    if parsed.seed is not None:
        np.random.seed(parsed.seed)
    # Select a serial, multiprocessing, or MPI pool based on the flags.
    pool = choose_pool(mpi=parsed.mpi, processes=parsed.n_cores)
    logger.info("Using pool: {}".format(pool.__class__))
    main(night_path=parsed.night_path, overwrite=parsed.overwrite, pool=pool)
|
import argparse
import os
import stat
import numpy as np
import simulation.model.eval
import simulation.model.job
import simulation.model.constants
import simulation.util.data_base
import simulation.constants
import util.options
import util.io.fs
import util.batch.universal.system
import util.index_database.general
#TODO check read only for finished jobs
#TODO check cache option files available
#TODO at multiple runs check if right successor
# stderr noise known to be harmless, to be ignored when scanning job output
# for real errors (RDMA library warnings and a missing-libgcc message from
# cpuinfo).
ERROR_IGNORE_LIST = ("librdmacm: Fatal: unable to get RDMA device list"+os.linesep, "librdmacm: Warning: couldn't read ABI version."+os.linesep, "librdmacm: Warning: assuming: 4"+os.linesep, 'cpuinfo: error while loading shared libraries: libgcc_s.so.1: cannot open shared object file: No such file or directory'+os.linesep)
def check_db_entry_integrity_spinup(spinup_dir, is_spinup):
    """Check the integrity of all run directories below ``spinup_dir``.

    Inspects each run dir's job file, job options, petsc input/output files
    and the finished marker, and prints a message for every inconsistency
    found. A broken run dir aborts the check of the remaining run dirs.

    spinup_dir: directory containing the (sorted) run dirs to check.
    is_spinup: True for a spinup dir (run index 0 has no tracer input),
        False for a (partial) derivative dir (every run needs tracer input).
    """
    run_dirs = util.io.fs.get_dirs(spinup_dir, use_absolute_filenames=True)
    run_dirs.sort()
    n = len(run_dirs)
    if n == 0:
        print('No run dirs in ' + spinup_dir + '.')
    else:
        ## check run dirs
        for run_dir_index in range(n):
            run_dir = run_dirs[run_dir_index]
            ## check if dirs in run dir exist (a run dir must only contain files)
            if len(util.io.fs.get_dirs(run_dir)) > 0:
                print('Directories found in {}.'.format(run_dir))
            ## check job file
            try:
                with simulation.model.job.Metos3D_Job(run_dir, force_load=True) as job:
                    ## check if started
                    if not job.is_started():
                        print('Job in {} is not started!'.format(run_dir))
                    try:
                        is_running = job.is_running()
                    except util.batch.universal.system.JobError as e:
                        print(e)
                        break
                    job_output_file = job.output_file
                    try:
                        job_id = job.id
                    except Exception:
                        print('Job in {} is not started!'.format(run_dir))
                        break
                    ## check read only
                    if not job.options.is_read_only():
                        print('Job option file in {} is writeable!'.format(run_dir))
                ## check options
                options_file = os.path.join(run_dir, 'job_options.hdf5')
                with util.options.OptionsFile(options_file, replace_environment_vars_at_set=False, replace_environment_vars_at_get=False) as options:
                    ## check files and dirs
                    file_entry_prefix = '${{{}}}'.format(simulation.constants.SIMULATION_OUTPUT_DIR_ENV_NAME)
                    # run 0 of a spinup starts from default initial values,
                    # every other run needs a tracer input
                    should_have_tracer_input = not is_spinup or run_dir_index > 0
                    for file_key, should_exists in [('/job/id_file', True), ('/job/option_file', True), ('/job/output_file', True), ('/job/finished_file', True), ('/job/unfinished_file', True), ('/model/tracer_input_dir', should_have_tracer_input), ('/metos3d/tracer_input_dir', should_have_tracer_input), ('/metos3d/tracer_output_dir', True), ('/metos3d/output_dir', True), ('/metos3d/option_file', True)]:
                        try:
                            value = options[file_key]
                        except KeyError:
                            if should_exists:
                                print('Job option {} in {} is missing.'.format(file_key, run_dir))
                        else:
                            if should_exists:
                                if not value.startswith(file_entry_prefix):
                                    print('Job option {} in {} is not okay. It should start with {} but it is {}.'.format(file_key, run_dir, file_entry_prefix, value))
                            else:
                                print('Job option {} in {} should not exist.'.format(file_key, run_dir))
                    ## check tracer input dir
                    if should_have_tracer_input:
                        try:
                            if not simulation.model.constants.DATABASE_SPINUP_DIRNAME in options['/model/tracer_input_dir']:
                                print('Model tracer input dir {} in job file in {} is not a spinup run dir.'.format(options['/model/tracer_input_dir'], run_dir))
                        except KeyError:
                            pass
                        try:
                            if options['/metos3d/tracer_input_dir'] != options['/metos3d/output_dir']:
                                # BUG FIX: the message previously referenced the
                                # undefined name correct_metos3d_tracer_input_dir
                                # and raised a NameError whenever this branch fired.
                                print('Metos3d tracer input dir {} is not the expected {} in job option file in {}.'.format(options['/metos3d/tracer_input_dir'], options['/metos3d/output_dir'], run_dir))
                        except KeyError:
                            pass
                        try:
                            if options['/metos3d/tracer_input_dir'] != options['/metos3d/tracer_output_dir']:
                                # message fix: three arguments were passed but only
                                # two placeholders existed, silently dropping one
                                print('Metos3D tracer input dir {} and tracer output dir {} in job file in {} are not the same.'.format(options['/metos3d/tracer_input_dir'], options['/metos3d/tracer_output_dir'], run_dir))
                        except KeyError:
                            pass
                    ## check tracer input filenames
                    try:
                        options['/metos3d/input_filenames']
                    except KeyError:
                        if should_have_tracer_input:
                            print('Metos3D tracer input filenames are missing in job file in {}.'.format(run_dir))
                    else:
                        if not should_have_tracer_input:
                            print('Metos3D tracer input filenames found in job file in {} were not expected.'.format(run_dir))
            except (OSError, IOError):
                print('Job file in {} is not okay.'.format(run_dir))
                break
            ## check petsc input files
            for input_filename in ('dop_input.petsc', 'po4_input.petsc'):
                input_file = os.path.join(run_dir, input_filename)
                if run_dir_index == 0 and is_spinup:
                    # the first spinup run must not have any tracer input
                    if os.path.exists(input_file) or os.path.lexists(input_file):
                        print('Petsc input files for run index == 0 found in {}!'.format(run_dir))
                        break
                else:
                    if not os.path.lexists(input_file):
                        if is_spinup:
                            print('No petsc input files for run index > 0 found in {}!'.format(run_dir))
                        else:
                            print('No petsc input files for derivative run found in {}!'.format(run_dir))
                        break
                    elif not os.path.exists(input_file):
                        # lexists but not exists -> dangling symlink
                        print('Link for petsc input files for run index > 0 found in {} broken!'.format(run_dir))
                        break
            ## check if petsc output files exist
            petsc_output_files_exist = []
            for petsc_output_filename in ('dop_output.petsc', 'po4_output.petsc'):
                petsc_output_file = os.path.join(run_dir, petsc_output_filename)
                petsc_output_files_exist.append(os.path.exists(petsc_output_file))
            # output files may only be missing for the last run while it is still running
            if not np.all(petsc_output_files_exist) and (run_dir_index != n-1 or not is_running):
                if run_dir_index != n-1:
                    print('Petsc output files in {} do not exist, but it has not the last run index!'.format(run_dir))
                else:
                    print('Petsc output files in {} do not exist, but the job is not started or finished!'.format(run_dir))
                break
            ## check finish file
            finished_file = os.path.join(run_dir, 'finished.txt')
            if np.any(petsc_output_files_exist) and not os.path.exists(finished_file):
                print('Petsc output files in {} exist but finished file does not exist!'.format(run_dir))
                break
            if is_running:
                ## check if really running
                try:
                    is_really_running = util.batch.universal.system.BATCH_SYSTEM.is_job_running(job_id)
                    if not is_really_running:
                        print('Job in {} should run but it does not!'.format(run_dir))
                        break
                except ConnectionError:
                    print('Cannot connect to job server. Please check job id {}'.format(job_id))
                ## check if petsc output files exist
                if np.any(petsc_output_files_exist):
                    print('Job is running but petsc output files in {} do exist!'.format(run_dir))
                    break
            else:
                ## check exit code
                with simulation.model.job.Metos3D_Job(run_dir, force_load=True) as job:
                    exit_code = job.exit_code
                if exit_code != 0:
                    print('Job in {} has exit code {}!'.format(run_dir, exit_code))
                ## check job output file
                if os.path.exists(job_output_file):
                    try:
                        with open(job_output_file) as output_file_object:
                            for line in output_file_object:
                                line_lower = line.lower()
                                if ('error' in line_lower and not 'error_path' in line_lower) or 'warning' in line_lower or 'fatal' in line_lower or 'permission denied' in line_lower:
                                    if line not in ERROR_IGNORE_LIST:
                                        print('There are errors in the job output file {}: {}.'.format(job_output_file, line))
                                        break
                    except Exception:
                        # narrowed from a bare except; a bare except would also
                        # swallow KeyboardInterrupt/SystemExit
                        print('The job output file {} could not be opened!'.format(job_output_file))
                else:
                    print('Job output file {} does not exist!'.format(job_output_file))
                with simulation.model.job.Metos3D_Job(run_dir, force_load=True) as job:
                    try:
                        job.last_year
                    except Exception:
                        print('The job output file {} format is not correct! Last year could not be computed'.format(job_output_file))
                    try:
                        job.last_tolerance
                    except Exception:
                        print('The job output file {} format is not correct! Last tolerance could not be computed'.format(job_output_file))
def check_db_entry_integrity(model_name='dop_po4', time_step=1, parameter_set_dirs_to_check=None, check_for_same_parameters=True):
    """Check the integrity of parameter-set database entries on disk.

    For every parameter set dir this checks the spinup runs, the derivative
    runs for both finite-difference step sizes, the parameter file, cached
    WOD f/df values, value-cache option files and file permissions, printing
    a message for each inconsistency found.

    model_name: model whose database dir is inspected.
    time_step: time step sub-dir of the model dir.
    parameter_set_dirs_to_check: iterable of parameter set dirs, or None
        (also (None,)) to check all parameter sets of the model/time step.
    check_for_same_parameters: whether to compare each parameter vector
        against all other parameter sets for duplicates.
    """
    from simulation.model.constants import DATABASE_OUTPUT_DIR, DATABASE_MODEL_DIRNAME, DATABASE_TIME_STEP_DIRNAME, DATABASE_SPINUP_DIRNAME, DATABASE_DERIVATIVE_DIRNAME, JOB_OPTIONS_FILENAME, DATABASE_PARAMETERS_FILENAME
    from simulation.util.constants import CACHE_DIRNAME, WOD_F_FILENAME, WOD_DF_FILENAME
    # expected length of the WOD measurement vector
    wod_m = simulation.util.data_base.WOD().m
    model_dirname = DATABASE_MODEL_DIRNAME.format(model_name)
    model_dir = os.path.join(DATABASE_OUTPUT_DIR, model_dirname)
    time_step_dirname = DATABASE_TIME_STEP_DIRNAME.format(time_step)
    time_step_dir = os.path.join(model_dir, time_step_dirname)
    # finite difference step sizes used for derivative dirs
    df_step_sizes = [10**(-6), 10**(-7)]
    # (None,) is treated like None: check everything (see __main__ below)
    check_all_parameter_sets = parameter_set_dirs_to_check is None or (len(parameter_set_dirs_to_check) == 1 and parameter_set_dirs_to_check[0] is None)
    # comparing against others only makes sense with more than one set
    check_for_same_parameters = check_for_same_parameters and (check_all_parameter_sets or len(parameter_set_dirs_to_check) > 1)
    if check_all_parameter_sets or check_for_same_parameters:
        parameter_set_dirs_all = util.io.fs.get_dirs(time_step_dir, use_absolute_filenames=True)
    if check_all_parameter_sets:
        parameter_set_dirs_to_check = parameter_set_dirs_all
    for parameter_set_dir in parameter_set_dirs_to_check:
        print('Checking integrity of parameter set {}.'.format(parameter_set_dir))
        ## check spinup dir
        spinup_dir = os.path.join(parameter_set_dir, DATABASE_SPINUP_DIRNAME)
        check_db_entry_integrity_spinup(spinup_dir, True)
        ## check derivative dir
        for df_step_size in df_step_sizes:
            derivative_dir = os.path.join(parameter_set_dir, DATABASE_DERIVATIVE_DIRNAME.format(df_step_size))
            partial_derivative_dirs = util.io.fs.get_dirs(derivative_dir, use_absolute_filenames=True)
            for partial_derivative_dir in partial_derivative_dirs:
                check_db_entry_integrity_spinup(partial_derivative_dir, False)
        ## check for parameters
        p = np.loadtxt(os.path.join(parameter_set_dir, DATABASE_PARAMETERS_FILENAME))
        if not np.all(np.isfinite(p)):
            print('Parameters {} in set {} are not finite!'.format(p, parameter_set_dir))
        ## check for same parameters
        if check_for_same_parameters:
            for parameter_set_dir_i in parameter_set_dirs_all:
                if parameter_set_dir_i != parameter_set_dir:
                    p_i = np.loadtxt(os.path.join(parameter_set_dir_i, DATABASE_PARAMETERS_FILENAME))
                    # exact comparison is intended here (duplicate detection),
                    # not an approximate one:
                    # if np.allclose(p, p_i):
                    if np.all(p == p_i):
                        print('Parameter set {} and {} have same parameters!'.format(parameter_set_dir, parameter_set_dir_i))
        ## check WOD output (cached model-data misfit values; missing files are fine)
        f_wod_file = os.path.join(parameter_set_dir, CACHE_DIRNAME, WOD_F_FILENAME)
        try:
            f_wod = np.load(f_wod_file)
        except FileNotFoundError:
            f_wod = None
        if f_wod is not None:
            # f must be a vector with one entry per WOD measurement
            if f_wod.ndim != 1 or len(f_wod) != wod_m:
                print('Wod f file {} has wrong shape {}!'.format(f_wod_file, f_wod.shape))
        df_wod_file = os.path.join(parameter_set_dir, CACHE_DIRNAME, WOD_DF_FILENAME)
        try:
            df_wod = np.load(df_wod_file)
        except FileNotFoundError:
            df_wod = None
        if df_wod is not None:
            # df must be (measurements x parameters)
            if df_wod.ndim != 2 or len(df_wod) != wod_m or df_wod.shape[1] != len(p):
                print('Wod df file {} has wrong shape {}!'.format(df_wod_file, df_wod.shape))
        ## check value cache
        value_cache_option_files = util.io.fs.find_with_filename_pattern(parameter_set_dir, '*options.npy', exclude_dirs=True, use_absolute_filenames=True, recursive=True)
        for value_cache_option_file in value_cache_option_files:
            value_cache_option = np.load(value_cache_option_file)
            if not value_cache_option.ndim == 1:
                print('Value cache option {} has ndim {}!'.format(value_cache_option_file, value_cache_option.ndim))
            # presumably 3 for spinup options and 6 with derivative options
            # -- TODO confirm against the cache writer
            if not len(value_cache_option) in [3, 6]:
                print('Value cache option {} has len {}!'.format(value_cache_option_file, len(value_cache_option)))
        ## check file permissions (everything must be group readable)
        def check_file(file):
            # files must be readable by user and group
            permissions = os.stat(file)[stat.ST_MODE]
            if not (permissions & stat.S_IRUSR and permissions & stat.S_IRGRP):
                print('File {} is not readable!'.format(file))
        def check_dir(file):
            # dirs additionally need the executable bit to be traversable
            permissions = os.stat(file)[stat.ST_MODE]
            if not (permissions & stat.S_IRUSR and permissions & stat.S_IXUSR and permissions & stat.S_IRGRP and permissions & stat.S_IXGRP):
                print('Dir {} is not readable!'.format(file))
        util.io.fs.walk_all_in_dir(parameter_set_dir, check_file, check_dir, exclude_dir=False, topdown=True)
def check_db_integrity(model_name='dop_po4', time_step=1):
    """Check that the array db and the file db of the parameter database agree.

    Every index used in the array db must have an equal value in the file db;
    a message is printed for each missing or mismatching entry.

    model_name: kept for interface compatibility; currently unused here
        (the model is selected via model_options only).
    time_step: model time step passed to the model options.
    """
    print('Checking parameter database integrity.')
    model_options = {'time_step': time_step}
    m = simulation.model.eval.Model(model_options=model_options)
    array_db = m._parameter_db.array_db
    file_db = m._parameter_db.file_db
    for index in array_db.used_indices():
        v_a = array_db.get_value(index)
        try:
            v_f = file_db.get_value(index)
        except util.index_database.general.DatabaseIndexError:
            # message fix: was 'Array db hast value ... has not value their!'
            print('Array db has a value at index {} but file db has no value there!'.format(index))
        else:
            if not array_db.are_values_equal(v_a, v_f):
                print('Array db and file db value at index {} are not equal: {} != {}!'.format(index, v_a, v_f))
if __name__ == "__main__":
    ## command line interface: optionally restrict the check to one parameter set dir
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('-s', '--skip_same_parameter_check', action='store_true')
    argument_parser.add_argument('parameter_set_dir', nargs='?', default=None)
    parsed_arguments = argument_parser.parse_args()
    ## run check (time step is fixed to 1 here)
    time_step = 1
    # the whole-database check only makes sense when checking everything
    if parsed_arguments.parameter_set_dir is None:
        check_db_integrity(time_step=time_step)
    check_db_entry_integrity(
        time_step=time_step,
        parameter_set_dirs_to_check=(parsed_arguments.parameter_set_dir,),
        check_for_same_parameters=not parsed_arguments.skip_same_parameter_check)
    print('Check completed.')
MAINT: simulation.model.check_integrity: add todo: check right df step size
import argparse
import os
import stat
import numpy as np
import simulation.model.eval
import simulation.model.job
import simulation.model.constants
import simulation.util.data_base
import simulation.constants
import util.options
import util.io.fs
import util.batch.universal.system
import util.index_database.general
#TODO check read only for finished jobs
#TODO check cache option files available
#TODO at multiple runs check if right successor
#TODO check right df step size
ERROR_IGNORE_LIST = ("librdmacm: Fatal: unable to get RDMA device list"+os.linesep, "librdmacm: Warning: couldn't read ABI version."+os.linesep, "librdmacm: Warning: assuming: 4"+os.linesep, 'cpuinfo: error while loading shared libraries: libgcc_s.so.1: cannot open shared object file: No such file or directory'+os.linesep)
def check_db_entry_integrity_spinup(spinup_dir, is_spinup):
    """Check the integrity of all run directories below ``spinup_dir``.

    Inspects each run dir's job file, job options, petsc input/output files
    and the finished marker, and prints a message for every inconsistency
    found. A broken run dir aborts the check of the remaining run dirs.

    spinup_dir: directory containing the (sorted) run dirs to check.
    is_spinup: True for a spinup dir (run index 0 has no tracer input),
        False for a (partial) derivative dir (every run needs tracer input).
    """
    run_dirs = util.io.fs.get_dirs(spinup_dir, use_absolute_filenames=True)
    run_dirs.sort()
    n = len(run_dirs)
    if n == 0:
        print('No run dirs in ' + spinup_dir + '.')
    else:
        ## check run dirs
        for run_dir_index in range(n):
            run_dir = run_dirs[run_dir_index]
            ## check if dirs in run dir exist (a run dir must only contain files)
            if len(util.io.fs.get_dirs(run_dir)) > 0:
                print('Directories found in {}.'.format(run_dir))
            ## check job file
            try:
                with simulation.model.job.Metos3D_Job(run_dir, force_load=True) as job:
                    ## check if started
                    if not job.is_started():
                        print('Job in {} is not started!'.format(run_dir))
                    try:
                        is_running = job.is_running()
                    except util.batch.universal.system.JobError as e:
                        print(e)
                        break
                    job_output_file = job.output_file
                    try:
                        job_id = job.id
                    except Exception:
                        print('Job in {} is not started!'.format(run_dir))
                        break
                    ## check read only
                    if not job.options.is_read_only():
                        print('Job option file in {} is writeable!'.format(run_dir))
                ## check options
                options_file = os.path.join(run_dir, 'job_options.hdf5')
                with util.options.OptionsFile(options_file, replace_environment_vars_at_set=False, replace_environment_vars_at_get=False) as options:
                    ## check files and dirs
                    file_entry_prefix = '${{{}}}'.format(simulation.constants.SIMULATION_OUTPUT_DIR_ENV_NAME)
                    # run 0 of a spinup starts from default initial values,
                    # every other run needs a tracer input
                    should_have_tracer_input = not is_spinup or run_dir_index > 0
                    for file_key, should_exists in [('/job/id_file', True), ('/job/option_file', True), ('/job/output_file', True), ('/job/finished_file', True), ('/job/unfinished_file', True), ('/model/tracer_input_dir', should_have_tracer_input), ('/metos3d/tracer_input_dir', should_have_tracer_input), ('/metos3d/tracer_output_dir', True), ('/metos3d/output_dir', True), ('/metos3d/option_file', True)]:
                        try:
                            value = options[file_key]
                        except KeyError:
                            if should_exists:
                                print('Job option {} in {} is missing.'.format(file_key, run_dir))
                        else:
                            if should_exists:
                                if not value.startswith(file_entry_prefix):
                                    print('Job option {} in {} is not okay. It should start with {} but it is {}.'.format(file_key, run_dir, file_entry_prefix, value))
                            else:
                                print('Job option {} in {} should not exist.'.format(file_key, run_dir))
                    ## check tracer input dir
                    if should_have_tracer_input:
                        try:
                            if not simulation.model.constants.DATABASE_SPINUP_DIRNAME in options['/model/tracer_input_dir']:
                                print('Model tracer input dir {} in job file in {} is not a spinup run dir.'.format(options['/model/tracer_input_dir'], run_dir))
                        except KeyError:
                            pass
                        try:
                            if options['/metos3d/tracer_input_dir'] != options['/metos3d/output_dir']:
                                # BUG FIX: the message previously referenced the
                                # undefined name correct_metos3d_tracer_input_dir
                                # and raised a NameError whenever this branch fired.
                                print('Metos3d tracer input dir {} is not the expected {} in job option file in {}.'.format(options['/metos3d/tracer_input_dir'], options['/metos3d/output_dir'], run_dir))
                        except KeyError:
                            pass
                        try:
                            if options['/metos3d/tracer_input_dir'] != options['/metos3d/tracer_output_dir']:
                                # message fix: three arguments were passed but only
                                # two placeholders existed, silently dropping one
                                print('Metos3D tracer input dir {} and tracer output dir {} in job file in {} are not the same.'.format(options['/metos3d/tracer_input_dir'], options['/metos3d/tracer_output_dir'], run_dir))
                        except KeyError:
                            pass
                    ## check tracer input filenames
                    try:
                        options['/metos3d/input_filenames']
                    except KeyError:
                        if should_have_tracer_input:
                            print('Metos3D tracer input filenames are missing in job file in {}.'.format(run_dir))
                    else:
                        if not should_have_tracer_input:
                            print('Metos3D tracer input filenames found in job file in {} were not expected.'.format(run_dir))
            except (OSError, IOError):
                print('Job file in {} is not okay.'.format(run_dir))
                break
            ## check petsc input files
            for input_filename in ('dop_input.petsc', 'po4_input.petsc'):
                input_file = os.path.join(run_dir, input_filename)
                if run_dir_index == 0 and is_spinup:
                    # the first spinup run must not have any tracer input
                    if os.path.exists(input_file) or os.path.lexists(input_file):
                        print('Petsc input files for run index == 0 found in {}!'.format(run_dir))
                        break
                else:
                    if not os.path.lexists(input_file):
                        if is_spinup:
                            print('No petsc input files for run index > 0 found in {}!'.format(run_dir))
                        else:
                            print('No petsc input files for derivative run found in {}!'.format(run_dir))
                        break
                    elif not os.path.exists(input_file):
                        # lexists but not exists -> dangling symlink
                        print('Link for petsc input files for run index > 0 found in {} broken!'.format(run_dir))
                        break
            ## check if petsc output files exist
            petsc_output_files_exist = []
            for petsc_output_filename in ('dop_output.petsc', 'po4_output.petsc'):
                petsc_output_file = os.path.join(run_dir, petsc_output_filename)
                petsc_output_files_exist.append(os.path.exists(petsc_output_file))
            # output files may only be missing for the last run while it is still running
            if not np.all(petsc_output_files_exist) and (run_dir_index != n-1 or not is_running):
                if run_dir_index != n-1:
                    print('Petsc output files in {} do not exist, but it has not the last run index!'.format(run_dir))
                else:
                    print('Petsc output files in {} do not exist, but the job is not started or finished!'.format(run_dir))
                break
            ## check finish file
            finished_file = os.path.join(run_dir, 'finished.txt')
            if np.any(petsc_output_files_exist) and not os.path.exists(finished_file):
                print('Petsc output files in {} exist but finished file does not exist!'.format(run_dir))
                break
            if is_running:
                ## check if really running
                try:
                    is_really_running = util.batch.universal.system.BATCH_SYSTEM.is_job_running(job_id)
                    if not is_really_running:
                        print('Job in {} should run but it does not!'.format(run_dir))
                        break
                except ConnectionError:
                    print('Cannot connect to job server. Please check job id {}'.format(job_id))
                ## check if petsc output files exist
                if np.any(petsc_output_files_exist):
                    print('Job is running but petsc output files in {} do exist!'.format(run_dir))
                    break
            else:
                ## check exit code
                with simulation.model.job.Metos3D_Job(run_dir, force_load=True) as job:
                    exit_code = job.exit_code
                if exit_code != 0:
                    print('Job in {} has exit code {}!'.format(run_dir, exit_code))
                ## check job output file
                if os.path.exists(job_output_file):
                    try:
                        with open(job_output_file) as output_file_object:
                            for line in output_file_object:
                                line_lower = line.lower()
                                if ('error' in line_lower and not 'error_path' in line_lower) or 'warning' in line_lower or 'fatal' in line_lower or 'permission denied' in line_lower:
                                    if line not in ERROR_IGNORE_LIST:
                                        print('There are errors in the job output file {}: {}.'.format(job_output_file, line))
                                        break
                    except Exception:
                        # narrowed from a bare except; a bare except would also
                        # swallow KeyboardInterrupt/SystemExit
                        print('The job output file {} could not be opened!'.format(job_output_file))
                else:
                    print('Job output file {} does not exist!'.format(job_output_file))
                with simulation.model.job.Metos3D_Job(run_dir, force_load=True) as job:
                    try:
                        job.last_year
                    except Exception:
                        print('The job output file {} format is not correct! Last year could not be computed'.format(job_output_file))
                    try:
                        job.last_tolerance
                    except Exception:
                        print('The job output file {} format is not correct! Last tolerance could not be computed'.format(job_output_file))
def check_db_entry_integrity(model_name='dop_po4', time_step=1, parameter_set_dirs_to_check=None, check_for_same_parameters=True):
    """Check the integrity of parameter-set database entries on disk.

    For every parameter set dir this checks the spinup runs, the derivative
    runs for both finite-difference step sizes, the parameter file, cached
    WOD f/df values, value-cache option files and file permissions, printing
    a message for each inconsistency found.

    model_name: model whose database dir is inspected.
    time_step: time step sub-dir of the model dir.
    parameter_set_dirs_to_check: iterable of parameter set dirs, or None
        (also (None,)) to check all parameter sets of the model/time step.
    check_for_same_parameters: whether to compare each parameter vector
        against all other parameter sets for duplicates.
    """
    from simulation.model.constants import DATABASE_OUTPUT_DIR, DATABASE_MODEL_DIRNAME, DATABASE_TIME_STEP_DIRNAME, DATABASE_SPINUP_DIRNAME, DATABASE_DERIVATIVE_DIRNAME, JOB_OPTIONS_FILENAME, DATABASE_PARAMETERS_FILENAME
    from simulation.util.constants import CACHE_DIRNAME, WOD_F_FILENAME, WOD_DF_FILENAME
    # expected length of the WOD measurement vector
    wod_m = simulation.util.data_base.WOD().m
    model_dirname = DATABASE_MODEL_DIRNAME.format(model_name)
    model_dir = os.path.join(DATABASE_OUTPUT_DIR, model_dirname)
    time_step_dirname = DATABASE_TIME_STEP_DIRNAME.format(time_step)
    time_step_dir = os.path.join(model_dir, time_step_dirname)
    # finite difference step sizes used for derivative dirs
    df_step_sizes = [10**(-6), 10**(-7)]
    # (None,) is treated like None: check everything (see __main__ below)
    check_all_parameter_sets = parameter_set_dirs_to_check is None or (len(parameter_set_dirs_to_check) == 1 and parameter_set_dirs_to_check[0] is None)
    # comparing against others only makes sense with more than one set
    check_for_same_parameters = check_for_same_parameters and (check_all_parameter_sets or len(parameter_set_dirs_to_check) > 1)
    if check_all_parameter_sets or check_for_same_parameters:
        parameter_set_dirs_all = util.io.fs.get_dirs(time_step_dir, use_absolute_filenames=True)
    if check_all_parameter_sets:
        parameter_set_dirs_to_check = parameter_set_dirs_all
    for parameter_set_dir in parameter_set_dirs_to_check:
        print('Checking integrity of parameter set {}.'.format(parameter_set_dir))
        ## check spinup dir
        spinup_dir = os.path.join(parameter_set_dir, DATABASE_SPINUP_DIRNAME)
        check_db_entry_integrity_spinup(spinup_dir, True)
        ## check derivative dir
        for df_step_size in df_step_sizes:
            derivative_dir = os.path.join(parameter_set_dir, DATABASE_DERIVATIVE_DIRNAME.format(df_step_size))
            partial_derivative_dirs = util.io.fs.get_dirs(derivative_dir, use_absolute_filenames=True)
            for partial_derivative_dir in partial_derivative_dirs:
                check_db_entry_integrity_spinup(partial_derivative_dir, False)
        ## check for parameters
        p = np.loadtxt(os.path.join(parameter_set_dir, DATABASE_PARAMETERS_FILENAME))
        if not np.all(np.isfinite(p)):
            print('Parameters {} in set {} are not finite!'.format(p, parameter_set_dir))
        ## check for same parameters
        if check_for_same_parameters:
            for parameter_set_dir_i in parameter_set_dirs_all:
                if parameter_set_dir_i != parameter_set_dir:
                    p_i = np.loadtxt(os.path.join(parameter_set_dir_i, DATABASE_PARAMETERS_FILENAME))
                    # exact comparison is intended here (duplicate detection),
                    # not an approximate one:
                    # if np.allclose(p, p_i):
                    if np.all(p == p_i):
                        print('Parameter set {} and {} have same parameters!'.format(parameter_set_dir, parameter_set_dir_i))
        ## check WOD output (cached model-data misfit values; missing files are fine)
        f_wod_file = os.path.join(parameter_set_dir, CACHE_DIRNAME, WOD_F_FILENAME)
        try:
            f_wod = np.load(f_wod_file)
        except FileNotFoundError:
            f_wod = None
        if f_wod is not None:
            # f must be a vector with one entry per WOD measurement
            if f_wod.ndim != 1 or len(f_wod) != wod_m:
                print('Wod f file {} has wrong shape {}!'.format(f_wod_file, f_wod.shape))
        df_wod_file = os.path.join(parameter_set_dir, CACHE_DIRNAME, WOD_DF_FILENAME)
        try:
            df_wod = np.load(df_wod_file)
        except FileNotFoundError:
            df_wod = None
        if df_wod is not None:
            # df must be (measurements x parameters)
            if df_wod.ndim != 2 or len(df_wod) != wod_m or df_wod.shape[1] != len(p):
                print('Wod df file {} has wrong shape {}!'.format(df_wod_file, df_wod.shape))
        ## check value cache
        value_cache_option_files = util.io.fs.find_with_filename_pattern(parameter_set_dir, '*options.npy', exclude_dirs=True, use_absolute_filenames=True, recursive=True)
        for value_cache_option_file in value_cache_option_files:
            value_cache_option = np.load(value_cache_option_file)
            if not value_cache_option.ndim == 1:
                print('Value cache option {} has ndim {}!'.format(value_cache_option_file, value_cache_option.ndim))
            # presumably 3 for spinup options and 6 with derivative options
            # -- TODO confirm against the cache writer
            if not len(value_cache_option) in [3, 6]:
                print('Value cache option {} has len {}!'.format(value_cache_option_file, len(value_cache_option)))
        ## check file permissions (everything must be group readable)
        def check_file(file):
            # files must be readable by user and group
            permissions = os.stat(file)[stat.ST_MODE]
            if not (permissions & stat.S_IRUSR and permissions & stat.S_IRGRP):
                print('File {} is not readable!'.format(file))
        def check_dir(file):
            # dirs additionally need the executable bit to be traversable
            permissions = os.stat(file)[stat.ST_MODE]
            if not (permissions & stat.S_IRUSR and permissions & stat.S_IXUSR and permissions & stat.S_IRGRP and permissions & stat.S_IXGRP):
                print('Dir {} is not readable!'.format(file))
        util.io.fs.walk_all_in_dir(parameter_set_dir, check_file, check_dir, exclude_dir=False, topdown=True)
def check_db_integrity(model_name='dop_po4', time_step=1):
    """Check that the array db and the file db of the parameter database agree.

    Every index used in the array db must have an equal value in the file db;
    a message is printed for each missing or mismatching entry.

    model_name: kept for interface compatibility; currently unused here
        (the model is selected via model_options only).
    time_step: model time step passed to the model options.
    """
    print('Checking parameter database integrity.')
    model_options = {'time_step': time_step}
    m = simulation.model.eval.Model(model_options=model_options)
    array_db = m._parameter_db.array_db
    file_db = m._parameter_db.file_db
    for index in array_db.used_indices():
        v_a = array_db.get_value(index)
        try:
            v_f = file_db.get_value(index)
        except util.index_database.general.DatabaseIndexError:
            # message fix: was 'Array db hast value ... has not value their!'
            print('Array db has a value at index {} but file db has no value there!'.format(index))
        else:
            if not array_db.are_values_equal(v_a, v_f):
                print('Array db and file db value at index {} are not equal: {} != {}!'.format(index, v_a, v_f))
if __name__ == "__main__":
    ## command line interface: optionally restrict the check to one parameter set dir
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('-s', '--skip_same_parameter_check', action='store_true')
    argument_parser.add_argument('parameter_set_dir', nargs='?', default=None)
    parsed_arguments = argument_parser.parse_args()
    ## run check (time step is fixed to 1 here)
    time_step = 1
    # the whole-database check only makes sense when checking everything
    if parsed_arguments.parameter_set_dir is None:
        check_db_integrity(time_step=time_step)
    check_db_entry_integrity(
        time_step=time_step,
        parameter_set_dirs_to_check=(parsed_arguments.parameter_set_dir,),
        check_for_same_parameters=not parsed_arguments.skip_same_parameter_check)
    print('Check completed.')
import ab_game
import alpha_beta
from gui import *
from player import *
from utility_calculator import *
import threading
class AIPlayer(Player):
    """Computer player that chooses its moves with alpha-beta search.

    Yes there is a circular dependancy between AIPlayer and Game.
    """
    def __init__(self, search_filter, *args, **vargs):
        Player.__init__(self, *args, **vargs)
        self.max_depth = 1
        self.search_filter = search_filter
        # BUG FIX: __eq__ compares genomes, so the attribute must exist on a
        # freshly created player; previously comparing two new AIPlayers
        # raised AttributeError.
        self.genome = None
        self.utility_calculator = UtilityCalculator()

    def __eq__(self, other):
        return self.genome == other.genome

    def set_max_depth(self, max_depth):
        self.max_depth = max_depth

    def get_utility_calculator(self):
        return self.utility_calculator

    def get_priority_filter(self):
        return self.search_filter

    def attach_to_game(self, base_game):
        """Wrap the given base game in the alpha-beta search game."""
        self.ab_game = ab_game.ABGame(
            base_game, search_filter=self.search_filter,
            utility_calculator=self.utility_calculator)

    def prompt_for_action(self, base_game, gui, test=False):
        """Search for a move; in GUI mode run the search on a worker thread."""
        if test:
            return self.do_the_search()
        else:
            t = threading.Thread(target=self.search_thread, args=(gui,))
            # Allow the program to be exited quickly
            t.daemon = True
            t.start()
            return "%s is thinking" % self.get_name()

    def get_type(self):
        return "computer"

    def search_thread(self, gui):
        # Runs on the worker thread; hands the chosen action back to the GUI.
        action = self.do_the_search()
        if action:
            gui.enqueue_action(action)
            gui.trig()

    def do_the_search(self):
        """Run alpha-beta search and return the chosen action.

        Returns None if the search was interrupted via set_interrupted().
        """
        ab_game = self.ab_game
        ab_game.reset_transposition_table()
        md = self.max_depth
        # TODO: Move these to ABState.__repr__
        #print ab_game.current_state
        move, value = alpha_beta.alphabeta_search(ab_game.current_state,
                ab_game, max_depth=md)
        if self.ab_game.interrupted:
            return
        action = move[0]
        if value < -INFINITY / 1000:
            # No matter what we do, there is a forceable loss.
            # Just take the first move suggested by the search filter -
            # it will look better than the AB suggestion
            sf = ab_game.current_state.utility_stats.search_filter
            our_colour = ab_game.current_state.to_move_colour()
            action = sf.get_iter(our_colour).next()
        #print " => %s" % (action,)
        return action

    def set_interrupted(self):
        self.ab_game.interrupted = True
Give the genome an initial value of None
import ab_game
import alpha_beta
from gui import *
from player import *
from utility_calculator import *
import threading
class AIPlayer(Player):
    """Computer player that chooses its moves with alpha-beta search.

    Yes there is a circular dependancy between AIPlayer and Game.
    """
    def __init__(self, search_filter, *args, **vargs):
        Player.__init__(self, *args, **vargs)
        self.max_depth = 1
        self.search_filter = search_filter
        # genome must exist before __eq__ is ever called on a fresh player
        self.genome = None # temp hack
        self.utility_calculator = UtilityCalculator()

    def __eq__(self, other):
        # Players are considered equal when their genomes are equal.
        return self.genome == other.genome

    def set_max_depth(self, max_depth):
        self.max_depth = max_depth

    def get_utility_calculator(self):
        return self.utility_calculator

    def get_priority_filter(self):
        return self.search_filter

    def attach_to_game(self, base_game):
        # Wrap the given base game in the alpha-beta search game.
        self.ab_game = ab_game.ABGame(
            base_game, search_filter=self.search_filter,
            utility_calculator=self.utility_calculator)

    def prompt_for_action(self, base_game, gui, test=False):
        # In test mode search synchronously; otherwise search on a worker
        # thread and let the GUI pick up the action later.
        if test:
            return self.do_the_search()
        else:
            t = threading.Thread(target=self.search_thread, args=(gui,))
            # Allow the program to be exited quickly
            t.daemon = True
            t.start()
            return "%s is thinking" % self.get_name()

    def get_type(self):
        return "computer"

    def search_thread(self, gui):
        # Runs on the worker thread; hands the chosen action back to the GUI.
        action = self.do_the_search()
        if action:
            gui.enqueue_action(action)
            gui.trig()

    def do_the_search(self):
        # Run alpha-beta search; returns None if interrupted.
        ab_game = self.ab_game
        ab_game.reset_transposition_table()
        md = self.max_depth
        # TODO: Move these to ABState.__repr__
        #print ab_game.current_state
        move, value = alpha_beta.alphabeta_search(ab_game.current_state,
                ab_game, max_depth=md)
        if self.ab_game.interrupted:
            return
        action = move[0]
        if value < -INFINITY / 1000:
            # No matter what we do, there is a forceable loss.
            # Just take the first move suggested by the search filter -
            # it will look better than the AB suggestion
            sf = ab_game.current_state.utility_stats.search_filter
            our_colour = ab_game.current_state.to_move_colour()
            action = sf.get_iter(our_colour).next()
        #print " => %s" % (action,)
        return action

    def set_interrupted(self):
        # Flag checked by do_the_search after the alpha-beta call returns.
        self.ab_game.interrupted = True
|
prepared submission model for #41, completed TODOs
|
###############################################################################
##
## Copyright (C) 2011-2013, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import base64
from vistrails.core import modules
from vistrails.core.common import *
from vistrails.core.data_structures.bijectivedict import Bidict
import vistrails.core.db.io
from vistrails.core.log.controller import DummyLogController
from vistrails.core.modules.basic_modules import identifier as basic_pkg
from vistrails.core.modules.vistrails_module import ModuleConnector, \
ModuleHadError, ModuleError, ModuleBreakpoint, ModuleErrors
from vistrails.core.utils import DummyView
from vistrails.core.vistrail.annotation import Annotation
from vistrails.core.vistrail.vistrail import Vistrail
import copy
import vistrails.core.interpreter.base
from vistrails.core.interpreter.base import AbortExecution
import vistrails.core.interpreter.utils
import vistrails.core.system
import vistrails.core.vistrail.pipeline
import gc
import cPickle
import unittest
import vistrails.core.packagemanager
# from core.modules.module_utils import FilePool
##############################################################################
class CachedInterpreter(vistrails.core.interpreter.base.BaseInterpreter):
def __init__(self):
vistrails.core.interpreter.base.BaseInterpreter.__init__(self)
self.debugger = None
self.create()
def create(self):
# FIXME moved here because otherwise we hit the registry too early
from vistrails.core.modules.module_utils import FilePool
self._file_pool = FilePool()
self._persistent_pipeline = vistrails.core.vistrail.pipeline.Pipeline()
self._objects = {}
self._executed = {}
self.filePool = self._file_pool
def clear(self):
self._file_pool.cleanup()
self._persistent_pipeline.clear()
for obj in self._objects.itervalues():
obj.clear()
self._objects = {}
self._executed = {}
def __del__(self):
self.clear()
def clean_modules(self, modules_to_clean):
"""clean_modules(modules_to_clean: list of persistent module ids)
Removes modules from the persistent pipeline, and the modules that
depend on them."""
if not modules_to_clean:
return
g = self._persistent_pipeline.graph
modules_to_clean = (set(modules_to_clean) &
set(self._persistent_pipeline.modules.iterkeys()))
dependencies = g.vertices_topological_sort(modules_to_clean)
for v in dependencies:
self._persistent_pipeline.delete_module(v)
del self._objects[v]
def clean_non_cacheable_modules(self):
"""clean_non_cacheable_modules() -> None
Removes all modules that are not cacheable from the persistent
pipeline, and the modules that depend on them, and
previously suspended modules """
non_cacheable_modules = [i for
(i, mod) in self._objects.iteritems()
if not mod.is_cacheable() or \
mod.suspended]
self.clean_modules(non_cacheable_modules)
def _clear_package(self, identifier):
"""clear_package(identifier: str) -> None
Removes all modules from the given package from the persistent
pipeline.
"""
modules = [mod.id
for mod in self._persistent_pipeline.module_list
if mod.module_descriptor.identifier == identifier]
self.clean_modules(modules)
def setup_pipeline(self, pipeline, **kwargs):
"""setup_pipeline(controller, pipeline, locator, currentVersion,
view, aliases, **kwargs)
Matches a pipeline with the persistent pipeline and creates
instances of modules that aren't in the cache.
"""
def fetch(name, default):
r = kwargs.get(name, default)
try:
del kwargs[name]
except KeyError:
pass
return r
controller = fetch('controller', None)
locator = fetch('locator', None)
current_version = fetch('current_version', None)
view = fetch('view', DummyView())
vistrail_variables = fetch('vistrail_variables', None)
aliases = fetch('aliases', None)
params = fetch('params', None)
extra_info = fetch('extra_info', None)
logger = fetch('logger', DummyLogController())
sinks = fetch('sinks', None)
reason = fetch('reason', None)
actions = fetch('actions', None)
done_summon_hooks = fetch('done_summon_hooks', [])
module_executed_hook = fetch('module_executed_hook', [])
stop_on_error = fetch('stop_on_error', True)
reg = modules.module_registry.get_module_registry()
if len(kwargs) > 0:
raise VistrailsInternalError('Wrong parameters passed '
'to setup_pipeline: %s' % kwargs)
def create_null():
"""Creates a Null value"""
getter = modules.module_registry.registry.get_descriptor_by_name
descriptor = getter(basic_pkg, 'Null')
return descriptor.module()
def create_constant(param, module):
"""Creates a Constant from a parameter spec"""
getter = reg.get_descriptor_by_name
desc = getter(param.identifier, param.type, param.namespace)
constant = desc.module()
constant.id = module.id
# if param.evaluatedStrValue:
# constant.setValue(param.evaluatedStrValue)
if param.strValue != '':
constant.setValue(param.strValue)
else:
constant.setValue( \
constant.translate_to_string(constant.default_value))
return constant
### BEGIN METHOD ###
# if self.debugger:
# self.debugger.update()
to_delete = []
errors = {}
if controller is not None:
# Controller is none for sub_modules
controller.validate(pipeline)
else:
pipeline.validate()
self.resolve_aliases(pipeline, aliases)
if vistrail_variables:
self.resolve_variables(vistrail_variables, pipeline)
self.update_params(pipeline, params)
(tmp_to_persistent_module_map,
conn_map,
module_added_set,
conn_added_set) = self.add_to_persistent_pipeline(pipeline)
# Create the new objects
for i in module_added_set:
persistent_id = tmp_to_persistent_module_map[i]
module = self._persistent_pipeline.modules[persistent_id]
obj = self._objects[persistent_id] = module.summon()
obj.interpreter = self
obj.id = persistent_id
obj.is_breakpoint = module.is_breakpoint
obj.signature = module._signature
# Checking if output should be stored
if module.has_annotation_with_key('annotate_output'):
annotate_output = module.get_annotation_by_key('annotate_output')
#print annotate_output
if annotate_output:
obj.annotate_output = True
for f in module.functions:
connector = None
if len(f.params) == 0:
connector = ModuleConnector(create_null(), 'value')
elif len(f.params) == 1:
p = f.params[0]
try:
constant = create_constant(p, module)
connector = ModuleConnector(constant, 'value')
except ValueError, e:
err = ModuleError(self, 'Cannot convert parameter '
'value "%s"\n' % p.strValue + str(e))
errors[i] = err
to_delete.append(obj.id)
except Exception, e:
err = ModuleError(self, 'Uncaught exception: "%s"' % \
p.strValue + str(e))
errors[i] = err
to_delete.append(obj.id)
else:
tupleModule = vistrails.core.interpreter.base.InternalTuple()
tupleModule.length = len(f.params)
for (j,p) in enumerate(f.params):
try:
constant = create_constant(p, module)
constant.update()
connector = ModuleConnector(constant, 'value')
tupleModule.set_input_port(j, connector)
except ValueError, e:
err = ModuleError(self, "Cannot convert parameter "
"value '%s'\n" % p.strValue + \
str(e))
errors[i] = err
to_delete.append(obj.id)
except Exception, e:
err = ModuleError(self, 'Uncaught exception: '
'"%s"' % p.strValue + str(e))
errors[i] = err
to_delete.append(obj.id)
connector = ModuleConnector(tupleModule, 'value')
if connector:
obj.set_input_port(f.name, connector, is_method=True)
# Create the new connections
for i in conn_added_set:
persistent_id = conn_map[i]
conn = self._persistent_pipeline.connections[persistent_id]
src = self._objects[conn.sourceId]
dst = self._objects[conn.destinationId]
conn.makeConnection(src, dst)
if self.done_summon_hook:
self.done_summon_hook(self._persistent_pipeline, self._objects)
for callable_ in done_summon_hooks:
callable_(self._persistent_pipeline, self._objects)
tmp_id_to_module_map = {}
for i, j in tmp_to_persistent_module_map.iteritems():
tmp_id_to_module_map[i] = self._objects[j]
return (tmp_id_to_module_map, tmp_to_persistent_module_map.inverse,
module_added_set, conn_added_set, to_delete, errors)
def execute_pipeline(self, pipeline, tmp_id_to_module_map,
persistent_to_tmp_id_map, **kwargs):
def fetch(name, default):
r = kwargs.get(name, default)
try:
del kwargs[name]
except KeyError:
pass
return r
controller = fetch('controller', None)
locator = fetch('locator', None)
current_version = fetch('current_version', None)
view = fetch('view', DummyView())
vistrail_variables = fetch('vistrail_variables', None)
aliases = fetch('aliases', None)
params = fetch('params', None)
extra_info = fetch('extra_info', None)
logger = fetch('logger', DummyLogController())
sinks = fetch('sinks', None)
reason = fetch('reason', None)
actions = fetch('actions', None)
module_executed_hook = fetch('module_executed_hook', [])
module_suspended_hook = fetch('module_suspended_hook', [])
done_summon_hooks = fetch('done_summon_hooks', [])
clean_pipeline = fetch('clean_pipeline', False)
stop_on_error = fetch('stop_on_error', True)
# parent_exec = fetch('parent_exec', None)
if len(kwargs) > 0:
raise VistrailsInternalError('Wrong parameters passed '
'to execute_pipeline: %s' % kwargs)
errors = {}
executed = {}
suspended = {}
cached = {}
# LOGGING SETUP
def get_remapped_id(id):
return persistent_to_tmp_id_map[id]
# the executed dict works on persistent ids
def add_to_executed(obj):
executed[obj.id] = True
for callable_ in module_executed_hook:
callable_(obj.id)
# the suspended dict works on persistent ids
def add_to_suspended(obj):
suspended[obj.id] = obj.suspended
for callable_ in module_suspended_hook:
callable_(obj.id)
def set_computing(obj):
i = get_remapped_id(obj.id)
view.set_module_computing(i)
# views work on local ids
def begin_compute(obj):
i = get_remapped_id(obj.id)
view.set_module_computing(i)
reg = modules.module_registry.get_module_registry()
module_name = reg.get_descriptor(obj.__class__).name
# !!!self.parent_execs is mutated!!!
logger.start_execution(obj, i, module_name,
parent_execs=self.parent_execs)
# views and loggers work on local ids
def begin_update(obj):
i = get_remapped_id(obj.id)
view.set_module_active(i)
def update_cached(obj):
cached[obj.id] = True
i = get_remapped_id(obj.id)
reg = modules.module_registry.get_module_registry()
module_name = reg.get_descriptor(obj.__class__).name
# !!!self.parent_execs is mutated!!!
logger.start_execution(obj, i, module_name,
parent_execs=self.parent_execs,
cached=1)
view.set_module_not_executed(i)
num_pops = logger.finish_execution(obj,'', self.parent_execs)
# views and loggers work on local ids
def end_update(obj, error='', errorTrace=None, was_suspended = False):
i = get_remapped_id(obj.id)
if was_suspended:
view.set_module_suspended(i, error)
error = error.msg
elif not error:
view.set_module_success(i)
else:
view.set_module_error(i, error)
# !!!self.parent_execs is mutated!!!
logger.finish_execution(obj, error, self.parent_execs, errorTrace,
was_suspended)
# views and loggers work on local ids
def annotate(obj, d):
i = get_remapped_id(obj.id)
logger.insert_module_annotations(obj, d)
# views and loggers work on local ids
def update_progress(obj, percentage=0.0):
i = get_remapped_id(obj.id)
view.set_module_progress(i, percentage)
def add_exec(exec_):
logger.add_exec(exec_, self.parent_execs)
logging_obj = InstanceObject(signalSuccess=add_to_executed,
signalSuspended=add_to_suspended,
begin_update=begin_update,
begin_compute=begin_compute,
update_progress=update_progress,
end_update=end_update,
update_cached=update_cached,
set_computing=set_computing,
add_exec = add_exec,
annotate=annotate,
log=logger)
# PARAMETER CHANGES SETUP
parameter_changes = []
def change_parameter(obj, name, value):
parameter_changes.append((get_remapped_id(obj.id),
name, value))
def make_change_parameter(obj):
return lambda *args: change_parameter(obj, *args)
# Update **all** modules in the current pipeline
for i, obj in tmp_id_to_module_map.iteritems():
obj.logging = logging_obj
obj.change_parameter = make_change_parameter(obj)
# Update object pipeline information
obj.moduleInfo['locator'] = locator
obj.moduleInfo['version'] = current_version
obj.moduleInfo['moduleId'] = i
obj.moduleInfo['pipeline'] = pipeline
obj.moduleInfo['controller'] = controller
if extra_info is not None:
obj.moduleInfo['extra_info'] = extra_info
if reason is not None:
obj.moduleInfo['reason'] = reason
if actions is not None:
obj.moduleInfo['actions'] = actions
## Checking 'sinks' from kwargs to resolve only requested sinks
# Note that we accept any module in 'sinks', even if it's not actually
# a sink in the graph
if sinks is not None:
persistent_sinks = [tmp_id_to_module_map[sink]
for sink in sinks
if sink in tmp_id_to_module_map]
else:
persistent_sinks = [tmp_id_to_module_map[sink]
for sink in pipeline.graph.sinks()]
# Update new sinks
for obj in persistent_sinks:
abort = False
try:
obj.update()
continue
except ModuleHadError:
pass
except AbortExecution:
break
except ModuleErrors, mes:
for me in mes.module_errors:
me.module.logging.end_update(me.module, me.msg)
errors[me.module.id] = me
abort = abort or me.abort
except ModuleError, me:
me.module.logging.end_update(me.module, me.msg, me.errorTrace)
errors[me.module.id] = me
abort = me.abort
except ModuleBreakpoint, mb:
mb.module.logging.end_update(mb.module)
errors[mb.module.id] = mb
abort = True
if stop_on_error or abort:
break
if self.done_update_hook:
self.done_update_hook(self._persistent_pipeline, self._objects)
# objs, errs, and execs are mappings that use the local ids as keys,
# as opposed to the persistent ids.
# They are thus ideal to external consumption.
objs = {}
# dict([(i, self._objects[tmp_to_persistent_module_map[i]])
# for i in tmp_to_persistent_module_map.keys()])
errs = {}
execs = {}
suspends = {}
caches = {}
to_delete = []
for (tmp_id, obj) in tmp_id_to_module_map.iteritems():
if clean_pipeline:
to_delete.append(obj.id)
objs[tmp_id] = obj
if obj.id in errors:
errs[tmp_id] = errors[obj.id]
if not clean_pipeline:
to_delete.append(obj.id)
if obj.id in executed:
execs[tmp_id] = executed[obj.id]
elif obj.id in suspended:
suspends[tmp_id] = suspended[obj.id]
elif obj.id in cached:
caches[tmp_id] = cached[obj.id]
else:
# these modules didn't execute
execs[tmp_id] = False
return (to_delete, objs, errs, execs, suspends, caches, parameter_changes)
def finalize_pipeline(self, pipeline, to_delete, objs, errs, execs,
suspended, cached, **kwargs):
def fetch(name, default):
r = kwargs.get(name, default)
try:
del kwargs[name]
except KeyError:
pass
return r
view = fetch('view', DummyView())
reset_computed = fetch('reset_computed', True)
self.clean_modules(to_delete)
for i in objs:
if i in errs:
view.set_module_error(i, errs[i].msg, errs[i].errorTrace)
elif i in suspended and suspended[i]:
view.set_module_suspended(i, suspended[i])
elif i in execs and execs[i]:
view.set_module_success(i)
elif i in cached and cached[i]:
view.set_module_not_executed(i)
else:
view.set_module_persistent(i)
if reset_computed:
for module in self._objects.itervalues():
module.computed = False
def unlocked_execute(self, pipeline, **kwargs):
"""unlocked_execute(pipeline, **kwargs): Executes a pipeline using
caching. Caching works by reusing pipelines directly. This
means that there exists one global pipeline whose parts get
executed over and over again. This allows nested execution."""
res = self.setup_pipeline(pipeline, **kwargs)
modules_added = res[2]
conns_added = res[3]
to_delete = res[4]
errors = res[5]
if len(errors) == 0:
res = self.execute_pipeline(pipeline, *(res[:2]), **kwargs)
else:
res = (to_delete, res[0], errors, {}, {}, {}, [])
self.finalize_pipeline(pipeline, *(res[:-1]), **kwargs)
return InstanceObject(objects=res[1],
errors=res[2],
executed=res[3],
suspended=res[4],
parameter_changes=res[6],
modules_added=modules_added,
conns_added=conns_added)
@lock_method(vistrails.core.interpreter.utils.get_interpreter_lock())
def execute(self, pipeline, **kwargs):
"""execute(pipeline, **kwargs):
kwargs:
controller = fetch('controller', None)
locator = fetch('locator', None)
current_version = fetch('current_version', None)
view = fetch('view', DummyView())
aliases = fetch('aliases', None)
params = fetch('params', None)
extra_info = fetch('extra_info', None)
logger = fetch('logger', DummyLogController())
reason = fetch('reason', None)
actions = fetch('actions', None)
done_summon_hooks = fetch('done_summon_hooks', [])
module_executed_hook = fetch('module_executed_hook', [])
Executes a pipeline using caching. Caching works by reusing
pipelines directly. This means that there exists one global
pipeline whose parts get executed over and over again.
This function returns a triple of dictionaries (objs, errs, execs).
objs is a mapping from local ids (the ids in the pipeline) to
objects **in the persistent pipeline**. Notice, these are not
the objects inside the passed pipeline, but the objects they
were mapped to in the persistent pipeline.
errs is a dictionary from local ids to error messages of modules
that might have returns errors.
execs is a dictionary from local ids to boolean values indicating
whether they were executed or not.
If modules have no error associated with but were not executed, it
means they were cached."""
# Setup named arguments. We don't use named parameters so
# that positional parameter calls fail earlier
new_kwargs = {}
def fetch(name, default):
r = kwargs.get(name, default)
new_kwargs[name] = r
try:
del kwargs[name]
except KeyError:
pass
return r
controller = fetch('controller', None)
locator = fetch('locator', None)
current_version = fetch('current_version', None)
view = fetch('view', DummyView())
vistrail_variables = fetch('vistrail_variables', None)
aliases = fetch('aliases', None)
params = fetch('params', None)
extra_info = fetch('extra_info', None)
logger = fetch('logger', DummyLogController())
sinks = fetch('sinks', None)
reason = fetch('reason', None)
actions = fetch('actions', None)
done_summon_hooks = fetch('done_summon_hooks', [])
module_executed_hook = fetch('module_executed_hook', [])
stop_on_error = fetch('stop_on_error', True)
if len(kwargs) > 0:
raise VistrailsInternalError('Wrong parameters passed '
'to execute: %s' % kwargs)
self.clean_non_cacheable_modules()
# if controller is not None:
# vistrail = controller.vistrail
# (pipeline, module_remap) = \
# core.db.io.expand_workflow(vistrail, pipeline)
# new_kwargs['module_remap'] = module_remap
# else:
# vistrail = None
if controller is not None:
vistrail = controller.vistrail
else:
vistrail = None
self.parent_execs = [None]
logger.start_workflow_execution(vistrail, pipeline, current_version)
self.annotate_workflow_execution(logger, reason, aliases, params)
result = self.unlocked_execute(pipeline, **new_kwargs)
logger.finish_workflow_execution(result.errors, suspended=result.suspended)
self.parent_execs = [None]
return result
def annotate_workflow_execution(self, logger, reason, aliases, params):
"""annotate_workflow_Execution(logger: LogController, reason:str,
aliases:dict, params:list)-> None
It will annotate the workflow execution in logger with the reason,
aliases and params.
"""
d = {}
d["__reason__"] = reason
if aliases is not None and isinstance(aliases, dict):
d["__aliases__"] = cPickle.dumps(aliases)
if params is not None and isinstance(params, list):
d["__params__"] = cPickle.dumps(params)
logger.insert_workflow_exec_annotations(d)
def add_to_persistent_pipeline(self, pipeline):
"""add_to_persistent_pipeline(pipeline):
(module_id_map, connection_id_map, modules_added)
Adds a pipeline to the persistent pipeline of the cached interpreter
and adds current logging object to each existing module.
Returns four things: two dictionaries describing the mapping
of ids from the passed pipeline to the persistent one (the
first one has the module id mapping, the second one has the
connection id mapping), a set of all module ids added to the
persistent pipeline, and a set of all connection ids added to
the persistent pipeline."""
module_id_map = Bidict()
connection_id_map = Bidict()
modules_added = set()
connections_added = set()
pipeline.refresh_signatures()
# we must traverse vertices in topological sort order
verts = pipeline.graph.vertices_topological_sort()
for new_module_id in verts:
new_sig = pipeline.subpipeline_signature(new_module_id)
if not self._persistent_pipeline.has_subpipeline_signature(new_sig):
# Must add module to persistent pipeline
persistent_module = copy.copy(pipeline.modules[new_module_id])
persistent_id = self._persistent_pipeline.fresh_module_id()
persistent_module.id = persistent_id
self._persistent_pipeline.add_module(persistent_module)
self._persistent_pipeline.modules[persistent_id]._signature = \
base64.b16encode(new_sig).lower()
module_id_map[new_module_id] = persistent_id
modules_added.add(new_module_id)
else:
i = self._persistent_pipeline \
.subpipeline_id_from_signature(new_sig)
module_id_map[new_module_id] = i
for connection in pipeline.connections.itervalues():
new_sig = pipeline.connection_signature(connection.id)
if not self._persistent_pipeline.has_connection_signature(new_sig):
# Must add connection to persistent pipeline
persistent_connection = copy.copy(connection)
persistent_id = self._persistent_pipeline.fresh_connection_id()
persistent_connection.id = persistent_id
persistent_connection.sourceId = module_id_map[
connection.sourceId]
persistent_connection.destinationId = module_id_map[
connection.destinationId]
self._persistent_pipeline.add_connection(persistent_connection)
connection_id_map[connection.id] = persistent_id
connections_added.add(connection.id)
else:
i = self._persistent_pipeline \
.connection_id_from_signature(new_sig)
connection_id_map[connection.id] = i
# update persistent signatures
self._persistent_pipeline.compute_signatures()
return (module_id_map, connection_id_map,
modules_added, connections_added)
def find_persistent_entities(self, pipeline):
"""returns a map from a pipeline to the persistent pipeline,
assuming those pieces exist"""
persistent_p = self._persistent_pipeline
object_map = {}
module_id_map = {}
connection_id_map = {}
pipeline.refresh_signatures()
# we must traverse vertices in topological sort order
verts = pipeline.graph.vertices_topological_sort()
for module_id in verts:
sig = pipeline.subpipeline_signature(module_id)
if persistent_p.has_subpipeline_signature(sig):
i = persistent_p.subpipeline_id_from_signature(sig)
module_id_map[module_id] = i
object_map[module_id] = self._objects[i]
else:
module_id_map[module_id] = None
object_map[module_id] = None
for connection in pipeline.connections.itervalues():
sig = pipeline.connection_signature(connection.id)
if persistent_p.has_connection_signature(sig):
connection_id_map[connection.id] = \
persistent_p.connection_id_from_signature(sig)
else:
connection_id_map[connection.id] = None
return (object_map, module_id_map, connection_id_map)
__instance = None
@staticmethod
def get():
if not CachedInterpreter.__instance:
CachedInterpreter.__instance = CachedInterpreter()
return CachedInterpreter.__instance
@staticmethod
def cleanup():
if CachedInterpreter.__instance:
CachedInterpreter.__instance.clear()
objs = gc.collect()
@staticmethod
def flush():
if CachedInterpreter.__instance:
CachedInterpreter.__instance.clear()
CachedInterpreter.__instance.create()
objs = gc.collect()
@staticmethod
def clear_package(identifier):
if CachedInterpreter.__instance:
CachedInterpreter.__instance._clear_package(identifier)
##############################################################################
# Testing
class TestCachedInterpreter(unittest.TestCase):
    def test_cache(self):
        """Test if basic caching is working."""
        from vistrails.core.db.locator import XMLFileLocator
        from vistrails.core.vistrail.controller import VistrailController
        from vistrails.core.db.io import load_vistrail
        # load the bundled test vistrail
        locator = XMLFileLocator(vistrails.core.system.vistrails_root_directory() +
                                 '/tests/resources/dummy.xml')
        (v, abstractions, thumbnails, mashups) = load_vistrail(locator)
        # the controller will take care of upgrades
        controller = VistrailController(v, locator, abstractions, thumbnails,
                                        mashups)
        p1 = v.getPipeline('int chain')
        n = v.get_version_number('int chain')
        controller.change_selected_version(n)
        controller.flush_delayed_actions()
        p1 = controller.current_pipeline
        view = DummyView()
        # first execution populates the interpreter's persistent pipeline
        interpreter = vistrails.core.interpreter.cached.CachedInterpreter.get()
        result = interpreter.execute(p1,
                                     locator=v,
                                     current_version=n,
                                     view=view,
                                     )
        # to force fresh params
        p2 = v.getPipeline('int chain')
        controller.change_selected_version(n)
        controller.flush_delayed_actions()
        p2 = controller.current_pipeline
        result = interpreter.execute(p2,
                                     locator=v,
                                     current_version=n,
                                     view=view,
                                     )
        # NOTE(review): the second run is expected to hit the cache for all
        # but exactly one module -- confirm against the 'int chain' workflow
        assert len(result.modules_added) == 1
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
Fixes imports in core.interpreter.cached: replaces the wildcard and unused imports with explicit, sorted imports of only the names the module actually uses.
###############################################################################
##
## Copyright (C) 2011-2013, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import base64
import copy
import cPickle
import gc
from vistrails.core.common import InstanceObject, lock_method, \
VistrailsInternalError
from vistrails.core.data_structures.bijectivedict import Bidict
import vistrails.core.interpreter.base
from vistrails.core.interpreter.base import AbortExecution
import vistrails.core.interpreter.utils
from vistrails.core.log.controller import DummyLogController
from vistrails.core.modules.basic_modules import identifier as basic_pkg
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.vistrails_module import ModuleConnector, \
ModuleHadError, ModuleError, ModuleBreakpoint, ModuleErrors
from vistrails.core.utils import DummyView
import vistrails.core.system
import vistrails.core.vistrail.pipeline
# from core.modules.module_utils import FilePool
##############################################################################
class CachedInterpreter(vistrails.core.interpreter.base.BaseInterpreter):
def __init__(self):
vistrails.core.interpreter.base.BaseInterpreter.__init__(self)
self.debugger = None
self.create()
def create(self):
# FIXME moved here because otherwise we hit the registry too early
from vistrails.core.modules.module_utils import FilePool
self._file_pool = FilePool()
self._persistent_pipeline = vistrails.core.vistrail.pipeline.Pipeline()
self._objects = {}
self._executed = {}
self.filePool = self._file_pool
def clear(self):
self._file_pool.cleanup()
self._persistent_pipeline.clear()
for obj in self._objects.itervalues():
obj.clear()
self._objects = {}
self._executed = {}
def __del__(self):
self.clear()
def clean_modules(self, modules_to_clean):
"""clean_modules(modules_to_clean: list of persistent module ids)
Removes modules from the persistent pipeline, and the modules that
depend on them."""
if not modules_to_clean:
return
g = self._persistent_pipeline.graph
modules_to_clean = (set(modules_to_clean) &
set(self._persistent_pipeline.modules.iterkeys()))
dependencies = g.vertices_topological_sort(modules_to_clean)
for v in dependencies:
self._persistent_pipeline.delete_module(v)
del self._objects[v]
def clean_non_cacheable_modules(self):
"""clean_non_cacheable_modules() -> None
Removes all modules that are not cacheable from the persistent
pipeline, and the modules that depend on them, and
previously suspended modules """
non_cacheable_modules = [i for
(i, mod) in self._objects.iteritems()
if not mod.is_cacheable() or \
mod.suspended]
self.clean_modules(non_cacheable_modules)
def _clear_package(self, identifier):
"""clear_package(identifier: str) -> None
Removes all modules from the given package from the persistent
pipeline.
"""
modules = [mod.id
for mod in self._persistent_pipeline.module_list
if mod.module_descriptor.identifier == identifier]
self.clean_modules(modules)
def setup_pipeline(self, pipeline, **kwargs):
"""setup_pipeline(controller, pipeline, locator, currentVersion,
view, aliases, **kwargs)
Matches a pipeline with the persistent pipeline and creates
instances of modules that aren't in the cache.
"""
def fetch(name, default):
r = kwargs.get(name, default)
try:
del kwargs[name]
except KeyError:
pass
return r
controller = fetch('controller', None)
locator = fetch('locator', None)
current_version = fetch('current_version', None)
view = fetch('view', DummyView())
vistrail_variables = fetch('vistrail_variables', None)
aliases = fetch('aliases', None)
params = fetch('params', None)
extra_info = fetch('extra_info', None)
logger = fetch('logger', DummyLogController())
sinks = fetch('sinks', None)
reason = fetch('reason', None)
actions = fetch('actions', None)
done_summon_hooks = fetch('done_summon_hooks', [])
module_executed_hook = fetch('module_executed_hook', [])
stop_on_error = fetch('stop_on_error', True)
reg = get_module_registry()
if len(kwargs) > 0:
raise VistrailsInternalError('Wrong parameters passed '
'to setup_pipeline: %s' % kwargs)
def create_null():
"""Creates a Null value"""
getter = get_module_registry().get_descriptor_by_name
descriptor = getter(basic_pkg, 'Null')
return descriptor.module()
def create_constant(param, module):
"""Creates a Constant from a parameter spec"""
getter = reg.get_descriptor_by_name
desc = getter(param.identifier, param.type, param.namespace)
constant = desc.module()
constant.id = module.id
# if param.evaluatedStrValue:
# constant.setValue(param.evaluatedStrValue)
if param.strValue != '':
constant.setValue(param.strValue)
else:
constant.setValue( \
constant.translate_to_string(constant.default_value))
return constant
### BEGIN METHOD ###
# if self.debugger:
# self.debugger.update()
to_delete = []
errors = {}
if controller is not None:
# Controller is none for sub_modules
controller.validate(pipeline)
else:
pipeline.validate()
self.resolve_aliases(pipeline, aliases)
if vistrail_variables:
self.resolve_variables(vistrail_variables, pipeline)
self.update_params(pipeline, params)
(tmp_to_persistent_module_map,
conn_map,
module_added_set,
conn_added_set) = self.add_to_persistent_pipeline(pipeline)
# Create the new objects
for i in module_added_set:
persistent_id = tmp_to_persistent_module_map[i]
module = self._persistent_pipeline.modules[persistent_id]
obj = self._objects[persistent_id] = module.summon()
obj.interpreter = self
obj.id = persistent_id
obj.is_breakpoint = module.is_breakpoint
obj.signature = module._signature
# Checking if output should be stored
if module.has_annotation_with_key('annotate_output'):
annotate_output = module.get_annotation_by_key('annotate_output')
#print annotate_output
if annotate_output:
obj.annotate_output = True
for f in module.functions:
connector = None
if len(f.params) == 0:
connector = ModuleConnector(create_null(), 'value')
elif len(f.params) == 1:
p = f.params[0]
try:
constant = create_constant(p, module)
connector = ModuleConnector(constant, 'value')
except ValueError, e:
err = ModuleError(self, 'Cannot convert parameter '
'value "%s"\n' % p.strValue + str(e))
errors[i] = err
to_delete.append(obj.id)
except Exception, e:
err = ModuleError(self, 'Uncaught exception: "%s"' % \
p.strValue + str(e))
errors[i] = err
to_delete.append(obj.id)
else:
tupleModule = vistrails.core.interpreter.base.InternalTuple()
tupleModule.length = len(f.params)
for (j,p) in enumerate(f.params):
try:
constant = create_constant(p, module)
constant.update()
connector = ModuleConnector(constant, 'value')
tupleModule.set_input_port(j, connector)
except ValueError, e:
err = ModuleError(self, "Cannot convert parameter "
"value '%s'\n" % p.strValue + \
str(e))
errors[i] = err
to_delete.append(obj.id)
except Exception, e:
err = ModuleError(self, 'Uncaught exception: '
'"%s"' % p.strValue + str(e))
errors[i] = err
to_delete.append(obj.id)
connector = ModuleConnector(tupleModule, 'value')
if connector:
obj.set_input_port(f.name, connector, is_method=True)
# Create the new connections
for i in conn_added_set:
persistent_id = conn_map[i]
conn = self._persistent_pipeline.connections[persistent_id]
src = self._objects[conn.sourceId]
dst = self._objects[conn.destinationId]
conn.makeConnection(src, dst)
if self.done_summon_hook:
self.done_summon_hook(self._persistent_pipeline, self._objects)
for callable_ in done_summon_hooks:
callable_(self._persistent_pipeline, self._objects)
tmp_id_to_module_map = {}
for i, j in tmp_to_persistent_module_map.iteritems():
tmp_id_to_module_map[i] = self._objects[j]
return (tmp_id_to_module_map, tmp_to_persistent_module_map.inverse,
module_added_set, conn_added_set, to_delete, errors)
def execute_pipeline(self, pipeline, tmp_id_to_module_map,
                     persistent_to_tmp_id_map, **kwargs):
    """Execute the sinks of a pipeline that was previously set up.

    `tmp_id_to_module_map` maps local (pipeline) ids to persistent
    module objects; `persistent_to_tmp_id_map` is the reverse mapping.

    Returns a 7-tuple ``(to_delete, objs, errs, execs, suspends,
    caches, parameter_changes)`` where the dictionaries are keyed on
    local ids (suitable for external consumption).
    """
    def fetch(name, default):
        # Pop a recognized keyword argument so that any leftover
        # kwargs can be reported as an error below.
        r = kwargs.get(name, default)
        try:
            del kwargs[name]
        except KeyError:
            pass
        return r
    controller = fetch('controller', None)
    locator = fetch('locator', None)
    current_version = fetch('current_version', None)
    view = fetch('view', DummyView())
    vistrail_variables = fetch('vistrail_variables', None)
    aliases = fetch('aliases', None)
    params = fetch('params', None)
    extra_info = fetch('extra_info', None)
    logger = fetch('logger', DummyLogController())
    sinks = fetch('sinks', None)
    reason = fetch('reason', None)
    actions = fetch('actions', None)
    module_executed_hook = fetch('module_executed_hook', [])
    module_suspended_hook = fetch('module_suspended_hook', [])
    done_summon_hooks = fetch('done_summon_hooks', [])
    clean_pipeline = fetch('clean_pipeline', False)
    stop_on_error = fetch('stop_on_error', True)
    # parent_exec = fetch('parent_exec', None)

    # Any kwargs not consumed by fetch() above are unknown parameters.
    if len(kwargs) > 0:
        raise VistrailsInternalError('Wrong parameters passed '
                                     'to execute_pipeline: %s' % kwargs)

    # Execution results, keyed on *persistent* module ids.
    errors = {}
    executed = {}
    suspended = {}
    cached = {}

    # LOGGING SETUP
    def get_remapped_id(id):
        # persistent id -> local (pipeline) id
        return persistent_to_tmp_id_map[id]

    # the executed dict works on persistent ids
    def add_to_executed(obj):
        executed[obj.id] = True
        for callable_ in module_executed_hook:
            callable_(obj.id)

    # the suspended dict works on persistent ids
    def add_to_suspended(obj):
        suspended[obj.id] = obj.suspended
        for callable_ in module_suspended_hook:
            callable_(obj.id)

    def set_computing(obj):
        i = get_remapped_id(obj.id)
        view.set_module_computing(i)

    # views work on local ids
    def begin_compute(obj):
        i = get_remapped_id(obj.id)
        view.set_module_computing(i)
        reg = get_module_registry()
        module_name = reg.get_descriptor(obj.__class__).name
        # !!!self.parent_execs is mutated!!!
        logger.start_execution(obj, i, module_name,
                               parent_execs=self.parent_execs)

    # views and loggers work on local ids
    def begin_update(obj):
        i = get_remapped_id(obj.id)
        view.set_module_active(i)

    def update_cached(obj):
        # Module result was served from the cache: log a (cached)
        # execution and mark the module as not-executed in the view.
        cached[obj.id] = True
        i = get_remapped_id(obj.id)
        reg = get_module_registry()
        module_name = reg.get_descriptor(obj.__class__).name
        # !!!self.parent_execs is mutated!!!
        logger.start_execution(obj, i, module_name,
                               parent_execs=self.parent_execs,
                               cached=1)
        view.set_module_not_executed(i)
        num_pops = logger.finish_execution(obj,'', self.parent_execs)

    # views and loggers work on local ids
    def end_update(obj, error='', errorTrace=None, was_suspended = False):
        i = get_remapped_id(obj.id)
        if was_suspended:
            # On suspension, `error` is a message object; unwrap it
            # after notifying the view.
            view.set_module_suspended(i, error)
            error = error.msg
        elif not error:
            view.set_module_success(i)
        else:
            view.set_module_error(i, error)
        # !!!self.parent_execs is mutated!!!
        logger.finish_execution(obj, error, self.parent_execs, errorTrace,
                                was_suspended)

    # views and loggers work on local ids
    def annotate(obj, d):
        i = get_remapped_id(obj.id)
        logger.insert_module_annotations(obj, d)

    # views and loggers work on local ids
    def update_progress(obj, percentage=0.0):
        i = get_remapped_id(obj.id)
        view.set_module_progress(i, percentage)

    def add_exec(exec_):
        logger.add_exec(exec_, self.parent_execs)

    # Bundle all the callbacks above into one object that modules use
    # for status reporting during update()/compute().
    logging_obj = InstanceObject(signalSuccess=add_to_executed,
                                 signalSuspended=add_to_suspended,
                                 begin_update=begin_update,
                                 begin_compute=begin_compute,
                                 update_progress=update_progress,
                                 end_update=end_update,
                                 update_cached=update_cached,
                                 set_computing=set_computing,
                                 add_exec = add_exec,
                                 annotate=annotate,
                                 log=logger)

    # PARAMETER CHANGES SETUP
    parameter_changes = []
    def change_parameter(obj, name, value):
        parameter_changes.append((get_remapped_id(obj.id),
                                  name, value))
    def make_change_parameter(obj):
        # Bind `obj` now so each module gets its own callback.
        return lambda *args: change_parameter(obj, *args)

    # Update **all** modules in the current pipeline
    for i, obj in tmp_id_to_module_map.iteritems():
        obj.logging = logging_obj
        obj.change_parameter = make_change_parameter(obj)

        # Update object pipeline information
        obj.moduleInfo['locator'] = locator
        obj.moduleInfo['version'] = current_version
        obj.moduleInfo['moduleId'] = i
        obj.moduleInfo['pipeline'] = pipeline
        obj.moduleInfo['controller'] = controller
        if extra_info is not None:
            obj.moduleInfo['extra_info'] = extra_info
        if reason is not None:
            obj.moduleInfo['reason'] = reason
        if actions is not None:
            obj.moduleInfo['actions'] = actions

    ## Checking 'sinks' from kwargs to resolve only requested sinks
    # Note that we accept any module in 'sinks', even if it's not actually
    # a sink in the graph
    if sinks is not None:
        persistent_sinks = [tmp_id_to_module_map[sink]
                            for sink in sinks
                            if sink in tmp_id_to_module_map]
    else:
        persistent_sinks = [tmp_id_to_module_map[sink]
                            for sink in pipeline.graph.sinks()]

    # Update new sinks
    for obj in persistent_sinks:
        abort = False
        try:
            obj.update()
            # Success: skip the stop_on_error/abort check below and go
            # straight to the next sink.
            continue
        except ModuleHadError:
            pass
        except AbortExecution:
            break
        except ModuleErrors, mes:
            # Several modules failed at once; record each one.
            for me in mes.module_errors:
                me.module.logging.end_update(me.module, me.msg)
                errors[me.module.id] = me
                abort = abort or me.abort
        except ModuleError, me:
            me.module.logging.end_update(me.module, me.msg, me.errorTrace)
            errors[me.module.id] = me
            abort = me.abort
        except ModuleBreakpoint, mb:
            mb.module.logging.end_update(mb.module)
            errors[mb.module.id] = mb
            abort = True
        if stop_on_error or abort:
            break

    if self.done_update_hook:
        self.done_update_hook(self._persistent_pipeline, self._objects)

    # objs, errs, and execs are mappings that use the local ids as keys,
    # as opposed to the persistent ids.
    # They are thus ideal to external consumption.
    objs = {}
    # dict([(i, self._objects[tmp_to_persistent_module_map[i]])
    #       for i in tmp_to_persistent_module_map.keys()])
    errs = {}
    execs = {}
    suspends = {}
    caches = {}
    to_delete = []
    for (tmp_id, obj) in tmp_id_to_module_map.iteritems():
        if clean_pipeline:
            # Caller asked for a throwaway pipeline: delete everything.
            to_delete.append(obj.id)
        objs[tmp_id] = obj
        if obj.id in errors:
            errs[tmp_id] = errors[obj.id]
            if not clean_pipeline:
                # Failed modules must not stay in the cache.
                to_delete.append(obj.id)
        if obj.id in executed:
            execs[tmp_id] = executed[obj.id]
        elif obj.id in suspended:
            suspends[tmp_id] = suspended[obj.id]
        elif obj.id in cached:
            caches[tmp_id] = cached[obj.id]
        else:
            # these modules didn't execute
            execs[tmp_id] = False

    return (to_delete, objs, errs, execs, suspends, caches, parameter_changes)
def finalize_pipeline(self, pipeline, to_delete, objs, errs, execs,
                      suspended, cached, **kwargs):
    """Report per-module results to the view and clean up the cache.

    Each module id in `objs` is flagged in the view as exactly one of:
    error, suspended, success, not-executed (cached) or persistent.
    """
    def fetch(name, default):
        value = kwargs.get(name, default)
        try:
            del kwargs[name]
        except KeyError:
            pass
        return value
    view = fetch('view', DummyView())
    reset_computed = fetch('reset_computed', True)

    # Remove modules scheduled for deletion from the persistent cache.
    self.clean_modules(to_delete)

    for module_id in objs:
        if module_id in errs:
            failure = errs[module_id]
            view.set_module_error(module_id, failure.msg, failure.errorTrace)
        elif module_id in suspended and suspended[module_id]:
            view.set_module_suspended(module_id, suspended[module_id])
        elif module_id in execs and execs[module_id]:
            view.set_module_success(module_id)
        elif module_id in cached and cached[module_id]:
            view.set_module_not_executed(module_id)
        else:
            view.set_module_persistent(module_id)

    if reset_computed:
        # Clear the computed flag so a later run re-evaluates modules.
        for module in self._objects.itervalues():
            module.computed = False
def unlocked_execute(self, pipeline, **kwargs):
    """unlocked_execute(pipeline, **kwargs): Executes a pipeline using
    caching. Caching works by reusing pipelines directly. This
    means that there exists one global pipeline whose parts get
    executed over and over again. This allows nested execution."""
    # setup_pipeline returns: (module map, inverse module map,
    # modules added, connections added, ids to delete, setup errors).
    (module_map, inverse_map, modules_added, conns_added,
     to_delete, errors) = self.setup_pipeline(pipeline, **kwargs)

    if errors:
        # Setup failed: synthesize an "execution" result carrying only
        # the setup errors, in execute_pipeline's result shape.
        exec_result = (to_delete, module_map, errors, {}, {}, {}, [])
    else:
        exec_result = self.execute_pipeline(pipeline, module_map,
                                            inverse_map, **kwargs)

    # finalize_pipeline takes everything except parameter_changes.
    self.finalize_pipeline(pipeline, *exec_result[:-1], **kwargs)

    return InstanceObject(objects=exec_result[1],
                          errors=exec_result[2],
                          executed=exec_result[3],
                          suspended=exec_result[4],
                          parameter_changes=exec_result[6],
                          modules_added=modules_added,
                          conns_added=conns_added)
@lock_method(vistrails.core.interpreter.utils.get_interpreter_lock())
def execute(self, pipeline, **kwargs):
    """execute(pipeline, **kwargs):

    kwargs:
      controller = fetch('controller', None)
      locator = fetch('locator', None)
      current_version = fetch('current_version', None)
      view = fetch('view', DummyView())
      aliases = fetch('aliases', None)
      params = fetch('params', None)
      extra_info = fetch('extra_info', None)
      logger = fetch('logger', DummyLogController())
      reason = fetch('reason', None)
      actions = fetch('actions', None)
      done_summon_hooks = fetch('done_summon_hooks', [])
      module_executed_hook = fetch('module_executed_hook', [])

    Executes a pipeline using caching. Caching works by reusing
    pipelines directly.  This means that there exists one global
    pipeline whose parts get executed over and over again.

    This function returns a triple of dictionaries (objs, errs, execs).

    objs is a mapping from local ids (the ids in the pipeline) to
    objects **in the persistent pipeline**. Notice, these are not
    the objects inside the passed pipeline, but the objects they
    were mapped to in the persistent pipeline.

    errs is a dictionary from local ids to error messages of modules
    that might have returns errors.

    execs is a dictionary from local ids to boolean values indicating
    whether they were executed or not.

    If modules have no error associated with but were not executed, it
    means they were cached."""

    # Setup named arguments. We don't use named parameters so
    # that positional parameter calls fail earlier
    new_kwargs = {}
    def fetch(name, default):
        # Record the value in new_kwargs (forwarded to
        # unlocked_execute) and consume it from kwargs so that any
        # unrecognized argument is detected below.
        r = kwargs.get(name, default)
        new_kwargs[name] = r
        try:
            del kwargs[name]
        except KeyError:
            pass
        return r
    controller = fetch('controller', None)
    locator = fetch('locator', None)
    current_version = fetch('current_version', None)
    view = fetch('view', DummyView())
    vistrail_variables = fetch('vistrail_variables', None)
    aliases = fetch('aliases', None)
    params = fetch('params', None)
    extra_info = fetch('extra_info', None)
    logger = fetch('logger', DummyLogController())
    sinks = fetch('sinks', None)
    reason = fetch('reason', None)
    actions = fetch('actions', None)
    done_summon_hooks = fetch('done_summon_hooks', [])
    module_executed_hook = fetch('module_executed_hook', [])
    stop_on_error = fetch('stop_on_error', True)

    if len(kwargs) > 0:
        raise VistrailsInternalError('Wrong parameters passed '
                                     'to execute: %s' % kwargs)

    # Non-cacheable modules (e.g. those with side effects) must be
    # re-executed on every run.
    self.clean_non_cacheable_modules()

#         if controller is not None:
#             vistrail = controller.vistrail
#             (pipeline, module_remap) = \
#                 core.db.io.expand_workflow(vistrail, pipeline)
#             new_kwargs['module_remap'] = module_remap
#         else:
#             vistrail = None
    if controller is not None:
        vistrail = controller.vistrail
    else:
        vistrail = None

    # Bracket the actual execution with workflow-level log records.
    self.parent_execs = [None]
    logger.start_workflow_execution(vistrail, pipeline, current_version)
    self.annotate_workflow_execution(logger, reason, aliases, params)
    result = self.unlocked_execute(pipeline, **new_kwargs)
    logger.finish_workflow_execution(result.errors, suspended=result.suspended)
    self.parent_execs = [None]

    return result
def annotate_workflow_execution(self, logger, reason, aliases, params):
    """annotate_workflow_Execution(logger: LogController, reason:str,
    aliases:dict, params:list)-> None

    Record the execution reason plus (pickled) aliases and params as
    annotations on the workflow execution in `logger`.
    """
    annotations = {"__reason__": reason}
    # isinstance(None, dict) / isinstance(None, list) are False, so a
    # plain isinstance check covers the None case as well.
    if isinstance(aliases, dict):
        annotations["__aliases__"] = cPickle.dumps(aliases)
    if isinstance(params, list):
        annotations["__params__"] = cPickle.dumps(params)
    logger.insert_workflow_exec_annotations(annotations)
def add_to_persistent_pipeline(self, pipeline):
    """add_to_persistent_pipeline(pipeline):
    (module_id_map, connection_id_map, modules_added)
    Adds a pipeline to the persistent pipeline of the cached interpreter
    and adds current logging object to each existing module.

    Returns four things: two dictionaries describing the mapping
    of ids from the passed pipeline to the persistent one (the
    first one has the module id mapping, the second one has the
    connection id mapping), a set of all module ids added to the
    persistent pipeline, and a set of all connection ids added to
    the persistent pipeline."""
    module_id_map = Bidict()
    connection_id_map = Bidict()
    modules_added = set()
    connections_added = set()
    pipeline.refresh_signatures()
    # we must traverse vertices in topological sort order
    verts = pipeline.graph.vertices_topological_sort()
    for new_module_id in verts:
        # The subpipeline signature identifies a module together with
        # everything upstream of it; identical signatures mean the
        # cached result can be reused.
        new_sig = pipeline.subpipeline_signature(new_module_id)
        if not self._persistent_pipeline.has_subpipeline_signature(new_sig):
            # Must add module to persistent pipeline
            persistent_module = copy.copy(pipeline.modules[new_module_id])
            persistent_id = self._persistent_pipeline.fresh_module_id()
            persistent_module.id = persistent_id
            self._persistent_pipeline.add_module(persistent_module)
            # Store the signature as a lowercase hex string.
            self._persistent_pipeline.modules[persistent_id]._signature = \
                base64.b16encode(new_sig).lower()
            module_id_map[new_module_id] = persistent_id
            modules_added.add(new_module_id)
        else:
            # Module already cached: just record the id mapping.
            i = self._persistent_pipeline \
                    .subpipeline_id_from_signature(new_sig)
            module_id_map[new_module_id] = i
    for connection in pipeline.connections.itervalues():
        new_sig = pipeline.connection_signature(connection.id)
        if not self._persistent_pipeline.has_connection_signature(new_sig):
            # Must add connection to persistent pipeline
            persistent_connection = copy.copy(connection)
            persistent_id = self._persistent_pipeline.fresh_connection_id()
            persistent_connection.id = persistent_id
            # Remap endpoints to their persistent module ids (already
            # known, because modules were processed above).
            persistent_connection.sourceId = module_id_map[
                connection.sourceId]
            persistent_connection.destinationId = module_id_map[
                connection.destinationId]
            self._persistent_pipeline.add_connection(persistent_connection)
            connection_id_map[connection.id] = persistent_id
            connections_added.add(connection.id)
        else:
            i = self._persistent_pipeline \
                    .connection_id_from_signature(new_sig)
            connection_id_map[connection.id] = i
    # update persistent signatures
    self._persistent_pipeline.compute_signatures()
    return (module_id_map, connection_id_map,
            modules_added, connections_added)
def find_persistent_entities(self, pipeline):
    """returns a map from a pipeline to the persistent pipeline,
    assuming those pieces exist.

    Modules/connections without a persistent counterpart map to None.
    """
    persistent_p = self._persistent_pipeline
    object_map = {}
    module_id_map = {}
    connection_id_map = {}
    pipeline.refresh_signatures()
    # Signatures must be computed in topological sort order.
    for module_id in pipeline.graph.vertices_topological_sort():
        sig = pipeline.subpipeline_signature(module_id)
        if not persistent_p.has_subpipeline_signature(sig):
            module_id_map[module_id] = None
            object_map[module_id] = None
        else:
            persistent_id = persistent_p.subpipeline_id_from_signature(sig)
            module_id_map[module_id] = persistent_id
            object_map[module_id] = self._objects[persistent_id]
    for connection in pipeline.connections.itervalues():
        sig = pipeline.connection_signature(connection.id)
        if not persistent_p.has_connection_signature(sig):
            connection_id_map[connection.id] = None
        else:
            connection_id_map[connection.id] = \
                persistent_p.connection_id_from_signature(sig)
    return (object_map, module_id_map, connection_id_map)
# Singleton storage: at most one CachedInterpreter per process.
__instance = None

@staticmethod
def get():
    # Lazily create and return the singleton instance.
    if not CachedInterpreter.__instance:
        CachedInterpreter.__instance = CachedInterpreter()
    return CachedInterpreter.__instance

@staticmethod
def cleanup():
    # Drop all cached state and force a garbage-collection pass.
    if CachedInterpreter.__instance:
        CachedInterpreter.__instance.clear()
    objs = gc.collect()

@staticmethod
def flush():
    # Clear the cache but keep the interpreter usable afterwards
    # (clear() then create() re-initializes internal state).
    if CachedInterpreter.__instance:
        CachedInterpreter.__instance.clear()
        CachedInterpreter.__instance.create()
    objs = gc.collect()

@staticmethod
def clear_package(identifier):
    # Evict only the cached entries that belong to one package.
    if CachedInterpreter.__instance:
        CachedInterpreter.__instance._clear_package(identifier)
##############################################################################
# Testing
import unittest
class TestCachedInterpreter(unittest.TestCase):

    def test_cache(self):
        """Test if basic caching is working."""
        from vistrails.core.db.locator import XMLFileLocator
        from vistrails.core.vistrail.controller import VistrailController
        from vistrails.core.db.io import load_vistrail

        # Load a known test vistrail shipped with the source tree.
        locator = XMLFileLocator(vistrails.core.system.vistrails_root_directory() +
                                 '/tests/resources/dummy.xml')
        (v, abstractions, thumbnails, mashups) = load_vistrail(locator)

        # the controller will take care of upgrades
        controller = VistrailController(v, locator, abstractions, thumbnails,
                                        mashups)
        p1 = v.getPipeline('int chain')
        n = v.get_version_number('int chain')
        controller.change_selected_version(n)
        controller.flush_delayed_actions()
        p1 = controller.current_pipeline

        view = DummyView()
        interpreter = vistrails.core.interpreter.cached.CachedInterpreter.get()
        # First run: everything must be computed and cached.
        result = interpreter.execute(p1,
                                     locator=v,
                                     current_version=n,
                                     view=view,
                                     )

        # to force fresh params
        p2 = v.getPipeline('int chain')
        controller.change_selected_version(n)
        controller.flush_delayed_actions()
        p2 = controller.current_pipeline
        # Second run of an equivalent pipeline: only the module with
        # fresh parameters should be (re-)added to the cache.
        result = interpreter.execute(p2,
                                     locator=v,
                                     current_version=n,
                                     view=view,
                                     )
        assert len(result.modules_added) == 1
# Allow running this module's tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
import torch
import os
from test.common.assets import get_asset_path
from test.common.torchtext_test_case import TorchtextTestCase
from torchtext.experimental.vectors import (
Vectors,
vectors_from_file_object
)
class TestVectors(TorchtextTestCase):
    """Unit tests for the experimental torchtext `Vectors` container."""

    def test_empty_vectors(self):
        # With no tokens at all, any lookup falls back to unk_tensor.
        tokens = []
        vectors = []
        unk_tensor = torch.tensor([0], dtype=torch.float)

        vectors_obj = Vectors(tokens, vectors, unk_tensor)
        torch.testing.assert_allclose(vectors_obj['not_in_it'], unk_tensor)

    def test_empty_unk(self):
        # When no unk_tensor is given, a zero vector of matching
        # dimensionality is used for unknown tokens.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        expected_unk_tensor = torch.tensor([0, 0], dtype=torch.float)

        tokens = ['a']
        vectors = [tensorA]
        vectors_obj = Vectors(tokens, vectors)

        torch.testing.assert_allclose(vectors_obj['not_in_it'], expected_unk_tensor)

    def test_vectors_basic(self):
        # Known tokens return their vector; unknown tokens return unk.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        tensorB = torch.tensor([0, 1], dtype=torch.float)

        unk_tensor = torch.tensor([0, 0], dtype=torch.float)
        tokens = ['a', 'b']
        vectors = [tensorA, tensorB]
        vectors_obj = Vectors(tokens, vectors, unk_tensor=unk_tensor)

        torch.testing.assert_allclose(vectors_obj['a'], tensorA)
        torch.testing.assert_allclose(vectors_obj['b'], tensorB)
        torch.testing.assert_allclose(vectors_obj['not_in_it'], unk_tensor)

    def test_vectors_jit(self):
        # The scripted object must behave identically to the eager one.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        tensorB = torch.tensor([0, 1], dtype=torch.float)

        unk_tensor = torch.tensor([0, 0], dtype=torch.float)
        tokens = ['a', 'b']
        vectors = [tensorA, tensorB]
        vectors_obj = Vectors(tokens, vectors, unk_tensor=unk_tensor)
        jit_vectors_obj = torch.jit.script(vectors_obj)

        torch.testing.assert_allclose(vectors_obj['a'], jit_vectors_obj['a'])
        torch.testing.assert_allclose(vectors_obj['b'], jit_vectors_obj['b'])
        torch.testing.assert_allclose(vectors_obj['not_in_it'], jit_vectors_obj['not_in_it'])

    def test_vectors_add_item(self):
        # Items assigned after construction are retrievable.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        unk_tensor = torch.tensor([0, 0], dtype=torch.float)

        tokens = ['a']
        vectors = [tensorA]
        vectors_obj = Vectors(tokens, vectors, unk_tensor=unk_tensor)

        tensorB = torch.tensor([0., 1])
        vectors_obj['b'] = tensorB

        torch.testing.assert_allclose(vectors_obj['a'], tensorA)
        torch.testing.assert_allclose(vectors_obj['b'], tensorB)
        torch.testing.assert_allclose(vectors_obj['not_in_it'], unk_tensor)

    def test_vectors_load_and_save(self):
        # Round-trip through torch.save/torch.load preserves lookups.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        expected_unk_tensor = torch.tensor([0, 0], dtype=torch.float)

        tokens = ['a']
        vectors = [tensorA]
        vectors_obj = Vectors(tokens, vectors)

        vector_path = os.path.join(self.test_dir, 'vectors.pt')
        torch.save(vectors_obj, vector_path)
        loaded_vectors_obj = torch.load(vector_path)

        torch.testing.assert_allclose(loaded_vectors_obj['a'], tensorA)
        torch.testing.assert_allclose(loaded_vectors_obj['not_in_it'], expected_unk_tensor)

    def test_errors(self):
        tokens = []
        vectors = []

        with self.assertRaises(ValueError):
            # Test proper error raised when passing in empty tokens and vectors and
            # not passing in a user defined unk_tensor
            Vectors(tokens, vectors)

        tensorA = torch.tensor([1, 0, 0], dtype=torch.float)
        tensorB = torch.tensor([0, 1, 0], dtype=torch.float)
        tokens = ['a', 'b', 'c']
        vectors = [tensorA, tensorB]

        with self.assertRaises(RuntimeError):
            # Test proper error raised when tokens and vectors have different sizes
            Vectors(tokens, vectors)

        tensorC = torch.tensor([0, 0, 1], dtype=torch.float)
        tokens = ['a', 'a', 'c']
        vectors = [tensorA, tensorB, tensorC]

        with self.assertRaises(RuntimeError):
            # Test proper error raised when tokens have duplicates
            # TODO (Nayef211): use self.assertRaisesRegex() to check
            # the key of the duplicate token in the error message
            Vectors(tokens, vectors)

        tensorC = torch.tensor([0, 0, 1], dtype=torch.int8)
        vectors = [tensorA, tensorB, tensorC]

        with self.assertRaises(TypeError):
            # Test proper error raised when vectors are not of type torch.float
            Vectors(tokens, vectors)

    def test_vectors_from_file(self):
        asset_name = 'vectors_test.csv'
        asset_path = get_asset_path(asset_name)
        # Use a context manager so the file handle is always closed;
        # the previous code opened the file and never closed it.
        # NOTE(review): assumes vectors_from_file_object consumes the
        # file eagerly during construction -- confirm.
        with open(asset_path, 'r') as f:
            vectors_obj = vectors_from_file_object(f)

        expected_tensorA = torch.tensor([1, 0, 0], dtype=torch.float)
        expected_tensorB = torch.tensor([0, 1, 0], dtype=torch.float)
        expected_unk_tensor = torch.tensor([0, 0, 0], dtype=torch.float)

        torch.testing.assert_allclose(vectors_obj['a'], expected_tensorA)
        torch.testing.assert_allclose(vectors_obj['b'], expected_tensorB)
        torch.testing.assert_allclose(vectors_obj['not_in_it'], expected_unk_tensor)
Updated the tests to use `self.assertEqual` instead of `torch.testing.assert_allclose`.
# -*- coding: utf-8 -*-
import torch
import os
from test.common.assets import get_asset_path
from test.common.torchtext_test_case import TorchtextTestCase
from torchtext.experimental.vectors import (
Vectors,
vectors_from_file_object
)
class TestVectors(TorchtextTestCase):
    """Unit tests for the experimental torchtext `Vectors` container."""

    def test_empty_vectors(self):
        # With no tokens at all, any lookup falls back to unk_tensor.
        tokens = []
        vectors = []
        unk_tensor = torch.tensor([0], dtype=torch.float)

        vectors_obj = Vectors(tokens, vectors, unk_tensor)
        self.assertEqual(vectors_obj['not_in_it'], unk_tensor)

    def test_empty_unk(self):
        # When no unk_tensor is given, a zero vector of matching
        # dimensionality is used for unknown tokens.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        expected_unk_tensor = torch.tensor([0, 0], dtype=torch.float)

        tokens = ['a']
        vectors = [tensorA]
        vectors_obj = Vectors(tokens, vectors)

        self.assertEqual(vectors_obj['not_in_it'], expected_unk_tensor)

    def test_vectors_basic(self):
        # Known tokens return their vector; unknown tokens return unk.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        tensorB = torch.tensor([0, 1], dtype=torch.float)

        unk_tensor = torch.tensor([0, 0], dtype=torch.float)
        tokens = ['a', 'b']
        vectors = [tensorA, tensorB]
        vectors_obj = Vectors(tokens, vectors, unk_tensor=unk_tensor)

        self.assertEqual(vectors_obj['a'], tensorA)
        self.assertEqual(vectors_obj['b'], tensorB)
        self.assertEqual(vectors_obj['not_in_it'], unk_tensor)

    def test_vectors_jit(self):
        # The scripted object must behave identically to the eager one.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        tensorB = torch.tensor([0, 1], dtype=torch.float)

        unk_tensor = torch.tensor([0, 0], dtype=torch.float)
        tokens = ['a', 'b']
        vectors = [tensorA, tensorB]
        vectors_obj = Vectors(tokens, vectors, unk_tensor=unk_tensor)
        jit_vectors_obj = torch.jit.script(vectors_obj)

        self.assertEqual(vectors_obj['a'], jit_vectors_obj['a'])
        self.assertEqual(vectors_obj['b'], jit_vectors_obj['b'])
        self.assertEqual(vectors_obj['not_in_it'], jit_vectors_obj['not_in_it'])

    def test_vectors_add_item(self):
        # Items assigned after construction are retrievable.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        unk_tensor = torch.tensor([0, 0], dtype=torch.float)

        tokens = ['a']
        vectors = [tensorA]
        vectors_obj = Vectors(tokens, vectors, unk_tensor=unk_tensor)

        tensorB = torch.tensor([0., 1])
        vectors_obj['b'] = tensorB

        self.assertEqual(vectors_obj['a'], tensorA)
        self.assertEqual(vectors_obj['b'], tensorB)
        self.assertEqual(vectors_obj['not_in_it'], unk_tensor)

    def test_vectors_load_and_save(self):
        # Round-trip through torch.save/torch.load preserves lookups.
        tensorA = torch.tensor([1, 0], dtype=torch.float)
        expected_unk_tensor = torch.tensor([0, 0], dtype=torch.float)

        tokens = ['a']
        vectors = [tensorA]
        vectors_obj = Vectors(tokens, vectors)

        vector_path = os.path.join(self.test_dir, 'vectors.pt')
        torch.save(vectors_obj, vector_path)
        loaded_vectors_obj = torch.load(vector_path)

        self.assertEqual(loaded_vectors_obj['a'], tensorA)
        self.assertEqual(loaded_vectors_obj['not_in_it'], expected_unk_tensor)

    def test_errors(self):
        tokens = []
        vectors = []

        with self.assertRaises(ValueError):
            # Test proper error raised when passing in empty tokens and vectors and
            # not passing in a user defined unk_tensor
            Vectors(tokens, vectors)

        tensorA = torch.tensor([1, 0, 0], dtype=torch.float)
        tensorB = torch.tensor([0, 1, 0], dtype=torch.float)
        tokens = ['a', 'b', 'c']
        vectors = [tensorA, tensorB]

        with self.assertRaises(RuntimeError):
            # Test proper error raised when tokens and vectors have different sizes
            Vectors(tokens, vectors)

        tensorC = torch.tensor([0, 0, 1], dtype=torch.float)
        tokens = ['a', 'a', 'c']
        vectors = [tensorA, tensorB, tensorC]

        with self.assertRaises(RuntimeError):
            # Test proper error raised when tokens have duplicates
            # TODO (Nayef211): use self.assertRaisesRegex() to check
            # the key of the duplicate token in the error message
            Vectors(tokens, vectors)

        tensorC = torch.tensor([0, 0, 1], dtype=torch.int8)
        vectors = [tensorA, tensorB, tensorC]

        with self.assertRaises(TypeError):
            # Test proper error raised when vectors are not of type torch.float
            Vectors(tokens, vectors)

    def test_vectors_from_file(self):
        asset_name = 'vectors_test.csv'
        asset_path = get_asset_path(asset_name)
        # Use a context manager so the file handle is always closed;
        # the previous code opened the file and never closed it.
        # NOTE(review): assumes vectors_from_file_object consumes the
        # file eagerly during construction -- confirm.
        with open(asset_path, 'r') as f:
            vectors_obj = vectors_from_file_object(f)

        expected_tensorA = torch.tensor([1, 0, 0], dtype=torch.float)
        expected_tensorB = torch.tensor([0, 1, 0], dtype=torch.float)
        expected_unk_tensor = torch.tensor([0, 0, 0], dtype=torch.float)

        self.assertEqual(vectors_obj['a'], expected_tensorA)
        self.assertEqual(vectors_obj['b'], expected_tensorB)
        self.assertEqual(vectors_obj['not_in_it'], expected_unk_tensor)
|
from scipy.misc import imread
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from functools import partial
# Grayscale mask image that constrains the word-cloud layout shape.
tedmask = imread('tedxbw.png', flatten=True)
def color_func(word=None, font_size=None, position=None,
               orientation=None, font_path=None, random_state=None, dictionary=None):
    """Return a blue shade for `word` proportional to its weight.

    `dictionary` maps each word to its numeric weight; the remaining
    parameters follow the WordCloud color_func callback signature and
    are unused here.
    """
    # val = max(0, min(255, font_size * 6 - 30))
    # Clamp to the valid 0-255 channel range: without the clamp a
    # weight of 1 produced rgb(0, 0, -8), an invalid color component.
    val = max(0, min(255, dictionary[word] * 42 - 50))
    return "rgb(%d, %d, %d)" % (0, 0, val)
def generate(words):
    """
    Words are supposed to be a list of tuples [(word, weight)]

    Builds a masked word cloud, turns the white background transparent
    and returns the result as a PIL image.
    """
    # word -> weight; the weights drive the per-word color (color_func).
    words_dict = dict(words)
    # WordCloud.generate() expects one text blob, so join the words.
    # NOTE(review): joining only the keys means each word occurs once,
    # so layout sizes are uniform -- confirm this is intended.
    words = " ".join(words_dict.keys())
    wordcloud = WordCloud(stopwords=STOPWORDS,
                          background_color='white',
                          width=400,
                          height=400,
                          scale=3,
                          mask=tedmask,
                          color_func=partial(color_func, dictionary=words_dict)).generate(words)
    img = wordcloud.to_image()
    pixdata = img.load()
    # Make the background transparent: zero the alpha channel of every
    # pure-white pixel.  (xrange: this script targets Python 2.)
    for y in xrange(img.size[1]):
        for x in xrange(img.size[0]):
            if pixdata[x, y] == (255, 255, 255, 255):
                pixdata[x, y] = (255, 255, 255, 0)
    return img
Changed the mask-image path to './visualization/tedxbw.png'.
from scipy.misc import imread
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from functools import partial
# Grayscale mask image that constrains the word-cloud layout shape.
tedmask = imread('./visualization/tedxbw.png', flatten=True)
def color_func(word=None, font_size=None, position=None,
               orientation=None, font_path=None, random_state=None, dictionary=None):
    """Return a blue shade for `word` proportional to its weight.

    `dictionary` maps each word to its numeric weight; the remaining
    parameters follow the WordCloud color_func callback signature and
    are unused here.
    """
    # val = max(0, min(255, font_size * 6 - 30))
    # Clamp to the valid 0-255 channel range: without the clamp a
    # weight of 1 produced rgb(0, 0, -8), an invalid color component.
    val = max(0, min(255, dictionary[word] * 42 - 50))
    return "rgb(%d, %d, %d)" % (0, 0, val)
def generate(words):
    """
    Words are supposed to be a list of tuples [(word, weight)]

    Builds a masked word cloud, turns the white background transparent
    and returns the result as a PIL image.
    """
    # word -> weight; the weights drive the per-word color (color_func).
    words_dict = dict(words)
    # WordCloud.generate() expects one text blob, so join the words.
    # NOTE(review): joining only the keys means each word occurs once,
    # so layout sizes are uniform -- confirm this is intended.
    words = " ".join(words_dict.keys())
    wordcloud = WordCloud(stopwords=STOPWORDS,
                          background_color='white',
                          width=400,
                          height=400,
                          scale=3,
                          mask=tedmask,
                          color_func=partial(color_func, dictionary=words_dict)).generate(words)
    img = wordcloud.to_image()
    pixdata = img.load()
    # Make the background transparent: zero the alpha channel of every
    # pure-white pixel.  (xrange: this script targets Python 2.)
    for y in xrange(img.size[1]):
        for x in xrange(img.size[0]):
            if pixdata[x, y] == (255, 255, 255, 255):
                pixdata[x, y] = (255, 255, 255, 0)
    return img
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import mimetypes
from collections import Counter
from copy import deepcopy
from datetime import datetime, date
from operator import itemgetter
from uuid import uuid4
import wtforms
from sqlalchemy.dialects.postgresql import ARRAY
from werkzeug.datastructures import FileStorage
from wtforms.validators import NumberRange, ValidationError, InputRequired
from indico.core.db import db
from indico.modules.events.registration.fields.base import (RegistrationFormFieldBase, RegistrationFormBillableField,
RegistrationFormBillableItemsField)
from indico.modules.events.registration.models.form_fields import RegistrationFormFieldData
from indico.modules.events.registration.models.registrations import RegistrationData
from indico.util.date_time import format_date, iterdays, strftime_all_years
from indico.util.fs import secure_filename
from indico.util.i18n import _, L_
from indico.util.string import normalize_phone_number, snakify_keys
from indico.web.forms.fields import IndicoRadioField, JSONField
from indico.web.forms.validators import IndicoEmail
from MaKaC.webinterface.common.countries import CountryHolder
def get_field_merged_options(field, registration_data):
    """Return the field's view data, re-adding any selected choices that
    were deleted from the current field version.

    Choices found only in older versioned data are appended to
    ``result['choices']`` and their ids recorded in
    ``result['deletedChoice']``.
    """
    rdata = registration_data.get(field.id)
    result = deepcopy(field.view_data)
    result['deletedChoice'] = []
    if not rdata or not rdata.data:
        return result
    # Single-choice data stores the selection under 'choice'; multi-choice
    # data stores one key per selected option.
    if 'choice' in rdata.data:
        selected = [rdata.data['choice']]
    else:
        selected = rdata.data.keys()
    known_ids = {item['id'] for item in result['choices']}
    for val in selected:
        if not val or val in known_ids:
            continue
        # The selected option no longer exists in the current version:
        # recover it from the versioned data this registration used.
        field_data = rdata.field_data
        merged_data = field.field_impl.unprocess_field_data(field_data.versioned_data,
                                                           field_data.field.data)
        missing_option = next((choice for choice in merged_data['choices']
                               if choice['id'] == val), None)
        if missing_option is not None:
            result['choices'].append(missing_option)
            result['deletedChoice'].append(missing_option['id'])
            known_ids.add(missing_option['id'])
    return result
class TextField(RegistrationFormFieldBase):
    """Single-line free-text registration field."""
    name = 'text'
    wtf_field_class = wtforms.StringField
class NumberField(RegistrationFormBillableField):
    """Integer registration field, optionally billable per unit."""
    name = 'number'
    wtf_field_class = wtforms.IntegerField

    @property
    def validators(self):
        """Return a NumberRange validator when a minimum is configured.

        Uses an explicit ``is not None`` test so a configured minimum
        of 0 still yields a validator (the previous truthiness check
        silently dropped it for ``min_value == 0``).
        """
        min_value = self.form_item.data.get('min_value', None)
        return [NumberRange(min=min_value)] if min_value is not None else None

    def calculate_price(self, reg_data, versioned_data):
        # Price is unit price times the entered quantity; non-billable
        # fields and empty data are free.
        if not versioned_data.get('is_billable'):
            return 0
        return versioned_data.get('price', 0) * int(reg_data or 0)

    def get_friendly_data(self, registration_data):
        # Show an empty string instead of None for unanswered fields.
        if registration_data.data is None:
            return ''
        return registration_data.data
class TextAreaField(RegistrationFormFieldBase):
    """Multi-line free-text registration field."""
    name = 'textarea'
    wtf_field_class = wtforms.StringField
class ChoiceBaseField(RegistrationFormBillableItemsField):
    """Common base for fields where the registrant picks among
    predefined choices.

    Choice definitions (price, limits, billability) live in the
    *versioned* data; captions live in the *unversioned* data keyed by
    choice id.
    """
    versioned_data_fields = RegistrationFormBillableItemsField.versioned_data_fields | {'choices'}
    # Subclasses that support a preselected choice set this to True.
    has_default_item = False
    wtf_field_class = JSONField

    @classmethod
    def unprocess_field_data(cls, versioned_data, unversioned_data):
        # Re-attach the (unversioned) captions to the versioned choices.
        items = deepcopy(versioned_data['choices'])
        for item in items:
            item['caption'] = unversioned_data['captions'][item['id']]
        return {'choices': items}

    @property
    def filter_choices(self):
        return self.form_item.data['captions']

    @property
    def view_data(self):
        return dict(super(ChoiceBaseField, self).view_data, places_used=self.get_places_used())

    @property
    def validators(self):
        def _check_number_of_places(form, field):
            # Validate that the selected choice(s) still have free places.
            if not field.data:
                return
            if form.modified_registration:
                # Unchanged data on a modification never fails: the
                # places are already taken by this registration.
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                if not old_data or not self.has_data_changed(field.data, old_data):
                    return
            choices = self.form_item.versioned_data['choices']
            captions = self.form_item.data['captions']
            for k in field.data:
                choice = next((x for x in choices if x['id'] == k), None)
                # NOTE(review): `choice` may be None if `k` refers to a
                # deleted choice, making the next line fail -- confirm
                # whether that case can reach this validator.
                places_limit = choice.get('places_limit')
                places_used_dict = self.get_places_used()
                places_used_dict.update(field.data)
                if places_limit and not (places_limit - places_used_dict.get(k, 0)) >= 0:
                    raise ValidationError(_('No places left for the option: {0}').format(captions[k]))
        return [_check_number_of_places]

    @classmethod
    def process_field_data(cls, data, old_data=None, old_versioned_data=None):
        """Split submitted choice data into unversioned captions and
        versioned choice definitions, normalizing types and ids."""
        unversioned_data, versioned_data = super(ChoiceBaseField, cls).process_field_data(data, old_data,
                                                                                          old_versioned_data)
        # Drop choices flagged for removal.
        items = [x for x in versioned_data['choices'] if not x.get('remove')]
        captions = dict(old_data['captions']) if old_data is not None else {}
        if cls.has_default_item:
            unversioned_data.setdefault('default_item', None)
        for item in items:
            if 'id' not in item:
                # Newly created choice: give it a stable uuid id.
                item['id'] = unicode(uuid4())
            item.setdefault('is_billable', False)
            # Coerce numeric settings; missing/empty values become 0.
            item['price'] = float(item['price']) if item.get('price') else 0
            item['places_limit'] = int(item['places_limit']) if item.get('places_limit') else 0
            item['max_extra_slots'] = int(item['max_extra_slots']) if item.get('max_extra_slots') else 0
            if cls.has_default_item and unversioned_data['default_item'] in {item['caption'], item['id']}:
                # The default may be referenced by caption or id;
                # normalize it to the id.
                unversioned_data['default_item'] = item['id']
            # Captions are stored unversioned, keyed by choice id.
            captions[item['id']] = item.pop('caption')
        versioned_data['choices'] = items
        unversioned_data['captions'] = captions
        return unversioned_data, versioned_data

    def get_places_used(self):
        """Count, per choice id, how many places active registrations
        already occupy for this field."""
        places_used = Counter()
        for registration in self.form_item.registration_form.active_registrations:
            if self.form_item.id not in registration.data_by_field:
                continue
            data = registration.data_by_field[self.form_item.id].data
            if not data:
                continue
            places_used.update(data)
        return dict(places_used)

    def create_sql_filter(self, data_list):
        return RegistrationData.data.has_any(db.func.cast(data_list, ARRAY(db.String)))

    def calculate_price(self, reg_data, versioned_data):
        """Sum the prices of the selected billable choices, charging
        extra slots (beyond the first) at the same unit price when
        `extra_slots_pay` is set."""
        if not reg_data:
            return 0
        billable_choices = [x for x in versioned_data['choices'] if x['id'] in reg_data and x['is_billable']]
        price = 0
        for billable_field in billable_choices:
            price += billable_field['price']
            if billable_field.get('extra_slots_pay'):
                price += (reg_data[billable_field['id']] - 1) * billable_field['price']
        return price
class SingleChoiceField(ChoiceBaseField):
    """Choice field where the registrant picks at most one option."""
    name = 'single_choice'
    has_default_item = True

    @property
    def default_value(self):
        """Return the configured default option if it still exists."""
        unversioned = self.form_item.data
        versioned = self.form_item.versioned_data
        if 'default_item' not in unversioned:
            return None
        default_item = unversioned['default_item']
        # only use the default item if it exists in the current version
        for choice in versioned['choices']:
            if choice['id'] == default_item:
                return {default_item: 1}
        return {}

    def get_friendly_data(self, registration_data):
        """Return the selected option's caption, noting extra slots if any."""
        if not registration_data.data:
            return ''
        uuid, number_of_slots = next(iter(registration_data.data.items()))
        caption = registration_data.field_data.field.data['captions'][uuid]
        if number_of_slots > 1:
            return '{} (+{})'.format(caption, number_of_slots - 1)
        return caption

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Store the selection, keeping the old value if it was already paid."""
        if billable_items_locked and old_data.price:
            # if the old field was paid we can simply ignore any change and keep the old value
            return {}
        # always store no-option as empty dict
        if value is None:
            value = {}
        return super(SingleChoiceField, self).process_form_data(registration, value, old_data, billable_items_locked)
class CheckboxField(RegistrationFormBillableField):
    """Yes/no checkbox field with an optional price and places limit."""
    name = 'checkbox'
    wtf_field_class = wtforms.BooleanField
    friendly_data_mapping = {None: '',
                             True: L_('Yes'),
                             False: L_('No')}

    def calculate_price(self, reg_data, versioned_data):
        """Return the price if the box is billable and checked, else 0."""
        if not versioned_data.get('is_billable') or not reg_data:
            return 0
        return versioned_data.get('price', 0)

    def get_friendly_data(self, registration_data):
        return self.friendly_data_mapping[registration_data.data]

    def get_places_used(self):
        """Return how many active registrations have checked this box."""
        places_used = 0
        if self.form_item.data.get('places_limit'):
            for registration in self.form_item.registration_form.active_registrations:
                if self.form_item.id not in registration.data_by_field:
                    continue
                if registration.data_by_field[self.form_item.id].data:
                    places_used += 1
        return places_used

    @property
    def view_data(self):
        return dict(super(CheckboxField, self).view_data, places_used=self.get_places_used())

    @property
    def filter_choices(self):
        return {unicode(val).lower(): caption for val, caption in self.friendly_data_mapping.iteritems()
                if val is not None}

    @property
    def validators(self):
        def _check_number_of_places(form, field):
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                if not old_data or not self.has_data_changed(field.data, old_data):
                    return
            if field.data and self.form_item.data.get('places_limit'):
                places_left = self.form_item.data.get('places_limit') - self.get_places_used()
                # `<= 0` instead of a truthiness check so an over-booked field
                # (limit lowered after registrations were made) is rejected too
                if places_left <= 0:
                    raise ValidationError(_('There are no places left for this option.'))
        return [_check_number_of_places]
class DateField(RegistrationFormFieldBase):
    """Date (and optionally time) field, stored as an ISO-formatted string."""
    name = 'date'
    wtf_field_class = wtforms.StringField

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Parse the submitted value with the configured format; store as ISO."""
        if value:
            value = datetime.strptime(value, self.form_item.data['date_format']).isoformat()
        return super(DateField, self).process_form_data(registration, value, old_data, billable_items_locked)

    def get_friendly_data(self, registration_data):
        """Render the stored ISO date back in the configured display format."""
        if not registration_data.data:
            return ''
        parsed = datetime.strptime(registration_data.data, '%Y-%m-%dT%H:%M:%S')
        return strftime_all_years(parsed, self.form_item.data['date_format'])

    @property
    def view_data(self):
        # a space in the format string means the field also carries a time part
        return dict(super(DateField, self).view_data,
                    has_time=' ' in self.form_item.data['date_format'])
class BooleanField(RegistrationFormBillableField):
    """Explicit yes/no radio field with an optional price and places limit."""
    name = 'bool'
    wtf_field_class = IndicoRadioField
    required_validator = InputRequired
    friendly_data_mapping = {None: '',
                             True: L_('Yes'),
                             False: L_('No')}

    @property
    def wtf_field_kwargs(self):
        return {'choices': [(True, _('Yes')), (False, _('No'))],
                'coerce': lambda x: {'yes': True, 'no': False}.get(x, None)}

    @property
    def filter_choices(self):
        return {unicode(val).lower(): caption for val, caption in self.friendly_data_mapping.iteritems()
                if val is not None}

    @property
    def view_data(self):
        return dict(super(BooleanField, self).view_data, places_used=self.get_places_used())

    @property
    def validators(self):
        def _check_number_of_places(form, field):
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                if not old_data or not self.has_data_changed(field.data, old_data):
                    return
            if field.data and self.form_item.data.get('places_limit'):
                places_left = self.form_item.data.get('places_limit') - self.get_places_used()
                # `field.data` was already checked above; use `<= 0` so an
                # over-booked field (limit lowered later) is rejected as well
                if places_left <= 0:
                    raise ValidationError(_('There are no places left for this option.'))
        return [_check_number_of_places]

    def get_places_used(self):
        """Return how many active registrations answered 'yes'."""
        places_used = 0
        if self.form_item.data.get('places_limit'):
            for registration in self.form_item.registration_form.active_registrations:
                if self.form_item.id not in registration.data_by_field:
                    continue
                if registration.data_by_field[self.form_item.id].data:
                    places_used += 1
        return places_used

    def calculate_price(self, reg_data, versioned_data):
        """Return the price if the field is billable and the answer is 'yes'."""
        if not versioned_data.get('is_billable'):
            return 0
        return versioned_data.get('price', 0) if reg_data else 0

    def get_friendly_data(self, registration_data):
        return self.friendly_data_mapping[registration_data.data]
class PhoneField(RegistrationFormFieldBase):
    """Free-text field whose value is normalized to a canonical phone number."""

    name = 'phone'
    wtf_field_class = wtforms.StringField
    wtf_field_kwargs = {'filters': [lambda value: normalize_phone_number(value) if value else '']}
class CountryField(RegistrationFormFieldBase):
    """Drop-down field listing the known countries."""
    name = 'country'
    wtf_field_class = wtforms.SelectField

    @property
    def wtf_field_kwargs(self):
        return {'choices': CountryHolder.getCountries().items()}

    @classmethod
    def unprocess_field_data(cls, versioned_data, unversioned_data):
        """Expose the country list to the client, sorted by caption."""
        countries = CountryHolder.getCountries()
        choices = [{'caption': name, 'countryKey': code} for code, name in countries.iteritems()]
        choices.sort(key=itemgetter('caption'))
        return {'choices': choices}

    @property
    def filter_choices(self):
        return dict(self.wtf_field_kwargs['choices'])

    def get_friendly_data(self, registration_data):
        """Return the full country name for the stored country code."""
        if not registration_data.data:
            return ''
        return CountryHolder.getCountries()[registration_data.data]
class _DeletableFileField(wtforms.FileField):
    """File field that can also signal deletion of a previously uploaded file."""

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = {'keep_existing': False, 'uploaded_file': None}
            return
        # This expects a form with a hidden field and a file field with the same name.
        # If the hidden field is empty, it indicates that an existing file should be
        # deleted or replaced with the newly uploaded file.
        keep_existing = '' not in valuelist
        uploaded_file = next((item for item in valuelist if isinstance(item, FileStorage)), None)
        if not uploaded_file or not uploaded_file.filename:
            uploaded_file = None
        self.data = {'keep_existing': keep_existing, 'uploaded_file': uploaded_file}
class FileField(RegistrationFormFieldBase):
    """Field letting the registrant upload (or delete) a single file."""
    name = 'file'
    wtf_field_class = _DeletableFileField

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Build the storage dict for an uploaded file or its deletion."""
        data = {'field_data': self.form_item.current_data}
        uploaded = value['uploaded_file']
        if uploaded:
            # we have a file -> always save it
            content_type = (mimetypes.guess_type(uploaded.filename)[0] or uploaded.mimetype or
                            'application/octet-stream')
            data['file'] = {'data': uploaded.file,
                            'name': secure_filename(uploaded.filename, 'attachment'),
                            'content_type': content_type}
        elif not value['keep_existing']:
            data['file'] = None
        return data

    @property
    def default_value(self):
        return None

    def get_friendly_data(self, registration_data):
        """Return the stored file's name, or '' if there is no file."""
        if not registration_data:
            return ''
        return registration_data.filename
class EmailField(RegistrationFormFieldBase):
    """Email address field; the value is lower-cased before being stored."""
    name = 'email'
    wtf_field_class = wtforms.StringField
    wtf_field_kwargs = {'filters': [lambda value: value.lower() if value else value]}

    @property
    def validators(self):
        return [IndicoEmail()]
class AccommodationField(RegistrationFormBillableItemsField):
    """Accommodation booking field: a room type plus arrival/departure dates.

    Room types ('choices', incl. price and places limit) live in the versioned
    field data; captions and the allowed date ranges are unversioned.
    """
    name = 'accommodation'
    wtf_field_class = JSONField
    versioned_data_fields = RegistrationFormBillableField.versioned_data_fields | {'choices'}

    @classmethod
    def process_field_data(cls, data, old_data=None, old_versioned_data=None):
        """Normalize the room-type items and convert the date limits to ISO dates."""
        unversioned_data, versioned_data = super(AccommodationField, cls).process_field_data(data, old_data,
                                                                                             old_versioned_data)
        items = [x for x in versioned_data['choices'] if not x.get('remove')]
        captions = dict(old_data['captions']) if old_data is not None else {}
        for item in items:
            if 'id' not in item:
                # newly added room type -> assign a fresh uuid
                item['id'] = unicode(uuid4())
            item.setdefault('is_billable', False)
            item['price'] = float(item['price']) if item.get('price') else 0
            item['places_limit'] = int(item['places_limit']) if item.get('places_limit') else 0
            captions[item['id']] = item.pop('caption')
        for key in {'arrival_date_from', 'arrival_date_to', 'departure_date_from', 'departure_date_to'}:
            # client sends DD/MM/YYYY; store as YYYY-MM-DD
            unversioned_data[key] = _to_machine_date(unversioned_data[key])
        versioned_data['choices'] = items
        unversioned_data['captions'] = captions
        return unversioned_data, versioned_data

    @classmethod
    def unprocess_field_data(cls, versioned_data, unversioned_data):
        """Build the client view: selectable date lists and captioned choices."""
        data = {}
        arrival_date_from = _to_date(unversioned_data['arrival_date_from'])
        arrival_date_to = _to_date(unversioned_data['arrival_date_to'])
        departure_date_from = _to_date(unversioned_data['departure_date_from'])
        departure_date_to = _to_date(unversioned_data['departure_date_to'])
        data['arrival_dates'] = [(dt.date().isoformat(), format_date(dt))
                                 for dt in iterdays(arrival_date_from, arrival_date_to)]
        data['departure_dates'] = [(dt.date().isoformat(), format_date(dt))
                                   for dt in iterdays(departure_date_from, departure_date_to)]
        items = deepcopy(versioned_data['choices'])
        for item in items:
            item['caption'] = unversioned_data['captions'][item['id']]
        data['choices'] = items
        return data

    @property
    def validators(self):
        """Validate date ordering and room availability."""
        def _stay_dates_valid(form, field):
            # arrival must not be after departure
            if not field.data:
                return
            data = snakify_keys(field.data)
            try:
                arrival_date = data['arrival_date']
                departure_date = data['departure_date']
            except KeyError:
                raise ValidationError(_("Arrival/departure date is missing"))
            if _to_date(arrival_date) > _to_date(departure_date):
                raise ValidationError(_("Arrival date can't be set after the departure date."))
        def _check_number_of_places(form, field):
            if not field.data:
                return
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                # unchanged data cannot exceed the limit more than it already does
                if not old_data or not self.has_data_changed(snakify_keys(field.data), old_data):
                    return
            item = next((x for x in self.form_item.versioned_data['choices'] if x['id'] == field.data['choice']),
                        None)
            captions = self.form_item.data['captions']
            places_used_dict = self.get_places_used()
            if item and item['places_limit'] and not ((item['places_limit']
                                                      - places_used_dict.get(field.data['choice'], 0))):
                raise ValidationError(_('Not enough rooms in the {0}').format(captions[item['id']]))
        return [_stay_dates_valid, _check_number_of_places]

    @property
    def view_data(self):
        return dict(super(AccommodationField, self).view_data, places_used=self.get_places_used())

    def get_friendly_data(self, registration_data):
        """Return a dict with the room caption, parsed dates and night count."""
        friendly_data = dict(registration_data.data)
        if not friendly_data:
            return {}
        unversioned_data = registration_data.field_data.field.data
        friendly_data['choice'] = unversioned_data['captions'][friendly_data['choice']]
        friendly_data['arrival_date'] = _to_date(friendly_data['arrival_date'])
        friendly_data['departure_date'] = _to_date(friendly_data['departure_date'])
        friendly_data['nights'] = (friendly_data['departure_date'] - friendly_data['arrival_date']).days
        return friendly_data

    def calculate_price(self, reg_data, versioned_data):
        """Return price-per-night times the number of nights for the chosen room."""
        if not reg_data:
            return 0
        item = next((x for x in versioned_data['choices']
                     if reg_data['choice'] == x['id'] and x.get('is_billable', False)), None)
        if not item or not item['price']:
            return 0
        nights = (_to_date(reg_data['departure_date']) - _to_date(reg_data['arrival_date'])).days
        return item['price'] * nights

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Store the room choice and stay dates; already-paid data is kept."""
        if billable_items_locked and old_data.price:
            # if the old field was paid we can simply ignore any change and keep the old data
            return {}
        data = {}
        if value:
            # client sends camelCase keys; store snake_case
            data = {
                'choice': value['choice'],
                'arrival_date': value['arrivalDate'],
                'departure_date': value['departureDate']
            }
        return super(AccommodationField, self).process_form_data(registration, data, old_data, billable_items_locked)

    def get_places_used(self):
        """Return a dict mapping room-type id to the number of rooms taken."""
        places_used = Counter()
        for registration in self.form_item.registration_form.active_registrations:
            if self.form_item.id not in registration.data_by_field:
                continue
            data = registration.data_by_field[self.form_item.id].data
            if not data:
                continue
            places_used.update((data['choice'],))
        return dict(places_used)

    def iter_placeholder_info(self):
        """Yield (key, description) pairs for the available email placeholders."""
        yield 'name', 'Accommodation name for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)
        yield 'nights', 'Number of nights for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)
        yield 'arrival', 'Arrival date for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)
        yield 'departure', 'Departure date for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)

    def render_placeholder(self, data, key=None):
        """Render a single placeholder value from the friendly data."""
        mapping = {'name': 'choice',
                   'nights': 'nights',
                   'arrival': 'arrival_date',
                   'departure': 'departure_date'}
        rv = self.get_friendly_data(data).get(mapping[key], '')
        if isinstance(rv, date):
            rv = format_date(rv).decode('utf-8')
        return rv
def _to_machine_date(date):
return datetime.strptime(date, '%d/%m/%Y').strftime('%Y-%m-%d')
def _to_date(date):
return datetime.strptime(date, '%Y-%m-%d').date()
class MultiChoiceField(ChoiceBaseField):
    """Choice field where the registrant may select several options at once."""
    name = 'multi_choice'

    @property
    def default_value(self):
        return {}

    def get_friendly_data(self, registration_data):
        """Return the sorted captions of the selected options (with extra slots)."""
        def _format_item(uuid, number_of_slots):
            caption = self.form_item.data['captions'][uuid]
            return '{} (+{})'.format(caption, number_of_slots - 1) if number_of_slots > 1 else caption
        reg_data = registration_data.data
        if not reg_data:
            return ''
        return sorted(_format_item(uuid, number_of_slots) for uuid, number_of_slots in reg_data.iteritems())

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Store the selected options, pinning a suitable field-data version.

        Picks (or creates) a versioned-data row so that options the user keeps
        selected preserve the price/billability they registered with, even if
        the option was later removed or repriced.
        """
        # always store no-option as empty dict
        if value is None:
            value = {}
        return_value = {}
        if old_data:
            old_choices_mapping = {x['id']: x for x in old_data.field_data.versioned_data['choices']}
            new_choices_mapping = {x['id']: x for x in self.form_item.versioned_data['choices']}
            # choices that have been removed but are still selected
            revoked_selected_choice_ids = value.viewkeys() - new_choices_mapping.viewkeys()
            # choices that had their price updated
            updated_selected_choice_ids = {
                x for x in value
                if (x in old_choices_mapping and x in new_choices_mapping and
                    (old_choices_mapping[x]['is_billable'] != new_choices_mapping[x]['is_billable'] or
                     old_choices_mapping[x]['price'] != new_choices_mapping[x]['price']))
            }
            # if the user selected a new item that doesn't exist in the current version yet
            new_items_selected = any(x not in old_choices_mapping for x in value)
            # if all selected options exist in the old data version
            all_choices_in_old = value.viewkeys() <= old_choices_mapping.viewkeys()
            if all_choices_in_old:
                return_value['field_data'] = old_data.field_data
            elif new_items_selected:
                # XXX: should we create a new version when deselecting a removed item
                if not revoked_selected_choice_ids and not updated_selected_choice_ids:
                    # only new choices selected, so just upgrade to the latest version
                    return_value['field_data'] = self.form_item.current_data
                else:
                    # Create a new data version where the removed/updated items have been added back.
                    new_data_version = deepcopy(self.form_item.current_data.versioned_data)
                    new_data_version['choices'].extend(old_choices_mapping[x] for x in revoked_selected_choice_ids)
                    for i, data in enumerate(new_data_version['choices']):
                        if data['id'] in updated_selected_choice_ids:
                            new_data_version['choices'][i] = dict(old_choices_mapping[data['id']])
                    data_version = RegistrationFormFieldData(field=self.form_item, versioned_data=new_data_version)
                    return_value['field_data'] = data_version
            else:  # pragma: no cover
                raise Exception('Unexpected state - not all choices in old version but nothing new selected')
        if not billable_items_locked:
            # skip the billable-field base logic on purpose and let the plain
            # field base process the data, then pin our chosen field_data
            processed_data = super(RegistrationFormBillableField, self).process_form_data(registration, value, old_data)
            return {key: return_value.get(key, value) for key, value in processed_data.iteritems()}
        if old_data.data == value:
            # nothing changed
            # XXX: should we ignore slot changes if extra slots don't pay?
            # probably that needs a js update to keep the slots choice
            # enabled even if the item is paid...
            return return_value
        # XXX: This code still relies on the client sending data for the disabled fields.
        # This is pretty ugly but especially in case of non-billable extra slots it makes
        # sense to keep it like this. If someone tampers with the list of billable fields
        # we detect it any reject the change to the field's data anyway.
        if old_data:
            old_billable = {uuid: num for uuid, num in old_data.data.iteritems()
                            if old_choices_mapping[uuid]['is_billable'] and old_choices_mapping[uuid]['price']}
            new_billable = {uuid: num for uuid, num in value.iteritems()
                            if new_choices_mapping[uuid]['is_billable'] and new_choices_mapping[uuid]['price']}
        if old_data and old_billable != new_billable:
            # preserve existing data
            return return_value
        else:
            # nothing price-related changed
            # TODO: check item prices (in case there's a change between old/new version)
            # for now we simply ignore field changes in this case (since the old/new price
            # check in the base method will fail)
            processed_data = super(MultiChoiceField, self).process_form_data(registration, value, old_data, True)
            return {key: return_value.get(key, value) for key, value in processed_data.iteritems()}
Fix multi_choice options display on price change
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import mimetypes
from collections import Counter
from copy import deepcopy
from datetime import datetime, date
from operator import itemgetter
from uuid import uuid4
import wtforms
from sqlalchemy.dialects.postgresql import ARRAY
from werkzeug.datastructures import FileStorage
from wtforms.validators import NumberRange, ValidationError, InputRequired
from indico.core.db import db
from indico.modules.events.registration.fields.base import (RegistrationFormFieldBase, RegistrationFormBillableField,
RegistrationFormBillableItemsField)
from indico.modules.events.registration.models.form_fields import RegistrationFormFieldData
from indico.modules.events.registration.models.registrations import RegistrationData
from indico.util.date_time import format_date, iterdays, strftime_all_years
from indico.util.fs import secure_filename
from indico.util.i18n import _, L_
from indico.util.string import normalize_phone_number, snakify_keys
from indico.web.forms.fields import IndicoRadioField, JSONField
from indico.web.forms.validators import IndicoEmail
from MaKaC.webinterface.common.countries import CountryHolder
def _get_choice_by_id(choice_id, choices):
for choice in choices:
if choice['id'] == choice_id:
return choice
def get_field_merged_options(field, registration_data):
    """Return the field's view data extended with the registrant's choices.

    Options the registrant selected that were deleted from the current field
    version are re-added and listed under 'deletedChoice'; options that still
    exist are replaced by the version the registrant actually registered with
    (so e.g. the old price is shown).
    """
    rdata = registration_data.get(field.id)
    result = deepcopy(field.view_data)
    result['deletedChoice'] = []
    if not rdata or not rdata.data:
        return result
    # single-choice-like data is {'choice': id, ...}; multi-choice is {id: slots}
    values = [rdata.data['choice']] if 'choice' in rdata.data else rdata.data.keys()
    for val in values:
        if val and not any(item['id'] == val for item in result['choices']):
            field_data = rdata.field_data
            merged_data = field.field_impl.unprocess_field_data(field_data.versioned_data,
                                                                field_data.field.data)
            missing_option = next((choice for choice in merged_data['choices'] if choice['id'] == val), None)
            if missing_option:
                result['choices'].append(missing_option)
                result['deletedChoice'].append(missing_option['id'])
        else:
            # swap in the choice as it was in the registrant's data version
            result['choices'].remove(_get_choice_by_id(val, result['choices']))
            result['choices'].append(_get_choice_by_id(val, rdata.field_data.versioned_data.get('choices', {})))
    return result
class TextField(RegistrationFormFieldBase):
    """Single-line plain-text registration form field."""

    wtf_field_class = wtforms.StringField
    name = 'text'
class NumberField(RegistrationFormBillableField):
    """Registration form field holding a single integer value.

    The configured unit price is multiplied by the entered number.
    """
    name = 'number'
    wtf_field_class = wtforms.IntegerField

    @property
    def validators(self):
        """Return a min-value validator if a minimum is configured."""
        min_value = self.form_item.data.get('min_value', None)
        # compare against None explicitly so a configured minimum of 0
        # is still enforced (plain truthiness would drop it)
        return [NumberRange(min=min_value)] if min_value is not None else None

    def calculate_price(self, reg_data, versioned_data):
        """Return the unit price times the entered number (0 if not billable)."""
        if not versioned_data.get('is_billable'):
            return 0
        return versioned_data.get('price', 0) * int(reg_data or 0)

    def get_friendly_data(self, registration_data):
        """Return the stored number, or '' if nothing was entered."""
        if registration_data.data is None:
            return ''
        return registration_data.data
class TextAreaField(RegistrationFormFieldBase):
    """Multi-line plain-text registration form field."""

    wtf_field_class = wtforms.StringField
    name = 'textarea'
class ChoiceBaseField(RegistrationFormBillableItemsField):
    """Base class for fields offering a set of (possibly billable) choices.

    The choice items (price, places limit, extra slots) live in the
    versioned field data; captions are unversioned so renaming an option
    does not create a new data version.
    """
    versioned_data_fields = RegistrationFormBillableItemsField.versioned_data_fields | {'choices'}
    has_default_item = False
    wtf_field_class = JSONField

    @classmethod
    def unprocess_field_data(cls, versioned_data, unversioned_data):
        """Merge the unversioned captions back into the versioned choices."""
        items = deepcopy(versioned_data['choices'])
        for item in items:
            item['caption'] = unversioned_data['captions'][item['id']]
        return {'choices': items}

    @property
    def filter_choices(self):
        return self.form_item.data['captions']

    @property
    def view_data(self):
        return dict(super(ChoiceBaseField, self).view_data, places_used=self.get_places_used())

    @property
    def validators(self):
        def _check_number_of_places(form, field):
            if not field.data:
                return
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                # unchanged data cannot make the situation worse than it already is
                if not old_data or not self.has_data_changed(field.data, old_data):
                    return
            choices = self.form_item.versioned_data['choices']
            captions = self.form_item.data['captions']
            # places already taken are the same for every selected item,
            # so compute them once instead of once per iteration
            places_used_dict = self.get_places_used()
            places_used_dict.update(field.data)
            for k in field.data:
                choice = next((x for x in choices if x['id'] == k), None)
                if choice is None:
                    # the selected choice no longer exists in the current
                    # version; no places limit applies to it
                    continue
                places_limit = choice.get('places_limit')
                if places_limit and not (places_limit - places_used_dict.get(k, 0)) >= 0:
                    raise ValidationError(_('No places left for the option: {0}').format(captions[k]))
        return [_check_number_of_places]

    @classmethod
    def process_field_data(cls, data, old_data=None, old_versioned_data=None):
        """Normalize choice items and split captions off into unversioned data."""
        unversioned_data, versioned_data = super(ChoiceBaseField, cls).process_field_data(data, old_data,
                                                                                          old_versioned_data)
        items = [x for x in versioned_data['choices'] if not x.get('remove')]
        captions = dict(old_data['captions']) if old_data is not None else {}
        if cls.has_default_item:
            unversioned_data.setdefault('default_item', None)
        for item in items:
            if 'id' not in item:
                # newly added choice -> assign a fresh uuid
                item['id'] = unicode(uuid4())
            item.setdefault('is_billable', False)
            item['price'] = float(item['price']) if item.get('price') else 0
            item['places_limit'] = int(item['places_limit']) if item.get('places_limit') else 0
            item['max_extra_slots'] = int(item['max_extra_slots']) if item.get('max_extra_slots') else 0
            if cls.has_default_item and unversioned_data['default_item'] in {item['caption'], item['id']}:
                unversioned_data['default_item'] = item['id']
            captions[item['id']] = item.pop('caption')
        versioned_data['choices'] = items
        unversioned_data['captions'] = captions
        return unversioned_data, versioned_data

    def get_places_used(self):
        """Return a dict mapping choice id to the number of places taken."""
        places_used = Counter()
        for registration in self.form_item.registration_form.active_registrations:
            if self.form_item.id not in registration.data_by_field:
                continue
            data = registration.data_by_field[self.form_item.id].data
            if not data:
                continue
            places_used.update(data)
        return dict(places_used)

    def create_sql_filter(self, data_list):
        return RegistrationData.data.has_any(db.func.cast(data_list, ARRAY(db.String)))

    def calculate_price(self, reg_data, versioned_data):
        """Sum the prices of the selected billable choices (incl. paid extra slots)."""
        if not reg_data:
            return 0
        billable_choices = [x for x in versioned_data['choices'] if x['id'] in reg_data and x['is_billable']]
        price = 0
        for billable_field in billable_choices:
            price += billable_field['price']
            if billable_field.get('extra_slots_pay'):
                # each extra slot beyond the first one is billed separately
                price += (reg_data[billable_field['id']] - 1) * billable_field['price']
        return price
class SingleChoiceField(ChoiceBaseField):
    """Choice field where the registrant picks at most one option."""
    name = 'single_choice'
    has_default_item = True

    @property
    def default_value(self):
        """Return the configured default option if it still exists."""
        unversioned = self.form_item.data
        versioned = self.form_item.versioned_data
        if 'default_item' not in unversioned:
            return None
        default_item = unversioned['default_item']
        # only use the default item if it exists in the current version
        for choice in versioned['choices']:
            if choice['id'] == default_item:
                return {default_item: 1}
        return {}

    def get_friendly_data(self, registration_data):
        """Return the selected option's caption, noting extra slots if any."""
        if not registration_data.data:
            return ''
        uuid, number_of_slots = next(iter(registration_data.data.items()))
        caption = registration_data.field_data.field.data['captions'][uuid]
        if number_of_slots > 1:
            return '{} (+{})'.format(caption, number_of_slots - 1)
        return caption

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Store the selection, keeping the old value if it was already paid."""
        if billable_items_locked and old_data.price:
            # if the old field was paid we can simply ignore any change and keep the old value
            return {}
        # always store no-option as empty dict
        if value is None:
            value = {}
        return super(SingleChoiceField, self).process_form_data(registration, value, old_data, billable_items_locked)
class CheckboxField(RegistrationFormBillableField):
    """Yes/no checkbox field with an optional price and places limit."""
    name = 'checkbox'
    wtf_field_class = wtforms.BooleanField
    friendly_data_mapping = {None: '',
                             True: L_('Yes'),
                             False: L_('No')}

    def calculate_price(self, reg_data, versioned_data):
        """Return the price if the box is billable and checked, else 0."""
        if not versioned_data.get('is_billable') or not reg_data:
            return 0
        return versioned_data.get('price', 0)

    def get_friendly_data(self, registration_data):
        return self.friendly_data_mapping[registration_data.data]

    def get_places_used(self):
        """Return how many active registrations have checked this box."""
        places_used = 0
        if self.form_item.data.get('places_limit'):
            for registration in self.form_item.registration_form.active_registrations:
                if self.form_item.id not in registration.data_by_field:
                    continue
                if registration.data_by_field[self.form_item.id].data:
                    places_used += 1
        return places_used

    @property
    def view_data(self):
        return dict(super(CheckboxField, self).view_data, places_used=self.get_places_used())

    @property
    def filter_choices(self):
        return {unicode(val).lower(): caption for val, caption in self.friendly_data_mapping.iteritems()
                if val is not None}

    @property
    def validators(self):
        def _check_number_of_places(form, field):
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                if not old_data or not self.has_data_changed(field.data, old_data):
                    return
            if field.data and self.form_item.data.get('places_limit'):
                places_left = self.form_item.data.get('places_limit') - self.get_places_used()
                # `<= 0` instead of a truthiness check so an over-booked field
                # (limit lowered after registrations were made) is rejected too
                if places_left <= 0:
                    raise ValidationError(_('There are no places left for this option.'))
        return [_check_number_of_places]
class DateField(RegistrationFormFieldBase):
    """Date (and optionally time) field, stored as an ISO-formatted string."""
    name = 'date'
    wtf_field_class = wtforms.StringField

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Parse the submitted value with the configured format; store as ISO."""
        if value:
            value = datetime.strptime(value, self.form_item.data['date_format']).isoformat()
        return super(DateField, self).process_form_data(registration, value, old_data, billable_items_locked)

    def get_friendly_data(self, registration_data):
        """Render the stored ISO date back in the configured display format."""
        if not registration_data.data:
            return ''
        parsed = datetime.strptime(registration_data.data, '%Y-%m-%dT%H:%M:%S')
        return strftime_all_years(parsed, self.form_item.data['date_format'])

    @property
    def view_data(self):
        # a space in the format string means the field also carries a time part
        return dict(super(DateField, self).view_data,
                    has_time=' ' in self.form_item.data['date_format'])
class BooleanField(RegistrationFormBillableField):
    """Explicit yes/no radio field with an optional price and places limit."""
    name = 'bool'
    wtf_field_class = IndicoRadioField
    required_validator = InputRequired
    friendly_data_mapping = {None: '',
                             True: L_('Yes'),
                             False: L_('No')}

    @property
    def wtf_field_kwargs(self):
        return {'choices': [(True, _('Yes')), (False, _('No'))],
                'coerce': lambda x: {'yes': True, 'no': False}.get(x, None)}

    @property
    def filter_choices(self):
        return {unicode(val).lower(): caption for val, caption in self.friendly_data_mapping.iteritems()
                if val is not None}

    @property
    def view_data(self):
        return dict(super(BooleanField, self).view_data, places_used=self.get_places_used())

    @property
    def validators(self):
        def _check_number_of_places(form, field):
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                if not old_data or not self.has_data_changed(field.data, old_data):
                    return
            if field.data and self.form_item.data.get('places_limit'):
                places_left = self.form_item.data.get('places_limit') - self.get_places_used()
                # `field.data` was already checked above; use `<= 0` so an
                # over-booked field (limit lowered later) is rejected as well
                if places_left <= 0:
                    raise ValidationError(_('There are no places left for this option.'))
        return [_check_number_of_places]

    def get_places_used(self):
        """Return how many active registrations answered 'yes'."""
        places_used = 0
        if self.form_item.data.get('places_limit'):
            for registration in self.form_item.registration_form.active_registrations:
                if self.form_item.id not in registration.data_by_field:
                    continue
                if registration.data_by_field[self.form_item.id].data:
                    places_used += 1
        return places_used

    def calculate_price(self, reg_data, versioned_data):
        """Return the price if the field is billable and the answer is 'yes'."""
        if not versioned_data.get('is_billable'):
            return 0
        return versioned_data.get('price', 0) if reg_data else 0

    def get_friendly_data(self, registration_data):
        return self.friendly_data_mapping[registration_data.data]
class PhoneField(RegistrationFormFieldBase):
    """Free-text field whose value is normalized to a canonical phone number."""

    name = 'phone'
    wtf_field_class = wtforms.StringField
    wtf_field_kwargs = {'filters': [lambda value: normalize_phone_number(value) if value else '']}
class CountryField(RegistrationFormFieldBase):
    """Drop-down field listing the known countries."""
    name = 'country'
    wtf_field_class = wtforms.SelectField

    @property
    def wtf_field_kwargs(self):
        return {'choices': CountryHolder.getCountries().items()}

    @classmethod
    def unprocess_field_data(cls, versioned_data, unversioned_data):
        """Expose the country list to the client, sorted by caption."""
        countries = CountryHolder.getCountries()
        choices = [{'caption': name, 'countryKey': code} for code, name in countries.iteritems()]
        choices.sort(key=itemgetter('caption'))
        return {'choices': choices}

    @property
    def filter_choices(self):
        return dict(self.wtf_field_kwargs['choices'])

    def get_friendly_data(self, registration_data):
        """Return the full country name for the stored country code."""
        if not registration_data.data:
            return ''
        return CountryHolder.getCountries()[registration_data.data]
class _DeletableFileField(wtforms.FileField):
    """File field that can also signal deletion of a previously uploaded file."""

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = {'keep_existing': False, 'uploaded_file': None}
            return
        # This expects a form with a hidden field and a file field sharing the
        # same name.  An empty hidden value marks the existing file for
        # deletion or replacement with the newly uploaded one.
        keep = '' not in valuelist
        file_ = None
        for item in valuelist:
            if isinstance(item, FileStorage):
                file_ = item
                break
        if file_ is not None and not file_.filename:
            # a file input submitted without a selection is treated as no upload
            file_ = None
        self.data = {'keep_existing': keep, 'uploaded_file': file_}
class FileField(RegistrationFormFieldBase):
    """Registration field storing a single uploaded file."""
    name = 'file'
    wtf_field_class = _DeletableFileField

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        data = {'field_data': self.form_item.current_data}
        uploaded = value['uploaded_file']
        if uploaded:
            # a newly uploaded file always replaces whatever was stored before
            content_type = (mimetypes.guess_type(uploaded.filename)[0]
                            or uploaded.mimetype
                            or 'application/octet-stream')
            data['file'] = {
                'data': uploaded.file,
                'name': secure_filename(uploaded.filename, 'attachment'),
                'content_type': content_type
            }
        elif not value['keep_existing']:
            # no upload and not keeping the old file -> request deletion
            data['file'] = None
        return data

    @property
    def default_value(self):
        return None

    def get_friendly_data(self, registration_data):
        return registration_data.filename if registration_data else ''
class EmailField(RegistrationFormFieldBase):
    """Text field validated and normalized as an e-mail address."""
    name = 'email'
    wtf_field_class = wtforms.StringField
    # lowercase non-empty input; pass falsy values (None/'') through unchanged
    wtf_field_kwargs = {'filters': [lambda value: value.lower() if value else value]}

    @property
    def validators(self):
        return [IndicoEmail()]
class AccommodationField(RegistrationFormBillableItemsField):
    """Registration field where the registrant books accommodation.

    The registrant picks one room choice plus arrival/departure dates; the
    price of a billable choice is charged per night.
    """
    name = 'accommodation'
    wtf_field_class = JSONField
    # room choices are versioned so price changes don't affect old registrations
    versioned_data_fields = RegistrationFormBillableField.versioned_data_fields | {'choices'}

    @classmethod
    def process_field_data(cls, data, old_data=None, old_versioned_data=None):
        """Normalize incoming field settings into (unversioned, versioned) data.

        Captions are kept in the unversioned data (keyed by choice id) so
        renaming a room does not create a new data version.
        """
        unversioned_data, versioned_data = super(AccommodationField, cls).process_field_data(data, old_data,
                                                                                             old_versioned_data)
        items = [x for x in versioned_data['choices'] if not x.get('remove')]
        captions = dict(old_data['captions']) if old_data is not None else {}
        for item in items:
            if 'id' not in item:
                # newly added choice -> assign a stable uuid
                item['id'] = unicode(uuid4())
            item.setdefault('is_billable', False)
            item['price'] = float(item['price']) if item.get('price') else 0
            item['places_limit'] = int(item['places_limit']) if item.get('places_limit') else 0
            captions[item['id']] = item.pop('caption')
        for key in {'arrival_date_from', 'arrival_date_to', 'departure_date_from', 'departure_date_to'}:
            # client sends dd/mm/yyyy; store ISO yyyy-mm-dd
            unversioned_data[key] = _to_machine_date(unversioned_data[key])
        versioned_data['choices'] = items
        unversioned_data['captions'] = captions
        return unversioned_data, versioned_data

    @classmethod
    def unprocess_field_data(cls, versioned_data, unversioned_data):
        """Build the client-side representation (date ranges + captioned choices)."""
        data = {}
        arrival_date_from = _to_date(unversioned_data['arrival_date_from'])
        arrival_date_to = _to_date(unversioned_data['arrival_date_to'])
        departure_date_from = _to_date(unversioned_data['departure_date_from'])
        departure_date_to = _to_date(unversioned_data['departure_date_to'])
        data['arrival_dates'] = [(dt.date().isoformat(), format_date(dt))
                                 for dt in iterdays(arrival_date_from, arrival_date_to)]
        data['departure_dates'] = [(dt.date().isoformat(), format_date(dt))
                                   for dt in iterdays(departure_date_from, departure_date_to)]
        items = deepcopy(versioned_data['choices'])
        for item in items:
            # re-attach the (unversioned) caption to each versioned choice
            item['caption'] = unversioned_data['captions'][item['id']]
        data['choices'] = items
        return data

    @property
    def validators(self):
        """Validators checking date consistency and room availability."""
        def _stay_dates_valid(form, field):
            if not field.data:
                return
            data = snakify_keys(field.data)
            try:
                arrival_date = data['arrival_date']
                departure_date = data['departure_date']
            except KeyError:
                raise ValidationError(_("Arrival/departure date is missing"))
            if _to_date(arrival_date) > _to_date(departure_date):
                raise ValidationError(_("Arrival date can't be set after the departure date."))

        def _check_number_of_places(form, field):
            if not field.data:
                return
            # when modifying a registration, only validate if the choice changed
            if form.modified_registration:
                old_data = form.modified_registration.data_by_field.get(self.form_item.id)
                if not old_data or not self.has_data_changed(snakify_keys(field.data), old_data):
                    return
            item = next((x for x in self.form_item.versioned_data['choices'] if x['id'] == field.data['choice']),
                        None)
            captions = self.form_item.data['captions']
            places_used_dict = self.get_places_used()
            if item and item['places_limit'] and not ((item['places_limit']
                                                      - places_used_dict.get(field.data['choice'], 0))):
                raise ValidationError(_('Not enough rooms in the {0}').format(captions[item['id']]))
        return [_stay_dates_valid, _check_number_of_places]

    @property
    def view_data(self):
        return dict(super(AccommodationField, self).view_data, places_used=self.get_places_used())

    def get_friendly_data(self, registration_data):
        """Return display data: caption, parsed dates and the number of nights."""
        friendly_data = dict(registration_data.data)
        if not friendly_data:
            return {}
        unversioned_data = registration_data.field_data.field.data
        friendly_data['choice'] = unversioned_data['captions'][friendly_data['choice']]
        friendly_data['arrival_date'] = _to_date(friendly_data['arrival_date'])
        friendly_data['departure_date'] = _to_date(friendly_data['departure_date'])
        friendly_data['nights'] = (friendly_data['departure_date'] - friendly_data['arrival_date']).days
        return friendly_data

    def calculate_price(self, reg_data, versioned_data):
        """Return price (per-night price x number of nights) for a billable choice."""
        if not reg_data:
            return 0
        item = next((x for x in versioned_data['choices']
                     if reg_data['choice'] == x['id'] and x.get('is_billable', False)), None)
        if not item or not item['price']:
            return 0
        nights = (_to_date(reg_data['departure_date']) - _to_date(reg_data['arrival_date'])).days
        return item['price'] * nights

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        # NOTE(review): assumes old_data is not None whenever billable_items_locked
        # is set -- confirm with callers
        if billable_items_locked and old_data.price:
            # if the old field was paid we can simply ignore any change and keep the old data
            return {}
        data = {}
        if value:
            # translate the client's camelCase keys to snake_case for storage
            data = {
                'choice': value['choice'],
                'arrival_date': value['arrivalDate'],
                'departure_date': value['departureDate']
            }
        return super(AccommodationField, self).process_form_data(registration, data, old_data, billable_items_locked)

    def get_places_used(self):
        """Return a mapping of choice id -> number of active registrations using it."""
        places_used = Counter()
        for registration in self.form_item.registration_form.active_registrations:
            if self.form_item.id not in registration.data_by_field:
                continue
            data = registration.data_by_field[self.form_item.id].data
            if not data:
                continue
            places_used.update((data['choice'],))
        return dict(places_used)

    def iter_placeholder_info(self):
        """Yield (name, description) pairs for the e-mail placeholders of this field."""
        yield 'name', 'Accommodation name for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)
        yield 'nights', 'Number of nights for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)
        yield 'arrival', 'Arrival date for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)
        yield 'departure', 'Departure date for "{}" ({})'.format(self.form_item.title, self.form_item.parent.title)

    def render_placeholder(self, data, key=None):
        """Render a single placeholder value from a registration's friendly data."""
        mapping = {'name': 'choice',
                   'nights': 'nights',
                   'arrival': 'arrival_date',
                   'departure': 'departure_date'}
        rv = self.get_friendly_data(data).get(mapping[key], '')
        if isinstance(rv, date):
            rv = format_date(rv).decode('utf-8')
        return rv
def _to_machine_date(date):
    """Convert a ``dd/mm/yyyy`` string into ISO ``yyyy-mm-dd``."""
    return datetime.strptime(date, '%d/%m/%Y').date().isoformat()
def _to_date(date):
    """Parse an ISO ``yyyy-mm-dd`` string into a ``datetime.date``."""
    parsed = datetime.strptime(date, '%Y-%m-%d')
    return parsed.date()
class MultiChoiceField(ChoiceBaseField):
    """Choice field where several options (with optional extra slots) may be selected."""
    name = 'multi_choice'

    @property
    def default_value(self):
        # selection is stored as {choice_id: number_of_slots}; nothing selected -> {}
        return {}

    def get_friendly_data(self, registration_data):
        """Return a sorted list of selected captions, annotated with extra slots."""
        def _format_item(uuid, number_of_slots):
            caption = self.form_item.data['captions'][uuid]
            # extra slots beyond the first are shown as "(+n)"
            return '{} (+{})'.format(caption, number_of_slots - 1) if number_of_slots > 1 else caption

        reg_data = registration_data.data
        if not reg_data:
            return ''
        return sorted(_format_item(uuid, number_of_slots) for uuid, number_of_slots in reg_data.iteritems())

    def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
        """Store the selection, reconciling it with the registration's data version.

        Decides which field-data version the new selection should point at and,
        when billable items are locked, rejects/ignores price-relevant changes.
        """
        # always store no-option as empty dict
        if value is None:
            value = {}
        return_value = {}
        if old_data:
            old_choices_mapping = {x['id']: x for x in old_data.field_data.versioned_data['choices']}
            new_choices_mapping = {x['id']: x for x in self.form_item.versioned_data['choices']}
            # choices that have been removed but are still selected
            revoked_selected_choice_ids = value.viewkeys() - new_choices_mapping.viewkeys()
            # choices that had their price updated
            updated_selected_choice_ids = {
                x for x in value
                if (x in old_choices_mapping and x in new_choices_mapping and
                    (old_choices_mapping[x]['is_billable'] != new_choices_mapping[x]['is_billable'] or
                     old_choices_mapping[x]['price'] != new_choices_mapping[x]['price']))
            }
            # if the user selected a new item that doesn't exist in the current version yet
            new_items_selected = any(x not in old_choices_mapping for x in value)
            # if all selected options exist in the old data version
            all_choices_in_old = value.viewkeys() <= old_choices_mapping.viewkeys()
            if all_choices_in_old:
                # keep pointing at the old version; it covers the whole selection
                return_value['field_data'] = old_data.field_data
            elif new_items_selected:
                # XXX: should we create a new version when deselecting a removed item
                if not revoked_selected_choice_ids and not updated_selected_choice_ids:
                    # only new choices selected, so just upgrade to the latest version
                    return_value['field_data'] = self.form_item.current_data
                else:
                    # Create a new data version where the removed/updated items have been added back.
                    new_data_version = deepcopy(self.form_item.current_data.versioned_data)
                    new_data_version['choices'].extend(old_choices_mapping[x] for x in revoked_selected_choice_ids)
                    for i, data in enumerate(new_data_version['choices']):
                        if data['id'] in updated_selected_choice_ids:
                            # restore the old (already paid-for) price/billable flags
                            new_data_version['choices'][i] = dict(old_choices_mapping[data['id']])
                    data_version = RegistrationFormFieldData(field=self.form_item, versioned_data=new_data_version)
                    return_value['field_data'] = data_version
            else:  # pragma: no cover
                raise Exception('Unexpected state - not all choices in old version but nothing new selected')
        if not billable_items_locked:
            # bypasses RegistrationFormBillableField's billable-aware override on purpose
            processed_data = super(RegistrationFormBillableField, self).process_form_data(registration, value, old_data)
            return {key: return_value.get(key, value) for key, value in processed_data.iteritems()}
        if old_data.data == value:
            # nothing changed
            # XXX: should we ignore slot changes if extra slots don't pay?
            # probably that needs a js update to keep the slots choice
            # enabled even if the item is paid...
            return return_value
        # XXX: This code still relies on the client sending data for the disabled fields.
        # This is pretty ugly but especially in case of non-billable extra slots it makes
        # sense to keep it like this. If someone tampers with the list of billable fields
        # we detect it and reject the change to the field's data anyway.
        if old_data:
            old_billable = {uuid: num for uuid, num in old_data.data.iteritems()
                            if old_choices_mapping[uuid]['is_billable'] and old_choices_mapping[uuid]['price']}
            new_billable = {uuid: num for uuid, num in value.iteritems()
                            if new_choices_mapping[uuid]['is_billable'] and new_choices_mapping[uuid]['price']}
        if old_data and old_billable != new_billable:
            # preserve existing data
            return return_value
        else:
            # nothing price-related changed
            # TODO: check item prices (in case there's a change between old/new version)
            # for now we simply ignore field changes in this case (since the old/new price
            # check in the base method will fail)
            processed_data = super(MultiChoiceField, self).process_form_data(registration, value, old_data, True)
            return {key: return_value.get(key, value) for key, value in processed_data.iteritems()}
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
def create_transactions(node, address, amt, fees):
    """Create and sign one raw transaction per fee, sending amt to address.

    Inputs are picked from the node's spendable UTXOs until they cover amt
    plus the largest fee; all returned transactions share the same inputs.
    Returns the list of signrawtransactionwithwallet() results.
    """
    target = amt + max(fees)
    inputs = []
    ins_total = 0
    for utxo in (u for u in node.listunspent(0) if u['spendable']):
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        ins_total += utxo['amount']
        if ins_total >= target:
            break
    # the wallet must have enough spendable outputs to reach the target
    assert ins_total >= target

    txs = []
    for fee in fees:
        outputs = {address: amt}
        change = ins_total - amt - fee
        # avoid creating a zero-value change output
        if change > 0:
            outputs[node.getrawchangeaddress()] = change
        raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
        raw_tx = node.signrawtransactionwithwallet(raw_tx)
        assert_equal(raw_tx['complete'], True)
        txs.append(raw_tx)
    return txs
class WalletTest(BitcoinTestFramework):
    """Exercise getbalance/getbalances/getunconfirmedbalance/getwalletinfo."""

    def set_test_params(self):
        """Two clean-chain nodes; node 0 restricts mempool descendants."""
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ['-limitdescendantcount=3'],  # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
            [],
        ]

    def skip_test_if_missing_module(self):
        """Skip when the node was built without wallet support."""
        self.skip_if_no_wallet()

    def run_test(self):
        self.nodes[0].importaddress(ADDRESS_WATCHONLY)
        # Check that nodes don't own any UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)

        self.log.info("Check that only node 0 is watching an address")
        assert 'watchonly' in self.nodes[0].getbalances()
        assert 'watchonly' not in self.nodes[1].getbalances()

        self.log.info("Mining blocks ...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(1)
        # mature node 1's coinbase and fund the watch-only address
        self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
        self.sync_all()

        assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
        assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
        assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)

        assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
        assert 'watchonly' not in self.nodes[1].getbalances()

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)

        self.log.info("Test getbalance with different arguments")
        assert_equal(self.nodes[0].getbalance("*"), 50)
        assert_equal(self.nodes[0].getbalance("*", 1), 50)
        assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
        assert_equal(self.nodes[0].getbalance(minconf=1), 50)
        assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
        assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)

        # Send 40 BTC from 0 to 1 and 60 BTC from 1 to 0.
        txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()

        # node 1's send uses the (unconfirmed) output of node 0's send as input
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()

        # First argument of getbalance must be set to "*"
        assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")

        self.log.info("Test balances with unconfirmed inputs")

        # Before `test_balance()`, we have had two nodes with a balance of 50
        # each and then we:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 60 from node B to node A with fee 0.01
        #
        # Then we check the balances:
        #
        # 1) As is
        # 2) With transaction 2 from above with 2x the fee
        #
        # Prior to #16766, in this situation, the node would immediately report
        # a balance of 30 on node B as unconfirmed and trusted.
        #
        # After #16766, we show that balance as unconfirmed.
        #
        # The balance is indeed "trusted" and "confirmed" insofar as removing
        # the mempool transactions would return at least that much money. But
        # the algorithm after #16766 marks it as unconfirmed because the 'taint'
        # tracking of transaction trust for summing balances doesn't consider
        # which inputs belong to a user. In this case, the change output in
        # question could be "destroyed" by replacing the 1st transaction above.
        #
        # The post #16766 behavior is correct; we shouldn't be treating those
        # funds as confirmed. If you want to rely on that specific UTXO existing
        # which has given you that balance, you cannot, as a third party
        # spending the other input would destroy that unconfirmed balance.
        #
        # For example, if the test transactions were:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 10 from node B to node A with fee 0.01
        #
        # Then our node would report a confirmed balance of 40 + 50 - 10 = 80
        # BTC, which is more than would be available if transaction 1 were
        # replaced.

        def test_balances(*, fee_node_1=0):
            """Assert the expected balances on both nodes for a given node-1 fee."""
            # getbalances
            expected_balances_0 = {'mine': {'immature': Decimal('0E-8'),
                                            'trusted': Decimal('9.99'),  # change from node 0's send
                                            'untrusted_pending': Decimal('60.0')},
                                   'watchonly': {'immature': Decimal('5000'),
                                                 'trusted': Decimal('50.0'),
                                                 'untrusted_pending': Decimal('0E-8')}}
            expected_balances_1 = {'mine': {'immature': Decimal('0E-8'),
                                            'trusted': Decimal('0E-8'),  # node 1's send had an unsafe input
                                            'untrusted_pending': Decimal('30.0') - fee_node_1}}  # Doesn't include output of node 0's send since it was spent
            assert_equal(self.nodes[0].getbalances(), expected_balances_0)
            assert_equal(self.nodes[1].getbalances(), expected_balances_1)
            # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
            assert_equal(self.nodes[0].getbalance(), Decimal('9.99'))  # change from node 0's send
            assert_equal(self.nodes[1].getbalance(), Decimal('0'))  # node 1's send had an unsafe input
            # Same with minconf=0
            assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
            assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
            # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
            # TODO: fix getbalance tracking of coin spentness depth
            assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
            assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
            # getunconfirmedbalance
            assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60'))  # output of node 1's spend
            assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1)  # Doesn't include output of node 0's send since it was spent
            # getwalletinfo.unconfirmed_balance
            assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
            assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)

        test_balances(fee_node_1=Decimal('0.01'))

        # Node 1 bumps the transaction fee and resends
        self.nodes[1].sendrawtransaction(txs[1]['hex'])
        self.nodes[0].sendrawtransaction(txs[1]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()

        self.log.info("Test getbalance and getbalances.mine.untrusted_pending with conflicted unconfirmed inputs")
        test_balances(fee_node_1=Decimal('0.02'))

        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()

        # balances are correct after the transactions are confirmed
        balance_node0 = Decimal('69.99')  # node 1's send plus change from node 0's send
        balance_node1 = Decimal('29.98')  # change from node 0's send
        assert_equal(self.nodes[0].getbalances()['mine']['trusted'], balance_node0)
        assert_equal(self.nodes[1].getbalances()['mine']['trusted'], balance_node1)
        assert_equal(self.nodes[0].getbalance(), balance_node0)
        assert_equal(self.nodes[1].getbalance(), balance_node1)

        # Send total balance away from node 1
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
        self.sync_all()

        # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
        # TODO: fix getbalance tracking of coin spentness depth
        # getbalance with minconf=3 should still show the old balance
        assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))

        # getbalance with minconf=2 will show the new balance.
        assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))

        # check mempool transactions count for wallet unconfirmed balance after
        # dynamically loading the wallet.
        before = self.nodes[1].getbalances()['mine']['untrusted_pending']
        dst = self.nodes[1].getnewaddress()
        self.nodes[1].unloadwallet(self.default_wallet_name)
        self.nodes[0].sendtoaddress(dst, 0.1)
        self.sync_all()
        self.nodes[1].loadwallet(self.default_wallet_name)
        after = self.nodes[1].getbalances()['mine']['untrusted_pending']
        assert_equal(before + Decimal('0.1'), after)

        # Create 3 more wallet txs, where the last is not accepted to the
        # mempool because it is the third descendant of the tx above
        for _ in range(3):
            # Set amount high enough such that all coins are spent by each tx
            txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)

        self.log.info('Check that wallet txs not in the mempool are untrusted')
        assert txid not in self.nodes[0].getrawmempool()
        assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)

        self.log.info("Test replacement and reorg of non-mempool tx")
        tx_orig = self.nodes[0].gettransaction(txid)['hex']
        # Increase fee by 1 coin (raw byte-level replacement of the output amount)
        tx_replace = tx_orig.replace(
            struct.pack("<q", 99 * 10**8).hex(),
            struct.pack("<q", 98 * 10**8).hex(),
        )
        tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
        # Total balance is given by the sum of outputs of the tx
        total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
        self.sync_all()
        self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)

        # Now confirm tx_replace
        block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)

        self.log.info('Put txs back into mempool of node 1 (not node 0)')
        self.nodes[0].invalidateblock(block_reorg)
        self.nodes[1].invalidateblock(block_reorg)
        self.sync_blocks()
        self.nodes[0].syncwithvalidationinterfacequeue()
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)  # wallet txs not in the mempool are untrusted
        self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)  # wallet txs not in the mempool are untrusted

        # Now confirm tx_orig
        self.restart_node(1, ['-persistmempool=0'])
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks()
        self.nodes[1].sendrawtransaction(tx_orig)
        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1)  # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
    # Entry point: run the functional test directly as a script.
    WalletTest().main()
Revert "test: Add missing sync_all to wallet_balance test"
This reverts commit fa815255c70d32809aac640db4a8762c7d71e8db.
The underlying bug has been fixed in commit f77b1de16feee097a88e99d2ecdd4d84beb4f915.
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
def create_transactions(node, address, amt, fees):
    """Create and sign one raw transaction per fee, sending amt to address.

    Inputs are picked from the node's spendable UTXOs until they cover amt
    plus the largest fee; all returned transactions share the same inputs.
    Returns the list of signrawtransactionwithwallet() results.
    """
    target = amt + max(fees)
    inputs = []
    ins_total = 0
    for utxo in (u for u in node.listunspent(0) if u['spendable']):
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        ins_total += utxo['amount']
        if ins_total >= target:
            break
    # the wallet must have enough spendable outputs to reach the target
    assert ins_total >= target

    txs = []
    for fee in fees:
        outputs = {address: amt}
        change = ins_total - amt - fee
        # avoid creating a zero-value change output
        if change > 0:
            outputs[node.getrawchangeaddress()] = change
        raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
        raw_tx = node.signrawtransactionwithwallet(raw_tx)
        assert_equal(raw_tx['complete'], True)
        txs.append(raw_tx)
    return txs
class WalletTest(BitcoinTestFramework):
    def set_test_params(self):
        """Two clean-chain nodes; node 0 restricts mempool descendants."""
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ['-limitdescendantcount=3'],  # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
            [],
        ]
    def skip_test_if_missing_module(self):
        """Skip the test when the node was built without wallet support."""
        self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].importaddress(ADDRESS_WATCHONLY)
# Check that nodes don't own any UTXOs
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Check that only node 0 is watching an address")
assert 'watchonly' in self.nodes[0].getbalances()
assert 'watchonly' not in self.nodes[1].getbalances()
self.log.info("Mining blocks ...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
assert 'watchonly' not in self.nodes[1].getbalances()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
# Send 40 BTC from 0 to 1 and 60 BTC from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test balances with unconfirmed inputs")
# Before `test_balance()`, we have had two nodes with a balance of 50
# each and then we:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 60 from node B to node A with fee 0.01
#
# Then we check the balances:
#
# 1) As is
# 2) With transaction 2 from above with 2x the fee
#
# Prior to #16766, in this situation, the node would immediately report
# a balance of 30 on node B as unconfirmed and trusted.
#
# After #16766, we show that balance as unconfirmed.
#
# The balance is indeed "trusted" and "confirmed" insofar as removing
# the mempool transactions would return at least that much money. But
# the algorithm after #16766 marks it as unconfirmed because the 'taint'
# tracking of transaction trust for summing balances doesn't consider
# which inputs belong to a user. In this case, the change output in
# question could be "destroyed" by replace the 1st transaction above.
#
# The post #16766 behavior is correct; we shouldn't be treating those
# funds as confirmed. If you want to rely on that specific UTXO existing
# which has given you that balance, you cannot, as a third party
# spending the other input would destroy that unconfirmed.
#
# For example, if the test transactions were:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 10 from node B to node A with fee 0.01
#
# Then our node would report a confirmed balance of 40 + 50 - 10 = 80
# BTC, which is more than would be available if transaction 1 were
# replaced.
def test_balances(*, fee_node_1=0):
    """Assert the expected balances on both nodes via getbalances,
    getbalance, getunconfirmedbalance and getwalletinfo.

    fee_node_1: fee of node 1's currently-mempooled send (0.01 before the
    fee bump, 0.02 after); it is subtracted from node 1's expected
    unconfirmed balance.  Closes over ``self`` from the enclosing test.
    """
    # getbalances
    expected_balances_0 = {'mine': {'immature': Decimal('0E-8'),
                                    'trusted': Decimal('9.99'),  # change from node 0's send
                                    'untrusted_pending': Decimal('60.0')},
                           'watchonly': {'immature': Decimal('5000'),
                                         'trusted': Decimal('50.0'),
                                         'untrusted_pending': Decimal('0E-8')}}
    expected_balances_1 = {'mine': {'immature': Decimal('0E-8'),
                                    'trusted': Decimal('0E-8'),  # node 1's send had an unsafe input
                                    'untrusted_pending': Decimal('30.0') - fee_node_1}}  # Doesn't include output of node 0's send since it was spent
    assert_equal(self.nodes[0].getbalances(), expected_balances_0)
    assert_equal(self.nodes[1].getbalances(), expected_balances_1)
    # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
    assert_equal(self.nodes[0].getbalance(), Decimal('9.99'))  # change from node 0's send
    assert_equal(self.nodes[1].getbalance(), Decimal('0'))  # node 1's send had an unsafe input
    # Same with minconf=0
    assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
    assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
    # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
    # TODO: fix getbalance tracking of coin spentness depth
    assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
    assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
    # getunconfirmedbalance
    assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60'))  # output of node 1's spend
    assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1)  # Doesn't include output of node 0's send since it was spent
    # getwalletinfo.unconfirmed_balance
    assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
    assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)
test_balances(fee_node_1=Decimal('0.01'))
# Node 1 bumps the transaction fee and resends
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.nodes[0].sendrawtransaction(txs[1]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
self.log.info("Test getbalance and getbalances.mine.untrusted_pending with conflicted unconfirmed inputs")
test_balances(fee_node_1=Decimal('0.02'))
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
# balances are correct after the transactions are confirmed
balance_node0 = Decimal('69.99') # node 1's send plus change from node 0's send
balance_node1 = Decimal('29.98') # change from node 0's send
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], balance_node0)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], balance_node1)
assert_equal(self.nodes[0].getbalance(), balance_node0)
assert_equal(self.nodes[1].getbalance(), balance_node1)
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
# check mempool transactions count for wallet unconfirmed balance after
# dynamically loading the wallet.
before = self.nodes[1].getbalances()['mine']['untrusted_pending']
dst = self.nodes[1].getnewaddress()
self.nodes[1].unloadwallet(self.default_wallet_name)
self.nodes[0].sendtoaddress(dst, 0.1)
self.sync_all()
self.nodes[1].loadwallet(self.default_wallet_name)
after = self.nodes[1].getbalances()['mine']['untrusted_pending']
assert_equal(before + Decimal('0.1'), after)
# Create 3 more wallet txs, where the last is not accepted to the
# mempool because it is the third descendant of the tx above
for _ in range(3):
# Set amount high enough such that all coins are spent by each tx
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
self.log.info('Check that wallet txs not in the mempool are untrusted')
assert txid not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
assert_equal(self.nodes[0].getbalance(minconf=0), 0)
self.log.info("Test replacement and reorg of non-mempool tx")
tx_orig = self.nodes[0].gettransaction(txid)['hex']
# Increase fee by 1 coin
tx_replace = tx_orig.replace(
struct.pack("<q", 99 * 10**8).hex(),
struct.pack("<q", 98 * 10**8).hex(),
)
tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
# Total balance is given by the sum of outputs of the tx
total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
self.sync_all()
self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
# Now confirm tx_replace
block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
self.log.info('Put txs back into mempool of node 1 (not node 0)')
self.nodes[0].invalidateblock(block_reorg)
self.nodes[1].invalidateblock(block_reorg)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0'])
connect_nodes(self.nodes[0], 1)
self.sync_blocks()
self.nodes[1].sendrawtransaction(tx_orig)
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
# Standard functional-test entry point.
if __name__ == '__main__':
    WalletTest().main()
|
"""Provides a class for consistently managing and writing vivarium outputs and output paths."""
from collections import defaultdict
import os
from datetime import datetime
import yaml
class ResultsWriter:
    """Manages vivarium output directory structure and writes results into it.

    Attributes
    ----------
    results_root: str
        The root directory to which results will be written.
    """

    def __init__(self, results_root):
        """
        Parameters
        ----------
        results_root: str
            The root directory to which results will be written.
        """
        self.results_root = results_root
        os.makedirs(results_root, exist_ok=True)
        # Unregistered keys fall back to the root directory.
        self._directories = defaultdict(lambda: self.results_root)

    def add_sub_directory(self, key, path):
        """Creates and registers a sub-directory of the results directory.

        Parameters
        ----------
        key: str
            A look-up key for the directory path.
        path: str
            The relative path from the root of the results directory to the sub-directory.

        Returns
        -------
        str:
            The absolute path to the sub-directory.
        """
        full_path = os.path.join(self.results_root, path)
        os.makedirs(full_path, exist_ok=True)
        self._directories[key] = full_path
        return full_path

    def write_output(self, data, file_name, key=None):
        """Writes output data to disk, dispatching on the file extension.

        Parameters
        ----------
        data: pandas.DataFrame or dict
            The data to write to disk.
        file_name: str
            The name of the file to write.
        key: str, optional
            The lookup key for the sub_directory to write results to, if any.
        """
        output_path = os.path.join(self._directories[key], file_name)
        extension = file_name.rsplit('.', 1)[-1]
        if extension == 'yaml':
            with open(output_path, 'w') as output_file:
                yaml.dump(data, output_file)
        elif extension == 'hdf':
            data.to_hdf(output_path, 'data', format='table')
        else:
            raise NotImplementedError(
                f"Only 'yaml' and 'hdf' file types are supported. You requested {extension}")

    def dump_simulation_configuration(self, component_configuration_path):
        """Sets up a simulation to get the complete configuration, then writes it to disk.

        Parameters
        ----------
        component_configuration_path: str
            Absolute path to a yaml file with the simulation component configuration.
        """
        # Imported here to avoid a circular dependency at module import time
        # -- TODO confirm that is the reason for the local import.
        from vivarium.framework.engine import build_simulation_configuration, load_component_manager, setup_simulation
        configuration = build_simulation_configuration({'simulation_configuration': component_configuration_path})
        configuration.run_configuration.results_directory = self.results_root
        component_manager = load_component_manager(configuration)
        setup_simulation(component_manager, configuration)
        self.write_output(configuration.to_dict(), 'base_config.yaml')
        with open(component_configuration_path) as config_file:
            self.write_output(config_file.read(), 'components.yaml')
def get_results_writer(results_directory, model_specification_file):
    """Creates a ResultsWriter rooted at a per-run, timestamped directory.

    The root is ``<results_directory>/<model spec name>/<launch time>`` so
    repeated runs of the same model specification never collide.

    Parameters
    ----------
    results_directory: str
        Base directory under which all results are organized.
    model_specification_file: str
        Path to the model specification file; its base name (without
        extension) names the results sub-directory.

    Returns
    -------
    ResultsWriter
    """
    launch_time = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    # Take the basename first, then strip the extension.  The previous
    # rpartition('.')-based version cut at the last '.' anywhere in the
    # path (breaking on dotted directory names) and produced an empty name
    # for extensionless files.
    config_name = os.path.splitext(os.path.basename(model_specification_file))[0]
    # os.path.join instead of string concatenation for portable separators.
    results_root = os.path.join(results_directory, config_name, launch_time)
    return ResultsWriter(results_root)
def get_results_writer_for_restart(results_directory):
    """Creates a ResultsWriter rooted directly at an existing results
    directory, without adding a new timestamped sub-directory.
    """
    return ResultsWriter(results_directory)
Get rid of some poorly conceived functions
"""Provides a class for consistently managing and writing vivarium outputs and output paths."""
from collections import defaultdict
import os
from datetime import datetime
import yaml
class ResultsWriter:
    """Owns the on-disk layout of vivarium simulation results.

    Attributes
    ----------
    results_root: str
        The root directory to which results will be written.
    """

    def __init__(self, results_root):
        """
        Parameters
        ----------
        results_root: str
            The root directory to which results will be written.
        """
        self.results_root = results_root
        os.makedirs(results_root, exist_ok=True)
        # Keys without a registered sub-directory resolve to the root.
        self._directories = defaultdict(lambda: self.results_root)

    def add_sub_directory(self, key, path):
        """Creates a sub-directory of the results directory and registers it.

        Parameters
        ----------
        key: str
            A look-up key for the directory path.
        path: str
            The relative path from the root of the results directory to the sub-directory.

        Returns
        -------
        str:
            The absolute path to the sub-directory.
        """
        target_dir = os.path.join(self.results_root, path)
        os.makedirs(target_dir, exist_ok=True)
        self._directories[key] = target_dir
        return target_dir

    def write_output(self, data, file_name, key=None):
        """Writes output data to disk, selecting the writer by file extension.

        Parameters
        ----------
        data: pandas.DataFrame or dict
            The data to write to disk.
        file_name: str
            The name of the file to write.
        key: str, optional
            The lookup key for the sub_directory to write results to, if any.
        """
        output_path = os.path.join(self._directories[key], file_name)
        extension = file_name.rsplit('.', 1)[-1]
        if extension == 'hdf':
            data.to_hdf(output_path, 'data', format='table')
        elif extension == 'yaml':
            with open(output_path, 'w') as out:
                yaml.dump(data, out)
        else:
            raise NotImplementedError(
                f"Only 'yaml' and 'hdf' file types are supported. You requested {extension}")
def get_results_writer(results_directory, model_specification_file):
    """Creates a ResultsWriter rooted at a per-run, timestamped directory.

    The root is ``<results_directory>/<model spec name>/<launch time>`` so
    repeated runs of the same model specification never collide.

    Parameters
    ----------
    results_directory: str
        Base directory under which all results are organized.
    model_specification_file: str
        Path to the model specification file; its base name (without
        extension) names the results sub-directory.

    Returns
    -------
    ResultsWriter
    """
    launch_time = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    # Take the basename first, then strip the extension.  The previous
    # rpartition('.')-based version cut at the last '.' anywhere in the
    # path (breaking on dotted directory names) and produced an empty name
    # for extensionless files.
    config_name = os.path.splitext(os.path.basename(model_specification_file))[0]
    # os.path.join instead of string concatenation for portable separators.
    results_root = os.path.join(results_directory, config_name, launch_time)
    return ResultsWriter(results_root)
|
from random import randint
from bisect import bisect
from itertools import izip_longest
from .backend import CleaverBackend
from .identity import CleaverIdentityProvider
class Cleaver(object):
    """Per-request entry point for A/B (split) testing.

    Combines an identity provider (who is the current user?) with a storage
    backend (which variant did they get, did they convert?) and exposes
    ``split`` and ``score`` to application code.  Python 2 code: note the
    ``izip_longest`` import and generator ``.next()`` usage below.
    """

    def __init__(self, environ, identity, backend):
        """
        Create a new Cleaver instance.

        Not generally instantiated directly, but established automatically by
        ``cleaver.SplitMiddleware`` and used within a WSGI application via
        ``request.environ['cleaver']``.

        :param environ the WSGI environ for the current request
        :param identity any implementation of
                        ``cleaver.identity.CleaverIdentityProvider``
        :param backend any implementation of
                        ``cleaver.backend.CleaverBackend``
        """
        # Fail fast on misconfigured middleware instead of at first use.
        if not isinstance(identity, CleaverIdentityProvider):
            raise RuntimeError(
                '%s must implement cleaver.identity.CleaverIdentityProvider' \
                    % identity
            )
        if not isinstance(backend, CleaverBackend):
            raise RuntimeError(
                '%s must implement cleaver.backend.CleaverBackend' % backend
            )
        self._identity = identity
        self._backend = backend
        self._environ = environ

    @property
    def identity(self):
        # The current user's identity as resolved from the WSGI environ.
        return self._identity.get_identity(self._environ)

    def split(self, experiment_name, *variants):
        """
        Used to split and track user experience amongst one or more variants.

        :param experiment_name a unique string name for the experiment
        :param *variants can take many forms, depending on usage.

            When no variants are provided, the test falls back to a simple
            True/False 50/50 split, e.g.,

            >>> sidebar() if split('include_sidebar') else empty()

            Otherwise, variants should be provided as arbitrary tuples:

            >>> split('text_color', ('red', '#F00'), ('blue', '#00F'))

            By default, variants are chosen with equal weight.  You can tip
            the scales if you like by passing a proportional weight in each
            variant tuple:

            >>> split('text_color', ('red', '#F00', 2), ('blue', '#00F', 4))
        """
        keys, values, weights = self._parse_variants(variants)
        b = self._backend

        # Record the experiment if it doesn't exist already
        experiment = b.get_experiment(experiment_name, keys)
        if experiment is None:
            b.save_experiment(experiment_name, keys)
            experiment = b.get_experiment(experiment_name, keys)

        # Retrieve the variant assigned to the current user
        variant = b.get_variant(self.identity, experiment.name)
        if variant is None:
            # ...or choose (and store) one randomly if it doesn't exist yet...
            # (Python 2 generator protocol: .next() pulls the single value.)
            variant = self._random_variant(keys, weights).next()
            b.participate(self.identity, experiment.name, variant)

        # Map the chosen variant key back to its user-supplied value.
        return dict(zip(keys, values))[variant]

    def score(self, experiment_name):
        """
        Used to mark the current experiment variant as "converted".

        :param experiment_name the string name of the experiment
        """
        self._backend.score(
            experiment_name,
            self._backend.get_variant(self.identity, experiment_name)
        )

    def _parse_variants(self, variants):
        # Transpose [(key, value[, weight]), ...] into (keys, values,
        # weights), defaulting each missing weight to 1 and falling back to
        # a True/False pair when no variants were supplied.
        if not len(variants):
            variants = [('True', True), ('False', False)]

        variants = map(
            lambda v: tuple(list(v) + [1]) if len(v) < 3 else v,
            variants
        )
        return izip_longest(*variants)

    def _random_variant(self, variants, weights):
        # Weighted random choice: build a cumulative-weight table, draw a
        # uniform integer in [0, total), and bisect it onto a variant.
        # Implemented as a generator; callers pull one value with .next().
        total = 0
        accumulator = []
        for w in weights:
            total += w
            accumulator.append(total)

        r = randint(0, total - 1)
        yield variants[bisect(accumulator, r)]
Documentation improvements.
from random import randint
from bisect import bisect
from itertools import izip_longest
from .backend import CleaverBackend
from .identity import CleaverIdentityProvider
class Cleaver(object):
    """Per-request entry point for A/B (split) testing.

    Combines an identity provider (who is the current user?) with a storage
    backend (which variant did they get, did they convert?) and exposes
    ``split`` and ``score`` to application code.  Python 2 code: note the
    ``izip_longest`` import and generator ``.next()`` usage below.
    """

    def __init__(self, environ, identity, backend):
        """
        Create a new Cleaver instance.

        Not generally instantiated directly, but established automatically by
        ``cleaver.SplitMiddleware`` and used within a WSGI application via
        ``request.environ['cleaver']``.

        :param environ the WSGI environ for the current request
        :param identity any implementation of
                        ``cleaver.identity.CleaverIdentityProvider``
        :param backend any implementation of
                        ``cleaver.backend.CleaverBackend``
        """
        # Fail fast on misconfigured middleware instead of at first use.
        if not isinstance(identity, CleaverIdentityProvider):
            raise RuntimeError(
                '%s must implement cleaver.identity.CleaverIdentityProvider' \
                    % identity
            )
        if not isinstance(backend, CleaverBackend):
            raise RuntimeError(
                '%s must implement cleaver.backend.CleaverBackend' % backend
            )
        self._identity = identity
        self._backend = backend
        self._environ = environ

    @property
    def identity(self):
        # The current user's identity as resolved from the WSGI environ.
        return self._identity.get_identity(self._environ)

    def split(self, experiment_name, *variants):
        """
        Used to split and track user experience amongst one or more variants.

        :param experiment_name a unique string name for the experiment
        :param *variants can take many forms, depending on usage.

            When no variants are provided, the test falls back to a simple
            True/False 50/50 split, e.g.,

            >>> sidebar() if split('include_sidebar') else empty()

            Otherwise, variants should be provided as arbitrary tuples in the
            format ('unique_string_label', any_value), ... e.g.,

            >>> split('text_color', ('red', '#F00'), ('blue', '#00F'))

            By default, variants are chosen with equal weight.  You can tip
            the scales if you like by passing a proportional *integer* weight
            as the third element in each variant tuple:

            >>> split('text_color', ('red', '#F00', 2), ('blue', '#00F', 4))
        """
        keys, values, weights = self._parse_variants(variants)
        b = self._backend

        # Record the experiment if it doesn't exist already
        experiment = b.get_experiment(experiment_name, keys)
        if experiment is None:
            b.save_experiment(experiment_name, keys)
            experiment = b.get_experiment(experiment_name, keys)

        # Retrieve the variant assigned to the current user
        variant = b.get_variant(self.identity, experiment.name)
        if variant is None:
            # ...or choose (and store) one randomly if it doesn't exist yet...
            # (Python 2 generator protocol: .next() pulls the single value.)
            variant = self._random_variant(keys, weights).next()
            b.participate(self.identity, experiment.name, variant)

        # Map the chosen variant key back to its user-supplied value.
        return dict(zip(keys, values))[variant]

    def score(self, experiment_name):
        """
        Used to mark the current experiment variant as "converted".

        :param experiment_name the string name of the experiment
        """
        self._backend.score(
            experiment_name,
            self._backend.get_variant(self.identity, experiment_name)
        )

    def _parse_variants(self, variants):
        # Transpose [(key, value[, weight]), ...] into (keys, values,
        # weights), defaulting each missing weight to 1 and falling back to
        # a True/False pair when no variants were supplied.
        if not len(variants):
            variants = [('True', True), ('False', False)]

        variants = map(
            lambda v: tuple(list(v) + [1]) if len(v) < 3 else v,
            variants
        )
        return izip_longest(*variants)

    def _random_variant(self, variants, weights):
        # Weighted random choice: build a cumulative-weight table, draw a
        # uniform integer in [0, total), and bisect it onto a variant.
        # Implemented as a generator; callers pull one value with .next().
        total = 0
        accumulator = []
        for w in weights:
            total += w
            accumulator.append(total)

        r = randint(0, total - 1)
        yield variants[bisect(accumulator, r)]
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import mimetypes
from uuid import uuid4
import wtforms
from wtforms.validators import NumberRange
from indico.modules.events.registration.fields.base import RegistrationFormFieldBase
from indico.modules.events.registration.models.registrations import RegistrationData
from indico.util.fs import secure_filename
from indico.util.string import crc32, normalize_phone_number
from indico.web.forms.validators import IndicoEmail
from MaKaC.webinterface.common.countries import CountryHolder
class TextField(RegistrationFormFieldBase):
    """Single-line free-text registration field."""
    name = 'text'
    wtf_field_class = wtforms.StringField
class NumberField(RegistrationFormFieldBase):
    """Integer registration field with an optional configured lower bound."""
    name = 'number'
    wtf_field_class = wtforms.IntegerField

    @property
    def validators(self):
        """Return a NumberRange validator when a minimum value is configured.

        The configured minimum lives in the field's plain data under the
        snake_case key 'min_value'; the previous lookup of camelCase
        'minValue' in the versioned data never found it, so the lower bound
        was silently ignored.
        """
        min_value = self.form_item.data.get('min_value', None)
        return [NumberRange(min=min_value)] if min_value else None
class TextAreaField(RegistrationFormFieldBase):
    """Multi-line free-text registration field."""
    name = 'textarea'
    wtf_field_class = wtforms.StringField


class SelectField(RegistrationFormFieldBase):
    """Single-choice (radio item) registration field."""
    name = 'radio'
    wtf_field_class = wtforms.StringField

    @property
    def default_value(self):
        # Map the configured default item caption to the id of the matching
        # radio item in the current versioned data; None if unset or missing.
        data = self.form_item.data
        versioned_data = self.form_item.current_data.versioned_data
        try:
            default_item = data['default_item']
        except KeyError:
            return None
        return next((x['id'] for x in versioned_data['radioitems'] if x['caption'] == default_item), None)

    @classmethod
    def modify_post_data(cls, post_data):
        # Assign a fresh UUID to each radio item posted by the client.
        items = post_data['radioitems']
        for item in items:
            item['id'] = unicode(uuid4())


class CheckboxField(RegistrationFormFieldBase):
    """Boolean checkbox registration field."""
    name = 'checkbox'
    wtf_field_class = wtforms.BooleanField


class DateField(RegistrationFormFieldBase):
    """Date (and optional time) registration field."""
    name = 'date'
    wtf_field_class = wtforms.StringField

    @classmethod
    def modify_post_data(cls, post_data):
        # Split a combined "<date> <time>" format string into separate
        # date_format / time_format entries.
        date_format = post_data['date_format'].split(' ')
        post_data['date_format'] = date_format[0]
        if len(date_format) == 2:
            post_data['time_format'] = date_format[1]


class BooleanField(RegistrationFormFieldBase):
    """Yes/no choice registration field."""
    name = 'yes/no'
    wtf_field_class = wtforms.StringField


class PhoneField(RegistrationFormFieldBase):
    """Phone-number field; input is normalized on submission."""
    name = 'phone'
    wtf_field_class = wtforms.StringField
    # Normalize non-empty input; map falsy input to the empty string.
    field_kwargs = {'filters': [lambda x: normalize_phone_number(x) if x else '']}


class CountryField(RegistrationFormFieldBase):
    """Country drop-down populated from the global country list."""
    name = 'country'
    wtf_field_class = wtforms.SelectField

    @property
    def field_kwargs(self):
        return {'choices': CountryHolder.getCountries().items()}
class FileField(RegistrationFormFieldBase):
    """File-upload registration field; stores the bytes plus metadata."""
    name = 'file'
    wtf_field_class = wtforms.FileField

    def save_data(self, registration, value):
        # Persist the uploaded file's content together with hash/size/name/
        # MIME metadata as a RegistrationData row; a missing upload is a
        # no-op.  Note: reads the whole file into memory.
        if value is None:
            return
        f = value.file
        content = f.read()
        metadata = {
            'hash': crc32(content),
            'size': len(content),
            'filename': secure_filename(value.filename, 'registration_form_file'),
            'content_type': mimetypes.guess_type(value.filename)[0] or value.mimetype or 'application/octet-stream'
        }
        registration.data.append(RegistrationData(field_data_id=self.form_item.current_data_id, file=content,
                                                  file_metadata=metadata))

    @property
    def default_value(self):
        # File uploads have no meaningful default.
        return None
class EmailField(RegistrationFormFieldBase):
    """Email-address field; input is lower-cased and validated."""
    name = 'email'
    wtf_field_class = wtforms.StringField
    # Lower-case non-empty input; leave falsy values untouched.
    field_kwargs = {'filters': [lambda x: x.lower() if x else x]}

    @property
    def validators(self):
        return [IndicoEmail()]
Fix validators property for NumberField
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import mimetypes
from uuid import uuid4
import wtforms
from wtforms.validators import NumberRange
from indico.modules.events.registration.fields.base import RegistrationFormFieldBase
from indico.modules.events.registration.models.registrations import RegistrationData
from indico.util.fs import secure_filename
from indico.util.string import crc32, normalize_phone_number
from indico.web.forms.validators import IndicoEmail
from MaKaC.webinterface.common.countries import CountryHolder
class TextField(RegistrationFormFieldBase):
    """Single-line free-text registration field."""
    name = 'text'
    wtf_field_class = wtforms.StringField


class NumberField(RegistrationFormFieldBase):
    """Integer registration field with an optional configured lower bound."""
    name = 'number'
    wtf_field_class = wtforms.IntegerField

    @property
    def validators(self):
        # Only attach a lower-bound validator when a minimum is configured.
        min_value = self.form_item.data.get('min_value', None)
        return [NumberRange(min=min_value)] if min_value else None
class TextAreaField(RegistrationFormFieldBase):
    """Multi-line free-text registration field."""
    name = 'textarea'
    wtf_field_class = wtforms.StringField


class SelectField(RegistrationFormFieldBase):
    """Single-choice (radio item) registration field."""
    name = 'radio'
    wtf_field_class = wtforms.StringField

    @property
    def default_value(self):
        # Map the configured default item caption to the id of the matching
        # radio item in the current versioned data; None if unset or missing.
        data = self.form_item.data
        versioned_data = self.form_item.current_data.versioned_data
        try:
            default_item = data['default_item']
        except KeyError:
            return None
        return next((x['id'] for x in versioned_data['radioitems'] if x['caption'] == default_item), None)

    @classmethod
    def modify_post_data(cls, post_data):
        # Assign a fresh UUID to each radio item posted by the client.
        items = post_data['radioitems']
        for item in items:
            item['id'] = unicode(uuid4())


class CheckboxField(RegistrationFormFieldBase):
    """Boolean checkbox registration field."""
    name = 'checkbox'
    wtf_field_class = wtforms.BooleanField


class DateField(RegistrationFormFieldBase):
    """Date (and optional time) registration field."""
    name = 'date'
    wtf_field_class = wtforms.StringField

    @classmethod
    def modify_post_data(cls, post_data):
        # Split a combined "<date> <time>" format string into separate
        # date_format / time_format entries.
        date_format = post_data['date_format'].split(' ')
        post_data['date_format'] = date_format[0]
        if len(date_format) == 2:
            post_data['time_format'] = date_format[1]


class BooleanField(RegistrationFormFieldBase):
    """Yes/no choice registration field."""
    name = 'yes/no'
    wtf_field_class = wtforms.StringField


class PhoneField(RegistrationFormFieldBase):
    """Phone-number field; input is normalized on submission."""
    name = 'phone'
    wtf_field_class = wtforms.StringField
    # Normalize non-empty input; map falsy input to the empty string.
    field_kwargs = {'filters': [lambda x: normalize_phone_number(x) if x else '']}


class CountryField(RegistrationFormFieldBase):
    """Country drop-down populated from the global country list."""
    name = 'country'
    wtf_field_class = wtforms.SelectField

    @property
    def field_kwargs(self):
        return {'choices': CountryHolder.getCountries().items()}
class FileField(RegistrationFormFieldBase):
    """File-upload registration field; stores the bytes plus metadata."""
    name = 'file'
    wtf_field_class = wtforms.FileField

    def save_data(self, registration, value):
        # Persist the uploaded file's content together with hash/size/name/
        # MIME metadata as a RegistrationData row; a missing upload is a
        # no-op.  Note: reads the whole file into memory.
        if value is None:
            return
        f = value.file
        content = f.read()
        metadata = {
            'hash': crc32(content),
            'size': len(content),
            'filename': secure_filename(value.filename, 'registration_form_file'),
            'content_type': mimetypes.guess_type(value.filename)[0] or value.mimetype or 'application/octet-stream'
        }
        registration.data.append(RegistrationData(field_data_id=self.form_item.current_data_id, file=content,
                                                  file_metadata=metadata))

    @property
    def default_value(self):
        # File uploads have no meaningful default.
        return None
class EmailField(RegistrationFormFieldBase):
    """Email-address field; input is lower-cased and validated."""
    name = 'email'
    wtf_field_class = wtforms.StringField
    # Lower-case non-empty input; leave falsy values untouched.
    field_kwargs = {'filters': [lambda x: x.lower() if x else x]}

    @property
    def validators(self):
        return [IndicoEmail()]
|
#!/usr/bin/python
# usage: ./smoke_test.py --mode OS_NAME --num-keys SOME_NUMBER_HERE
import time, sys, os, socket, random, time, signal, subprocess
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'test', 'common')))
import driver, workload_common
from vcoptparse import *
# Command-line options: number of keys to exercise, build mode, and the
# package format under test.
op = OptParser()
op["num_keys"] = IntFlag("--num-keys", 500)
op["mode"] = StringFlag("--mode", "debug")
op["pkg_type"] = StringFlag("--pkg-type", "deb") # "deb" or "rpm"
opts = op.parse(sys.argv)
num_keys = opts["num_keys"]
base_port = 11213 # port that RethinkDB runs from by default

# Build shell-command strings for the selected package manager: install a
# package file, locate the installed binary inside it, and uninstall by
# resolving a binary path back to its owning package.
if opts["pkg_type"] == "rpm":
    def install(path):
        return "rpm -i %s" % path
    def get_binary(path):
        return "rpm -qpil %s | grep /usr/bin" % path
    def uninstall(cmd_name):
        return "which %s | xargs readlink -f | xargs rpm -qf | xargs rpm -e" % cmd_name
elif opts["pkg_type"] == "deb":
    def install(path):
        return "dpkg -i %s" % path
    def get_binary(path):
        return "dpkg -c %s | grep /usr/bin/rethinkdb-.* | sed 's/^.*\(\/usr.*\)$/\\1/'" % path
    def uninstall(cmd_name):
        return "which %s | xargs readlink -f | xargs dpkg -S | sed 's/^\(.*\):.*$/\\1/' | xargs dpkg -r" % cmd_name
else:
    print >>sys.stderr, "Error: Unknown package type."
    # NOTE(review): exiting 0 on a bad --pkg-type hides the failure from
    # callers; exit(1) looks more appropriate -- confirm before changing.
    exit(0)
def purge_installed_packages():
    """Best-effort removal of any previously installed rethinkdb package.

    Lists /usr/bin/rethinkdb* binaries and asks the package manager to
    remove the package owning the first one.  Any failure (including no
    binaries present) is treated as "nothing to remove".
    """
    try:
        old_binaries_raw = exec_command(["ls", "/usr/bin/rethinkdb*"], shell = True).stdout.readlines()
    except Exception, e:
        # Broad catch is deliberate: a failing `ls` means a clean system.
        print "Nothing to remove."
        return
    old_binaries = map(lambda x: x.strip('\n'), old_binaries_raw)
    print "Binaries scheduled for removal: ", old_binaries
    exec_command(uninstall(old_binaries[0]), shell = True)

# NOTE(review): exec_command is defined *below* this call, so the NameError
# raised inside purge_installed_packages() here is swallowed by its broad
# except clause and this initial purge is a silent no-op.  Move this call
# below exec_command's definition to make it effective.
purge_installed_packages()
def exec_command(cmd, bg = False, shell = False):
    """Run `cmd` (echoing it first) and return the Popen object.

    A string is split on spaces when shell=False; a list is joined into one
    string when shell=True.  With bg=True the process is returned without
    waiting; otherwise we wait and raise on a non-zero exit status.
    """
    if type(cmd) == type("") and not shell:
        cmd = cmd.split(" ")
    elif type(cmd) == type([]) and shell:
        cmd = " ".join(cmd)
    print cmd
    if bg:
        return subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell) # doesn't actually run in background: it just skips the waiting part
    else:
        proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell)
        proc.wait()
        if proc.poll():
            # NOTE(review): poll() is the return code here, not a signal
            # number -- the message wording is misleading.
            raise RuntimeError("Error: command ended with signal %d." % proc.poll())
        return proc
def wait_until_started_up(proc, host, port, timeout = 600):
    """Poll until (host, port) accepts a TCP connection or `timeout` passes.

    Raises if `proc` dies first, or (via the while/else branch, which runs
    only when the loop was never broken out of) if the deadline expires.
    """
    time_limit = time.time() + timeout
    while time.time() < time_limit:
        if proc.poll() is not None:
            raise RuntimeError("Process stopped unexpectedly with return code %d." % proc.poll())
        s = socket.socket()
        try:
            s.connect((host, port))
        except socket.error, e:
            time.sleep(1)
        else:
            break
        finally:
            s.close()
    else:
        raise RuntimeError("Could not connect to process.")
def test_against(host, port, timeout = 600):
    """Smoke-test a memcache-protocol server at (host, port).

    Waits up to `timeout` seconds for a first successful set, then performs
    `num_keys` sets followed by `num_keys` gets and returns the counts of
    successful operations as (goodsets, goodgets).
    """
    with workload_common.make_memcache_connection({"address": (host, port), "mclib": "pylibmc", "protocol": "text"}) as mc:
        # Retry a sentinel set until the server starts answering.
        temp = 0
        time_limit = time.time() + timeout
        while not temp and time.time() < time_limit:
            try:
                temp = mc.set("test", "test")
                print temp
            except Exception, e:
                print e
                pass
            time.sleep(1)

        goodsets = 0
        goodgets = 0

        for i in range(num_keys):
            try:
                if mc.set(str(i), str(i)):
                    goodsets += 1
            except:
                pass

        for i in range(num_keys):
            try:
                if mc.get(str(i)) == str(i):
                    goodgets += 1
            except:
                pass

        return goodsets, goodgets
# Locate every built package matching the requested mode and package type.
cur_dir = exec_command("pwd").stdout.readline().strip('\n')
p = exec_command("find build/%s -name *.%s" % (opts["mode"], opts["pkg_type"]))
raw = p.stdout.readlines()
res_paths = map(lambda x: os.path.join(cur_dir, x.strip('\n')), raw)
print "Packages to install:", res_paths

# Install each package in turn, start the server, and run the smoke test.
failed_test = False
for path in res_paths:
    print "TESTING A NEW PACKAGE"
    print "Uninstalling old packages..."
    purge_installed_packages()
    print "Done uninstalling..."
    print "Installing RethinkDB..."
    target_binary_name = exec_command(get_binary(path), shell = True).stdout.readlines()[0].strip('\n')
    print "Target binary name:", target_binary_name
    exec_command(install(path))
    print "Starting RethinkDB..."
    # Start from a clean data directory so each package is tested fresh.
    exec_command('rm -rf rethinkdb_cluster_data')
    proc = exec_command("rethinkdb", bg = True)

    # gets the IP address: connect an outbound UDP socket and read the
    # local address the OS picked for it (no traffic is actually sent).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("rethinkdb.com", 80))
    ip = s.getsockname()[0]
    s.close()
    print "IP Address detected:", ip

    wait_until_started_up(proc, ip, base_port)
    print "Testing..."
    res = test_against(ip, base_port)
    print "Tests completed. Killing instance now..."
    proc.send_signal(signal.SIGINT)
    if res != (num_keys, num_keys):
        print "Done: FAILED"
        print "Results: %d successful sets, %d successful gets (%d total)" % (res[0], res[1], num_keys)
        failed_test = True
    else:
        print "Done: PASSED"

print "Done."
if failed_test:
    exit(1)
else:
    exit(0)
Removing core files along with rethinkdb_cluster_data.
#!/usr/bin/python
# usage: ./smoke_test.py --mode OS_NAME --num-keys SOME_NUMBER_HERE
import time, sys, os, socket, random, time, signal, subprocess
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'test', 'common')))
import driver, workload_common
from vcoptparse import *
# Command-line options: number of keys to exercise, build mode, and the
# package format under test.
op = OptParser()
op["num_keys"] = IntFlag("--num-keys", 500)
op["mode"] = StringFlag("--mode", "debug")
op["pkg_type"] = StringFlag("--pkg-type", "deb") # "deb" or "rpm"
opts = op.parse(sys.argv)
num_keys = opts["num_keys"]
base_port = 11213 # port that RethinkDB runs from by default

# Build shell-command strings for the selected package manager: install a
# package file, locate the installed binary inside it, and uninstall by
# resolving a binary path back to its owning package.
if opts["pkg_type"] == "rpm":
    def install(path):
        return "rpm -i %s" % path
    def get_binary(path):
        return "rpm -qpil %s | grep /usr/bin" % path
    def uninstall(cmd_name):
        return "which %s | xargs readlink -f | xargs rpm -qf | xargs rpm -e" % cmd_name
elif opts["pkg_type"] == "deb":
    def install(path):
        return "dpkg -i %s" % path
    def get_binary(path):
        return "dpkg -c %s | grep /usr/bin/rethinkdb-.* | sed 's/^.*\(\/usr.*\)$/\\1/'" % path
    def uninstall(cmd_name):
        return "which %s | xargs readlink -f | xargs dpkg -S | sed 's/^\(.*\):.*$/\\1/' | xargs dpkg -r" % cmd_name
else:
    print >>sys.stderr, "Error: Unknown package type."
    # NOTE(review): exiting 0 on a bad --pkg-type hides the failure from
    # callers; exit(1) looks more appropriate -- confirm before changing.
    exit(0)
def purge_installed_packages():
    """Best-effort removal of any previously installed rethinkdb package.

    Lists /usr/bin/rethinkdb* binaries and asks the package manager to
    remove the package owning the first one.  Any failure (including no
    binaries present) is treated as "nothing to remove".
    """
    try:
        old_binaries_raw = exec_command(["ls", "/usr/bin/rethinkdb*"], shell = True).stdout.readlines()
    except Exception, e:
        # Broad catch is deliberate: a failing `ls` means a clean system.
        print "Nothing to remove."
        return
    old_binaries = map(lambda x: x.strip('\n'), old_binaries_raw)
    print "Binaries scheduled for removal: ", old_binaries
    exec_command(uninstall(old_binaries[0]), shell = True)

# NOTE(review): exec_command is defined *below* this call, so the NameError
# raised inside purge_installed_packages() here is swallowed by its broad
# except clause and this initial purge is a silent no-op.  Move this call
# below exec_command's definition to make it effective.
purge_installed_packages()
def exec_command(cmd, bg = False, shell = False):
    """Run `cmd` (echoing it first) and return the Popen object.

    A string is split on spaces when shell=False; a list is joined into one
    string when shell=True.  With bg=True the process is returned without
    waiting; otherwise we wait and raise on a non-zero exit status.
    """
    if type(cmd) == type("") and not shell:
        cmd = cmd.split(" ")
    elif type(cmd) == type([]) and shell:
        cmd = " ".join(cmd)
    print cmd
    if bg:
        return subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell) # doesn't actually run in background: it just skips the waiting part
    else:
        proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell)
        proc.wait()
        if proc.poll():
            # NOTE(review): poll() is the return code here, not a signal
            # number -- the message wording is misleading.
            raise RuntimeError("Error: command ended with signal %d." % proc.poll())
        return proc
def wait_until_started_up(proc, host, port, timeout = 600):
    # Poll until the server at host:port accepts TCP connections, or raise
    # if `proc` dies first or `timeout` seconds elapse.
    time_limit = time.time() + timeout
    while time.time() < time_limit:
        if proc.poll() is not None:
            raise RuntimeError("Process stopped unexpectedly with return code %d." % proc.poll())
        s = socket.socket()
        try:
            s.connect((host, port))
        except socket.error, e:
            time.sleep(1)  # not accepting connections yet; retry
        else:
            break  # connected -- the server is up
        finally:
            s.close()
    else:
        # while/else: the loop ran to timeout without break-ing.
        raise RuntimeError("Could not connect to process.")
def test_against(host, port, timeout = 600):
    # Memcached-protocol smoke test: wait (up to `timeout` seconds) for a
    # first successful set, then write and read back `num_keys` keys.
    # Returns (successful_sets, successful_gets).
    with workload_common.make_memcache_connection({"address": (host, port), "mclib": "pylibmc", "protocol": "text"}) as mc:
        temp = 0
        time_limit = time.time() + timeout
        # Retry the initial set until the server responds or we time out.
        while not temp and time.time() < time_limit:
            try:
                temp = mc.set("test", "test")
                print temp
            except Exception, e:
                print e
                pass
            time.sleep(1)
        goodsets = 0
        goodgets = 0
        # Individual set/get failures are counted, not fatal.
        for i in range(num_keys):
            try:
                if mc.set(str(i), str(i)):
                    goodsets += 1
            except:
                pass
        for i in range(num_keys):
            try:
                if mc.get(str(i)) == str(i):
                    goodgets += 1
            except:
                pass
        return goodsets, goodgets
# --- Main driver: find built packages, then install/run/test/remove each ---
cur_dir = exec_command("pwd").stdout.readline().strip('\n')
# exec_command splits on spaces without a shell, so *.deb / *.rpm reaches
# find(1) unexpanded, as intended.
p = exec_command("find build/%s -name *.%s" % (opts["mode"], opts["pkg_type"]))
raw = p.stdout.readlines()
res_paths = map(lambda x: os.path.join(cur_dir, x.strip('\n')), raw)
print "Packages to install:", res_paths
failed_test = False
for path in res_paths:
    print "TESTING A NEW PACKAGE"
    print "Uninstalling old packages..."
    purge_installed_packages()
    print "Done uninstalling..."
    print "Installing RethinkDB..."
    target_binary_name = exec_command(get_binary(path), shell = True).stdout.readlines()[0].strip('\n')
    print "Target binary name:", target_binary_name
    exec_command(install(path))
    print "Starting RethinkDB..."
    # Clean any data/core files left over from a previous run.
    exec_command("rm -rf rethinkdb_cluster_data")
    exec_command("rm -f core.*")
    proc = exec_command("rethinkdb", bg = True)
    # gets the IP address
    # (UDP connect sends no packets; it just makes the kernel pick the
    # outbound interface, whose address we then read back.)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("rethinkdb.com", 80))
    ip = s.getsockname()[0]
    s.close()
    print "IP Address detected:", ip
    wait_until_started_up(proc, ip, base_port)
    print "Testing..."
    res = test_against(ip, base_port)
    print "Tests completed. Killing instance now..."
    # SIGINT asks the server to shut down cleanly; the process is not
    # waited on before moving to the next package.
    proc.send_signal(signal.SIGINT)
    if res != (num_keys, num_keys):
        print "Done: FAILED"
        print "Results: %d successful sets, %d successful gets (%d total)" % (res[0], res[1], num_keys)
        failed_test = True
    else:
        print "Done: PASSED"
print "Done."
if failed_test:
    exit(1)
else:
    exit(0)
|
#!/usr/bin/env python
# from click.testing import CliRunner
from vumi_http_proxy import clickme
from twisted.trial import unittest
from twisted.trial.unittest import SynchronousTestCase
# Testing segment
class TestClickMe(unittest.TestCase):
    # NOTE(review): this test is broken as written: SynchronousTestCase.patch
    # is called as an unbound method with `clickme.cli` standing in for
    # `self`, and its return value is a monkey-patcher, not a click Result --
    # it has no exit_code or output attributes, so both assertions would
    # raise AttributeError rather than test anything.
    def test_click(self):
        # runner = CliRunner()
        # import pdb; pdb.set_trace()
        result = SynchronousTestCase.patch(
            clickme.cli, clickme.cli.port, 8000)
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(str(result.output).splitlines()[0], (
            'Starting connection to 0.0.0.0:8080'))
Clickme patch
#!/usr/bin/env python
from click.testing import CliRunner
from vumi_http_proxy import clickme, http_proxy
from twisted.trial import unittest
# Testing segment
class TestClickMe(unittest.TestCase):
    """Tests for the clickme command-line entry point."""

    def test_click(self):
        """The CLI prints its startup banner and exits cleanly."""
        # Stub out the proxy's main loop so invoking the CLI returns
        # immediately instead of blocking on the reactor.
        self.patch(http_proxy.Initialize, 'main', lambda x: x)
        cli_runner = CliRunner()
        outcome = cli_runner.invoke(clickme.cli)
        self.assertEqual(outcome.exit_code, 0)
        expected_banner = ['Starting connection to 0.0.0.0:8080']
        self.assertEqual(outcome.output.splitlines(), expected_banner)
|
##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import binascii
import drda
from drda import codepoint as cp
def _recv_from_sock(sock, nbytes):
n = nbytes
recieved = b''
while n:
bs = sock.recv(n)
recieved += bs
n -= len(bs)
return recieved
def _send_to_sock(sock, b):
sock.send(b)
def _pack_null_string(v, enc):
if v is None:
return b'\xff'
b = v.encode(enc)
return b'\x00' + len(b).to_bytes(4, byteorder='big') + b
def _pack_binary(code_point, v):
b = code_point.to_bytes(2, byteorder='big') + v
return (len(b) + 2).to_bytes(2, byteorder='big') + b
def _pack_uint(code_point, v, size):
return _pack_binary(code_point, v.to_bytes(size, byteorder='big'))
def _pack_str(code_point, v, enc):
return _pack_binary(code_point, v.encode(enc))
def parse_string(b):
    """Parse one 2-byte-big-endian length-prefixed UTF-8 string (VCM/VCS).

    Returns (string, remaining_bytes); a zero length yields ''.
    """
    ln = int.from_bytes(b[:2], byteorder='big')
    s = b[2:2 + ln].decode('utf-8') if ln else ''
    return s, b[2 + ln:]


def parse_name(b):
    """Parse a VCM/VCS pair and return the first non-empty value.

    Both variants are always consumed from `b`; the first (mixed-case, VCM)
    value wins over the second (single-case, VCS) when present.
    Returns (name, remaining_bytes).
    """
    s1, b = parse_string(b)
    s2, b = parse_string(b)
    # (A dead read of the next field's length was removed here: it neither
    # consumed bytes nor influenced the result.)
    return s1 if s1 else s2, b
def pack_dds_object(code_point, o):
    """Frame `o` as a DDS object: 2-byte total length, 2-byte code point,
    then the payload."""
    header = (len(o) + 4).to_bytes(2, byteorder='big')
    return header + code_point.to_bytes(2, byteorder='big') + o


def parse_reply(obj):
    """Split a reply buffer into a {code_point: payload} mapping.

    Each parameter is a 2-byte inclusive length, a 2-byte code point and the
    payload; the buffer must be consumed exactly.
    """
    params = {}
    pos = 0
    total = len(obj)
    while pos < total:
        ln = int.from_bytes(obj[pos:pos + 2], byteorder='big')
        code_point = int.from_bytes(obj[pos + 2:pos + 4], byteorder='big')
        params[code_point] = obj[pos + 4:pos + ln]
        pos += ln
    assert pos == total
    return params
def parse_sqlcard_derby(obj, enc):
    """Parse a Derby SQLCARD reply.

    Returns (err, rest): `err` is a drda.OperationalError when the embedded
    sqlcode is negative, otherwise None; `rest` is the unconsumed tail.
    """
    sqlcode = int.from_bytes(obj[1:5], byteorder='big', signed=True)
    sqlstate = obj[5:10].decode('ascii')
    sqlerrproc = obj[10:18]   # parsed for layout documentation; unused
    misc = obj[18:56]         # parsed for layout documentation; unused
    msg_len = int.from_bytes(obj[56:58], byteorder='big')
    message = obj[58:58 + msg_len].decode(enc)
    rest = obj[58 + msg_len:]
    assert rest[:3] == b'\x00\x00\xff'
    rest = rest[3:]
    err = drda.OperationalError(sqlcode, sqlstate, message) if sqlcode < 0 else None
    return err, rest
def parse_sqlcard_db2(obj, message, enc, endian):
    """Parse a DB2 SQLCARD reply.

    `message` is the (already decoded) error text to attach to any error.
    Returns (err, rest): `err` is a drda.OperationalError when sqlcode is
    negative, otherwise None; `rest` is the unconsumed tail.
    """
    sqlcode = int.from_bytes(obj[1:5], byteorder=endian, signed=True)
    sqlstate = obj[5:10].decode('ascii')
    sqlerrproc = obj[10:18]
    misc = obj[18:54]
    rest = obj[54:]
    # BUG FIX: the three length-prefixed fields below were sliced from `obj`
    # instead of `rest`, so they held garbage; only the offset advancement on
    # `rest` was correct.
    ln = int.from_bytes(rest[:2], byteorder='big')
    sqlrdbname = rest[2:2+ln]
    rest = rest[2+ln:]
    ln = int.from_bytes(rest[:2], byteorder='big')
    sqlerrmsg_m = rest[2:2+ln]
    rest = rest[2+ln:]
    ln = int.from_bytes(rest[:2], byteorder='big')
    sqlerrmsg_s = rest[2:2+ln]
    rest = rest[2+ln:]
    if sqlcode < 0:
        err = drda.OperationalError(sqlcode, sqlstate, message)
    else:
        err = None
    return err, rest
def _parse_column(b):
    # Parse one column descriptor from a SQLDARD reply.
    # Returns (description, remaining_bytes); `description` follows the
    # DB-API 2.0 7-tuple shape:
    #   (name, type_code, display_size, internal_size, precision, scale, null_ok)
    # NOTE(review): sqllength is used for both display_size and
    # internal_size, and null_ok is always None -- confirm intended.
    precision = int.from_bytes(b[:2], byteorder='big')
    scale = int.from_bytes(b[2:4], byteorder='big')
    sqllength = int.from_bytes(b[4:12], byteorder='big')
    sqltype = int.from_bytes(b[12:14], byteorder='big')
    sqlccsid = int.from_bytes(b[14:16], byteorder='big')
    b = b[16:]
    # SQLDOPTGRP
    assert b[0] == 0x00 # not null
    b = b[3:]
    sqlname, b = parse_name(b)
    sqllabel, b = parse_name(b)
    sqlcomments, b = parse_name(b)
    # SQLUDTGRP
    if b[0] == 0x00: # not null
        b = b[5:]
        sqludtrdb, b = parse_string(b)
        sqlschema, b = parse_name(b)
        sqludtname, b = parse_name(b)
    else:
        # Null group: only the indicator byte is consumed.
        b = b[1:]
    # SQLDXGRP
    assert b[0] == 0x00 # not null
    b = b[9:]
    sqlxrdbnam, b = parse_string(b)
    sqlxcolname, b = parse_name(b)
    sqlxbasename, b = parse_name(b)
    sqlxschema, b = parse_name(b)
    sqlxname, b = parse_name(b)
    return (sqlname, sqltype, sqllength, sqllength, precision, scale, None), b
def parse_sqldard_derby(obj, enc):
    """Parse a Derby SQLDARD reply into (err, column descriptions)."""
    err, rest = parse_sqlcard_derby(obj, enc)
    description = []
    if not err:
        column_count = int.from_bytes(rest[19:21], byteorder='big')
        b = rest[21:]
        for _ in range(column_count):
            column, b = _parse_column(b)
            description.append(column)
    return err, description


def parse_sqldard_db2(obj, message, enc, endian):
    """Parse a DB2 SQLDARD reply into (err, column descriptions)."""
    err, rest = parse_sqlcard_db2(obj, message, enc, endian)
    description = []
    if not err:
        column_count = int.from_bytes(rest[19:21], byteorder='big')
        b = rest[21:]
        for _ in range(column_count):
            column, b = _parse_column(b)
            description.append(column)
    return err, description
def read_dds(sock):
    """Read one DDS packet from socket.

    Returns (dds_type, chained, number, code_point, payload).
    """
    header = _recv_from_sock(sock, 6)
    total_len = int.from_bytes(header[:2], byteorder='big')
    assert header[2] == 0xD0          # DDS magic byte
    dds_type = header[3] & 0b1111     # low nibble of the flags byte
    chained = header[3] & 0b01000000  # non-zero when another DSS follows
    number = int.from_bytes(header[4:6], byteorder='big')
    obj = _recv_from_sock(sock, total_len - 6)
    # The embedded object length must agree with the DSS header.
    assert int.from_bytes(obj[:2], byteorder='big') == total_len - 6
    code_point = int.from_bytes(obj[2:4], byteorder='big')
    return dds_type, chained, number, code_point, obj[4:]
def write_requests_dds(sock, obj_list):
    "Write request DDS packets"
    # Frames each object with a 6-byte DSS header (length, 0xD0 magic, flags,
    # correlation id). All packets except the last carry the "chained" bit;
    # statement-continuation code points reuse the current correlation id.
    cur_id = 1
    for i in range(len(obj_list)):
        o = obj_list[i]
        code_point = int.from_bytes(o[2:4], byteorder='big')
        _send_to_sock(sock, (len(o)+6).to_bytes(2, byteorder='big'))
        if code_point in (cp.SQLSTT, cp.SQLATTR):
            flag = 3 # DSS object
        else:
            flag = 1 # DSS request
        if i < len(obj_list) - 1:
            flag |= 0b01000000  # chained: another DSS follows
        if code_point in (
            cp.EXCSQLIMM, cp.PRPSQLSTT, cp.SQLATTR,
        ):
            # The following DSS continues this request, so keep the same
            # correlation id and set the "same correlator" bit.
            next_id = cur_id
            flag |= 0b00010000
        else:
            next_id = cur_id + 1
        _send_to_sock(sock, bytes([0xD0, flag]))
        _send_to_sock(sock, cur_id.to_bytes(2, byteorder='big'))
        _send_to_sock(sock, o)
        cur_id = next_id
def packEXCSAT(conn, mgrlvlls):
    """Pack an EXCSAT (exchange server attributes) request.

    BUG FIX: SRVNAM must carry the client's own host name, not the
    '<host>:<port>' of the server being connected to; use platform.node()
    (matching the corrected revision of this module).
    """
    import platform  # local import keeps this fix self-contained

    b = b''
    for p in mgrlvlls:
        b += p.to_bytes(2, byteorder='big')
    return pack_dds_object(cp.EXCSAT, (
        _pack_str(cp.EXTNAM, 'pydrda', 'cp500') +
        _pack_str(cp.SRVNAM, platform.node(), 'cp500') +
        _pack_str(cp.SRVRLSLV, 'pydrda', 'cp500') +
        _pack_binary(cp.MGRLVLLS, b) +
        _pack_str(cp.SRVCLSNM, 'pydrda', 'cp500')
    )
    )


def packEXCSAT_MGRLVLLS(mgrlvlls):
    """Pack an EXCSAT request carrying only the manager-level list."""
    b = b''
    for p in mgrlvlls:
        b += p.to_bytes(2, byteorder='big')
    return pack_dds_object(cp.EXCSAT, (_pack_binary(cp.MGRLVLLS, b)))
def packSECCHK(secmec, database, user, password, enc):
    """Pack a SECCHK (security check) request carrying the credentials."""
    payload = _pack_uint(cp.SECMEC, secmec, 2)
    payload += _pack_str(cp.RDBNAM, database, enc)
    payload += _pack_str(cp.USRID, user, enc)
    payload += _pack_str(cp.PASSWORD, password, enc)
    return pack_dds_object(cp.SECCHK, payload)


def packACCRDB(rdbnam, enc):
    """Pack an ACCRDB (access RDB) request for database `rdbnam`.

    The CRRTKN and TYPDEFOVR values are fixed byte strings.
    """
    payload = _pack_str(cp.RDBNAM, rdbnam, enc)
    payload += _pack_uint(cp.RDBACCCL, cp.SQLAM, 2)
    payload += _pack_str(cp.PRDID, 'DNC10130', enc)
    payload += _pack_str(cp.TYPDEFNAM, 'QTDSQLASC', enc)
    payload += _pack_binary(
        cp.CRRTKN,
        binascii.unhexlify(b'd5c6f0f0f0f0f0f12ec3f0c1f50155630d5a11'))
    payload += _pack_binary(
        cp.TYPDEFOVR,
        binascii.unhexlify(b'0006119c04b80006119d04b00006119e04b8'))
    return pack_dds_object(cp.ACCRDB, payload)


def packACCSEC(database, secmec):
    """Pack an ACCSEC (access security) request."""
    payload = _pack_uint(cp.SECMEC, secmec, 2)
    payload += _pack_str(cp.RDBNAM, database, 'cp500')
    return pack_dds_object(cp.ACCSEC, payload)
def packRDBCMM():
return pack_dds_object(cp.RDBCMM, bytes())
def _packPKGNAMCSN(database):
pkgnamcsn = bytearray(
binascii.a2b_hex(
'0044211353414d504c452020202020202020202020204e554c4c4944202020202020202020202020'
'53514c43324f323620202020202020202020414141414166416400c9'
)
)
dbnam = (database + ' ' * 18).encode('utf-8')[:18]
pkgnamcsn[4:22] = dbnam
return bytes(pkgnamcsn)
def packEXCSQLIMM(database):
    """Pack an EXCSQLIMM (execute immediate SQL statement) request."""
    payload = _packPKGNAMCSN(database) + _pack_binary(cp.RDBCMTOK, bytes([241]))
    return pack_dds_object(cp.EXCSQLIMM, payload)


def packPRPSQLSTT_derby(database):
    """Pack a PRPSQLSTT (prepare SQL statement) request for Derby."""
    payload = _packPKGNAMCSN(database)
    payload += _pack_binary(cp.RTNSQLDA, bytes([241]))
    payload += _pack_binary(cp.TYPSQLDA, bytes([4]))
    return pack_dds_object(cp.PRPSQLSTT, payload)


def packPRPSQLSTT_db2(database):
    """Pack a PRPSQLSTT (prepare SQL statement) request for DB2."""
    return pack_dds_object(cp.PRPSQLSTT, _packPKGNAMCSN(database))


def packDSCSQLSTT(database):
    """Pack a DSCSQLSTT (describe SQL statement) request."""
    payload = _packPKGNAMCSN(database) + _pack_uint(cp.QRYINSID, 0, 8)
    return pack_dds_object(cp.DSCSQLSTT, payload)


def packEXCSQLSET(database):
    """Pack an EXCSQLSET (set SQL environment) request."""
    return pack_dds_object(cp.EXCSQLSET, _packPKGNAMCSN(database))


def packOPNQRY_derby(database):
    """Pack an OPNQRY (open query) request for Derby."""
    payload = _packPKGNAMCSN(database)
    payload += _pack_uint(cp.QRYBLKSZ, 32767, 4)
    payload += _pack_binary(cp.QRYCLSIMP, bytes([1]))
    return pack_dds_object(cp.OPNQRY, payload)


def packOPNQRY_db2(database):
    """Pack an OPNQRY (open query) request for DB2."""
    payload = _packPKGNAMCSN(database)
    payload += _pack_uint(cp.QRYBLKSZ, 32767, 4)
    payload += _pack_binary(cp.DYNDTAFMT, bytes([0xF1]))
    return pack_dds_object(cp.OPNQRY, payload)


def packCLSQRY(database):
    """Pack a CLSQRY (close query) request."""
    payload = _packPKGNAMCSN(database) + _pack_uint(cp.QRYINSID, 0, 8)
    return pack_dds_object(cp.CLSQRY, payload)


def packSQLSTT(sql):
    """Pack an SQLSTT object carrying the SQL statement text."""
    payload = _pack_null_string(sql, 'utf-8') + _pack_null_string(None, 'utf-8')
    return pack_dds_object(cp.SQLSTT, payload)


def packSQLATTR(attr):
    """Pack an SQLATTR object carrying statement attribute text."""
    payload = _pack_null_string(attr, 'utf-8') + _pack_null_string(None, 'utf-8')
    return pack_dds_object(cp.SQLATTR, payload)
fix SRVNAM
##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import platform
import binascii
import drda
from drda import codepoint as cp
def _recv_from_sock(sock, nbytes):
n = nbytes
recieved = b''
while n:
bs = sock.recv(n)
recieved += bs
n -= len(bs)
return recieved
def _send_to_sock(sock, b):
sock.send(b)
def _pack_null_string(v, enc):
if v is None:
return b'\xff'
b = v.encode(enc)
return b'\x00' + len(b).to_bytes(4, byteorder='big') + b
def _pack_binary(code_point, v):
b = code_point.to_bytes(2, byteorder='big') + v
return (len(b) + 2).to_bytes(2, byteorder='big') + b
def _pack_uint(code_point, v, size):
return _pack_binary(code_point, v.to_bytes(size, byteorder='big'))
def _pack_str(code_point, v, enc):
return _pack_binary(code_point, v.encode(enc))
def parse_string(b):
    """Parse one 2-byte-big-endian length-prefixed UTF-8 string (VCM/VCS).

    Returns (string, remaining_bytes); a zero length yields ''.
    """
    ln = int.from_bytes(b[:2], byteorder='big')
    s = b[2:2 + ln].decode('utf-8') if ln else ''
    return s, b[2 + ln:]


def parse_name(b):
    """Parse a VCM/VCS pair and return the first non-empty value.

    Both variants are always consumed from `b`; the first (mixed-case, VCM)
    value wins over the second (single-case, VCS) when present.
    Returns (name, remaining_bytes).
    """
    s1, b = parse_string(b)
    s2, b = parse_string(b)
    # (A dead read of the next field's length was removed here: it neither
    # consumed bytes nor influenced the result.)
    return s1 if s1 else s2, b
def pack_dds_object(code_point, o):
    """Frame `o` as a DDS object: 2-byte total length, 2-byte code point,
    then the payload."""
    header = (len(o) + 4).to_bytes(2, byteorder='big')
    return header + code_point.to_bytes(2, byteorder='big') + o


def parse_reply(obj):
    """Split a reply buffer into a {code_point: payload} mapping.

    Each parameter is a 2-byte inclusive length, a 2-byte code point and the
    payload; the buffer must be consumed exactly.
    """
    params = {}
    pos = 0
    total = len(obj)
    while pos < total:
        ln = int.from_bytes(obj[pos:pos + 2], byteorder='big')
        code_point = int.from_bytes(obj[pos + 2:pos + 4], byteorder='big')
        params[code_point] = obj[pos + 4:pos + ln]
        pos += ln
    assert pos == total
    return params
def parse_sqlcard_derby(obj, enc):
    """Parse a Derby SQLCARD reply.

    Returns (err, rest): `err` is a drda.OperationalError when the embedded
    sqlcode is negative, otherwise None; `rest` is the unconsumed tail.
    """
    sqlcode = int.from_bytes(obj[1:5], byteorder='big', signed=True)
    sqlstate = obj[5:10].decode('ascii')
    sqlerrproc = obj[10:18]   # parsed for layout documentation; unused
    misc = obj[18:56]         # parsed for layout documentation; unused
    msg_len = int.from_bytes(obj[56:58], byteorder='big')
    message = obj[58:58 + msg_len].decode(enc)
    rest = obj[58 + msg_len:]
    assert rest[:3] == b'\x00\x00\xff'
    rest = rest[3:]
    err = drda.OperationalError(sqlcode, sqlstate, message) if sqlcode < 0 else None
    return err, rest
def parse_sqlcard_db2(obj, message, enc, endian):
    """Parse a DB2 SQLCARD reply.

    `message` is the (already decoded) error text to attach to any error.
    Returns (err, rest): `err` is a drda.OperationalError when sqlcode is
    negative, otherwise None; `rest` is the unconsumed tail.
    """
    sqlcode = int.from_bytes(obj[1:5], byteorder=endian, signed=True)
    sqlstate = obj[5:10].decode('ascii')
    sqlerrproc = obj[10:18]
    misc = obj[18:54]
    rest = obj[54:]
    # BUG FIX: the three length-prefixed fields below were sliced from `obj`
    # instead of `rest`, so they held garbage; only the offset advancement on
    # `rest` was correct.
    ln = int.from_bytes(rest[:2], byteorder='big')
    sqlrdbname = rest[2:2+ln]
    rest = rest[2+ln:]
    ln = int.from_bytes(rest[:2], byteorder='big')
    sqlerrmsg_m = rest[2:2+ln]
    rest = rest[2+ln:]
    ln = int.from_bytes(rest[:2], byteorder='big')
    sqlerrmsg_s = rest[2:2+ln]
    rest = rest[2+ln:]
    if sqlcode < 0:
        err = drda.OperationalError(sqlcode, sqlstate, message)
    else:
        err = None
    return err, rest
def _parse_column(b):
    # Parse one column descriptor from a SQLDARD reply.
    # Returns (description, remaining_bytes); `description` follows the
    # DB-API 2.0 7-tuple shape:
    #   (name, type_code, display_size, internal_size, precision, scale, null_ok)
    # NOTE(review): sqllength is used for both display_size and
    # internal_size, and null_ok is always None -- confirm intended.
    precision = int.from_bytes(b[:2], byteorder='big')
    scale = int.from_bytes(b[2:4], byteorder='big')
    sqllength = int.from_bytes(b[4:12], byteorder='big')
    sqltype = int.from_bytes(b[12:14], byteorder='big')
    sqlccsid = int.from_bytes(b[14:16], byteorder='big')
    b = b[16:]
    # SQLDOPTGRP
    assert b[0] == 0x00 # not null
    b = b[3:]
    sqlname, b = parse_name(b)
    sqllabel, b = parse_name(b)
    sqlcomments, b = parse_name(b)
    # SQLUDTGRP
    if b[0] == 0x00: # not null
        b = b[5:]
        sqludtrdb, b = parse_string(b)
        sqlschema, b = parse_name(b)
        sqludtname, b = parse_name(b)
    else:
        # Null group: only the indicator byte is consumed.
        b = b[1:]
    # SQLDXGRP
    assert b[0] == 0x00 # not null
    b = b[9:]
    sqlxrdbnam, b = parse_string(b)
    sqlxcolname, b = parse_name(b)
    sqlxbasename, b = parse_name(b)
    sqlxschema, b = parse_name(b)
    sqlxname, b = parse_name(b)
    return (sqlname, sqltype, sqllength, sqllength, precision, scale, None), b
def parse_sqldard_derby(obj, enc):
    """Parse a Derby SQLDARD reply into (err, column descriptions)."""
    err, rest = parse_sqlcard_derby(obj, enc)
    description = []
    if not err:
        column_count = int.from_bytes(rest[19:21], byteorder='big')
        b = rest[21:]
        for _ in range(column_count):
            column, b = _parse_column(b)
            description.append(column)
    return err, description


def parse_sqldard_db2(obj, message, enc, endian):
    """Parse a DB2 SQLDARD reply into (err, column descriptions)."""
    err, rest = parse_sqlcard_db2(obj, message, enc, endian)
    description = []
    if not err:
        column_count = int.from_bytes(rest[19:21], byteorder='big')
        b = rest[21:]
        for _ in range(column_count):
            column, b = _parse_column(b)
            description.append(column)
    return err, description
def read_dds(sock):
    """Read one DDS packet from socket.

    Returns (dds_type, chained, number, code_point, payload).
    """
    header = _recv_from_sock(sock, 6)
    total_len = int.from_bytes(header[:2], byteorder='big')
    assert header[2] == 0xD0          # DDS magic byte
    dds_type = header[3] & 0b1111     # low nibble of the flags byte
    chained = header[3] & 0b01000000  # non-zero when another DSS follows
    number = int.from_bytes(header[4:6], byteorder='big')
    obj = _recv_from_sock(sock, total_len - 6)
    # The embedded object length must agree with the DSS header.
    assert int.from_bytes(obj[:2], byteorder='big') == total_len - 6
    code_point = int.from_bytes(obj[2:4], byteorder='big')
    return dds_type, chained, number, code_point, obj[4:]
def write_requests_dds(sock, obj_list):
    "Write request DDS packets"
    # Frames each object with a 6-byte DSS header (length, 0xD0 magic, flags,
    # correlation id). All packets except the last carry the "chained" bit;
    # statement-continuation code points reuse the current correlation id.
    cur_id = 1
    for i in range(len(obj_list)):
        o = obj_list[i]
        code_point = int.from_bytes(o[2:4], byteorder='big')
        _send_to_sock(sock, (len(o)+6).to_bytes(2, byteorder='big'))
        if code_point in (cp.SQLSTT, cp.SQLATTR):
            flag = 3 # DSS object
        else:
            flag = 1 # DSS request
        if i < len(obj_list) - 1:
            flag |= 0b01000000  # chained: another DSS follows
        if code_point in (
            cp.EXCSQLIMM, cp.PRPSQLSTT, cp.SQLATTR,
        ):
            # The following DSS continues this request, so keep the same
            # correlation id and set the "same correlator" bit.
            next_id = cur_id
            flag |= 0b00010000
        else:
            next_id = cur_id + 1
        _send_to_sock(sock, bytes([0xD0, flag]))
        _send_to_sock(sock, cur_id.to_bytes(2, byteorder='big'))
        _send_to_sock(sock, o)
        cur_id = next_id
def packEXCSAT(conn, mgrlvlls):
    """Pack an EXCSAT (exchange server attributes) request.

    Advertises the client's attributes: external name, the local host name
    (SRVNAM, via platform.node()), release level, the supported manager
    levels and the server class name.
    """
    levels = b''.join(p.to_bytes(2, byteorder='big') for p in mgrlvlls)
    payload = _pack_str(cp.EXTNAM, 'pydrda', 'cp500')
    payload += _pack_str(cp.SRVNAM, platform.node(), 'cp500')
    payload += _pack_str(cp.SRVRLSLV, 'pydrda', 'cp500')
    payload += _pack_binary(cp.MGRLVLLS, levels)
    payload += _pack_str(cp.SRVCLSNM, 'pydrda', 'cp500')
    return pack_dds_object(cp.EXCSAT, payload)


def packEXCSAT_MGRLVLLS(mgrlvlls):
    """Pack an EXCSAT request carrying only the manager-level list."""
    levels = b''.join(p.to_bytes(2, byteorder='big') for p in mgrlvlls)
    return pack_dds_object(cp.EXCSAT, _pack_binary(cp.MGRLVLLS, levels))
def packSECCHK(secmec, database, user, password, enc):
    """Pack a SECCHK (security check) request carrying the credentials."""
    payload = _pack_uint(cp.SECMEC, secmec, 2)
    payload += _pack_str(cp.RDBNAM, database, enc)
    payload += _pack_str(cp.USRID, user, enc)
    payload += _pack_str(cp.PASSWORD, password, enc)
    return pack_dds_object(cp.SECCHK, payload)


def packACCRDB(rdbnam, enc):
    """Pack an ACCRDB (access RDB) request for database `rdbnam`.

    The CRRTKN and TYPDEFOVR values are fixed byte strings.
    """
    payload = _pack_str(cp.RDBNAM, rdbnam, enc)
    payload += _pack_uint(cp.RDBACCCL, cp.SQLAM, 2)
    payload += _pack_str(cp.PRDID, 'DNC10130', enc)
    payload += _pack_str(cp.TYPDEFNAM, 'QTDSQLASC', enc)
    payload += _pack_binary(
        cp.CRRTKN,
        binascii.unhexlify(b'd5c6f0f0f0f0f0f12ec3f0c1f50155630d5a11'))
    payload += _pack_binary(
        cp.TYPDEFOVR,
        binascii.unhexlify(b'0006119c04b80006119d04b00006119e04b8'))
    return pack_dds_object(cp.ACCRDB, payload)


def packACCSEC(database, secmec):
    """Pack an ACCSEC (access security) request."""
    payload = _pack_uint(cp.SECMEC, secmec, 2)
    payload += _pack_str(cp.RDBNAM, database, 'cp500')
    return pack_dds_object(cp.ACCSEC, payload)
def packRDBCMM():
return pack_dds_object(cp.RDBCMM, bytes())
def _packPKGNAMCSN(database):
pkgnamcsn = bytearray(
binascii.a2b_hex(
'0044211353414d504c452020202020202020202020204e554c4c4944202020202020202020202020'
'53514c43324f323620202020202020202020414141414166416400c9'
)
)
dbnam = (database + ' ' * 18).encode('utf-8')[:18]
pkgnamcsn[4:22] = dbnam
return bytes(pkgnamcsn)
def packEXCSQLIMM(database):
    """Pack an EXCSQLIMM (execute immediate SQL statement) request."""
    payload = _packPKGNAMCSN(database) + _pack_binary(cp.RDBCMTOK, bytes([241]))
    return pack_dds_object(cp.EXCSQLIMM, payload)


def packPRPSQLSTT_derby(database):
    """Pack a PRPSQLSTT (prepare SQL statement) request for Derby."""
    payload = _packPKGNAMCSN(database)
    payload += _pack_binary(cp.RTNSQLDA, bytes([241]))
    payload += _pack_binary(cp.TYPSQLDA, bytes([4]))
    return pack_dds_object(cp.PRPSQLSTT, payload)


def packPRPSQLSTT_db2(database):
    """Pack a PRPSQLSTT (prepare SQL statement) request for DB2."""
    return pack_dds_object(cp.PRPSQLSTT, _packPKGNAMCSN(database))


def packDSCSQLSTT(database):
    """Pack a DSCSQLSTT (describe SQL statement) request."""
    payload = _packPKGNAMCSN(database) + _pack_uint(cp.QRYINSID, 0, 8)
    return pack_dds_object(cp.DSCSQLSTT, payload)


def packEXCSQLSET(database):
    """Pack an EXCSQLSET (set SQL environment) request."""
    return pack_dds_object(cp.EXCSQLSET, _packPKGNAMCSN(database))


def packOPNQRY_derby(database):
    """Pack an OPNQRY (open query) request for Derby."""
    payload = _packPKGNAMCSN(database)
    payload += _pack_uint(cp.QRYBLKSZ, 32767, 4)
    payload += _pack_binary(cp.QRYCLSIMP, bytes([1]))
    return pack_dds_object(cp.OPNQRY, payload)


def packOPNQRY_db2(database):
    """Pack an OPNQRY (open query) request for DB2."""
    payload = _packPKGNAMCSN(database)
    payload += _pack_uint(cp.QRYBLKSZ, 32767, 4)
    payload += _pack_binary(cp.DYNDTAFMT, bytes([0xF1]))
    return pack_dds_object(cp.OPNQRY, payload)


def packCLSQRY(database):
    """Pack a CLSQRY (close query) request."""
    payload = _packPKGNAMCSN(database) + _pack_uint(cp.QRYINSID, 0, 8)
    return pack_dds_object(cp.CLSQRY, payload)


def packSQLSTT(sql):
    """Pack an SQLSTT object carrying the SQL statement text."""
    payload = _pack_null_string(sql, 'utf-8') + _pack_null_string(None, 'utf-8')
    return pack_dds_object(cp.SQLSTT, payload)


def packSQLATTR(attr):
    """Pack an SQLATTR object carrying statement attribute text."""
    payload = _pack_null_string(attr, 'utf-8') + _pack_null_string(None, 'utf-8')
    return pack_dds_object(cp.SQLATTR, payload)
|
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: admin.py 15 2008-01-14 15:39:38Z s0undt3ch $
# =============================================================================
# $URL: http://devnull.ufsoft.org/svn/TracAdsPanel/trunk/adspanel/admin.py $
# $LastChangedDate: 2008-01-14 15:39:38 +0000 (Mon, 14 Jan 2008) $
# $Rev: 15 $
# $LastChangedBy: s0undt3ch $
# =============================================================================
# Copyright (C) 2008 UfSoft.org - Pedro Algarvio <ufs@ufsoft.org>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
from trac.core import *
from trac.web.chrome import ITemplateProvider
from trac.admin import IAdminPanelProvider
from trac.config import Option, BoolOption, _TRUE_VALUES
from trac.util.text import unicode_unquote
from pkg_resources import resource_filename
class AdsAdminPanel(Component):
    """Trac admin panel for configuring the ads-panel plugin: the raw ads
    HTML code (stored in the `system` table) and its display options
    (stored in trac.ini under [adspanel])."""

    implements(ITemplateProvider, IAdminPanelProvider)

    def __init__(self):
        # Cache of Option objects keyed by option name; refreshed by
        # _update_config() on every panel render.
        self.options = {}

    # ITemplateProvider methods
    def get_htdocs_dirs(self):
        """Return the absolute path of a directory containing additional
        static resources (such as images, style sheets, etc).
        """
        return []

    def get_templates_dirs(self):
        """Return the absolute path of the directory containing the provided
        Genshi templates.
        """
        return [resource_filename(__name__, 'templates')]

    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        # Only TRAC_ADMIN users get to see the panel.
        if req.perm.has_permission('TRAC_ADMIN'):
            yield ('adspanel', 'Ads Panel', 'config', 'Configuration')

    def render_admin_panel(self, req, cat, page, path_info):
        # On POST: persist the boolean options to trac.ini and the raw ads
        # HTML code to the `system` table, then redirect back to this panel.
        if req.method.lower() == 'post':
            self.config.set('adspanel', 'hide_for_authenticated',
                            req.args.get('hide_for_authenticated') in
                            _TRUE_VALUES)
            self.config.set('adspanel', 'store_in_session',
                            req.args.get('store_in_session') in _TRUE_VALUES)
            self.config.save()
            code = req.args.get('ads_code')
            cursor = self.env.get_db_cnx().cursor()
            cursor.execute('SELECT value FROM system WHERE name=%s',
                           ('adspanel.code',))
            if cursor.fetchone():
                self.log.debug('Updating Ads HTML Code')
                cursor.execute('UPDATE system SET value=%s WHERE name=%s',
                               (code, 'adspanel.code'))
            else:
                self.log.debug('Inserting Ads HTML Code')
                cursor.execute('INSERT INTO system (name,value) VALUES (%s,%s)',
                               ('adspanel.code', code))
            req.redirect(req.href.admin(cat, page))
        self._update_config()
        return 'ads_admin.html', {'ads_options': self.options}

    # Internal methods
    def _update_config(self):
        # Refresh self.options from the registered [adspanel] options and
        # from the ads code stored in the `system` table.
        for option in [option for option in Option.registry.values()
                       if option.section == 'adspanel']:
            value = ''
            if option.name in ('hide_for_authenticated', 'store_in_session'):
                value = self.config.getbool('adspanel', option.name,
                                            option.default)
            elif option.name == 'ads_code':
                # Still get the Option to get __doc__ from it
                value = self.config.get('adspanel', option.name, option.default)
            # NOTE(review): this stores a Python bool for the checkbox
            # options; the template presumably compares against the lowercase
            # string form ('true'/'false') -- confirm against ads_admin.html.
            option.value = value
            self.options[option.name] = option
        cursor = self.env.get_db_cnx().cursor()
        cursor.execute('SELECT value FROM system WHERE name=%s',
                       ('adspanel.code',))
        code = cursor.fetchone()
        if code:
            code = unicode_unquote(code[0])
        else:
            code = ''
        # The ads code always comes from the database, overriding whatever
        # the loop above set for it.
        self.options['ads_code'].value = code
[svn] Admin panel fix.
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: admin.py 22 2008-01-14 17:49:58Z s0undt3ch $
# =============================================================================
# $URL: http://devnull.ufsoft.org/svn/TracAdsPanel/trunk/adspanel/admin.py $
# $LastChangedDate: 2008-01-14 17:49:58 +0000 (Mon, 14 Jan 2008) $
# $Rev: 22 $
# $LastChangedBy: s0undt3ch $
# =============================================================================
# Copyright (C) 2008 UfSoft.org - Pedro Algarvio <ufs@ufsoft.org>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
from trac.core import *
from trac.web.chrome import ITemplateProvider
from trac.admin import IAdminPanelProvider
from trac.config import Option, BoolOption, _TRUE_VALUES
from trac.util.text import unicode_unquote
from pkg_resources import resource_filename
class AdsAdminPanel(Component):
    """Trac admin panel for configuring the ads-panel plugin: the raw ads
    HTML code (stored in the `system` table) and its display options
    (stored in trac.ini under [adspanel])."""

    implements(ITemplateProvider, IAdminPanelProvider)

    def __init__(self):
        # Cache of Option objects keyed by option name; refreshed by
        # _update_config() on every panel render.
        self.options = {}

    # ITemplateProvider methods
    def get_htdocs_dirs(self):
        """Return the absolute path of a directory containing additional
        static resources (such as images, style sheets, etc).
        """
        return []

    def get_templates_dirs(self):
        """Return the absolute path of the directory containing the provided
        Genshi templates.
        """
        return [resource_filename(__name__, 'templates')]

    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        # Only TRAC_ADMIN users get to see the panel.
        if req.perm.has_permission('TRAC_ADMIN'):
            yield ('adspanel', 'Ads Panel', 'config', 'Configuration')

    def render_admin_panel(self, req, cat, page, path_info):
        # On POST: persist the boolean options to trac.ini and the raw ads
        # HTML code to the `system` table, then redirect back to this panel.
        if req.method.lower() == 'post':
            self.config.set('adspanel', 'hide_for_authenticated',
                            req.args.get('hide_for_authenticated') in
                            _TRUE_VALUES)
            self.config.set('adspanel', 'store_in_session',
                            req.args.get('store_in_session') in _TRUE_VALUES)
            self.config.save()
            code = req.args.get('ads_code')
            cursor = self.env.get_db_cnx().cursor()
            cursor.execute('SELECT value FROM system WHERE name=%s',
                           ('adspanel.code',))
            if cursor.fetchone():
                self.log.debug('Updating Ads HTML Code')
                cursor.execute('UPDATE system SET value=%s WHERE name=%s',
                               (code, 'adspanel.code'))
            else:
                self.log.debug('Inserting Ads HTML Code')
                cursor.execute('INSERT INTO system (name,value) VALUES (%s,%s)',
                               ('adspanel.code', code))
            req.redirect(req.href.admin(cat, page))
        self._update_config()
        return 'ads_admin.html', {'ads_options': self.options}

    # Internal methods
    def _update_config(self):
        # Refresh self.options from the registered [adspanel] options and
        # from the ads code stored in the `system` table.
        for option in [option for option in Option.registry.values()
                       if option.section == 'adspanel']:
            value = ''
            if option.name in ('hide_for_authenticated', 'store_in_session'):
                value = self.config.getbool('adspanel', option.name,
                                            option.default)
            elif option.name == 'ads_code':
                # Still get the Option to get __doc__ from it
                value = self.config.get('adspanel', option.name, option.default)
            # Normalize to a lowercase string ('true'/'false' for the
            # booleans); the ads code value set here is overridden from the
            # database just below.
            option.value = str(value).lower()
            self.options[option.name] = option
        cursor = self.env.get_db_cnx().cursor()
        cursor.execute('SELECT value FROM system WHERE name=%s',
                       ('adspanel.code',))
        code = cursor.fetchone()
        if code:
            code = unicode_unquote(code[0])
        else:
            code = ''
        # The ads code always comes from the database, overriding the
        # lowercased value stored by the loop above.
        self.options['ads_code'].value = code
|
"""Model an algorithm as a list of functions.
Installation
------------
:py:mod:`algorithm` is available on `GitHub`_ and on `PyPI`_::
$ pip install algorithm
We `test <https://travis-ci.org/gittip/algorithm.py>`_ against
Python 2.6, 2.7, 3.2, and 3.3.
:py:mod:`algorithm` is MIT-licensed.
.. _GitHub: https://github.com/gittip/algorithm.py
.. _PyPI: https://pypi.python.org/pypi/algorithm
Tutorial
--------
This module provides an abstraction for implementing arbitrary algorithms as a
list of functions that operate on a shared state dictionary. Algorithms defined
this way are easy to arbitrarily modify at run time, and they provide cascading
exception handling.
To get started, define some functions:
>>> def foo():
... return {'baz': 1}
...
>>> def bar():
... return {'buz': 2}
...
>>> def bloo(baz, buz):
... return {'sum': baz + buz}
...
Each function returns a :py:class:`dict`, which is used to update the state of
the current run of the algorithm. Names from the state dictionary are made
available to downstream functions via :py:mod:`dependency_injection`. Now
make an :py:class:`Algorithm` object:
>>> from algorithm import Algorithm
>>> blah = Algorithm(foo, bar, bloo)
The functions you passed to the constructor are loaded into a list:
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function bloo ...>]
Now you can use :py:func:`~Algorithm.run` to run the algorithm. You'll get back
a dictionary representing the algorithm's final state:
>>> state = blah.run()
>>> state['sum']
3
Okay!
Modifying an Algorithm
++++++++++++++++++++++
Let's add two functions to the algorithm. First let's define the functions:
>>> def uh_oh(baz):
... if baz == 2:
... raise heck
...
>>> def deal_with_it(exception):
... print("I am dealing with it!")
... return {'exception': None}
...
Now let's interpolate them into our algorithm. Let's put the ``uh_oh`` function between
``bar`` and ``bloo``:
>>> blah.insert_before('bloo', uh_oh)
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function uh_oh ...>, <function bloo ...>]
Then let's add our exception handler at the end:
>>> blah.insert_after('bloo', deal_with_it)
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function uh_oh ...>, <function bloo ...>, <function deal_with_it ...>]
Just for kicks, let's remove the ``foo`` function while we're at it:
>>> blah.remove('foo')
>>> blah.functions #doctest: +ELLIPSIS
[<function bar ...>, <function uh_oh ...>, <function bloo ...>, <function deal_with_it ...>]
If you're making extensive changes to an algorithm, you should feel free to
directly manipulate the list of functions, rather than using the more
cumbersome :py:meth:`~algorithm.Algorithm.insert_before`,
:py:meth:`~algorithm.Algorithm.insert_after`, and
:py:meth:`~algorithm.Algorithm.remove` methods. We could have achieved the same
result like so:
>>> blah.functions = [ blah['bar']
... , uh_oh
... , blah['bloo']
... , deal_with_it
... ]
>>> blah.functions #doctest: +ELLIPSIS
[<function bar ...>, <function uh_oh ...>, <function bloo ...>, <function deal_with_it ...>]
Either way, what happens when we run it? Since we no longer have the ``foo``
function providing a value for ``baz``, we'll need to supply that using a
keyword argument to :py:func:`~Algorithm.run`:
>>> state = blah.run(baz=2)
I am dealing with it!
Exception Handling
++++++++++++++++++
Whenever a function raises an exception, like ``uh_oh`` did in the example
above, :py:func:`~Algorithm.run` captures the exception and populates an
``exception`` key in the current algorithm run state dictionary. While
``exception`` is not ``None``, any normal function is skipped, and only
functions that ask for ``exception`` get called. It's like a fast-forward. So
in our example ``deal_with_it`` got called, but ``bloo`` didn't, which is why
there is no ``sum``:
>>> 'sum' in state
False
If we run without tripping the exception in ``uh_oh`` then we have ``sum`` at
the end:
>>> blah.run(baz=5)['sum']
7
API Reference
-------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import opcode
import sys
import types
from dependency_injection import resolve_dependencies
__version__ = '1.0.0-dev'
# True when running under any Python 2.x interpreter.
PYTHON_2 = sys.version_info < (3, 0, 0)

if PYTHON_2:
    def exec_(some_python, namespace):
        """Execute ``some_python`` in ``namespace`` (Python 2 variant)."""
        # Have to double-exec because the Python 2 form is SyntaxError in 3.
        exec("exec some_python in namespace")
else:
    def exec_(some_python, namespace):
        """Execute ``some_python`` in ``namespace`` (Python 3 variant)."""
        exec(some_python, namespace)
class FunctionNotFound(KeyError):
    """Raised when a requested function is not in an algorithm's function list
    (subclasses :py:exc:`KeyError`).
    """

    def __str__(self):
        missing_name = self.args[0]
        return "The function '{0}' isn't in this algorithm.".format(missing_name)
class Algorithm(object):
    """Model an algorithm as a list of functions.

    :param functions: a sequence of functions in the order they are to be run
    :param bool raise_immediately: Whether to re-raise exceptions immediately.
        :py:class:`False` by default, this can only be set as a keyword argument

    Each function in your algorithm must return a mapping or :py:class:`None`.
    If it returns a mapping, the mapping will be used to update a state
    dictionary for the current run of the algorithm. Functions in the algorithm
    can use any name from the current state dictionary as a parameter, and the
    value will then be supplied dynamically via :py:mod:`dependency_injection`.
    See the :py:func:`run` method for details on exception handling.

    """

    functions = None        #: A list of functions comprising the algorithm.
    default_raise_immediately = False

    def __init__(self, *functions, **kw):
        self.default_raise_immediately = kw.pop('raise_immediately', False)
        # Validate *every* function up front, not just the first one (the
        # previous check only looked at functions[0]). We use the callable()
        # builtin rather than collections.Callable, which moved to
        # collections.abc and was removed from collections in Python 3.10.
        for function in functions:
            if not callable(function):
                raise TypeError("Not a function: {0}".format(repr(function)))
        self.functions = list(functions)
        self.debug = _DebugMethod(self)

    def run(self, _raise_immediately=None, _return_after=None, **state):
        """Run through the functions in the :py:attr:`functions` list.

        :param bool _raise_immediately: if not ``None``, will override any
            default for ``raise_immediately`` that was set in the constructor

        :param str _return_after: if not ``None``, return after calling the
            function with this name

        :param dict state: remaining keyword arguments are used for the initial
            state dictionary for this run of the algorithm

        :raises: :py:exc:`FunctionNotFound`, if there is no function named
            ``_return_after``

        :returns: a dictionary representing the final algorithm state

        The state dictionary is initialized with three items (their default
        values can be overriden using keyword arguments to :py:func:`run`):

         - ``algorithm`` - a reference to the parent :py:class:`Algorithm` instance
         - ``state`` - a circular reference to the state dictionary
         - ``exception`` - ``None``

        For each function in the :py:attr:`functions` list, we look at the
        function signature and compare it to the current value of ``exception``
        in the state dictionary. If ``exception`` is ``None`` then we skip any
        function that asks for ``exception``, and if ``exception`` is *not*
        ``None`` then we only call functions that *do* ask for it. The upshot
        is that any function that raises an exception will cause us to
        fast-forward to the next exception-handling function in the list.

        Here are some further notes on exception handling:

         - If a function provides a default value for ``exception``, then that
           function will be called whether or not there is an exception being
           handled.

         - You should return ``{'exception': None}`` to reset exception
           handling. Under Python 2 we will call ``sys.exc_clear`` for you
           (under Python 3 exceptions are cleared automatically at the end of
           except blocks).

         - If ``exception`` is not ``None`` after all functions have been run,
           then we re-raise it.

         - If ``raise_immediately`` evaluates to ``True`` (looking first at any
           per-call ``_raise_immediately`` and then at the instance default),
           then we re-raise any exception immediately instead of
           fast-forwarding to the next exception handler.

        """
        if _raise_immediately is None:
            _raise_immediately = self.default_raise_immediately

        if _return_after is not None:
            if _return_after not in self.get_names():
                raise FunctionNotFound(_return_after)

        if 'algorithm' not in state:    state['algorithm'] = self
        if 'state' not in state:        state['state'] = state
        if 'exception' not in state:    state['exception'] = None

        for function in self.functions:
            function_name = function.__name__
            try:
                deps = resolve_dependencies(function, state)
                have_exception = state['exception'] is not None
                if 'exception' in deps.signature.required and not have_exception:
                    pass    # Function wants exception but we don't have it.
                elif 'exception' not in deps.signature.parameters and have_exception:
                    pass    # Function doesn't want exception but we have it.
                else:
                    new_state = function(**deps.as_kwargs)
                    if new_state is not None:
                        if PYTHON_2:
                            # Reset Python 2's exception state when the
                            # function explicitly clears the exception.
                            if 'exception' in new_state:
                                if new_state['exception'] is None:
                                    sys.exc_clear()
                        state.update(new_state)
            except:
                # Bare except is deliberate: we capture *anything* raised so
                # the algorithm's own exception-handling functions get a shot
                # at it; unhandled exceptions are re-raised below.
                state['exception'] = sys.exc_info()[1]
                if _raise_immediately:
                    raise
            if _return_after is not None and function_name == _return_after:
                break

        if state['exception'] is not None:
            if PYTHON_2:
                # Under Python 2, raising state['exception'] means the
                # traceback stops at this reraise. We want the traceback to go
                # back to where the exception was first raised, and a naked
                # raise will reraise the current exception.
                raise
            else:
                # Under Python 3, exceptions are cleared at the end of the
                # except block, meaning we have no current exception to reraise
                # here. Thankfully, the traceback off this reraise will show
                # back to the original exception.
                raise state['exception']

        return state

    def __getitem__(self, name):
        """Return the function in the :py:attr:`functions` list named ``name``, or raise
        :py:exc:`FunctionNotFound`.

        >>> def foo(): pass
        >>> algo = Algorithm(foo)
        >>> algo['foo'] is foo
        True
        >>> algo['bar']
        Traceback (most recent call last):
          ...
        FunctionNotFound: The function 'bar' isn't in this algorithm.

        """
        for candidate in self.functions:
            if candidate.__name__ == name:
                return candidate
        raise FunctionNotFound(name)

    def get_names(self):
        """Returns a list of the names of the functions in the :py:attr:`functions` list.
        """
        return [f.__name__ for f in self.functions]

    def insert_before(self, name, *newfuncs):
        """Insert ``newfuncs`` in the :py:attr:`functions` list before the function named
        ``name``, or raise :py:exc:`FunctionNotFound`.

        >>> def foo(): pass
        >>> algo = Algorithm(foo)
        >>> def bar(): pass
        >>> algo.insert_before('foo', bar)
        >>> algo.get_names()
        ['bar', 'foo']
        >>> def baz(): pass
        >>> algo.insert_before('foo', baz)
        >>> algo.get_names()
        ['bar', 'baz', 'foo']

        """
        i = self.functions.index(self[name])
        self.functions[i:i] = newfuncs

    def insert_after(self, name, *newfuncs):
        """Insert ``newfuncs`` in the :py:attr:`functions` list after the function named
        ``name``, or raise :py:exc:`FunctionNotFound`.

        >>> def foo(): pass
        >>> algo = Algorithm(foo)
        >>> def bar(): pass
        >>> algo.insert_after('foo', bar)
        >>> algo.get_names()
        ['foo', 'bar']
        >>> def baz(): pass
        >>> algo.insert_after('bar', baz)
        >>> algo.get_names()
        ['foo', 'bar', 'baz']

        """
        i = self.functions.index(self[name]) + 1
        self.functions[i:i] = newfuncs

    def remove(self, *names):
        """Remove the functions named ``name`` from the :py:attr:`functions` list, or raise
        :py:exc:`FunctionNotFound`.
        """
        for name in names:
            func = self[name]
            self.functions.remove(func)

    @classmethod
    def from_dotted_name(cls, dotted_name, **kw):
        """Construct a new instance from an algorithm definition module.

        :param dotted_name: the dotted name of a Python module containing an
            algorithm definition

        :param kw: keyword arguments are passed through to the default constructor

        This is a convenience constructor that lets you take an algorithm
        definition from a regular Python file. For example, create a file named
        ``blah_algorithm.py`` on your ``PYTHONPATH``::

            def foo():
                return {'baz': 1}

            def bar():
                return {'buz': 2}

            def bloo(baz, buz):
                return {'sum': baz + buz}

        Then pass the dotted name of the file to this constructor:

        >>> blah = Algorithm.from_dotted_name('blah_algorithm')

        All functions defined in the file whose name doesn't begin with ``_``
        are loaded into a list in the order they're defined in the file, and
        this list is passed to the default class constructor.

        >>> blah.functions #doctest: +ELLIPSIS
        [<function foo ...>, <function bar ...>, <function bloo ...>]

        """
        module = cls._load_module_from_dotted_name(dotted_name)
        functions = cls._load_functions_from_module(module)
        return cls(*functions, **kw)

    def debug(self, function):
        """Given a function, return a copy of the function with a breakpoint
        immediately inside it.

        :param function function: a function object

        This method wraps the module-level function :py:func:`algorithm.debug`,
        adding three conveniences.

        First, calling this method not only returns a copy of the function with
        a breakpoint installed, it actually replaces the old function in the
        algorithm with the copy. So you can do:

        >>> def foo():
        ...     pass
        ...
        >>> algo = Algorithm(foo)
        >>> algo.debug(foo) #doctest: +ELLIPSIS
        <function foo at ...>
        >>> algo.run() #doctest: +SKIP
        (Pdb)

        Second, it provides a method on itself to install via function name
        instead of function object:

        >>> algo = Algorithm(foo)
        >>> algo.debug.by_name('foo') #doctest: +ELLIPSIS
        <function foo at ...>
        >>> algo.run() #doctest: +SKIP
        (Pdb)

        Third, it aliases the :py:meth:`~DebugMethod.by_name` method as
        :py:meth:`~_DebugMethod.__getitem__` so you can use mapping access as well:

        >>> algo = Algorithm(foo)
        >>> algo.debug['foo'] #doctest: +ELLIPSIS
        <function foo at ...>
        >>> algo.run() #doctest: +SKIP
        (Pdb)

        Why would you want to do that? Well, let's say you've written a library
        that includes an algorithm:

        >>> def foo(): pass
        ...
        >>> def bar(): pass
        ...
        >>> def baz(): pass
        ...
        >>> blah = Algorithm(foo, bar, baz)

        And now some user of your library ends up rebuilding the functions list
        using some of the original functions and some of their own:

        >>> def mine(): pass
        ...
        >>> def precious(): pass
        ...
        >>> blah.functions = [ blah['foo']
        ...                  , mine
        ...                  , blah['bar']
        ...                  , precious
        ...                  , blah['baz']
        ...                   ]

        Now the user of your library wants to debug ``blah['bar']``, but since
        they're using your code as a library it's inconvenient for them to drop
        a breakpoint in your source code. With this feature, they can just
        insert ``.debug`` in their own source code like so:

        >>> blah.functions = [ blah['foo']
        ...                  , mine
        ...                  , blah.debug['bar']
        ...                  , precious
        ...                  , blah['baz']
        ...                   ]

        Now when they run the algorithm they'll hit a pdb breakpoint just
        inside your ``bar`` function:

        >>> blah.run() #doctest: +SKIP
        (Pdb)

        """
        raise NotImplementedError  # Should be overriden by _DebugMethod in constructor.


    # Helpers for loading from a file.
    # ================================

    @staticmethod
    def _load_module_from_dotted_name(dotted_name):
        """Import ``dotted_name`` and return the leaf module object."""
        class RootModule(object): pass
        module = RootModule()  # lets us use getattr to traverse down
        exec_('import {0}'.format(dotted_name), module.__dict__)
        for name in dotted_name.split('.'):
            module = getattr(module, name)
        return module

    @staticmethod
    def _load_functions_from_module(module):
        """Given a module object, return a list of functions from the module, sorted by lineno.
        """
        functions_with_lineno = []
        for name in dir(module):
            if name.startswith('_'):
                continue
            obj = getattr(module, name)
            if type(obj) != types.FunctionType:
                continue
            func = obj
            lineno = func.__code__.co_firstlineno
            functions_with_lineno.append((lineno, func))
        functions_with_lineno.sort()
        return [func for lineno, func in functions_with_lineno]
# Debugging Helpers
# =================
class _DebugMethod(object):
    # Bound helper installed as Algorithm.debug; see that method's docstring.

    def __init__(self, algorithm):
        self.algorithm = algorithm

    def __call__(self, function):
        # Build a breakpointed copy and swap it in for every occurrence of
        # the original function in the algorithm's functions list.
        wrapped = debug(function)
        functions = self.algorithm.functions
        hits = [index for index, candidate in enumerate(functions)
                                           if candidate is function]
        for index in hits:
            functions[index] = wrapped
        return wrapped

    def by_name(self, name):
        """Install a breakpointed copy of the function named ``name``."""
        return self(self.algorithm[name])

    __getitem__ = by_name
def debug(function):
    """Given a function, return a copy of the function with a breakpoint
    immediately inside it.

    :param function function: a function object

    Okay! This is fun. :-)

    This is a decorator, because it takes a function and returns a function.
    But it would be useless in situations where you could actually decorate a
    function using the normal decorator syntax, because then you have the
    source code in front of you and you could just insert the breakpoint
    yourself. It's also pretty useless when you have a function object that
    you're about to call, because you can simply add a ``set_trace`` before the
    function call and then step into the function. No: this helper is only
    useful when you've got a function object that you want to debug, and you
    have neither the definition nor the call conveniently at hand. See the
    method :py:meth:`Algorithm.debug` for an explanation of how this situation
    arises with the :py:mod:`algorithm` module.

    For our purposes here, it's enough to know that you can wrap any function:

    >>> def foo(bar, baz):
    ...     return bar + baz
    ...
    >>> func = debug(foo)

    And then calling the function will drop you into pdb:

    >>> func(1, 2) #doctest: +SKIP
    (Pdb)

    The fun part is how this is implemented: we dynamically modify the
    function's bytecode to insert the statements ``import pdb;
    pdb.set_trace()``. Neat, huh? :-)

    """
    # NOTE(review): this assumes the pre-3.6 bytecode layout, where an opcode
    # with an argument occupies three bytes (opcode + 16-bit little-endian
    # arg). Python 3.6+ uses two-byte "wordcode", and 3.8 added
    # co_posonlyargcount to the code constructor -- confirm the target
    # interpreter before relying on this.

    # Build bytecode for a set_trace call.
    # ====================================
    # Equivalent to the statements ``import pdb; pdb.set_trace()``.

    NOARG = object()  # sentinel: this opcode takes no argument
    codes = ( ('LOAD_CONST', 0)
            , ('LOAD_CONST', None)
            , ('IMPORT_NAME', 'pdb')
            , ('STORE_GLOBAL', 'pdb')
            , ('LOAD_GLOBAL', 'pdb')
            , ('LOAD_ATTR', 'set_trace')
            , ('CALL_FUNCTION', 0)
            , ('POP_TOP', NOARG)
             )

    new_names = function.__code__.co_names
    new_consts = function.__code__.co_consts
    new_code = b''
    addr_pad = 0  # total bytes we prepend; used to fix absolute jumps below

    if PYTHON_2:
        _chr = chr
    else:
        # In Python 3 chr returns a str (== 2's unicode), not a bytes (== 2's
        # str). However, the func_new constructor wants a bytes for both code
        # and lnotab. We use latin-1 to encode these to bytes, per the docs:
        #
        #   The simplest method is to map the codepoints 0-255 to the bytes
        #   0x0-0xff. This means that a string object that contains codepoints
        #   above U+00FF can't be encoded with this method (which is called
        #   'latin-1' or 'iso-8859-1').
        #
        #   http://docs.python.org/3/library/codecs.html#encodings-and-unicode
        _chr = lambda x: chr(x).encode('latin-1')

    for name, arg in codes:
        # This is the inverse of the subset of dis.disassemble needed to handle
        # our use case.
        addr_pad += 1
        op = opcode.opmap[name]
        new_code += _chr(op)
        if op >= opcode.HAVE_ARGUMENT:
            addr_pad += 2
            if op in opcode.hasconst:
                # Reuse an existing slot in co_consts if possible.
                if arg not in new_consts:
                    new_consts += (arg,)
                val = new_consts.index(arg)
            elif op in opcode.hasname:
                if PYTHON_2:
                    # In Python 3, func_new wants str (== unicode) for names.
                    arg = arg.encode('ASCII')
                if arg not in new_names:
                    new_names += (arg,)
                val = new_names.index(arg)
            elif name == 'CALL_FUNCTION':
                val = arg  # number of args
            new_code += _chr(val) + _chr(0)

    # Finish inserting our new bytecode in front of the old.
    # ======================================================
    # Loop over old_code and append it to new_code, fixing up absolute jump
    # references along the way. Then fix up the line number table.

    old_code = function.__code__.co_code
    i = 0
    n = len(old_code)
    while i < n:
        c = old_code[i]
        if type(c) is int:
            # In Python 3, index access on a bytestring returns an int.
            c = _chr(c)
        op = ord(c)
        i += 1
        new_code += c
        if op >= opcode.HAVE_ARGUMENT:
            if PYTHON_2:
                oparg = ord(old_code[i]) + ord(old_code[i+1])*256
            else:
                oparg = old_code[i] + old_code[i+1]*256
            if op in opcode.hasjabs:
                # Absolute jump targets shift by the bytes we prepended.
                oparg += addr_pad
            i += 2
            # NOTE(review): the high byte of oparg is discarded here
            # (``_chr(oparg) + _chr(0)``); an argument >= 256 would break or
            # be mis-encoded -- confirm against functions with large code
            # objects.
            new_code += _chr(oparg) + _chr(0)

    # Shift the first line-number-table increment by the inserted byte count.
    old = function.__code__.co_lnotab
    new_lnotab = ( old[:2]
                 + _chr( (ord(old[2]) if len(old) > 2 else 0)
                       + addr_pad
                        )
                 + old[3:]
                  )

    # Now construct new code and function objects.
    # ============================================
    # See Objects/codeobject.c in Python source.

    common_args = ( function.__code__.co_nlocals
                  , function.__code__.co_stacksize
                  , function.__code__.co_flags
                  , new_code
                  , new_consts
                  , new_names
                  , function.__code__.co_varnames
                  , function.__code__.co_filename
                  , function.__code__.co_name
                  , function.__code__.co_firstlineno
                  , new_lnotab
                  , function.__code__.co_freevars
                  , function.__code__.co_cellvars
                   )

    if PYTHON_2:
        new_code = type(function.__code__)(function.__code__.co_argcount, *common_args)
        new_function = type(function)( new_code
                                     , function.func_globals
                                     , function.func_name
                                     , function.func_defaults
                                     , function.func_closure
                                      )
    else:
        # Python 3 code objects grew co_kwonlyargcount after co_argcount.
        new_code = type(function.__code__)( function.__code__.co_argcount
                                          , function.__code__.co_kwonlyargcount
                                          , *common_args
                                           )
        new_function = type(function)( new_code
                                     , function.__globals__
                                     , function.__name__
                                     , function.__defaults__
                                     , function.__closure__
                                      )

    return new_function
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
Add special markers Algorithm.START and END
So that .insert_{before, after}(Algorithm.START, somefunc) works
"""Model an algorithm as a list of functions.
Installation
------------
:py:mod:`algorithm` is available on `GitHub`_ and on `PyPI`_::
$ pip install algorithm
We `test <https://travis-ci.org/gittip/algorithm.py>`_ against
Python 2.6, 2.7, 3.2, and 3.3.
:py:mod:`algorithm` is MIT-licensed.
.. _GitHub: https://github.com/gittip/algorithm.py
.. _PyPI: https://pypi.python.org/pypi/algorithm
Tutorial
--------
This module provides an abstraction for implementing arbitrary algorithms as a
list of functions that operate on a shared state dictionary. Algorithms defined
this way are easy to arbitrarily modify at run time, and they provide cascading
exception handling.
To get started, define some functions:
>>> def foo():
... return {'baz': 1}
...
>>> def bar():
... return {'buz': 2}
...
>>> def bloo(baz, buz):
... return {'sum': baz + buz}
...
Each function returns a :py:class:`dict`, which is used to update the state of
the current run of the algorithm. Names from the state dictionary are made
available to downstream functions via :py:mod:`dependency_injection`. Now
make an :py:class:`Algorithm` object:
>>> from algorithm import Algorithm
>>> blah = Algorithm(foo, bar, bloo)
The functions you passed to the constructor are loaded into a list:
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function bloo ...>]
Now you can use :py:func:`~Algorithm.run` to run the algorithm. You'll get back
a dictionary representing the algorithm's final state:
>>> state = blah.run()
>>> state['sum']
3
Okay!
Modifying an Algorithm
++++++++++++++++++++++
Let's add two functions to the algorithm. First let's define the functions:
>>> def uh_oh(baz):
... if baz == 2:
... raise heck
...
>>> def deal_with_it(exception):
... print("I am dealing with it!")
... return {'exception': None}
...
Now let's interpolate them into our algorithm. Let's put the ``uh_oh`` function between
``bar`` and ``bloo``:
>>> blah.insert_before('bloo', uh_oh)
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function uh_oh ...>, <function bloo ...>]
Then let's add our exception handler at the end:
>>> blah.insert_after('bloo', deal_with_it)
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function uh_oh ...>, <function bloo ...>, <function deal_with_it ...>]
Just for kicks, let's remove the ``foo`` function while we're at it:
>>> blah.remove('foo')
>>> blah.functions #doctest: +ELLIPSIS
[<function bar ...>, <function uh_oh ...>, <function bloo ...>, <function deal_with_it ...>]
If you're making extensive changes to an algorithm, you should feel free to
directly manipulate the list of functions, rather than using the more
cumbersome :py:meth:`~algorithm.Algorithm.insert_before`,
:py:meth:`~algorithm.Algorithm.insert_after`, and
:py:meth:`~algorithm.Algorithm.remove` methods. We could have achieved the same
result like so:
>>> blah.functions = [ blah['bar']
... , uh_oh
... , blah['bloo']
... , deal_with_it
... ]
>>> blah.functions #doctest: +ELLIPSIS
[<function bar ...>, <function uh_oh ...>, <function bloo ...>, <function deal_with_it ...>]
Either way, what happens when we run it? Since we no longer have the ``foo``
function providing a value for ``baz``, we'll need to supply that using a
keyword argument to :py:func:`~Algorithm.run`:
>>> state = blah.run(baz=2)
I am dealing with it!
Exception Handling
++++++++++++++++++
Whenever a function raises an exception, like ``uh_oh`` did in the example
above, :py:func:`~Algorithm.run` captures the exception and populates an
``exception`` key in the current algorithm run state dictionary. While
``exception`` is not ``None``, any normal function is skipped, and only
functions that ask for ``exception`` get called. It's like a fast-forward. So
in our example ``deal_with_it`` got called, but ``bloo`` didn't, which is why
there is no ``sum``:
>>> 'sum' in state
False
If we run without tripping the exception in ``uh_oh`` then we have ``sum`` at
the end:
>>> blah.run(baz=5)['sum']
7
API Reference
-------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import opcode
import sys
import types
from dependency_injection import resolve_dependencies
__version__ = '1.0.0-dev'
# True when running under any Python 2.x interpreter.
PYTHON_2 = sys.version_info < (3, 0, 0)

if PYTHON_2:
    def exec_(some_python, namespace):
        """Execute ``some_python`` in ``namespace`` (Python 2 variant)."""
        # Have to double-exec because the Python 2 form is SyntaxError in 3.
        exec("exec some_python in namespace")
else:
    def exec_(some_python, namespace):
        """Execute ``some_python`` in ``namespace`` (Python 3 variant)."""
        exec(some_python, namespace)
class FunctionNotFound(KeyError):
    """Raised when a requested function is not in an algorithm's function list
    (subclasses :py:exc:`KeyError`).
    """

    def __str__(self):
        missing_name = self.args[0]
        return "The function '{0}' isn't in this algorithm.".format(missing_name)
class Algorithm(object):
"""Model an algorithm as a list of functions.
:param functions: a sequence of functions in the order they are to be run
:param bool raise_immediately: Whether to re-raise exceptions immediately.
:py:class:`False` by default, this can only be set as a keyword argument
Each function in your algorithm must return a mapping or :py:class:`None`.
If it returns a mapping, the mapping will be used to update a state
dictionary for the current run of the algorithm. Functions in the algorithm
can use any name from the current state dictionary as a parameter, and the
value will then be supplied dynamically via :py:mod:`dependency_injection`.
See the :py:func:`run` method for details on exception handling.
"""
functions = None #: A list of functions comprising the algorithm.
default_raise_immediately = False
START = -1
END = -2
def __init__(self, *functions, **kw):
self.default_raise_immediately = kw.pop('raise_immediately', False)
if functions:
if not isinstance(functions[0], collections.Callable):
raise TypeError("Not a function: {0}".format(repr(functions[0])))
self.functions = list(functions)
self.debug = _DebugMethod(self)
def run(self, _raise_immediately=None, _return_after=None, **state):
"""Run through the functions in the :py:attr:`functions` list.
:param bool _raise_immediately: if not ``None``, will override any
default for ``raise_immediately`` that was set in the constructor
:param str _return_after: if not ``None``, return after calling the function
with this name
:param dict state: remaining keyword arguments are used for the initial
state dictionary for this run of the algorithm
:raises: :py:exc:`FunctionNotFound`, if there is no function named
``_return_after``
:returns: a dictionary representing the final algorithm state
The state dictionary is initialized with three items (their default
values can be overriden using keyword arguments to :py:func:`run`):
- ``algorithm`` - a reference to the parent :py:class:`Algorithm` instance
- ``state`` - a circular reference to the state dictionary
- ``exception`` - ``None``
For each function in the :py:attr:`functions` list, we look at the
function signature and compare it to the current value of ``exception``
in the state dictionary. If ``exception`` is ``None`` then we skip any
function that asks for ``exception``, and if ``exception`` is *not*
``None`` then we only call functions that *do* ask for it. The upshot
is that any function that raises an exception will cause us to
fast-forward to the next exception-handling function in the list.
Here are some further notes on exception handling:
- If a function provides a default value for ``exception``, then that
function will be called whether or not there is an exception being
handled.
- You should return ``{'exception': None}`` to reset exception
handling. Under Python 2 we will call ``sys.exc_clear`` for you
(under Python 3 exceptions are cleared automatically at the end of
except blocks).
- If ``exception`` is not ``None`` after all functions have been run,
then we re-raise it.
- If ``raise_immediately`` evaluates to ``True`` (looking first at any
per-call ``_raise_immediately`` and then at the instance default),
then we re-raise any exception immediately instead of
fast-forwarding to the next exception handler.
"""
if _raise_immediately is None:
_raise_immediately = self.default_raise_immediately
if _return_after is not None:
if _return_after not in self.get_names():
raise FunctionNotFound(_return_after)
if 'algorithm' not in state: state['algorithm'] = self
if 'state' not in state: state['state'] = state
if 'exception' not in state: state['exception'] = None
for function in self.functions:
function_name = function.__name__
try:
deps = resolve_dependencies(function, state)
have_exception = state['exception'] is not None
if 'exception' in deps.signature.required and not have_exception:
pass # Function wants exception but we don't have it.
elif 'exception' not in deps.signature.parameters and have_exception:
pass # Function doesn't want exception but we have it.
else:
new_state = function(**deps.as_kwargs)
if new_state is not None:
if PYTHON_2:
if 'exception' in new_state:
if new_state['exception'] is None:
sys.exc_clear()
state.update(new_state)
except:
ExceptionClass, exception = sys.exc_info()[:2]
state['exception'] = exception
if _raise_immediately:
raise
if _return_after is not None and function_name == _return_after:
break
if state['exception'] is not None:
if PYTHON_2:
# Under Python 2, raising state['exception'] means the
# traceback stops at this reraise. We want the traceback to go
# back to where the exception was first raised, and a naked
# raise will reraise the current exception.
raise
else:
# Under Python 3, exceptions are cleared at the end of the
# except block, meaning we have no current exception to reraise
# here. Thankfully, the traceback off this reraise will show
# back to the original exception.
raise state['exception']
return state
def __getitem__(self, name):
"""Return the function in the :py:attr:`functions` list named ``name``, or raise
:py:exc:`FunctionNotFound`.
>>> def foo(): pass
>>> algo = Algorithm(foo)
>>> algo['foo'] is foo
True
>>> algo['bar']
Traceback (most recent call last):
...
FunctionNotFound: The function 'bar' isn't in this algorithm.
"""
func = None
for candidate in self.functions:
if candidate.__name__ == name:
func = candidate
break
if func is None:
raise FunctionNotFound(name)
return func
def get_names(self):
"""Returns a list of the names of the functions in the :py:attr:`functions` list.
"""
return [f.__name__ for f in self.functions]
def insert_before(self, name, *newfuncs):
"""Insert ``newfuncs`` in the :py:attr:`functions` list before the function named
``name``, or raise :py:exc:`FunctionNotFound`.
>>> def foo(): pass
>>> algo = Algorithm(foo)
>>> def bar(): pass
>>> algo.insert_before('foo', bar)
>>> algo.get_names()
['bar', 'foo']
>>> def baz(): pass
>>> algo.insert_before('foo', baz)
>>> algo.get_names()
['bar', 'baz', 'foo']
>>> def bal(): pass
>>> algo.insert_before(Algorithm.START, bal)
>>> algo.get_names()
['bal', 'bar', 'baz', 'foo']
>>> def bah(): pass
>>> algo.insert_before(Algorithm.END, bah)
>>> algo.get_names()
['bal', 'bar', 'baz', 'foo', 'bah']
"""
if name == self.START:
i = 0
elif name == self.END:
i = len(self.functions)
else:
i = self.functions.index(self[name])
self.functions[i:i] = newfuncs
def insert_after(self, name, *newfuncs):
"""Insert ``newfuncs`` in the :py:attr:`functions` list after the function named
``name``, or raise :py:exc:`FunctionNotFound`.
>>> def foo(): pass
>>> algo = Algorithm(foo)
>>> def bar(): pass
>>> algo.insert_after('foo', bar)
>>> algo.get_names()
['foo', 'bar']
>>> def baz(): pass
>>> algo.insert_after('bar', baz)
>>> algo.get_names()
['foo', 'bar', 'baz']
>>> def bal(): pass
>>> algo.insert_after(Algorithm.START, bal)
>>> algo.get_names()
['bal', 'foo', 'bar', 'baz']
>>> def bah(): pass
>>> algo.insert_before(Algorithm.END, bah)
>>> algo.get_names()
['bal', 'foo', 'bar', 'baz', 'bah']
"""
if name == self.START:
i = 0
elif name == self.END:
i = len(self.functions)
else:
i = self.functions.index(self[name]) + 1
self.functions[i:i] = newfuncs
def remove(self, *names):
"""Remove the functions named ``name`` from the :py:attr:`functions` list, or raise
:py:exc:`FunctionNotFound`.
"""
for name in names:
func = self[name]
self.functions.remove(func)
@classmethod
def from_dotted_name(cls, dotted_name, **kw):
"""Construct a new instance from an algorithm definition module.
:param dotted_name: the dotted name of a Python module containing an
algorithm definition
:param kw: keyword arguments are passed through to the default constructor
This is a convenience constructor that lets you take an algorithm
definition from a regular Python file. For example, create a file named
``blah_algorithm.py`` on your ``PYTHONPATH``::
def foo():
return {'baz': 1}
def bar():
return {'buz': 2}
def bloo(baz, buz):
return {'sum': baz + buz}
Then pass the dotted name of the file to this constructor:
>>> blah = Algorithm.from_dotted_name('blah_algorithm')
All functions defined in the file whose name doesn't begin with ``_``
are loaded into a list in the order they're defined in the file, and
this list is passed to the default class constructor.
>>> blah.functions #doctest: +ELLIPSIS
[<function foo ...>, <function bar ...>, <function bloo ...>]
"""
module = cls._load_module_from_dotted_name(dotted_name)
functions = cls._load_functions_from_module(module)
return cls(*functions, **kw)
def debug(self, function):
    """Replace `function` in the algorithm with a copy that breaks into pdb.

    :param function function: a function object
    :returns: the copy of ``function`` with a breakpoint injected

    This wraps the module-level :py:func:`algorithm.debug` helper and adds
    three conveniences:

    1. Besides returning the instrumented copy, it *replaces* the original
       function in ``self.functions``, so a subsequent :py:meth:`run`
       drops into pdb just inside the function::

           algo.debug(foo)
           algo.run()  # doctest: +SKIP
           (Pdb)

    2. ``algo.debug.by_name('foo')`` installs the breakpoint by function
       name instead of by function object.

    3. ``algo.debug['foo']`` is a mapping-style alias for ``by_name``.
       This is handy when a user of a library rebuilds the ``functions``
       list from a mix of library-provided and local functions and wants
       to break inside a library function without editing the library's
       source — they can simply write ``blah.debug['bar']`` in place of
       ``blah['bar']`` while assembling the list.
    """
    raise NotImplementedError  # Replaced by a _DebugMethod instance in the constructor.
# Helpers for loading from a file.
# ================================
@staticmethod
def _load_module_from_dotted_name(dotted_name):
    """Import `dotted_name` (e.g. ``'pkg.sub.mod'``) and return the
    innermost module object.
    """
    class RootModule(object): pass
    module = RootModule()  # lets us use getattr to traverse down
    # Import into the throwaway namespace, then walk the dotted path.
    exec_('import {0}'.format(dotted_name), module.__dict__)
    for name in dotted_name.split('.'):
        module = getattr(module, name)
    return module
@staticmethod
def _load_functions_from_module(module):
"""Given a module object, return a list of functions from the module, sorted by lineno.
"""
functions_with_lineno = []
for name in dir(module):
if name.startswith('_'):
continue
obj = getattr(module, name)
if type(obj) != types.FunctionType:
continue
func = obj
lineno = func.__code__.co_firstlineno
functions_with_lineno.append((lineno, func))
functions_with_lineno.sort()
return [func for lineno, func in functions_with_lineno]
# Debugging Helpers
# =================
class _DebugMethod(object):
# See docstring at Algorithm.debug.
def __init__(self, algorithm):
self.algorithm = algorithm
def __call__(self, function):
debugging_function = debug(function)
for i, candidate in enumerate(self.algorithm.functions):
if candidate is function:
self.algorithm.functions[i] = debugging_function
return debugging_function
def by_name(self, name):
return self(self.algorithm[name])
__getitem__ = by_name
def debug(function):
    """Given a function, return a copy of the function with a breakpoint
    immediately inside it.

    :param function function: a function object
    :returns: a new function whose bytecode begins with
        ``import pdb; pdb.set_trace()``

    This is a decorator in shape, but it is meant for the case where you
    hold a function *object* you want to debug and have neither its
    definition nor its call site conveniently at hand (see
    :py:meth:`Algorithm.debug`).  It works by prepending hand-assembled
    bytecode for ``import pdb; pdb.set_trace()`` to the function's code
    object.

    NOTE(review): the byte-level surgery below assumes the pre-3.6
    CPython bytecode format (1 opcode byte + 2 little-endian argument
    bytes, no EXTENDED_ARG handling) and the pre-3.8 code-object
    constructor signature (no co_posonlyargcount).  It needs a different
    implementation on newer CPython.
    """
    # Build bytecode for a set_trace call.
    # ====================================
    NOARG = object()  # sentinel: this opcode takes no argument
    codes = ( ('LOAD_CONST', 0)
            , ('LOAD_CONST', None)
            , ('IMPORT_NAME', 'pdb')
            , ('STORE_GLOBAL', 'pdb')
            , ('LOAD_GLOBAL', 'pdb')
            , ('LOAD_ATTR', 'set_trace')
            , ('CALL_FUNCTION', 0)
            , ('POP_TOP', NOARG)
             )
    new_names = function.__code__.co_names
    new_consts = function.__code__.co_consts
    new_code = b''
    addr_pad = 0  # total bytes we prepend; used to fix jumps and lnotab
    if PYTHON_2:
        _chr = chr
    else:
        # In Python 3 chr returns a str (== 2's unicode), not a bytes
        # (== 2's str).  The code constructor wants bytes for both code
        # and lnotab, and latin-1 maps code points 0-255 straight to
        # bytes, per:
        # http://docs.python.org/3/library/codecs.html#encodings-and-unicode
        _chr = lambda x: chr(x).encode('latin-1')
    for name, arg in codes:
        # This is the inverse of the subset of dis.disassemble needed to
        # handle our use case.
        addr_pad += 1
        op = opcode.opmap[name]
        new_code += _chr(op)
        if op >= opcode.HAVE_ARGUMENT:
            addr_pad += 2
            if op in opcode.hasconst:
                if arg not in new_consts:
                    new_consts += (arg,)
                val = new_consts.index(arg)
            elif op in opcode.hasname:
                if PYTHON_2:
                    # Python 2 wants byte strings for names.
                    arg = arg.encode('ASCII')
                if arg not in new_names:
                    new_names += (arg,)
                val = new_names.index(arg)
            elif name == 'CALL_FUNCTION':
                val = arg  # number of args
            # Emit the argument as two little-endian bytes.
            new_code += _chr(val & 0xFF) + _chr(val >> 8)

    # Finish inserting our new bytecode in front of the old.
    # ======================================================
    # Loop over old_code and append it to new_code, fixing up absolute
    # jump targets along the way (relative jumps are unaffected by an
    # insertion at the very front).  Then fix up the line number table.
    old_code = function.__code__.co_code
    i = 0
    n = len(old_code)
    while i < n:
        c = old_code[i]
        if type(c) is int:
            # In Python 3, index access on a bytestring returns an int.
            c = _chr(c)
        op = ord(c)
        i += 1
        new_code += c
        if op >= opcode.HAVE_ARGUMENT:
            if PYTHON_2:
                oparg = ord(old_code[i]) + ord(old_code[i+1])*256
            else:
                oparg = old_code[i] + old_code[i+1]*256
            if op in opcode.hasjabs:
                oparg += addr_pad
            i += 2
            # Emit both bytes of the (possibly adjusted) argument.  The
            # previous version emitted _chr(oparg) + _chr(0), which
            # silently dropped the high byte of any argument >= 256.
            new_code += _chr(oparg & 0xFF) + _chr(oparg >> 8)

    # Bump the address increment of the second lnotab entry so line
    # numbers still line up past our inserted preamble.
    old = function.__code__.co_lnotab
    if len(old) > 2:
        # bytes indexing yields a 1-char str on Python 2 but an int on
        # Python 3; normalize before adding the padding.  (The previous
        # version always called ord(), a TypeError on Python 3.)
        third = ord(old[2]) if PYTHON_2 else old[2]
    else:
        third = 0
    new_lnotab = old[:2] + _chr(third + addr_pad) + old[3:]

    # Now construct new code and function objects.
    # ============================================
    # See Objects/codeobject.c in the CPython source.
    common_args = ( function.__code__.co_nlocals
                  , function.__code__.co_stacksize
                  , function.__code__.co_flags
                  , new_code
                  , new_consts
                  , new_names
                  , function.__code__.co_varnames
                  , function.__code__.co_filename
                  , function.__code__.co_name
                  , function.__code__.co_firstlineno
                  , new_lnotab
                  , function.__code__.co_freevars
                  , function.__code__.co_cellvars
                   )
    if PYTHON_2:
        new_code = type(function.__code__)(function.__code__.co_argcount, *common_args)
        new_function = type(function)( new_code
                                     , function.func_globals
                                     , function.func_name
                                     , function.func_defaults
                                     , function.func_closure
                                      )
    else:
        new_code = type(function.__code__)( function.__code__.co_argcount
                                          , function.__code__.co_kwonlyargcount
                                          , *common_args
                                           )
        new_function = type(function)( new_code
                                     , function.__globals__
                                     , function.__name__
                                     , function.__defaults__
                                     , function.__closure__
                                      )
    return new_function
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
#!/usr/env/python
## Import General Tools
import sys
import os
from gooey import Gooey, GooeyParser
import requests
import json
import re
from datetime import datetime as dt
from datetime import timedelta as tdelta
from astropy.table import Table, Column
from astropy.time import Time
from astropy.coordinates import EarthLocation
from astroplan import Observer
##-------------------------------------------------------------------------
## ICS File Object
##-------------------------------------------------------------------------
class ICSFile(object):
    '''
    Class to represent an ICS (RFC 5545 iCalendar) file.
    '''
    def __init__(self, filename):
        self.file = filename
        # VERSION:2.0 is required by RFC 5545 section 3.7.4.
        self.lines = ['BEGIN:VCALENDAR\n',
                      'VERSION:2.0\n',
                      'PRODID:-//hacksw/handcal//NONSGML v1.0//EN\n',
                      '\n']

    def add_event(self, title, starttime, endtime, description, verbose=False):
        '''Append one VEVENT to the calendar.

        starttime/endtime may be datetime objects or pre-formatted
        YYYYmmddTHHMMSS strings; description may be a string or a list
        of lines (joined with a literal "\\n" as ICS requires).
        '''
        assert type(title) is str
        assert type(starttime) in [dt, str]
        assert type(endtime) in [dt, str]
        assert type(description) in [list, str]
        now = dt.utcnow()
        # Normalize datetimes to ICS timestamps; strings pass through
        # unchanged.  (Replaces bare try/except around strftime, which
        # also hid real errors.)
        if isinstance(starttime, dt):
            starttime = starttime.strftime('%Y%m%dT%H%M%S')
        if isinstance(endtime, dt):
            endtime = endtime.strftime('%Y%m%dT%H%M%S')
        if verbose:
            print('{} {}'.format(starttime[0:8], title))
        if type(description) is list:
            description = '\\n'.join(description)
        new_lines = ['BEGIN:VEVENT\n',
                     'UID:{}@mycalendar.com\n'.format(now.strftime('%Y%m%dT%H%M%S.%fZ')),
                     'DTSTAMP:{}\n'.format(now.strftime('%Y%m%dT%H%M%SZ')),
                     'DTSTART;TZID=Pacific/Honolulu:{}\n'.format(starttime),
                     'DTEND;TZID=Pacific/Honolulu:{}\n'.format(endtime),
                     'SUMMARY:{}\n'.format(title),
                     'DESCRIPTION: {}\n'.format(description),
                     'END:VEVENT\n',
                     '\n',
                     ]
        self.lines.extend(new_lines)

    def write(self):
        '''Terminate the calendar and write all lines to self.file.'''
        self.lines.append('END:VCALENDAR\n')
        # Mode 'w' truncates, so pre-deleting the file is unnecessary.
        with open(self.file, 'w') as FO:
            for line in self.lines:
                FO.write(line)
##-------------------------------------------------------------------------
## Get Twilight Info for Date
##-------------------------------------------------------------------------
def get_twilights(date):
    """ Determine sunrise and sunset times """
    # Keck geodetic coordinates (DMS converted to decimal degrees).
    keck = EarthLocation(
        lat=19+49/60+33.40757/60**2,
        lon=-(155+28/60+28.98665/60**2),
        height=4159.58,
    )
    obs = Observer(location=keck, name='Keck', timezone='US/Hawaii')
    # Anchor the search at 14:00 so each 'next' event is that night's.
    date = Time(dt.strptime(f'{date} 14:00:00', '%Y-%m-%d %H:%M:%S'))
    t = {
        'seto': obs.sun_set_time(date, which='next').datetime,
        'ento': obs.twilight_evening_nautical(date, which='next').datetime,
        'eato': obs.twilight_evening_astronomical(date, which='next').datetime,
        'mato': obs.twilight_morning_astronomical(date, which='next').datetime,
        'mnto': obs.twilight_morning_nautical(date, which='next').datetime,
        'riseo': obs.sun_rise_time(date, which='next').datetime,
    }
    # Human-readable UT strings alongside the datetime objects.
    for key in ('set', 'ent', 'eat', 'mat', 'mnt', 'rise'):
        t[key] = t[key + 'o'].strftime('%H:%M UT')
    return t
##-------------------------------------------------------------------------
## Get Telescope Schedule
##-------------------------------------------------------------------------
def querydb(req):
    """Send `req` to the Keck telescope-schedule API, return decoded JSON."""
    url = f"https://www.keck.hawaii.edu/software/db_api/telSchedule.php?{req}"
    response = requests.get(url)
    return json.loads(response.text)
def get_SA(date=None, tel=1):
    """Return the support astronomer alias for `date` on telescope `tel`.

    Returns None when no date is given, and '' when the query fails or
    no SA is scheduled that night.
    """
    if date is None:
        return None
    req = f"cmd=getNightStaff&date={date}&type=sa&telnr={tel}"
    try:
        sa = querydb(req)[0]['Alias']
    except Exception:
        # Best effort: network errors or an empty/unexpected response
        # just mean "no SA known".  (Was a bare except, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        sa = ''
    return sa
def get_telsched(from_date=None, ndays=None):
    """Fetch the telescope schedule starting at `from_date` for `ndays`,
    sorted by date then telescope number."""
    if from_date is None:
        from_date = dt.now().strftime('%Y-%m-%d')
    else:
        # Validate the caller-supplied date string.
        assert dt.strptime(from_date, '%Y-%m-%d')
    req = f"cmd=getSchedule&date={from_date}"
    if ndays is not None:
        req += f"&numdays={ndays}"
    sched = Table(data=querydb(req))
    sched.sort(keys=['Date', 'TelNr'])
    return sched
def add_SA_to_telsched(telsched):
    """Append an 'SA' column (one alias per schedule row) to `telsched`."""
    aliases = [get_SA(date=row['Date'], tel=row['TelNr']) for row in telsched]
    telsched.add_column(Column(aliases, name='SA'))
    return telsched
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
@Gooey
def main():
    """Build an ICS calendar of one support astronomer's nights.

    Parses GUI arguments (SA alias plus either a semester or an explicit
    begin/end date range), downloads the telescope and SA schedules, and
    writes the matching nights to ``Nights.ics``.
    """
    ##-------------------------------------------------------------------------
    ## Parse Command Line Arguments
    ##-------------------------------------------------------------------------
    parser = GooeyParser(
        description="Generates ICS file of support nights from telescope DB.")
    parser.add_argument('-s', '--sa',
        type=str, dest="sa", help='SA alias.', widget='Dropdown',
        choices=['jwalawender', 'arettura', 'calvarez', 'gdoppmann', 'jlyke',
                 'lrizzi', 'pgomez', 'randyc', 'syeh'])
    parser.add_argument('--sem', '--semester',
        type=str, dest="semester",
        help="Semester (e.g. '18B')")
    parser.add_argument('-b', '--begin',
        type=str, dest="begin",
        help="Start date in YYYY-mm-dd format.")
    parser.add_argument('-e', '--end',
        type=str, dest="end",
        help="End date in YYYY-mm-dd format.")
    args = parser.parse_args()

    ## Set start date (an unparseable --begin falls back to today).
    from_dto = dt.now()
    if args.begin is not None:
        try:
            from_dto = dt.strptime(args.begin, '%Y-%m-%d')
        except ValueError:
            pass
    ## Determine ndays from args.end.  Initializing ndays first fixes a
    ## NameError in the original when --end failed to parse and no
    ## semester was given.
    ndays = None
    if args.end is not None:
        try:
            end_dto = dt.strptime(args.end, '%Y-%m-%d')
        except ValueError:
            pass
        else:
            ndays = (end_dto - from_dto).days + 1
    ## A semester overrides begin/end: A = Feb-Jul, B = Aug-Jan.
    if args.semester is not None:
        matched = re.match(r'S?(\d\d)([AB])', args.semester)
        if matched is not None:
            year = int(f"20{matched.group(1)}")
            if matched.group(2) == 'A':
                from_dto = dt(year, 2, 1)
                end_dto = dt(year, 7, 31)
            else:
                from_dto = dt(year, 8, 1)
                end_dto = dt(year+1, 1, 31)
            ndays = (end_dto - from_dto).days + 1

    print('Retrieving telescope schedule')
    from_date = from_dto.strftime('%Y-%m-%d')
    telsched = get_telsched(from_date=from_date, ndays=ndays)
    dates = sorted(set(telsched['Date']))
    ndays = len(dates)
    print(f"Retrieved schedule for {dates[0]} to {dates[-1]} ({ndays} days)")
    print("Retrieving SA schedule")
    telsched = add_SA_to_telsched(telsched)
    print('Done')

    ##-------------------------------------------------------------------------
    ## Create Output iCal File
    ##-------------------------------------------------------------------------
    ical_file = ICSFile('Nights.ics')
    month_night_count = {}
    month_nights = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                    7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    dual_support_count = 0
    split_night_count = 0
    sasched = telsched[telsched['SA'] == args.sa.lower()]
    night_count = len(set(sasched['Date']))
    print('Building calendar')
    for date in sorted(set(sasched['Date'])):
        progs = sasched[sasched['Date'] == date]
        progsbytel = progs.group_by('TelNr')
        if len(progsbytel.groups) > 1:
            dual_support_count += 1
        print(f" Creating calendar entry for {date}")
        month = date[:7]
        month_night_count[month] = month_night_count.get(month, 0) + 1
        twilights = get_twilights(date)
        # One calendar event per telescope supported that night.
        for group in progsbytel.groups:
            supporttype = 'Support'
            if len(group) > 1:
                supporttype = 'Split Night'
                split_night_count += 1
            instruments = list(group['Instrument'])
            if len(set(instruments)) == 1:
                title = f"{instruments[0]} {supporttype}"
            else:
                title = f"{'/'.join(instruments)} {supporttype}"
            # Event runs from 10 hours before (UT) sunset to 23:00 HST.
            calstart = (twilights['seto']-tdelta(0,10*60*60)).strftime('%Y%m%dT%H%M00')
            calend = f"{date.replace('-', '')}T230000"
            description = [title,
                           f"Sunset: {twilights['set']}",
                           f"12deg: {twilights['ent']}",
                           ]
            for entry in group:
                obslist = entry['Observers'].split(',')
                loclist = entry['Location'].split(',')
                assert len(obslist) == len(loclist)
                observers = [f"{obs}({loclist[i]})" for i,obs in enumerate(obslist)]
                description.append('')
                description.append(f"Instrument: {entry['Instrument']} ({entry['Account']})")
                description.append(f"PI: {entry['Principal']}")
                description.append(f"Observers: {', '.join(observers)}")
                description.append(f"Start Time: {entry['StartTime']}")
            description.append('')
            description.append(f"12deg: {twilights['mnt']}")
            description.append(f"Sunrise: {twilights['rise']}")
            ical_file.add_event(title, calstart, calend, description)
    ical_file.write()
    print(f"Found {night_count:d} / {ndays:d} nights ({100*night_count/ndays:.1f} %) where SA matches {args.sa:}")
    print(f"Found {split_night_count:d} split nights")
    for month in sorted(month_night_count.keys()):
        nsupport = month_night_count[month]
        nnights = month_nights[int(month[-2:])]
        print(f" For {month}: {nsupport:2d} / {nnights:2d} nights ({100*nsupport/nnights:4.1f} %)")
if __name__ == '__main__':
    # Gooey supplies the GUI event loop when run as a script.
    main()
Add Maunakea horizon dip (observatory elevation) to the sunset calculation
#!/usr/env/python
## Import General Tools
import sys
import os
from gooey import Gooey, GooeyParser
import requests
import json
import re
from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np
from astropy import units as u
from astropy.table import Table, Column
from astropy.time import Time
from astropy.coordinates import EarthLocation
from astroplan import Observer
##-------------------------------------------------------------------------
## ICS File Object
##-------------------------------------------------------------------------
class ICSFile(object):
    '''
    Class to represent an ICS (RFC 5545 iCalendar) file.
    '''
    def __init__(self, filename):
        self.file = filename
        # VERSION:2.0 is required by RFC 5545 section 3.7.4.
        self.lines = ['BEGIN:VCALENDAR\n',
                      'VERSION:2.0\n',
                      'PRODID:-//hacksw/handcal//NONSGML v1.0//EN\n',
                      '\n']

    def add_event(self, title, starttime, endtime, description, verbose=False):
        '''Append one VEVENT to the calendar.

        starttime/endtime may be datetime objects or pre-formatted
        YYYYmmddTHHMMSS strings; description may be a string or a list
        of lines (joined with a literal "\\n" as ICS requires).
        '''
        assert type(title) is str
        assert type(starttime) in [dt, str]
        assert type(endtime) in [dt, str]
        assert type(description) in [list, str]
        now = dt.utcnow()
        # Normalize datetimes to ICS timestamps; strings pass through
        # unchanged.  (Replaces bare try/except around strftime, which
        # also hid real errors.)
        if isinstance(starttime, dt):
            starttime = starttime.strftime('%Y%m%dT%H%M%S')
        if isinstance(endtime, dt):
            endtime = endtime.strftime('%Y%m%dT%H%M%S')
        if verbose:
            print('{} {}'.format(starttime[0:8], title))
        if type(description) is list:
            description = '\\n'.join(description)
        new_lines = ['BEGIN:VEVENT\n',
                     'UID:{}@mycalendar.com\n'.format(now.strftime('%Y%m%dT%H%M%S.%fZ')),
                     'DTSTAMP:{}\n'.format(now.strftime('%Y%m%dT%H%M%SZ')),
                     'DTSTART;TZID=Pacific/Honolulu:{}\n'.format(starttime),
                     'DTEND;TZID=Pacific/Honolulu:{}\n'.format(endtime),
                     'SUMMARY:{}\n'.format(title),
                     'DESCRIPTION: {}\n'.format(description),
                     'END:VEVENT\n',
                     '\n',
                     ]
        self.lines.extend(new_lines)

    def write(self):
        '''Terminate the calendar and write all lines to self.file.'''
        self.lines.append('END:VCALENDAR\n')
        # Mode 'w' truncates, so pre-deleting the file is unnecessary.
        with open(self.file, 'w') as FO:
            for line in self.lines:
                FO.write(line)
##-------------------------------------------------------------------------
## Get Twilight Info for Date
##-------------------------------------------------------------------------
def get_twilights(date):
    """ Determine sunrise and sunset times

    Returns a dict with datetime objects (keys ending in 'o') and
    'HH:MM UT' strings for sunset, nautical/astronomical twilights, and
    sunrise, computed for Keck on the night starting at `date`
    (YYYY-MM-DD).  Sunset accounts for the depressed horizon seen from
    the summit of Maunakea.
    """
    # Keck geodetic coordinates (DMS converted to decimal degrees).
    location = EarthLocation(
        lat=19+49/60+33.40757/60**2,
        lon=-(155+28/60+28.98665/60**2),
        height=4159.58,
    )
    obs = Observer(location=location, name='Keck', timezone='US/Hawaii')
    # Anchor the search at 14:00 so each 'next' event is that night's.
    date = Time(dt.strptime(f'{date} 14:00:00', '%Y-%m-%d %H:%M:%S'))
    # Horizon dip from summit height h: distance to the horizon is
    # d = sqrt(h*(2R+h)); arccos(d/R) - 90 deg gives the (negative,
    # i.e. below-horizontal) apparent horizon angle.
    h = 4.2*u.km
    R = (1.0*u.earthRad).to(u.km)
    d = np.sqrt(h*(2*R+h))
    phi = (np.arccos((d/R).value)*u.radian).to(u.deg)
    MK = phi - 90*u.deg
    t = {}
    # Only sunset uses the depressed horizon; twilight and sunrise use
    # astroplan's defaults.  NOTE(review): sun_rise_time presumably
    # should also pass horizon=MK — confirm intent.
    t['seto'] = obs.sun_set_time(date, which='next', horizon=MK).datetime
    t['ento'] = obs.twilight_evening_nautical(date, which='next').datetime
    t['eato'] = obs.twilight_evening_astronomical(date, which='next').datetime
    t['mato'] = obs.twilight_morning_astronomical(date, which='next').datetime
    t['mnto'] = obs.twilight_morning_nautical(date, which='next').datetime
    t['riseo'] = obs.sun_rise_time(date, which='next').datetime
    # Human-readable UT strings for the calendar entry.
    t['set'] = t['seto'].strftime('%H:%M UT')
    t['ent'] = t['ento'].strftime('%H:%M UT')
    t['eat'] = t['eato'].strftime('%H:%M UT')
    t['mat'] = t['mato'].strftime('%H:%M UT')
    t['mnt'] = t['mnto'].strftime('%H:%M UT')
    t['rise'] = t['riseo'].strftime('%H:%M UT')
    return t
##-------------------------------------------------------------------------
## Get Telescope Schedule
##-------------------------------------------------------------------------
def querydb(req):
    """Send `req` to the Keck telescope-schedule API, return decoded JSON."""
    url = f"https://www.keck.hawaii.edu/software/db_api/telSchedule.php?{req}"
    response = requests.get(url)
    return json.loads(response.text)
def get_SA(date=None, tel=1):
    """Return the support astronomer alias for `date` on telescope `tel`.

    Returns None when no date is given, and '' when the query fails or
    no SA is scheduled that night.
    """
    if date is None:
        return None
    req = f"cmd=getNightStaff&date={date}&type=sa&telnr={tel}"
    try:
        sa = querydb(req)[0]['Alias']
    except Exception:
        # Best effort: network errors or an empty/unexpected response
        # just mean "no SA known".  (Was a bare except, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        sa = ''
    return sa
def get_telsched(from_date=None, ndays=None):
    """Fetch the telescope schedule starting at `from_date` for `ndays`,
    sorted by date then telescope number."""
    if from_date is None:
        from_date = dt.now().strftime('%Y-%m-%d')
    else:
        # Validate the caller-supplied date string.
        assert dt.strptime(from_date, '%Y-%m-%d')
    req = f"cmd=getSchedule&date={from_date}"
    if ndays is not None:
        req += f"&numdays={ndays}"
    sched = Table(data=querydb(req))
    sched.sort(keys=['Date', 'TelNr'])
    return sched
def add_SA_to_telsched(telsched):
    """Append an 'SA' column (one alias per schedule row) to `telsched`."""
    aliases = [get_SA(date=row['Date'], tel=row['TelNr']) for row in telsched]
    telsched.add_column(Column(aliases, name='SA'))
    return telsched
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
@Gooey
def main():
    """Build an ICS calendar of one support astronomer's nights.

    Parses GUI arguments (SA alias plus either a semester or an explicit
    begin/end date range), downloads the telescope and SA schedules, and
    writes the matching nights to ``Nights.ics``.
    """
    ##-------------------------------------------------------------------------
    ## Parse Command Line Arguments
    ##-------------------------------------------------------------------------
    parser = GooeyParser(
        description="Generates ICS file of support nights from telescope DB.")
    parser.add_argument('-s', '--sa',
        type=str, dest="sa", help='SA alias.', widget='Dropdown',
        choices=['jwalawender', 'arettura', 'calvarez', 'gdoppmann', 'jlyke',
                 'lrizzi', 'pgomez', 'randyc', 'syeh'])
    parser.add_argument('--sem', '--semester',
        type=str, dest="semester",
        help="Semester (e.g. '18B')")
    parser.add_argument('-b', '--begin',
        type=str, dest="begin",
        help="Start date in YYYY-mm-dd format.")
    parser.add_argument('-e', '--end',
        type=str, dest="end",
        help="End date in YYYY-mm-dd format.")
    args = parser.parse_args()

    ## Set start date (an unparseable --begin falls back to today).
    from_dto = dt.now()
    if args.begin is not None:
        try:
            from_dto = dt.strptime(args.begin, '%Y-%m-%d')
        except ValueError:
            pass
    ## Determine ndays from args.end.  Initializing ndays first fixes a
    ## NameError in the original when --end failed to parse and no
    ## semester was given.
    ndays = None
    if args.end is not None:
        try:
            end_dto = dt.strptime(args.end, '%Y-%m-%d')
        except ValueError:
            pass
        else:
            ndays = (end_dto - from_dto).days + 1
    ## A semester overrides begin/end: A = Feb-Jul, B = Aug-Jan.
    if args.semester is not None:
        matched = re.match(r'S?(\d\d)([AB])', args.semester)
        if matched is not None:
            year = int(f"20{matched.group(1)}")
            if matched.group(2) == 'A':
                from_dto = dt(year, 2, 1)
                end_dto = dt(year, 7, 31)
            else:
                from_dto = dt(year, 8, 1)
                end_dto = dt(year+1, 1, 31)
            ndays = (end_dto - from_dto).days + 1

    print('Retrieving telescope schedule')
    from_date = from_dto.strftime('%Y-%m-%d')
    telsched = get_telsched(from_date=from_date, ndays=ndays)
    dates = sorted(set(telsched['Date']))
    ndays = len(dates)
    print(f"Retrieved schedule for {dates[0]} to {dates[-1]} ({ndays} days)")
    print("Retrieving SA schedule")
    telsched = add_SA_to_telsched(telsched)
    print('Done')

    ##-------------------------------------------------------------------------
    ## Create Output iCal File
    ##-------------------------------------------------------------------------
    ical_file = ICSFile('Nights.ics')
    month_night_count = {}
    month_nights = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                    7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    dual_support_count = 0
    split_night_count = 0
    sasched = telsched[telsched['SA'] == args.sa.lower()]
    night_count = len(set(sasched['Date']))
    print('Building calendar')
    for date in sorted(set(sasched['Date'])):
        progs = sasched[sasched['Date'] == date]
        progsbytel = progs.group_by('TelNr')
        if len(progsbytel.groups) > 1:
            dual_support_count += 1
        print(f" Creating calendar entry for {date}")
        month = date[:7]
        month_night_count[month] = month_night_count.get(month, 0) + 1
        twilights = get_twilights(date)
        # One calendar event per telescope supported that night.
        for group in progsbytel.groups:
            supporttype = 'Support'
            if len(group) > 1:
                supporttype = 'Split Night'
                split_night_count += 1
            instruments = list(group['Instrument'])
            if len(set(instruments)) == 1:
                title = f"{instruments[0]} {supporttype}"
            else:
                title = f"{'/'.join(instruments)} {supporttype}"
            # Event runs from 10 hours before (UT) sunset to 23:00 HST.
            calstart = (twilights['seto']-tdelta(0,10*60*60)).strftime('%Y%m%dT%H%M00')
            calend = f"{date.replace('-', '')}T230000"
            description = [title,
                           f"Sunset: {twilights['set']}",
                           f"12deg: {twilights['ent']}",
                           ]
            for entry in group:
                obslist = entry['Observers'].split(',')
                loclist = entry['Location'].split(',')
                assert len(obslist) == len(loclist)
                observers = [f"{obs}({loclist[i]})" for i,obs in enumerate(obslist)]
                description.append('')
                description.append(f"Instrument: {entry['Instrument']} ({entry['Account']})")
                description.append(f"PI: {entry['Principal']}")
                description.append(f"Observers: {', '.join(observers)}")
                description.append(f"Start Time: {entry['StartTime']}")
            description.append('')
            description.append(f"12deg: {twilights['mnt']}")
            description.append(f"Sunrise: {twilights['rise']}")
            ical_file.add_event(title, calstart, calend, description)
    ical_file.write()
    print(f"Found {night_count:d} / {ndays:d} nights ({100*night_count/ndays:.1f} %) where SA matches {args.sa:}")
    print(f"Found {split_night_count:d} split nights")
    for month in sorted(month_night_count.keys()):
        nsupport = month_night_count[month]
        nnights = month_nights[int(month[-2:])]
        print(f" For {month}: {nsupport:2d} / {nnights:2d} nights ({100*nsupport/nnights:4.1f} %)")
if __name__ == '__main__':
    # Gooey supplies the GUI event loop when run as a script.
    main()
|
"""How we keep track of the state of the game."""
import random
from operator import attrgetter
from .data import Data
from .model import Room, Message, Dwarf, Pirate
YESNO_ANSWERS = {'y': True, 'yes': True, 'n': False, 'no': False}
class Game(Data):
    # Class-level defaults for pacing constants and per-game counters;
    # instances shadow these as play progresses.
    look_complaints = 3  # how many times to "SORRY, BUT I AM NOT ALLOWED..."
    full_description_period = 5  # how often we use a room's full description
    full_wests = 0  # how many times they have typed "west" instead of "w"
    dwarf_stage = 0  # how active the dwarves are
    dwarves_killed = 0  # dwarves the player has dispatched so far
    gave_up = False  # player quit rather than finishing
    treasures_not_found = 0  # how many treasures have not yet been seen
    impossible_treasures = 0  # how many treasures can never be retrieved
    lamp_turns = 330  # turns of lamp fuel remaining
    warned_about_dim_lamp = False  # low-fuel warning already printed?
    bonus = 0  # how they exited the final bonus round
    deaths = 0  # how many times the player has died
    max_deaths = 4  # how many times the player can die
    turns = 0  # total commands processed
def __init__(self, writer, end_game, seed=None):
    """Initialize game state.

    :param writer: callable that receives each chunk of output text
    :param end_game: callable invoked to end the game
    :param seed: optional RNG seed, for reproducible play-throughs
    """
    Data.__init__(self)
    self.writer = writer
    self.end_game = end_game  # function to call to end us
    # Pending yes/no question state (see yesno()).
    self.yesno_callback = False
    self.yesno_casual = False  # whether to insist they answer
    # Cave-closing endgame flags.
    self.is_closing = False  # is the cave closing?
    self.panic = False  # they tried to leave during closing?
    self.is_closed = False  # is the cave closed?
    self.could_fall_in_pit = False  # could the player fall into a pit?
    # Dedicated Random instance so a seed makes the whole game deterministic.
    self.random_instance = random.Random()
    if seed is not None:
        self.random_instance.seed(seed)
    # Bound-method shortcuts used throughout the game logic.
    self.random = self.random_instance.random
    self.randint = self.random_instance.randint
    self.choice = self.random_instance.choice
def write(self, s):
    """Upper-case `s` and emit it through the writer, newline-terminated."""
    text = str(s).upper()
    if text:
        self.writer(text)
    self.writer('\n')
def write_message(self, n):
    """Emit canned game message number `n`."""
    self.write(self.messages[n])
def yesno(self, s, yesno_callback, casual=False):
    """Print the question `s` and arm the yes/no answer machinery.

    `casual` means a non-answer is tolerated rather than re-asked.
    """
    self.write(s)
    self.yesno_callback = yesno_callback
    self.yesno_casual = casual
# Properties of the cave.
@property
def is_dark(self):
    """True unless the room is lit or the player's lamp is here and on."""
    lamp = self.objects['lamp']
    return not (self.is_here(lamp) and lamp.prop) and self.loc.is_dark
@property
def inventory(self):
    """Objects the player is currently carrying."""
    return [item for item in self.object_list if item.is_toting]
@property
def treasures(self):
    """Treasure objects (by convention, object numbers 50 and up)."""
    return [item for item in self.object_list if item.n >= 50]
@property
def objects_here(self):
    """Objects lying in the player's current room."""
    return self.objects_at(self.loc)
def objects_at(self, room):
    """Objects lying in `room`."""
    return [item for item in self.object_list if room in item.rooms]
def is_here(self, obj):
    """True when `obj` is carried or lies in the current room."""
    return obj.is_toting or (self.loc in obj.rooms)
# Game startup
def start(self):
    """Begin a game by asking whether the player wants instructions."""
    self.chest_room = self.rooms[114]
    self.bottle.contents = self.water
    # Message 65 is the welcome/instructions prompt; start2 resumes play.
    self.yesno(self.messages[65], self.start2)
def start2(self, yes):
    """Finish startup after the instructions question is answered."""
    if yes:
        self.write_message(1)
        self.hints[3].used = True
        # Reading the instructions earns extra lamp fuel.
        self.lamp_turns = 1000
    # Drop the adventurer at the starting room.
    self.oldloc2 = self.oldloc = self.loc = self.rooms[1]
    self.dwarves = [Dwarf(self.rooms[room_n]) for room_n in (19, 27, 33, 44, 64)]
    self.pirate = Pirate(self.chest_room)
    treasure_list = self.treasures
    self.treasures_not_found = len(treasure_list)
    for treasure in treasure_list:
        treasure.prop = -1  # -1 marks a treasure as not yet seen
    self.describe_location()
# Routines that handle the aftermath of "big" actions like movement.
# Although these are called at the end of each `do_command()` cycle,
# we place here at the top of `game.py` to mirror the order in the
# advent.for file.
def move_to(self, newloc=None):  #2
    """Move the player to `newloc`, enforcing closing-time and dwarf rules.

    Mirrors section 2 of advent.for.  With no argument the player stays
    put (but dwarves may still act).
    """
    loc = self.loc
    if newloc is None:
        newloc = loc
    # During cave closing the player may not return above ground; the
    # first attempt also starts the panic clock.
    if self.is_closing and newloc.is_aboveground:
        self.write_message(130)
        newloc = loc  # cancel move and put him back underground
        if not self.panic:
            self.clock2 = 15
            self.panic = True
    # A dwarf waiting at the destination blocks the move, unless the
    # move must be allowed (staying put, forced motion, or rooms the
    # dwarves/pirate cannot enter).
    must_allow_move = ((newloc is loc) or (loc.is_forced)
                       or (loc.is_forbidden_to_pirate))
    dwarf_blocking_the_way = any(
        dwarf.old_room is newloc and dwarf.has_seen_adventurer
        for dwarf in self.dwarves
        )
    if not must_allow_move and dwarf_blocking_the_way:
        newloc = loc  # cancel move they were going to make
        self.write_message(2)  # dwarf is blocking the way
    self.loc = loc = newloc  #74
    # IF LOC.EQ.0 ?
    # Dwarves only act in rooms they can reach; activity begins once the
    # player has passed the Hall of Mists.
    is_dwarf_area = not (loc.is_forced or loc.is_forbidden_to_pirate)
    if is_dwarf_area and self.dwarf_stage > 0:
        self.move_dwarves()
    else:
        if is_dwarf_area and loc.is_after_hall_of_mists:
            self.dwarf_stage = 1
        self.describe_location()
def move_dwarves(self):
    """Move the dwarves and the pirate, then describe the location.

    Tallies how many dwarves follow the player, how many attack, and
    how many knives actually wound; a wound kills the player.
    """
    #6000
    if self.dwarf_stage == 1:
        # 5% chance per turn of meeting first dwarf
        if self.loc.is_before_hall_of_mists or self.random() < .95:
            self.describe_location()
            return
        self.dwarf_stage = 2
        for i in range(2): # randomly remove 0, 1, or 2 dwarves
            if self.random() < .5:
                del self.dwarves[self.randint(0, len(self.dwarves) - 1)]
        for dwarf in self.dwarves:
            if dwarf.room is self.loc: # move dwarf away from our loc
                dwarf.start_at(self.rooms[18])
        self.write_message(3) # dwarf throws axe and curses
        self.axe.drop(self.loc)
        self.describe_location()
        return
    #6010
    dwarf_count = dwarf_attacks = knife_wounds = 0
    for dwarf in self.dwarves + [ self.pirate ]:
        # Rooms this dwarf could step to next (never back the way it came).
        locations = { move.action for move in dwarf.room.travel_table
                      if dwarf.can_move(move)
                      and move.action is not dwarf.old_room
                      and move.action is not dwarf.room }
        # Without stabilizing the order with a sort, the room chosen
        # would depend on how the Room addresses in memory happen to
        # order the rooms in the set() - and make it impossible to
        # test the game by setting the random number generator seed
        # and then playing through the game.
        locations = sorted(locations, key=attrgetter('n'))
        if locations:
            new_room = self.choice(locations)
        else:
            new_room = dwarf.old_room
        dwarf.old_room, dwarf.room = dwarf.room, new_room
        if self.loc in (dwarf.room, dwarf.old_room):
            dwarf.has_seen_adventurer = True
        elif self.loc.is_before_hall_of_mists:
            dwarf.has_seen_adventurer = False
        if not dwarf.has_seen_adventurer:
            continue
        # This dwarf (or the pirate) is stalking the adventurer.
        dwarf.room = self.loc
        if dwarf.is_dwarf:
            dwarf_count += 1
            # A dwarf cannot walk and attack at the same time.
            if dwarf.room is dwarf.old_room:
                dwarf_attacks += 1
                #knfloc here
                if self.random() < .095 * (self.dwarf_stage - 2):
                    knife_wounds += 1
        else: # the pirate
            pirate = dwarf
            if self.loc is self.chest_room or self.chest.prop >= 0:
                continue # decide that the pirate is not really here
            treasures = [ t for t in self.treasures if t.is_toting ]
            if (self.platinum in treasures and self.loc.n in (100, 101)):
                treasures.remove(self.pyramid)
            if not treasures:
                h = any( t for t in self.treasures if self.is_here(t) )
                one_treasure_left = (self.treasures_not_found ==
                                     self.impossible_treasures + 1)
                shiver_me_timbers = (
                    one_treasure_left and not h and self.chest.room.n == 0
                    and self.is_here(self.lamp) and self.lamp.prop == 1
                    )
                if not shiver_me_timbers:
                    if (pirate.old_room != pirate.room
                        and self.random() < .2):
                        self.write_message(127)
                    continue # proceed to the next character? aren't any!
                self.write_message(186)
                self.chest.drop(self.chest_room)
                self.message.drop(self.rooms[140])
            else:
                #6022 I'll just take all this booty
                self.write_message(128)
                # First theft: hide the chest and leave the note.
                if not self.message.rooms:
                    self.chest.drop(self.chest_room)
                    self.message.drop(self.rooms[140])
                for treasure in treasures:
                    treasure.drop(self.chest_room)
            #6024
            pirate.old_room = pirate.room = self.chest_room
            pirate.has_seen_adventurer = False # free to move
    # Report what has happened.
    if dwarf_count == 1:
        self.write_message(4)
    elif dwarf_count:
        self.write('There are %d threatening little dwarves in the'
                   ' room with you.\n' % dwarf_count)
    if dwarf_attacks and self.dwarf_stage == 2:
        self.dwarf_stage = 3
    if dwarf_attacks == 1:
        self.write_message(5)
        k = 52
    elif dwarf_attacks:
        self.write('%d of them throw knives at you!\n' % dwarf_attacks)
        k = 6
    if not dwarf_attacks:
        pass
    elif not knife_wounds:
        self.write_message(k)
    else:
        if knife_wounds == 1:
            self.write_message(k + 1)
        else:
            self.write('%d of them get you!\n' % knife_wounds)
        self.oldloc2 = self.loc
        self.die()
        return
    self.describe_location()
def describe_location(self): #2000
    """Describe the current room and the objects visible in it."""
    # check for whether they already have died? or do as sep func?
    loc = self.loc
    could_fall = self.is_dark and self.could_fall_in_pit
    if could_fall and not loc.is_forced and self.random() < .35:
        self.die_here()
        return
    if self.bear.is_toting:
        self.write_message(141)
    if self.is_dark and not loc.is_forced:
        self.write_message(16)
    else:
        # Alternate the long and short room descriptions.
        do_short = loc.times_described % self.full_description_period
        loc.times_described += 1
        if do_short and loc.short_description:
            self.write(loc.short_description)
        else:
            self.write(loc.long_description)
    if loc.is_forced:
        self.do_motion(self.vocabulary[2]) # dummy motion verb
        return
    if loc.n == 33 and self.random() < .25 and not self.is_closing:
        self.write_message(8)
    if not self.is_dark:
        for obj in self.objects_here:
            if obj is self.steps and self.gold.is_toting:
                continue
            if obj.prop < 0: # finding a treasure the first time
                if self.is_closed:
                    continue
                obj.prop = 1 if obj in (self.rug, self.chain) else 0
                self.treasures_not_found -= 1
                # Once only unreachable treasures remain, cut the
                # lamp's remaining fuel short.
                if (self.treasures_not_found > 0 and
                        self.treasures_not_found == self.impossible_treasures):
                    self.lamp_turns = min(35, self.lamp_turns)
            if obj is self.steps and self.loc is self.steps.rooms[1]:
                prop = 1
            else:
                prop = obj.prop
            self.write(obj.messages[prop])
    self.finish_turn()
def say_okay_and_finish(self): #2009
    """Acknowledge with "OK" and complete the turn."""
    self.write_message(54)  # "OK"
    self.finish_turn()
#2009 sets SPK="OK" then...
#2011 speaks SPK then...
#2012 blanks VERB and OBJ and calls:
def finish_turn(self, obj=None): #2600
    """Finish a turn: offer any pending hint, then do end-of-turn bookkeeping.

    `obj` is the object (if any) that the turn's command involved;
    some hints are only offered when a particular object is in play.
    """
    for hint in self.hints.values():
        if hint.turns_needed == 9999 or hint.used:
            continue
        if self.loc in hint.rooms:
            hint.turn_counter += 1
            if hint.turn_counter >= hint.turns_needed:
                if hint.n != 5: # hint 5 counter does not get reset
                    hint.turn_counter = 0
                if self.should_offer_hint(hint, obj):
                    hint.turn_counter = 0
                    def callback(yes):
                        if yes:
                            self.write(hint.message)
                        else:
                            self.write_message(54)
                    self.yesno(hint.question, callback)
                    return
        else:
            hint.turn_counter = 0
    if self.is_closed:
        # Fix: the oyster object is named `oyster` everywhere else in
        # this class (see t_unlock); `oyste` raised AttributeError.
        if self.oyster.prop < 0: # and toting it
            self.write(self.oyster.messages[1])
        # put closing-time "prop" special case here
    self.could_fall_in_pit = self.is_dark #2605
    # remove knife from cave if they moved away from it
    # Advance random number generator so each input affects future.
    self.random()
# The central do_command() method, that should be called over and
# over again with words supplied by the user.
def do_command(self, words): #2608
    """Parse and act upon the command in the list of strings `words`."""
    if self.yesno_callback is not None:
        # A question is pending; interpret this input as its answer.
        answer = YESNO_ANSWERS.get(words[0], None)
        if answer is None:
            if self.yesno_casual:
                # A casual question lets a non-answer fall through
                # and be treated as an ordinary command.
                self.yesno_callback = None
            else:
                self.write('Please answer the question.')
                return
        else:
            callback = self.yesno_callback
            self.yesno_callback = None
            callback(answer)
            return
    self.turns += 1
    # Lamp fuel accounting: burn a turn, swap in fresh batteries,
    # and warn or give up as the lamp dies.
    if self.lamp.prop:
        self.lamp_turns -= 1
    if self.lamp_turns <= 30 and self.is_here(self.battery) \
            and self.battery.prop == 0 and self.is_here(self.lamp):
        self.write_message(188)
        self.battery.prop = 1
        if self.battery.is_toting:
            self.battery.drop(self.loc)
        self.lamp_turns += 2500
        self.warned_about_dim_lamp = False
    if self.lamp_turns == 0:
        self.lamp_turns = -1  # -1 so this branch only fires once
        self.lamp.prop = 0
        if self.is_here(self.lamp):
            self.write_message(184)
    elif self.lamp_turns < 0 and self.loc.is_aboveground:
        self.write_message(185)
        self.gave_up = True
        self.score_and_exit()
        return
    elif self.lamp_turns <= 30 and not self.warned_about_dim_lamp \
            and self.is_here(self.lamp):
        self.warned_about_dim_lamp = True
        if self.battery.prop == 1:
            self.write_message(189)
        elif not self.battery.rooms:
            self.write_message(183)
        else:
            self.write_message(187)
    self.dispatch_command(words)
def dispatch_command(self, words):
    """Dispatch `words` to the appropriate motion or verb handler."""
    if words[0] not in self.vocabulary:
        # Unknown word: complain with a randomly chosen message.
        n = self.randint(1, 5)
        if n == 1:
            self.write_message(61)
        elif n == 2:
            self.write_message(13)
        else:
            self.write_message(60)
        self.finish_turn()
        return
    word = self.vocabulary[words[0]]
    if word.kind == 'motion':
        if words[0] == 'west':
            # Tease players who keep spelling out "west".
            self.full_wests += 1
            if self.full_wests == 10:
                self.write_message(17)
        self.do_motion(word)
    elif word.kind == 'verb':
        prefix = 't_' if len(words) == 2 else 'i_' # (in)transitive
        if len(words) == 2:
            word2 = self.vocabulary[words[1]]
            obj = self.objects[word2.n % 1000]
            #5000
            if word == 'say':
                # "say" takes the word itself, not the object.
                args = (word, word2)
            elif not self.is_here(obj):
                self.write('I see no %s here.\n' % obj.names[0])
                self.finish_turn(obj)
                return
            else:
                args = (word, obj)
        else:
            args = (word,)
        # Handlers are named t_<verb> / i_<verb> on this class.
        getattr(self, prefix + word.synonyms[0].text)(*args)
# Motion.
def do_motion(self, word): #8
    """Attempt the motion named by the vocabulary `word`."""
    if word == 'null': #2
        self.move_to()
        return
    elif word == 'back': #20
        # Return to the previous room, if a route back exists.
        dest = self.oldloc2 if self.oldloc.is_forced else self.oldloc
        self.oldloc2, self.oldloc = self.oldloc, self.loc
        if dest is self.loc:
            self.write_message(91)
            self.move_to()
            return
        alt = None
        for move in self.loc.travel_table:
            if move.action is dest:
                word = move.verbs[0] # arbitrary verb going to `dest`
                break # Fall through, to attempt the move.
            elif (isinstance(move.action, Room)
                  and move.action.is_forced
                  and move.action.travel_table[0].action is dest):
                alt = move.verbs[0]
        else: # no direct route is available
            if alt is not None:
                word = alt # take a forced move if it's the only option
            else:
                self.write_message(140)
                self.move_to()
                return
    elif word == 'look': #30
        if self.look_complaints > 0:
            self.write_message(15)
            self.look_complaints -= 1
        self.loc.times_described = 0  # force the long description
        self.move_to()
        self.could_fall_in_pit = False
        return
    elif word == 'cave': #40
        self.write(self.messages[57 if self.loc.is_aboveground else 58])
        self.move_to()
        return
    self.oldloc2, self.oldloc = self.oldloc, self.loc
    for move in self.loc.travel_table:
        if move.is_forced or word in move.verbs:
            # Evaluate the optional condition guarding this entry.
            c = move.condition
            if c[0] is None or c[0] == 'not_dwarf':
                allowed = True
            elif c[0] == '%':
                allowed = 100 * self.random() < c[1]
            elif c[0] == 'carrying':
                allowed = self.objects[c[1]].is_toting
            elif c[0] == 'carrying_or_in_room_with':
                allowed = self.is_here(self.objects[c[1]])
            elif c[0] == 'prop!=':
                allowed = self.objects[c[1]].prop != c[2]
            if not allowed:
                continue
            if isinstance(move.action, Room):
                self.move_to(move.action)
                return
            elif isinstance(move.action, Message):
                self.write(move.action)
                self.move_to()
                return
            elif move.action == 301: #30100
                # Special rule for rooms 99/100 - presumably the tight
                # squeeze only the emerald may pass through; verify
                # against the travel table.
                inv = self.inventory
                if len(inv) != 0 and inv != [ self.emerald ]:
                    self.write_message(117)
                    self.move_to()
                elif self.loc.n == 100:
                    self.move_to(self.rooms[99])
                else:
                    self.move_to(self.rooms[100])
                return
            elif move.action == 302: #30200
                self.emerald.drop(self.loc)
                self.do_motion(word)
                return
            else:
                raise NotImplementedError(move.action)
    #50
    # No travel-table entry matched: complain appropriately.
    n = word.n
    if 29 <= n <= 30 or 43 <= n <= 50:
        self.write_message(9)
    elif n in (7, 36, 37):
        self.write_message(10)
    elif n in (11, 19):
        self.write_message(11)
    elif word == 'find' or word == 'invent': # ? this might be wrong
        self.write_message(59)
    elif n in (62, 65):
        self.write_message(42)
    elif n == 17:
        self.write_message(80)
    else:
        self.write_message(12)
    self.move_to()
    return
# Death and reincarnation.
def die_here(self): #90
    """Report a fatal fall into a pit, then handle the death."""
    self.oldloc2 = self.loc
    self.write_message(23)
    self.die()
def die(self): #99
    """Kill the player, offering reincarnation while lives remain."""
    self.deaths += 1
    if self.is_closing:
        # No resurrections once the cave is closing.
        self.write_message(131)
        self.score_and_exit()
        return
    def callback(yes):
        if yes:
            self.write_message(80 + self.deaths * 2)
            if self.deaths < self.max_deaths:
                # do water and oil thing
                if self.lamp.is_toting:
                    self.lamp.prop = 0
                # Scatter their possessions where they died; the lamp
                # goes back to the road.
                for obj in self.inventory:
                    if obj is self.lamp:
                        obj.drop(self.rooms[1])
                    else:
                        obj.drop(self.oldloc2)
                self.loc = self.rooms[3]
                self.describe_location()
                return
        else:
            self.write_message(54)
        self.score_and_exit()
    self.yesno(self.messages[79 + self.deaths * 2], callback)
# Verbs.
def print_do_what(self, verb, *args): #8000
    """Ask which object the bare verb should apply to, then end the turn."""
    text = verb.text
    self.write('%s What?\n' % text)
    self.finish_turn()
# Used intransitively, these verbs simply ask "<VERB> What?".
i_drop = print_do_what
i_say = print_do_what
i_wave = print_do_what
i_calm = print_do_what
i_rub = print_do_what
i_toss = print_do_what
i_find = print_do_what
i_feed = print_do_what
i_break = print_do_what
i_wake = print_do_what
def i_carry(self, verb): #8010
    """Bare "carry": take the only object here, if it is unambiguous."""
    is_dwarf_here = any( dwarf.room == self.loc for dwarf in self.dwarves )
    if len(self.loc.objects) != 1 or is_dwarf_here:
        # Ambiguous (or no) object: ask "carry what?" and stop.  The
        # missing `return` here used to fall through and index
        # self.loc.objects[0] anyway - an IndexError in an empty room.
        self.print_do_what(verb)
        return
    obj = self.loc.objects[0]
    self.t_carry(verb, obj)
def t_carry(self, verb, obj): #9010
    """Try to pick up `obj`, handling the many special cases."""
    if obj.is_toting:
        self.write_message(verb.default_message or 54)
        self.finish_turn()
        return
    if obj.is_fixed or len(obj.rooms) > 1:
        # The object cannot be picked up; explain why.
        if obj is self.plant and obj.prop <= 0:
            self.write_message(115)
        elif obj is self.bear and obj.prop == 1:
            self.write_message(169)
        elif obj is self.chain and self.chain.prop != 0:
            self.write_message(170)
        else:
            self.write_message(25)
        self.finish_turn()
        return
    if obj is self.water or obj is self.oil:
        if self.is_here(self.bottle) and self.bottle.contents is obj:
            # They want to carry the filled bottle.
            obj = self.bottle
        else:
            # They must mean they want to fill the bottle.
            if self.bottle.is_toting and self.bottle.contents is None:
                # Fix: transitive handlers take (verb, obj); this call
                # was missing the `verb` argument.
                self.t_fill(verb, self.bottle) # hand off control to "fill"
                return
            elif self.bottle.contents is not None:
                self.write_message(105)
            elif not self.bottle.is_toting:
                self.write_message(104)
            else:
                self.write_message(verb.default_message)
            self.finish_turn()
            return
    if len(self.inventory) >= 7:
        self.write_message(92)
        self.finish_turn()
        return
    if obj is self.bird and obj.prop == 0:
        if self.rod.is_toting:
            self.write_message(26)
            self.finish_turn()
            return
        if not self.cage.is_toting:
            self.write_message(27)
            self.finish_turn()
            return
        self.bird.prop = 1
    if (obj is self.bird or obj is self.cage) and self.bird.prop != 0:
        # The bird and its cage travel together.
        self.bird.carry()
        self.cage.carry()
    else:
        obj.carry()
    if obj is self.bottle and self.bottle.contents is not None:
        self.bottle.contents.carry()
    self.say_okay_and_finish()
def t_drop(self, verb, obj): #9020
    """Drop `obj` here, with special cases for certain objects."""
    if obj is self.rod and not self.rod.is_toting and self.rod2.is_toting:
        obj = self.rod2
    if not obj.is_toting:
        self.write_message(verb.default_message)
        self.finish_turn()
        return
    bird, snake, dragon, bear, troll = self.bird, self.snake, self.dragon, \
        self.bear, self.troll
    if obj is bird and self.is_here(snake):
        # The released bird drives the snake away.
        self.write_message(30)
        if self.is_closed:
            self.write_message(136)
            self.score_and_exit()
        snake.prop = 1
        snake.destroy()
        bird.prop = 0
        bird.drop(self.loc)
    elif obj is self.coins and self.is_here(self.machine):
        # Buying batteries from the vending machine.
        obj.destroy()
        self.battery.drop(self.loc)
        self.write(self.battery.messages[0])
    elif obj is bird and self.is_here(dragon) and dragon.prop == 0:
        # The dragon eats the bird.
        self.write_message(154)
        bird.destroy()
        bird.prop = 0
        if snake.rooms:
            self.impossible_treasures += 1
    elif obj is bear and troll.is_at(self.loc):
        # The bear chases the troll away.
        self.write_message(163)
        troll.destroy() # and something about fixed?
        # something else about fixed and troll2
        # juggle?
        troll.prop = 2
        bear.drop(self.loc)
    elif obj is self.vase and self.loc is not self.rooms[96]:
        # The vase shatters unless it lands on the pillow.
        if self.pillow.is_at(self.loc):
            self.vase.prop = 0
        else:
            self.vase.prop = 2
            self.vase.is_fixed = True
        self.write(self.vase.messages[self.vase.prop + 1])
        obj.drop(self.loc)
    else:
        if obj is self.cage and self.bird.prop != 0:
            bird.drop(self.loc)
        elif obj is self.bird:
            obj.prop = 0
        self.write_message(54)
        obj.drop(self.loc)
    self.finish_turn()
    return
def t_say(self, verb, word): #9030
    """Say `word` aloud; magic words get dispatched as commands."""
    is_magic = word.n in (62, 65, 71, 2025)
    if is_magic:
        # Words like XYZZY act as though typed on their own.
        self.dispatch_command([ word ])
        return
    self.write('Okay, "{}".'.format(word.text))
    self.finish_turn()
def t_unlock(self, verb, obj): #9040
    """Lock or unlock `obj` - the grate, chain, clam, oyster, and friends."""
    if obj is self.clam or obj is self.oyster:
        # Prying the shellfish open with the trident.
        oy = 1 if (obj is self.oyster) else 0
        if verb == 'lock':
            self.write_message(61)
        elif not self.trident.is_toting:
            self.write_message(122 + oy)
        elif obj.is_toting:
            self.write_message(120 + oy)
        elif obj is self.oyster:
            self.write_message(125)
        else:
            # Success: the clam becomes an oyster and frees the pearl.
            self.write_message(124)
            self.clam.destroy()
            self.oyster.drop(self.loc)
            self.pearl.drop(self.rooms[105])
    elif obj is self.door:
        if obj.prop == 1:
            self.write_message(54)
        else:
            self.write_message(111)
    elif obj is self.cage:
        self.write_message(32)
    elif obj is self.keys:
        self.write_message(55)
    elif obj is self.grate or obj is self.chain:
        # if keys are not here, write message 31 and give up
        if obj is self.chain:
            raise NotImplementedError() #9048
        else:
            if self.is_closing:
                raise NotImplementedError() # set panic clock etc
            else:
                oldprop = obj.prop
                obj.prop = 0 if verb == 'lock' else 1
                # Message depends on both the old and new states.
                self.write_message(34 + oldprop + 2 * obj.prop)
    else:
        self.write(verb.names)
        self.write(obj.names)
        self.write(verb.default_message)
    self.finish_turn()
def t_light(self, verb, obj): #9070
    """Switch the lamp on."""
    # if not here lamp: 2011
    # if lamp out: 2011
    lamp = self.objects['lamp']
    lamp.prop = 1
    self.write_message(39)
    if self.loc.is_dark:
        # The room was dark until now - show them where they are.
        self.describe_location()
    else:
        self.finish_turn()
def t_wave(self, verb, obj): #9090
    """Wave `obj`; waving the rod at the fissure toggles the crystal bridge."""
    fissure = self.fissure
    if (obj is self.rod and obj.is_toting and self.is_here(fissure)
            and not self.is_closing):
        fissure.prop = 0 if fissure.prop else 1
        self.write(fissure.messages[2 - fissure.prop])
    else:
        if obj.is_toting or (obj is self.rod and self.rod2.is_toting):
            self.write(verb.default_message)
        else:
            self.write_message(29)
    self.finish_turn()
def t_attack(self, verb, obj): #9120
    """Attack `obj` - bird, shellfish, snake, dwarf, dragon, troll, or bear."""
    if obj is None:
        raise NotImplementedError()
    if obj is self.bird:
        if self.is_closed:
            self.write_message(137)
        else:
            obj.destroy()
            obj.prop = 0
            # Without the bird the snake can never be driven off.
            if self.snake.rooms:
                self.impossible_treasures += 1
            self.write_message(45)
    elif obj is self.clam or obj is self.oyster:
        self.write_message(150)
    elif obj is self.snake:
        self.write_message(46)
    elif obj is self.dwarf:
        if self.is_closed:
            # Fix: this was a bare `die` expression, which raised
            # NameError at runtime.  Fail loudly like the other
            # unimplemented branches until the closing-time behavior
            # is written.
            raise NotImplementedError()
        self.write_message(49)
    elif obj is self.dragon:
        if self.dragon.prop != 0:
            self.write_message(167)
        else:
            # "With what? Your bare hands?"
            # NOTE(review): the answer is ignored - any reply kills
            # the dragon; confirm against the original game.
            def callback(yes):
                self.write(obj.messages[1])
                obj.prop = 2
                obj.is_fixed = True
                oldroom1 = obj.rooms[0]
                oldroom2 = obj.rooms[1]
                # The dragon's two rooms collapse into one between them.
                newroom = self.rooms[ (oldroom1.n + oldroom2.n) // 2 ]
                obj.drop(newroom)
                self.rug.prop = 0
                self.rug.is_fixed = False
                self.rug.drop(newroom)
                for oldroom in (oldroom1, oldroom2):
                    for o in self.objects_at(oldroom):
                        o.drop(newroom)
                self.move_to(newroom)
            self.yesno(self.messages[49], callback, casual=True)
            return
    elif obj is self.troll:
        self.write_message(157)
    elif obj is self.bear:
        self.write_message(165 + (self.bear.prop + 1) // 2)
    else:
        self.write_message(44)
    self.finish_turn()
def t_throw(self, verb, obj): #9170
    """Throw `obj` - the axe at enemies, treasure to the troll, food to the bear."""
    if obj is self.rod and not self.rod.is_toting and self.rod2.is_toting:
        obj = self.rod2
    if not obj.is_toting:
        self.write(verb.default_message)
        self.finish_turn()
        return
    if obj.is_treasure and self.is_here(self.troll):
        # Pay the troll toll
        self.write_message(159)
        obj.destroy()
        self.troll2.rooms = self.troll.rooms
        self.troll.destroy()
        self.finish_turn()
        return
    if obj is self.food and self.is_here(self.bear):
        # Fix: transitive handlers take (verb, obj); this call was
        # missing the `verb` argument.
        self.t_feed(verb, self.bear)
        return
    if obj is not self.axe:
        self.t_drop(verb, obj)
        return
    # Throwing the axe at whatever enemy is present.
    dwarves_here = [ d for d in self.dwarves if d.room is self.loc ]
    if dwarves_here:
        if self.randint(0, 2): # 1/3rd chance of killing a dwarf
            self.write_message(48) # Miss
        else:
            self.dwarves.remove(dwarves_here[0])
            self.dwarves_killed += 1
            if self.dwarves_killed == 1:
                self.write_message(149)
            else:
                self.write_message(47)
        self.axe.drop(self.loc)
        self.do_motion(self.vocabulary['null'])
        return
    if self.is_here(self.dragon) and self.dragon.prop == 0:
        self.write_message(152)
        self.axe.drop(self.loc)
        self.do_motion(self.vocabulary['null'])
        return
    if self.is_here(self.troll):
        self.write_message(156)
        self.axe.drop(self.loc)
        self.do_motion(self.vocabulary['null'])
        return
    if self.is_here(self.bear) and self.bear.prop == 0:
        # The bear catches the axe, which becomes unrecoverable.
        self.write_message(164)
        self.axe.drop(self.loc)
        self.axe.is_fixed = True
        self.axe.prop = 1
        self.finish_turn()
        return
    self.t_attack(verb, None)
def i_inventory(self, verb): #8200
    """List everything the player is carrying."""
    carried = [ item for item in self.inventory if item is not self.bear ]
    if carried:
        self.write_message(99)
    for item in carried:
        self.write(item.inventory_message)
    if self.bear.is_toting:
        self.write_message(141)
    if not carried:
        self.write_message(98)
    self.finish_turn()
def i_score(self, verb): #8240
    """Report the current score and offer to end the game now."""
    score, max_score = self.compute_score(for_score_command=True)
    self.write('If you were to quit now, you would score %d'
               ' out of a possible %d.\n' % (score, max_score))
    def callback(yes):
        self.write_message(54)
        if yes:
            self.score_and_exit()
            return
    self.yesno(self.messages[143], callback)
def should_offer_hint(self, hint, obj): #40000
    """Return whether `hint` is applicable to the player's situation.

    `obj` is the object, if any, involved in the turn's command.
    Hints apparently compare equal to their number (``hint == 4``) -
    presumably via the Hint model class.
    """
    if hint == 4: # cave
        return self.grate.prop == 0 and not self.is_here(self.keys)
    elif hint == 5: # bird
        bird = self.bird
        return self.is_here(bird) and self.rod.is_toting and obj is bird
    elif hint == 6: # snake
        # Fix: `bird` was an unbound local here (it was only assigned
        # in the hint-5 branch above), raising NameError.
        return self.is_here(self.snake) and not self.is_here(self.bird)
    elif hint == 7: # maze
        return (not len(self.objects_here) and
                not len(self.objects_at(self.oldloc)) and
                not len(self.objects_at(self.oldloc2)) and
                len(self.inventory) > 1)
    elif hint == 8: # dark
        return self.emerald.prop != 1 and self.pyramid.prop != 1
    elif hint == 9: # witt
        return True
def compute_score(self, for_score_command=False): #20000
    """Return the tuple (current score, maximum possible score).

    `for_score_command` skips the few points that are only awarded
    when the score is computed at the very end of the game.
    """
    score = maxscore = 2
    for treasure in self.treasures:
        # if ptext(0) is zero?
        # Later treasures are worth more.
        if treasure.n > self.chest.n:
            value = 16
        elif treasure is self.chest:
            value = 14
        else:
            value = 12
        maxscore += value
        if treasure.prop >= 0:
            score += 2  # points for having found the treasure
        if treasure.rooms and treasure.rooms[0].n == 3 \
                and treasure.prop == 0:
            score += value - 2  # the rest for storing it in the building
    maxscore += self.max_deaths * 10
    score += (self.max_deaths - self.deaths) * 10
    maxscore += 4
    if not for_score_command and not self.gave_up:
        score += 4
    maxscore += 25
    if self.dwarf_stage:
        score += 25  # for getting deep enough to stir up the dwarves
    maxscore += 25
    if self.is_closing:
        # Fix: every parallel clause awards `score` after growing
        # `maxscore`; this one incremented `maxscore` twice, so the
        # 25 closing points were never awarded.
        score += 25
    maxscore += 45
    if self.is_closed:
        score += {0: 10, 135: 25, 134: 30, 133: 45}[self.bonus]
    maxscore += 1
    if self.magazine.rooms[0].n == 108:
        score += 1  # for leaving the magazine at Witt's End
    for hint in list(self.hints.values()):
        if hint.used:
            score -= hint.penalty
    return score, maxscore
def score_and_exit(self):
    """Print the final score and the player's rank, then end the game."""
    score, maxscore = self.compute_score()
    self.write('\nYou scored %d out of a possible %d using %d turns.'
               % (score, maxscore, self.turns))
    # Find the first rank whose threshold this score does not exceed.
    for i, (minimum, text) in enumerate(self.class_messages):
        if minimum >= score:
            break
    self.write('\n%s\n' % text)
    if i < len(self.class_messages) - 1:
        # Tell them how far they are from the next rank.
        d = self.class_messages[i+1][0] + 1 - score
        self.write('To achieve the next higher rating, you need'
                   ' %s more point%s\n' % (d, 's' if d > 1 else ''))
    else:
        self.write('To achieve the next higher rating '
                   'would be a neat trick!\n\nCongratulations!!\n')
    self.end_game()
# Implemented intransitive unlock and attack.
"""How we keep track of the state of the game."""
import random
from operator import attrgetter
from .data import Data
from .model import Room, Message, Dwarf, Pirate
YESNO_ANSWERS = {'y': True, 'yes': True, 'n': False, 'no': False}
class Game(Data):
    """State and central logic for a game of Adventure."""
    look_complaints = 3 # how many times to "SORRY, BUT I AM NOT ALLOWED..."
    full_description_period = 5 # how often we use a room's full description
    full_wests = 0 # how many times they have typed "west" instead of "w"
    dwarf_stage = 0 #DFLAG how active the dwarves are
    dwarves_killed = 0 # dwarves the player has dispatched with the axe
    gave_up = False # whether the game ended without the player finishing
    treasures_not_found = 0 # how many treasures have not yet been seen
    impossible_treasures = 0 # how many treasures can never be retrieved
    lamp_turns = 330 # how many turns of lamp fuel remain
    warned_about_dim_lamp = False
    bonus = 0 # how they exited the final bonus round
    deaths = 0 # how many times the player has died
    max_deaths = 4 # how many times the player can die
    turns = 0 # how many commands the player has entered
def __init__(self, writer, end_game, seed=None):
    """Set up a new game.

    `writer` is called with each chunk of output text; `end_game` is
    called when the game is over.  Supplying `seed` makes the random
    number generator deterministic, for reproducible play-throughs.
    """
    Data.__init__(self)
    self.writer = writer
    self.end_game = end_game # function to call to end us
    # Fix: do_command() tests this with `is not None`, and every
    # other assignment uses None, so initialize to None (was False,
    # which made the initial "no question pending" state look like a
    # pending question).
    self.yesno_callback = None
    self.yesno_casual = False # whether to insist they answer
    self.is_closing = False # is the cave closing?
    self.panic = False # they tried to leave during closing?
    self.is_closed = False # is the cave closed?
    self.could_fall_in_pit = False # could the player fall into a pit?
    self.random_instance = random.Random()
    if seed is not None:
        self.random_instance.seed(seed)
    self.random = self.random_instance.random
    self.randint = self.random_instance.randint
    self.choice = self.random_instance.choice
def write(self, s):
    """Output the Unicode representation of `s`."""
    text = str(s).upper()
    if not text:
        return
    self.writer(text)
    self.writer('\n')
def write_message(self, n):
    """Write message number `n` from the game's message table."""
    self.write(self.messages[n])
def yesno(self, s, yesno_callback, casual=False):
    """Ask a question and prepare to receive a yes-or-no answer."""
    self.yesno_callback = yesno_callback
    self.yesno_casual = casual
    self.write(s)
# Properties of the cave.
@property
def is_dark(self):
    """Whether the player currently stands in darkness."""
    lamp = self.objects['lamp']
    lamp_is_on_here = self.is_here(lamp) and lamp.prop
    return False if lamp_is_on_here else self.loc.is_dark
@property
def inventory(self):
    """The objects the player is carrying."""
    return [ o for o in self.object_list if o.is_toting ]
@property
def treasures(self):
    """The treasure objects - those numbered 50 and above."""
    return [ o for o in self.object_list if o.n >= 50 ]
@property
def objects_here(self):
    """The objects lying in the player's current room."""
    return self.objects_at(self.loc)
def objects_at(self, room):
    """Return every object that currently lies in `room`."""
    return [ o for o in self.object_list if room in o.rooms ]
def is_here(self, obj):
    """Whether `obj` is being carried or lies in the current room."""
    if obj.is_toting:
        return True
    return self.loc in obj.rooms
# Game startup
def start(self):
    """Start the game."""
    self.chest_room = self.rooms[114]
    self.bottle.contents = self.water
    # Open by asking whether they want instructions.
    self.yesno(self.messages[65], self.start2)
def start2(self, yes):
    """Print out instructions if the user wants them."""
    if yes:
        self.write_message(1)
        self.hints[3].used = True
        self.lamp_turns = 1000  # reading the instructions earns extra lamp fuel
    self.oldloc2 = self.oldloc = self.loc = self.rooms[1]
    self.dwarves = [ Dwarf(self.rooms[n]) for n in (19, 27, 33, 44, 64) ]
    self.pirate = Pirate(self.chest_room)
    treasures = self.treasures
    self.treasures_not_found = len(treasures)
    for treasure in treasures:
        treasure.prop = -1  # prop of -1 marks a treasure as not yet seen
    self.describe_location()
# Routines that handle the aftermath of "big" actions like movement.
# Although these are called at the end of each `do_command()` cycle,
# we place here at the top of `game.py` to mirror the order in the
# advent.for file.
def move_to(self, newloc=None): #2
    """Move the player to `newloc` (default: stay put) and describe it.

    Handles cancelling the move at cave-closing time, dwarves that
    block the way, and kicking off dwarf activity once the player is
    deep enough in the cave.
    """
    loc = self.loc
    if newloc is None:
        newloc = loc
    if self.is_closing and newloc.is_aboveground:
        # The cave is closing: refuse to let them leave, and start
        # the panic clock the first time they try.
        self.write_message(130)
        newloc = loc # cancel move and put him back underground
        if not self.panic:
            self.clock2 = 15
            self.panic = True
    must_allow_move = ((newloc is loc) or (loc.is_forced)
                       or (loc.is_forbidden_to_pirate))
    dwarf_blocking_the_way = any(
        dwarf.old_room is newloc and dwarf.has_seen_adventurer
        for dwarf in self.dwarves
        )
    if not must_allow_move and dwarf_blocking_the_way:
        newloc = loc # cancel move they were going to make
        self.write_message(2) # dwarf is blocking the way
    self.loc = loc = newloc #74
    # IF LOC.EQ.0 ?
    is_dwarf_area = not (loc.is_forced or loc.is_forbidden_to_pirate)
    if is_dwarf_area and self.dwarf_stage > 0:
        self.move_dwarves()
    else:
        if is_dwarf_area and loc.is_after_hall_of_mists:
            self.dwarf_stage = 1  # dwarves become active past the Hall of Mists
        self.describe_location()
def move_dwarves(self):
    """Move the dwarves and the pirate, then describe the location.

    Tallies how many dwarves follow the player, how many attack, and
    how many knives actually wound; a wound kills the player.
    """
    #6000
    if self.dwarf_stage == 1:
        # 5% chance per turn of meeting first dwarf
        if self.loc.is_before_hall_of_mists or self.random() < .95:
            self.describe_location()
            return
        self.dwarf_stage = 2
        for i in range(2): # randomly remove 0, 1, or 2 dwarves
            if self.random() < .5:
                del self.dwarves[self.randint(0, len(self.dwarves) - 1)]
        for dwarf in self.dwarves:
            if dwarf.room is self.loc: # move dwarf away from our loc
                dwarf.start_at(self.rooms[18])
        self.write_message(3) # dwarf throws axe and curses
        self.axe.drop(self.loc)
        self.describe_location()
        return
    #6010
    dwarf_count = dwarf_attacks = knife_wounds = 0
    for dwarf in self.dwarves + [ self.pirate ]:
        # Rooms this dwarf could step to next (never back the way it came).
        locations = { move.action for move in dwarf.room.travel_table
                      if dwarf.can_move(move)
                      and move.action is not dwarf.old_room
                      and move.action is not dwarf.room }
        # Without stabilizing the order with a sort, the room chosen
        # would depend on how the Room addresses in memory happen to
        # order the rooms in the set() - and make it impossible to
        # test the game by setting the random number generator seed
        # and then playing through the game.
        locations = sorted(locations, key=attrgetter('n'))
        if locations:
            new_room = self.choice(locations)
        else:
            new_room = dwarf.old_room
        dwarf.old_room, dwarf.room = dwarf.room, new_room
        if self.loc in (dwarf.room, dwarf.old_room):
            dwarf.has_seen_adventurer = True
        elif self.loc.is_before_hall_of_mists:
            dwarf.has_seen_adventurer = False
        if not dwarf.has_seen_adventurer:
            continue
        # This dwarf (or the pirate) is stalking the adventurer.
        dwarf.room = self.loc
        if dwarf.is_dwarf:
            dwarf_count += 1
            # A dwarf cannot walk and attack at the same time.
            if dwarf.room is dwarf.old_room:
                dwarf_attacks += 1
                #knfloc here
                if self.random() < .095 * (self.dwarf_stage - 2):
                    knife_wounds += 1
        else: # the pirate
            pirate = dwarf
            if self.loc is self.chest_room or self.chest.prop >= 0:
                continue # decide that the pirate is not really here
            treasures = [ t for t in self.treasures if t.is_toting ]
            if (self.platinum in treasures and self.loc.n in (100, 101)):
                treasures.remove(self.pyramid)
            if not treasures:
                h = any( t for t in self.treasures if self.is_here(t) )
                one_treasure_left = (self.treasures_not_found ==
                                     self.impossible_treasures + 1)
                shiver_me_timbers = (
                    one_treasure_left and not h and self.chest.room.n == 0
                    and self.is_here(self.lamp) and self.lamp.prop == 1
                    )
                if not shiver_me_timbers:
                    if (pirate.old_room != pirate.room
                        and self.random() < .2):
                        self.write_message(127)
                    continue # proceed to the next character? aren't any!
                self.write_message(186)
                self.chest.drop(self.chest_room)
                self.message.drop(self.rooms[140])
            else:
                #6022 I'll just take all this booty
                self.write_message(128)
                # First theft: hide the chest and leave the note.
                if not self.message.rooms:
                    self.chest.drop(self.chest_room)
                    self.message.drop(self.rooms[140])
                for treasure in treasures:
                    treasure.drop(self.chest_room)
            #6024
            pirate.old_room = pirate.room = self.chest_room
            pirate.has_seen_adventurer = False # free to move
    # Report what has happened.
    if dwarf_count == 1:
        self.write_message(4)
    elif dwarf_count:
        self.write('There are %d threatening little dwarves in the'
                   ' room with you.\n' % dwarf_count)
    if dwarf_attacks and self.dwarf_stage == 2:
        self.dwarf_stage = 3
    if dwarf_attacks == 1:
        self.write_message(5)
        k = 52
    elif dwarf_attacks:
        self.write('%d of them throw knives at you!\n' % dwarf_attacks)
        k = 6
    if not dwarf_attacks:
        pass
    elif not knife_wounds:
        self.write_message(k)
    else:
        if knife_wounds == 1:
            self.write_message(k + 1)
        else:
            self.write('%d of them get you!\n' % knife_wounds)
        self.oldloc2 = self.loc
        self.die()
        return
    self.describe_location()
def describe_location(self): #2000
    """Describe the current room and the objects visible in it."""
    # check for whether they already have died? or do as sep func?
    loc = self.loc
    could_fall = self.is_dark and self.could_fall_in_pit
    if could_fall and not loc.is_forced and self.random() < .35:
        self.die_here()
        return
    if self.bear.is_toting:
        self.write_message(141)
    if self.is_dark and not loc.is_forced:
        self.write_message(16)
    else:
        # Alternate the long and short room descriptions.
        do_short = loc.times_described % self.full_description_period
        loc.times_described += 1
        if do_short and loc.short_description:
            self.write(loc.short_description)
        else:
            self.write(loc.long_description)
    if loc.is_forced:
        self.do_motion(self.vocabulary[2]) # dummy motion verb
        return
    if loc.n == 33 and self.random() < .25 and not self.is_closing:
        self.write_message(8)
    if not self.is_dark:
        for obj in self.objects_here:
            if obj is self.steps and self.gold.is_toting:
                continue
            if obj.prop < 0: # finding a treasure the first time
                if self.is_closed:
                    continue
                obj.prop = 1 if obj in (self.rug, self.chain) else 0
                self.treasures_not_found -= 1
                # Once only unreachable treasures remain, cut the
                # lamp's remaining fuel short.
                if (self.treasures_not_found > 0 and
                        self.treasures_not_found == self.impossible_treasures):
                    self.lamp_turns = min(35, self.lamp_turns)
            if obj is self.steps and self.loc is self.steps.rooms[1]:
                prop = 1
            else:
                prop = obj.prop
            self.write(obj.messages[prop])
    self.finish_turn()
def say_okay_and_finish(self): #2009
    """Acknowledge with "OK" and complete the turn."""
    self.write_message(54)  # "OK"
    self.finish_turn()
#2009 sets SPK="OK" then...
#2011 speaks SPK then...
#2012 blanks VERB and OBJ and calls:
def finish_turn(self, obj=None): #2600
    """Finish a turn: offer any pending hint, then do end-of-turn bookkeeping.

    `obj` is the object (if any) that the turn's command involved;
    some hints are only offered when a particular object is in play.
    """
    for hint in self.hints.values():
        if hint.turns_needed == 9999 or hint.used:
            continue
        if self.loc in hint.rooms:
            hint.turn_counter += 1
            if hint.turn_counter >= hint.turns_needed:
                if hint.n != 5: # hint 5 counter does not get reset
                    hint.turn_counter = 0
                if self.should_offer_hint(hint, obj):
                    hint.turn_counter = 0
                    def callback(yes):
                        if yes:
                            self.write(hint.message)
                        else:
                            self.write_message(54)
                    self.yesno(hint.question, callback)
                    return
        else:
            hint.turn_counter = 0
    if self.is_closed:
        # Fix: the oyster object is named `oyster` everywhere else in
        # this class (see t_unlock); `oyste` raised AttributeError.
        if self.oyster.prop < 0: # and toting it
            self.write(self.oyster.messages[1])
        # put closing-time "prop" special case here
    self.could_fall_in_pit = self.is_dark #2605
    # remove knife from cave if they moved away from it
    # Advance random number generator so each input affects future.
    self.random()
# The central do_command() method, that should be called over and
# over again with words supplied by the user.
    def do_command(self, words): #2608
        """Parse and act upon the command in the list of strings `words`."""
        # A pending yes/no question intercepts the next input entirely.
        if self.yesno_callback is not None:
            answer = YESNO_ANSWERS.get(words[0], None)
            if answer is None:
                if self.yesno_casual:
                    # Casual question: any other input cancels it and the
                    # input falls through to normal command handling.
                    self.yesno_callback = None
                else:
                    self.write('Please answer the question.')
                    return
            else:
                # Clear the callback *before* invoking it, so the callback
                # itself may ask another question.
                callback = self.yesno_callback
                self.yesno_callback = None
                callback(answer)
                return
        self.turns += 1
        # The lamp only burns fuel while switched on (prop != 0).
        if self.lamp.prop:
            self.lamp_turns -= 1
        # Fresh batteries are installed automatically when the lamp is low.
        if self.lamp_turns <= 30 and self.is_here(self.battery) \
                and self.battery.prop == 0 and self.is_here(self.lamp):
            self.write_message(188)
            self.battery.prop = 1
            if self.battery.is_toting:
                self.battery.drop(self.loc)
            self.lamp_turns += 2500
            self.warned_about_dim_lamp = False
        if self.lamp_turns == 0:
            # Lamp just died: pin the counter at -1 so this branch fires once.
            self.lamp_turns = -1
            self.lamp.prop = 0
            if self.is_here(self.lamp):
                self.write_message(184)
        elif self.lamp_turns < 0 and self.loc.is_aboveground:
            # Out of light and outside: the game gives up for the player.
            self.write_message(185)
            self.gave_up = True
            self.score_and_exit()
            return
        elif self.lamp_turns <= 30 and not self.warned_about_dim_lamp \
                and self.is_here(self.lamp):
            self.warned_about_dim_lamp = True
            if self.battery.prop == 1:
                self.write_message(189)
            elif not self.battery.rooms:
                self.write_message(183)
            else:
                self.write_message(187)
        self.dispatch_command(words)
    def dispatch_command(self, words):
        """Route a parsed command to a motion handler or a verb handler."""
        if words[0] not in self.vocabulary:
            # Unknown word: one of three random brush-off messages.
            n = self.randint(1, 5)
            if n == 1:
                self.write_message(61)
            elif n == 2:
                self.write_message(13)
            else:
                self.write_message(60)
            self.finish_turn()
            return
        word = self.vocabulary[words[0]]
        if word.kind == 'motion':
            if words[0] == 'west':
                # Tease the player once for always spelling out "west".
                self.full_wests += 1
                if self.full_wests == 10:
                    self.write_message(17)
            self.do_motion(word)
        elif word.kind == 'verb':
            prefix = 't_' if len(words) == 2 else 'i_' # (in)transitive
            if len(words) == 2:
                # NOTE(review): an unknown second word raises KeyError here —
                # confirm whether the caller guarantees both words are known.
                word2 = self.vocabulary[words[1]]
                obj = self.objects[word2.n % 1000]
                #5000
                if word == 'say':
                    args = (word, word2)
                elif not self.is_here(obj):
                    self.write('I see no %s here.\n' % obj.names[0])
                    self.finish_turn(obj)
                    return
                else:
                    args = (word, obj)
            else:
                args = (word,)
            # Dispatch to e.g. t_drop / i_drop by the verb's canonical name.
            getattr(self, prefix + word.synonyms[0].text)(*args)
# Motion.
    def do_motion(self, word): #8
        """Attempt to move the player per motion word `word`.

        Handles the special motions `null`, `back`, `look` and `cave`
        first, then scans the current room's travel table for an entry
        matching `word` whose condition is satisfied.
        """
        if word == 'null': #2
            self.move_to()
            return
        elif word == 'back': #20
            # If the previous room was a forced-motion room, go back to the
            # room before that instead.
            dest = self.oldloc2 if self.oldloc.is_forced else self.oldloc
            self.oldloc2, self.oldloc = self.oldloc, self.loc
            if dest is self.loc:
                self.write_message(91)
                self.move_to()
                return
            alt = None
            for move in self.loc.travel_table:
                if move.action is dest:
                    word = move.verbs[0] # arbitrary verb going to `dest`
                    break # Fall through, to attempt the move.
                elif (isinstance(move.action, Room)
                      and move.action.is_forced
                      and move.action.travel_table[0].action is dest):
                    alt = move.verbs[0]
            else: # no direct route is available
                if alt is not None:
                    word = alt # take a forced move if it's the only option
                else:
                    self.write_message(140)
                    self.move_to()
                    return
        elif word == 'look': #30
            # Only complain about "look" a limited number of times.
            if self.look_complaints > 0:
                self.write_message(15)
                self.look_complaints -= 1
            # Force the long description to be printed again.
            self.loc.times_described = 0
            self.move_to()
            self.could_fall_in_pit = False
            return
        elif word == 'cave': #40
            self.write(self.messages[57 if self.loc.is_aboveground else 58])
            self.move_to()
            return
        self.oldloc2, self.oldloc = self.oldloc, self.loc
        for move in self.loc.travel_table:
            if move.is_forced or word in move.verbs:
                # Evaluate the entry's guard condition.
                c = move.condition
                if c[0] is None or c[0] == 'not_dwarf':
                    allowed = True
                elif c[0] == '%':
                    allowed = 100 * self.random() < c[1]
                elif c[0] == 'carrying':
                    allowed = self.objects[c[1]].is_toting
                elif c[0] == 'carrying_or_in_room_with':
                    allowed = self.is_here(self.objects[c[1]])
                elif c[0] == 'prop!=':
                    allowed = self.objects[c[1]].prop != c[2]
                if not allowed:
                    continue
                if isinstance(move.action, Room):
                    self.move_to(move.action)
                    return
                elif isinstance(move.action, Message):
                    self.write(move.action)
                    self.move_to()
                    return
                elif move.action == 301: #30100
                    # Tight squeeze: only passable empty-handed or with
                    # just the emerald.
                    inv = self.inventory
                    if len(inv) != 0 and inv != [ self.emerald ]:
                        self.write_message(117)
                        self.move_to()
                    elif self.loc.n == 100:
                        self.move_to(self.rooms[99])
                    else:
                        self.move_to(self.rooms[100])
                    return
                elif move.action == 302: #30200
                    # Drop the emerald, then retry the same motion.
                    self.emerald.drop(self.loc)
                    self.do_motion(word)
                    return
                else:
                    raise NotImplementedError(move.action)
        #50 - no travel-table entry matched; complain appropriately.
        n = word.n
        if 29 <= n <= 30 or 43 <= n <= 50:
            self.write_message(9)
        elif n in (7, 36, 37):
            self.write_message(10)
        elif n in (11, 19):
            self.write_message(11)
        elif word == 'find' or word == 'invent': # ? this might be wrong
            self.write_message(59)
        elif n in (62, 65):
            self.write_message(42)
        elif n == 17:
            self.write_message(80)
        else:
            self.write_message(12)
        self.move_to()
        return
# Death and reincarnation.
    def die_here(self): #90
        """Kill the player at the current location (message 23), remembering
        where their possessions should be scattered."""
        self.write_message(23)
        self.oldloc2 = self.loc
        self.die()
    def die(self): #99
        """Handle a player death: either end the game (during closing, or
        when out of reincarnations) or offer resurrection via yes/no."""
        self.deaths += 1
        if self.is_closing:
            self.write_message(131)
            self.score_and_exit()
            return
        def callback(yes):
            if yes:
                # Snarky reincarnation message; gets ruder with each death.
                self.write_message(80 + self.deaths * 2)
                if self.deaths < self.max_deaths:
                    # do water and oil thing
                    if self.lamp.is_toting:
                        self.lamp.prop = 0
                    # Scatter possessions: the lamp ends up at the start,
                    # everything else at the place of death.
                    for obj in self.inventory:
                        if obj is self.lamp:
                            obj.drop(self.rooms[1])
                        else:
                            obj.drop(self.oldloc2)
                    self.loc = self.rooms[3]
                    self.describe_location()
                    return
            else:
                self.write_message(54)
            # Reached on "no", or on "yes" with no reincarnations left.
            self.score_and_exit()
        self.yesno(self.messages[79 + self.deaths * 2], callback)
# Verbs.
    def print_do_what(self, verb, *args): #8000
        """Ask e.g. "Drop What?" when an intransitive verb needs an object."""
        self.write('%s What?\n' % verb.text)
        self.finish_turn()
    # Intransitive verbs that always need an explicit object:
    i_drop = print_do_what
    i_say = print_do_what
    i_wave = print_do_what
    i_calm = print_do_what
    i_rub = print_do_what
    i_toss = print_do_what
    i_find = print_do_what
    i_feed = print_do_what
    i_break = print_do_what
    i_wake = print_do_what
def i_carry(self, verb): #8010
is_dwarf_here = any( dwarf.room == self.loc for dwarf in self.dwarves )
if len(self.loc.objects) != 1 or is_dwarf_here:
self.print_do_what(verb)
obj = self.loc.objects[0]
self.t_carry(verb, obj)
    def t_carry(self, verb, obj): #9010
        """Transitive "take": try to pick up `obj`, handling the many
        special cases (fixed objects, liquids, the caged bird, ...)."""
        if obj.is_toting:
            self.write_message(verb.default_message or 54)
            self.finish_turn()
            return
        # An object that is fixed, or in more than one room, cannot be taken.
        if obj.is_fixed or len(obj.rooms) > 1:
            if obj is self.plant and obj.prop <= 0:
                self.write_message(115)
            elif obj is self.bear and obj.prop == 1:
                self.write_message(169)
            elif obj is self.chain and self.chain.prop != 0:
                self.write_message(170)
            else:
                self.write_message(25)
            self.finish_turn()
            return
        if obj is self.water or obj is self.oil:
            if self.is_here(self.bottle) and self.bottle.contents is obj:
                # They want to carry the filled bottle.
                obj = self.bottle
            else:
                # They must mean they want to fill the bottle.
                if self.bottle.is_toting and self.bottle.contents is None:
                    self.t_fill(self.bottle) # hand off control to "fill"
                    return
                elif self.bottle.contents is not None:
                    self.write_message(105)
                elif not self.bottle.is_toting:
                    self.write_message(104)
                else:
                    self.write_message(verb.default_message)
                self.finish_turn()
                return
        # At most seven objects may be carried at once.
        if len(self.inventory) >= 7:
            self.write_message(92)
            self.finish_turn()
            return
        if obj is self.bird and obj.prop == 0:
            # The uncaught bird fears the rod and needs the cage.
            if self.rod.is_toting:
                self.write_message(26)
                self.finish_turn()
                return
            if not self.cage.is_toting:
                self.write_message(27)
                self.finish_turn()
                return
            self.bird.prop = 1
        # The caged bird and the cage travel together.
        if (obj is self.bird or obj is self.cage) and self.bird.prop != 0:
            self.bird.carry()
            self.cage.carry()
        else:
            obj.carry()
        # Picking up the bottle also picks up its contents.
        if obj is self.bottle and self.bottle.contents is not None:
            self.bottle.contents.carry()
        self.say_okay_and_finish()
    def t_drop(self, verb, obj): #9020
        """Transitive "drop": put `obj` down here, triggering the bird/snake,
        coins/machine, bird/dragon, bear/troll and vase interactions."""
        # "drop rod" while carrying only the phony rod means the phony rod.
        if obj is self.rod and not self.rod.is_toting and self.rod2.is_toting:
            obj = self.rod2
        if not obj.is_toting:
            self.write_message(verb.default_message)
            self.finish_turn()
            return
        bird, snake, dragon, bear, troll = self.bird, self.snake, self.dragon, \
            self.bear, self.troll
        if obj is bird and self.is_here(snake):
            # The bird drives off the snake (fatal if the cave is closed).
            self.write_message(30)
            if self.is_closed:
                self.write_message(136)
                self.score_and_exit()
            snake.prop = 1
            snake.destroy()
            bird.prop = 0
            bird.drop(self.loc)
        elif obj is self.coins and self.is_here(self.machine):
            # Vending machine: coins buy fresh batteries.
            obj.destroy()
            self.battery.drop(self.loc)
            self.write(self.battery.messages[0])
        elif obj is bird and self.is_here(dragon) and dragon.prop == 0:
            # The dragon eats the bird.
            self.write_message(154)
            bird.destroy()
            bird.prop = 0
            if snake.rooms:
                self.impossible_treasures += 1
        elif obj is bear and troll.is_at(self.loc):
            # The bear chases the troll away.
            self.write_message(163)
            troll.destroy() # and something about fixed?
            # something else about fixed and troll2
            # juggle?
            troll.prop = 2
            bear.drop(self.loc)
        elif obj is self.vase and self.loc is not self.rooms[96]:
            # The vase survives only if the pillow is here to land on.
            if self.pillow.is_at(self.loc):
                self.vase.prop = 0
            else:
                self.vase.prop = 2
                self.vase.is_fixed = True
            self.write(self.vase.messages[self.vase.prop + 1])
            obj.drop(self.loc)
        else:
            # Dropping the cage also releases the bird it contains.
            if obj is self.cage and self.bird.prop != 0:
                bird.drop(self.loc)
            elif obj is self.bird:
                obj.prop = 0
            self.write_message(54)
            obj.drop(self.loc)
        self.finish_turn()
        return
    def t_say(self, verb, word): #9030
        """Say a word aloud; magic words are re-dispatched as commands."""
        # 62/65/71/2025 are the magic words (xyzzy, plugh, plover, fee...).
        if word.n in (62, 65, 71, 2025):
            # NOTE(review): this passes a Word object, not a string, back
            # into dispatch_command — confirm vocabulary lookups accept it.
            self.dispatch_command([ word ])
        else:
            self.write('Okay, "{}".'.format(word.text))
            self.finish_turn()
    def i_unlock(self, verb): #8040
        """Intransitive lock/unlock: guess which lockable object is meant."""
        for obj in self.grate, self.door, self.oyster, self.clam:
            if self.is_here(obj):
                # Two candidates present: ask which one they mean.
                if self.is_here(self.chain):
                    return self.print_do_what(verb)
                return self.t_unlock(verb, obj)
        if self.is_here(self.chain):
            return self.t_unlock(verb, self.chain)
        self.write_message(28)
        self.finish_turn()
    def t_unlock(self, verb, obj): #9040
        """Transitive lock/unlock of the clam/oyster, door, grate or chain."""
        if obj is self.clam or obj is self.oyster:
            # `oy` offsets the message number for oyster-specific wording.
            oy = 1 if (obj is self.oyster) else 0
            if verb == 'lock':
                self.write_message(61)
            elif not self.trident.is_toting:
                self.write_message(122 + oy)
            elif obj.is_toting:
                self.write_message(120 + oy)
            elif obj is self.oyster:
                self.write_message(125)
            else:
                # Opening the clam reveals the pearl (which rolls away).
                self.write_message(124)
                self.clam.destroy()
                self.oyster.drop(self.loc)
                self.pearl.drop(self.rooms[105])
        elif obj is self.door:
            if obj.prop == 1:
                self.write_message(54)
            else:
                self.write_message(111)
        elif obj is self.cage:
            self.write_message(32)
        elif obj is self.keys:
            self.write_message(55)
        elif obj is self.grate or obj is self.chain:
            # if keys are not here, write message 31 and give up
            if obj is self.chain:
                raise NotImplementedError() #9048
            else:
                if self.is_closing:
                    raise NotImplementedError() # set panic clock etc
                else:
                    # Message 34/35/36/37 per old/new state combination.
                    oldprop = obj.prop
                    obj.prop = 0 if verb == 'lock' else 1
                    self.write_message(34 + oldprop + 2 * obj.prop)
        else:
            self.write(verb.names)
            self.write(obj.names)
            self.write(verb.default_message)
        self.finish_turn()
    def t_light(self, verb, obj=None): #9070
        """Switch the lamp on, if it is here and has any fuel left."""
        if not self.is_here(self.lamp):
            self.write(verb.default_message)
        elif self.lamp_turns == 0:
            self.write_message(184)
        else:
            self.lamp.prop = 1
            self.write_message(39)
            # If the room was dark, show it now that there is light.
            if self.loc.is_dark:
                return self.describe_location()
        self.finish_turn()
    i_light = t_light
    def t_extinguish(self, verb, obj=None): #9080
        """Switch the lamp off, warning about the dark if appropriate."""
        if not self.is_here(self.lamp):
            self.write(verb.default_message)
        else:
            self.lamp.prop = 0
            self.write_message(40)
            if self.loc.is_dark:
                self.write_message(16)
        self.finish_turn()
    i_extinguish = t_extinguish
    def t_wave(self, verb, obj): #9090
        """Wave an object; the rod toggles the crystal bridge at the fissure."""
        fissure = self.fissure
        if (obj is self.rod and obj.is_toting and self.is_here(fissure)
            and not self.is_closing):
            # Toggle the bridge and describe the new state.
            fissure.prop = 0 if fissure.prop else 1
            self.write(fissure.messages[2 - fissure.prop])
        else:
            if obj.is_toting or (obj is self.rod and self.rod2.is_toting):
                self.write(verb.default_message)
            else:
                self.write_message(29)
        self.finish_turn()
def i_attack(self, verb): #9120
enemies = [ self.snake, self.dragon, self.troll, self.bear ]
if self.dwarf_stage >= 2:
enemies.extend(self.dwarves)
dangers = filter(self.is_here, enemies)
if len(dangers) > 1:
return self.print_do_what(verb)
if len(dangers) == 1:
return self.t_attack(verb, dangers[0])
targets = []
if self.is_here(self.bird) and verb != 'throw':
targets.append(self.bird)
if self.is_here(self.clam) or self.is_here(self.oyster):
targets.append(self.clam)
if len(targets) > 1:
return self.print_do_what(verb)
elif len(targets) == 1:
return self.t_attack(verb, targets[0])
else:
return self.t_attack(verb, None)
    def t_attack(self, verb, obj): #9124 (but control goes to 9120 first)
        """Transitive "attack": handle each creature's special response."""
        if obj is self.bird:
            if self.is_closed:
                self.write_message(137)
            else:
                # Killing the bird makes the snake treasures unwinnable.
                obj.destroy()
                obj.prop = 0
                if self.snake.rooms:
                    self.impossible_treasures += 1
                self.write_message(45)
        elif obj is self.clam or obj is self.oyster:
            self.write_message(150)
        elif obj is self.snake:
            self.write_message(46)
        elif obj is self.dwarf:
            if self.is_closed:
                # NOTE(review): bare `die` is a NameError if this branch is
                # ever reached — presumably a placeholder for self.die().
                die
                return
            self.write_message(49)
        elif obj is self.dragon:
            if self.dragon.prop != 0:
                self.write_message(167)
            else:
                # "With what? Your bare hands?" — any answer kills the
                # dragon; note the callback ignores `yes`.
                def callback(yes):
                    self.write(obj.messages[1])
                    obj.prop = 2
                    obj.is_fixed = True
                    # The dead dragon (and the rug) land in a new room
                    # midway between the dragon's two old rooms.
                    oldroom1 = obj.rooms[0]
                    oldroom2 = obj.rooms[1]
                    newroom = self.rooms[ (oldroom1.n + oldroom2.n) // 2 ]
                    obj.drop(newroom)
                    self.rug.prop = 0
                    self.rug.is_fixed = False
                    self.rug.drop(newroom)
                    for oldroom in (oldroom1, oldroom2):
                        for o in self.objects_at(oldroom):
                            o.drop(newroom)
                    self.move_to(newroom)
                self.yesno(self.messages[49], callback, casual=True)
                return
        elif obj is self.troll:
            self.write_message(157)
        elif obj is self.bear:
            self.write_message(165 + (self.bear.prop + 1) // 2)
        else:
            self.write_message(44)
        self.finish_turn()
    def t_throw(self, verb, obj): #9170
        """Transitive "throw": treasures pay the troll, food feeds the bear,
        the axe attacks; anything else is just dropped."""
        if obj is self.rod and not self.rod.is_toting and self.rod2.is_toting:
            obj = self.rod2
        if not obj.is_toting:
            self.write(verb.default_message)
            self.finish_turn()
            return
        if obj.is_treasure and self.is_here(self.troll):
            # Pay the troll toll
            self.write_message(159)
            obj.destroy()
            self.troll2.rooms = self.troll.rooms
            self.troll.destroy()
            self.finish_turn()
            return
        if obj is self.food and self.is_here(self.bear):
            # NOTE(review): other t_* handlers take (verb, obj); here the
            # bear is passed as the first argument — confirm t_feed's
            # signature accepts a single object.
            self.t_feed(self.bear)
            return
        if obj is not self.axe:
            self.t_drop(verb, obj)
            return
        # Throwing the axe: pick a target in priority order.
        dwarves_here = [ d for d in self.dwarves if d.room is self.loc ]
        if dwarves_here:
            if self.randint(0, 2): # 1/3rd chance of killing a dwarf
                self.write_message(48) # Miss
            else:
                self.dwarves.remove(dwarves_here[0])
                self.dwarves_killed += 1
                if self.dwarves_killed == 1:
                    self.write_message(149)
                else:
                    self.write_message(47)
            self.axe.drop(self.loc)
            self.do_motion(self.vocabulary['null'])
            return
        if self.is_here(self.dragon) and self.dragon.prop == 0:
            self.write_message(152)
            self.axe.drop(self.loc)
            self.do_motion(self.vocabulary['null'])
            return
        if self.is_here(self.troll):
            self.write_message(156)
            self.axe.drop(self.loc)
            self.do_motion(self.vocabulary['null'])
            return
        if self.is_here(self.bear) and self.bear.prop == 0:
            # The bear catches the axe; it stays stuck until the bear is fed.
            self.write_message(164)
            self.axe.drop(self.loc)
            self.axe.is_fixed = True
            self.axe.prop = 1
            self.finish_turn()
            return
        self.t_attack(verb, None)
def i_inventory(self, verb): #8200
first = True
objs = [ obj for obj in self.inventory if obj is not self.bear ]
for obj in objs:
if first:
self.write_message(99)
first = False
self.write(obj.inventory_message)
if self.bear.is_toting:
self.write_message(141)
if not objs:
self.write_message(98)
self.finish_turn()
    def i_score(self, verb): #8240
        """Report the provisional score, then ask if they want to quit."""
        score, max_score = self.compute_score(for_score_command=True)
        self.write('If you were to quit now, you would score %d'
                   ' out of a possible %d.\n' % (score, max_score))
        def callback(yes):
            self.write_message(54)
            if yes:
                self.score_and_exit()
                return
        self.yesno(self.messages[143], callback)
def should_offer_hint(self, hint, obj): #40000
if hint == 4: # cave
return self.grate.prop == 0 and not self.is_here(self.keys)
elif hint == 5: # bird
bird = self.bird
return self.is_here(bird) and self.rod.is_toting and obj is bird
elif hint == 6: # snake
return self.is_here(self.snake) and not self.is_here(bird)
elif hint == 7: # maze
return (not len(self.objects_here) and
not len(self.objects_at(self.oldloc)) and
not len(self.objects_at(self.oldloc2)) and
len(self.inventory) > 1)
elif hint == 8: # dark
return self.emerald.prop != 1 and self.pyramid.prop != 1
elif hint == 9: # witt
return True
def compute_score(self, for_score_command=False): #20000
score = maxscore = 2
for treasure in self.treasures:
# if ptext(0) is zero?
if treasure.n > self.chest.n:
value = 16
elif treasure is self.chest:
value = 14
else:
value = 12
maxscore += value
if treasure.prop >= 0:
score += 2
if treasure.rooms and treasure.rooms[0].n == 3 \
and treasure.prop == 0:
score += value - 2
maxscore += self.max_deaths * 10
score += (self.max_deaths - self.deaths) * 10
maxscore += 4
if not for_score_command and not self.gave_up:
score += 4
maxscore += 25
if self.dwarf_stage:
score += 25
maxscore += 25
if self.is_closing:
maxscore += 25
maxscore += 45
if self.is_closed:
score += {0: 10, 135: 25, 134: 30, 133: 45}[self.bonus]
maxscore += 1
if self.magazine.rooms[0].n == 108:
score += 1
for hint in list(self.hints.values()):
if hint.used:
score -= hint.penalty
return score, maxscore
    def score_and_exit(self):
        """Print the final score, the player's rank, and end the game."""
        score, maxscore = self.compute_score()
        self.write('\nYou scored %d out of a possible %d using %d turns.'
                   % (score, maxscore, self.turns))
        # class_messages pairs each rank's score ceiling with its text;
        # pick the first rank whose ceiling is at or above our score.
        for i, (minimum, text) in enumerate(self.class_messages):
            if minimum >= score:
                break
        self.write('\n%s\n' % text)
        if i < len(self.class_messages) - 1:
            # Points still needed to pass the next rank's ceiling.
            d = self.class_messages[i+1][0] + 1 - score
            self.write('To achieve the next higher rating, you need'
                       ' %s more point%s\n' % (d, 's' if d > 1 else ''))
        else:
            self.write('To achieve the next higher rating '
                       'would be a neat trick!\n\nCongratulations!!\n')
        self.end_game()
|
import time
import operator
import numpy as np
import subprocess
import vdj
import refseq
import sequtils
import alignmentcore
class vdj_aligner(object):
    def __init__(self, verbose=False):
        """Build k-mer hash databases from the IGHV/IGHD/IGHJ reference
        sequences and spawn the external `malign` aligner subprocess.

        verbose -- if true, print how long database construction took.
        """
        t0 = time.time()
        # How many top-scoring candidate segments survive the crude
        # k-mer screen into the full alignment stage.
        self.numCrudeVCandidates = 5
        self.numCrudeDCandidates = 10
        self.numCrudeJCandidates = 2
        # Define seed patterns ('1' = position used in the gapped k-mer)
        patternA='111011001011010111'
        patternB='1111000100010011010111'
        patternC='111111111111'
        patternD='110100001100010101111'
        patternE='1110111010001111'
        self.seedpatterns = [patternA,patternB,patternC,patternD,patternE]
        self.miniseedpatterns = ['111011','110111']
        self.patternPos = '111111111111'
        # Generate hashes from reference data for sequence alignment
        self.Vseqlistannot,self.Vseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHV_seqs, self.seedpatterns )
        self.Dseqlistannotmini,self.Dseqlistkeysmini = vdj_aligner.seqdict2kmerannot( refseq.IGHD_seqs, self.miniseedpatterns )
        self.Jseqlistannot,self.Jseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHJ_seqs, self.seedpatterns )
        # Generate reference data for positive sequence ID
        posVseqlistannot,posVseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHV_seqs, [self.patternPos] )
        posJseqlistannot,posJseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHJ_seqs, [self.patternPos] )
        negVseqlistannot,negVseqlistkeys = vdj_aligner.seqdict2kmerannot( vdj_aligner.seqdict2revcompseqdict(refseq.IGHV_seqs), [self.patternPos] )
        negJseqlistannot,negJseqlistkeys = vdj_aligner.seqdict2kmerannot( vdj_aligner.seqdict2revcompseqdict(refseq.IGHJ_seqs), [self.patternPos] )
        # collect possible keys
        posset = set([])
        for key in posVseqlistkeys.keys():
            posset.update(posVseqlistkeys[key][self.patternPos])
        for key in posJseqlistkeys.keys():
            posset.update(posJseqlistkeys[key][self.patternPos])
        negset = set([])
        for key in negVseqlistkeys.keys():
            negset.update(negVseqlistkeys[key][self.patternPos])
        for key in negJseqlistkeys.keys():
            negset.update(negJseqlistkeys[key][self.patternPos])
        # get keys unique to positive or negative versions of reference set
        possetnew = posset - negset
        negsetnew = negset - posset
        self.posset = possetnew
        self.negset = negsetnew
        t1 = time.time()
        if verbose: print "Database init:", t1-t0
        # spawn the aligner
        # NOTE(review): `imgtvseq` is not defined anywhere in this module,
        # so constructing this object raises NameError here — presumably it
        # should name the IMGT V reference input for ./malign; confirm.
        self.aligner = subprocess.Popen(["./malign",imgtvseq], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def align_external(self, seq):
self.aligner.stdin.write(seq + "\n")
ch = self.aligner.stdout.read(1)
# burn off newlines if they exist
while ch == "\n":
ch = self.aligner.stdout.read(1)
while ch != "\n":
acc += ch
ch = self.aligner.stdout.read(1)
return acc
    def align_seq(self,seq,verbose=False):
        """Wrap raw sequence `seq` in an ImmuneChain, align it, and return
        the annotated chain."""
        chain = vdj.ImmuneChain(descr='sequence',seq=seq)
        self.align_chain(chain,verbose)
        return chain
    def align_chain(self,chain,verbose=False):
        """Annotate `chain` in place with V, D and J segment calls.

        Pipeline: k-mer screen + Needleman-Wunsch for V, prune the V
        region off the query, repeat for J, then (only if both V and J
        were called) Smith-Waterman for D on the remaining junction.
        Sets chain.v/j/d, chain.junction and chain.cdr3, and returns
        (chain.v, chain.d, chain.j, chain.junction).
        """
        query = chain.seq
        t0 = time.time()
        # compute hashes from query seq
        queryannot,querykeys = vdj_aligner.seq2kmerannot(query,self.seedpatterns)
        t1 = time.time()
        Vscores_hash = {}
        # for each reference V segment and each pattern, how many shared k-mers are there?
        for Vseg in refseq.IGHV_seqs.keys():
            score = 0
            for pattern in self.seedpatterns:
                score += len( self.Vseqlistkeys[Vseg][pattern] & querykeys[pattern] )
            Vscores_hash[Vseg] = score
        # get numCrudeVCandidates highest scores in Vscores and store their names in descending order
        goodVseglist = [ seg[0] for seg in vdj_aligner.dict2sorteddecreasingitemlist(Vscores_hash,'value')[0:self.numCrudeVCandidates] ]
        if verbose:
            print goodVseglist
        t2 = time.time()
        bestVseg = ''
        bestVscore = 100 # derived from calibration data 20090710
        bestVscoremat = []
        bestVtracemat = []
        # perform Needleman-Wunsch on top V seg candidates and remember which had the highest score
        for goodVseg in goodVseglist:
            # C implementation:
            # carve out memory
            # note that we are using zero initial conditions, so matrices are initialized too
            # notation is like Durbin p.29
            seq1 = refseq.IGHV_seqs[goodVseg]
            seq2 = query
            scores = np.zeros( [len(seq1)+1, len(seq2)+1] )
            Ix = np.zeros( [len(seq1)+1, len(seq2)+1] )
            Iy = np.zeros( [len(seq1)+1, len(seq2)+1] )
            # NOTE(review): np.int is removed in NumPy >= 1.24 — this code
            # targets the old NumPy it was written against.
            trace = np.zeros( [len(seq1)+1, len(seq2)+1], dtype=np.int)
            alignmentcore.alignNW( scores, Ix, Iy, trace, seq1, seq2 )
            currscore = vdj_aligner.scoreVJalign(scores)
            if currscore > bestVscore:
                bestVscore = currscore
                bestVseg = goodVseg
                bestVscoremat = scores
                bestVtracemat = trace
        chain.v = bestVseg
        t3 = time.time()
        # reconstruct the alignment and chop off V region through beginning of CDR3 (IMGT)
        v_end_idx = 0 # to ensure it gets defined (for processing j_start_idx below)
        if bestVseg != '':
            Valnref,Valnrefcoords,Valnquery,Valnquerycoords = vdj_aligner.construct_alignment( refseq.IGHV_seqs[bestVseg], query, bestVscoremat, bestVtracemat )
            query,v_end_idx = vdj_aligner.pruneVregion( Valnref, Valnrefcoords, Valnquery, Valnquerycoords, bestVseg, query )
            chain.add_tags('v_end_idx|%d'%v_end_idx)
        t4 = time.time()
        # compute hashes from (pruned) query seq (junction + J)
        queryannot,querykeys = vdj_aligner.seq2kmerannot(query,self.seedpatterns)
        t5 = time.time()
        Jscores_hash = {}
        # for each reference J segment and each pattern, how many shared k-mers are there?
        for Jseg in refseq.IGHJ_seqs.keys():
            score = 0
            for pattern in self.seedpatterns:
                score += len( self.Jseqlistkeys[Jseg][pattern] & querykeys[pattern] )
            Jscores_hash[Jseg] = score
        # get numCrudeJCandidates highest scores in Jscores and store their names in descending order
        goodJseglist = [ seg[0] for seg in vdj_aligner.dict2sorteddecreasingitemlist(Jscores_hash,'value')[0:self.numCrudeJCandidates] ]
        if verbose:
            print goodJseglist
        t6 = time.time()
        bestJseg = ''
        bestJscore = 13 # derived from calibration data 20090710
        bestJscoremat = []
        bestJtracemat = []
        # perform Needleman-Wunsch on top J seg candidates and remember which had the highest score
        for goodJseg in goodJseglist:
            # C implementation:
            # carve out memory
            # note that we are using zero initial conditions, so matrices are initialize too
            # notation is like Durbin p.29
            seq1 = refseq.IGHJ_seqs[goodJseg]
            seq2 = query
            scores = np.zeros( [len(seq1)+1, len(seq2)+1] )
            Ix = np.zeros( [len(seq1)+1, len(seq2)+1] )
            Iy = np.zeros( [len(seq1)+1, len(seq2)+1] )
            trace = np.zeros( [len(seq1)+1, len(seq2)+1], dtype=np.int)
            alignmentcore.alignNW( scores, Ix, Iy, trace, seq1, seq2 )
            # pure python:
            #scores,trace = vdj_aligner.alignNW( refseq.IGHJ_seqs[goodJseg], query )
            currscore = vdj_aligner.scoreVJalign(scores)
            if currscore > bestJscore:
                bestJscore = currscore
                bestJseg = goodJseg
                bestJscoremat = scores
                bestJtracemat = trace
        chain.j = bestJseg
        t7 = time.time()
        # reconstruct the alignment and chop off J region after the TRP (IMGT)
        if bestJseg != '':
            Jalnref,Jalnrefcoords,Jalnquery,Jalnquerycoords = vdj_aligner.construct_alignment( refseq.IGHJ_seqs[bestJseg], query, bestJscoremat, bestJtracemat )
            (query,j_start_idx) = vdj_aligner.pruneJregion( Jalnref, Jalnrefcoords, Jalnquery, Jalnquerycoords, bestJseg, query )
            # j_start_idx is relative to the V-pruned query, hence the offset.
            chain.add_tags('j_start_idx|%d'%(v_end_idx+j_start_idx))
        t8 = time.time()
        # only attempt D alignment if both V and J were successful
        if bestVseg != '' and bestJseg != '':
            chain.junction = query
            chain.cdr3 = len(query)
            # compute hashes from junction sequence using mini seed patterns
            queryannot,querykeys = vdj_aligner.seq2kmerannot(query,self.miniseedpatterns)
            t9 = time.time()
            Dscores_hash = {}
            # for each reference D segment and each pattern, how many shared k-mers are there?
            for Dseg in refseq.IGHD_seqs.keys():
                score = 0
                for pattern in self.miniseedpatterns:
                    score += len( self.Dseqlistkeysmini[Dseg][pattern] & querykeys[pattern] )
                Dscores_hash[Dseg] = score
            # get numCrudeDCandidates highest scores in Dscores and store their names in descending order
            goodDseglist = [ seg[0] for seg in vdj_aligner.dict2sorteddecreasingitemlist(Dscores_hash,'value')[0:self.numCrudeDCandidates] ]
            if verbose:
                print goodDseglist
            t10 = time.time()
            bestDseg = ''
            bestDscore = 4 # derived from calibration data 20090710
            bestDscoremat = []
            bestDtracemat = []
            # perform Smith-Waterman on top D seg candidates and remember which had the highest score
            for goodDseg in goodDseglist:
                # C implementation:
                # carve out memory
                # note that we are using zero initial conditions, so matrices are initialize too
                # notation is like Durbin p.29
                seq1 = refseq.IGHD_seqs[goodDseg]
                seq2 = query
                scores = np.zeros( [len(seq1)+1, len(seq2)+1] )
                trace = np.zeros( [len(seq1)+1, len(seq2)+1], dtype=np.int)
                alignmentcore.alignSW( scores, trace, seq1, seq2 )
                # pure python:
                #scores,trace = vdj_aligner.alignSW( refseq.IGHD_seqs[goodDseg], query )
                currscore = vdj_aligner.scoreDalign(scores)
                if currscore > bestDscore:
                    bestDscore = currscore
                    bestDseg = goodDseg
                    bestDscoremat = scores
                    bestDtracemat = trace
            t11 = time.time()
            chain.d = bestDseg
        else:
            bestDseg = ''
            t9 = t8
            t10 = t8
            t11 = t8
        if verbose:
            print t1-t0, "Full query hashes"
            print t2-t1, "Comparing hashes to V database"
            print t3-t2, "V seg NW alignment"
            print t4-t3, "Construct alignment and prune V region off"
            print t5-t4, "Pruned query hashes"
            print t6-t5, "Comparing hashes to J database"
            print t7-t6, "J seg NW alignment"
            print t8-t7, "Construct alignment and prune J region off"
            print t9-t8, "Pruned query hashes (junction only)"
            print t10-t9, "Comparing hashes to D database"
            print t11-t10, "D seg SW alignment"
            print t11-t0, "Total time"
        # NOTE(review): when V or J fail, chain.d / chain.junction are never
        # assigned here — confirm ImmuneChain provides defaults for them.
        return chain.v, chain.d, chain.j, chain.junction
def seq2posstrand(self,seq):
seqannot,seqkeys = vdj_aligner.seq2kmerannot(seq,[self.patternPos])
seqwords = seqkeys[self.patternPos]
strandid = 1
if len(self.negset & seqwords) > len(self.posset & seqwords):
strandid = -1
return strandid
@staticmethod
def seq2kmerannot(seq,patterns):
"""Given sequence and patterns, for each pattern, compute all corresponding k-mers from sequence.
The result is seqannot[pattern][key]=[pos1,pos2,...,posN] in seq
seqkeys[pattern] = set([kmers])
"""
seqannot = {}
patlens = []
for pattern in patterns:
patlens.append(len(pattern))
seqannot[pattern] = {}
maxpatlen = max(patlens)
for i in xrange(len(seq)):
word = seq[i:i+maxpatlen]
for pattern in patterns:
patlen = len(pattern)
if len(word) >= patlen:
key = ''
for j in xrange(patlen):
if pattern[j] == '1':
key += word[j]
prevkmers = seqannot[pattern].get(key,[])
seqannot[pattern][key] = prevkmers + [i]
seqkeys = {}
for pattern in patterns:
seqkeys[pattern] = set( seqannot[pattern].keys() )
return seqannot,seqkeys
@staticmethod
def seqdict2kmerannot(seqdict,patterns):
seqlistannot = {}
seqlistkeys = {}
for seq in seqdict.iteritems():
seqannot,seqkeys = vdj_aligner.seq2kmerannot(seq[1],patterns)
seqlistannot[seq[0]] = {}
seqlistkeys[seq[0]] = {}
for pattern in patterns:
seqlistannot[seq[0]][pattern] = seqannot[pattern]
seqlistkeys[seq[0]][pattern] = seqkeys[pattern]
return seqlistannot,seqlistkeys
    @staticmethod
    def pruneVregion( alnref, alnrefcoords, alnquery, alnquerycoords, refID, queryseq ):
        """Prune V region out of query sequence based on alignment.

        Given ref and query alignments of V region, refID, and the original
        query sequence, return a sequence with the V region cut out, leaving
        the 2nd-CYS. Also needs query alignment coords.  Returns the tuple
        (pruned_query, v_end_idx).
        """
        #DEBUG
        # # check that alnref actually has the whole reference segment
        # # otherwise, I would need to pass something like alnrefcoords
        # if alnref.replace('-','') != refseq.IGHV_seqs[refID]:
        #     raise Exception, "Aligned reference segment is not equal to vdj.refseq reference segment."
        FR3end = refseq.IGHV_offset[refID] - alnrefcoords[0]    # first candidate position
        #FR3end = refseq.IGHV_offset[refID]     # first candidate position
        refgaps = alnref[:FR3end].count('-')    # count gaps up to putative CYS pos
        seengaps = 0
        #while refgaps != 0:     # iteratively find all gaps up to the CYS
        while refgaps > 0:      # iteratively find all gaps up to the CYS
            seengaps += refgaps
            FR3end   += refgaps     # adjust for gaps in ref alignment
            refgaps   = alnref[:FR3end].count('-') - seengaps   # any add'l gaps?
        querygaps = alnquery[:FR3end].count('-')
        # v_end_idx = idx of start of aln of query + distance into aln - # of gaps
        v_end_idx = alnquerycoords[0] + FR3end - querygaps
        return (queryseq[v_end_idx:], v_end_idx)
    @staticmethod
    def pruneJregion( alnref, alnrefcoords, alnquery, alnquerycoords, refID, queryseq ):
        """Prune J region out of query sequence based on alignment.

        Given ref and query alignments of J region, refID, and the original
        query sequence, return a sequence with the J region cut out, leaving
        the J-TRP. Also needs query alignment coords.  Returns the tuple
        (pruned_query, j_start_idx).
        """
        #DEBUG
        # # check that alnref actually has the whole reference segment
        # # otherwise, I would need to pass something like alnrefcoords
        # if alnref.replace('-','') != refseq.IGHJ_seqs[refID]:
        #     raise Exception, "Aligned reference segment is not equal to vdj.refseq reference segment."
        FR4start = refseq.IGHJ_offset[refID] - alnrefcoords[0]  # first candidate position of J-TRP start
        refgaps = alnref[:FR4start].count('-')  # count gaps up to putative TRP pos
        seengaps = 0
        #while refgaps != 0:     # iteratively find all gaps up to the TRP
        while refgaps > 0:      # iteratively find all gaps up to the TRP
            seengaps += refgaps
            FR4start += refgaps     # adjust for gaps in ref alignment
            refgaps   = alnref[:FR4start].count('-') - seengaps     # any add'l gaps?
        querygaps = alnquery[:FR4start].count('-')
        # j_start_idx = idx of start of aln of query + distance into aln - # of gaps + 3 nt for J-TRP
        j_start_idx = alnquerycoords[0] + FR4start - querygaps + 3
        return (queryseq[:j_start_idx],j_start_idx)
    @staticmethod
    def construct_alignment(seq1,seq2,scoremat,tracemat):
        """Construct alignment of ref segment to query from score and trace matrices.

        Walks the traceback from the best cell on the final row (or final
        column, if seq1 is the longer sequence) back toward the origin,
        emitting '-' for gaps.  Returns
        (aln1, (aln1start, aln1end), aln2, (aln2start, aln2end)) where the
        coord pairs are the aligned spans within seq1 and seq2.
        NOTE(review): the `raise Exception, "..."` statements below are
        Python-2-only syntax.
        """
        nrows,ncols = scoremat.shape
        # do some error checking
        if len(seq1)+1 != nrows or len(seq2)+1 != ncols:
            raise Exception, "nrows and ncols must be equal to len(seq1)+1 and len(seq2)+1"
        #DEBUG
        # if not nrows <= ncols:
        #     raise Exception, "score matrix must have nrows < ncols"
        # if not len(seq1) <= len(seq2):
        #     raise Exception, "len of seq1 must be smaller than seq2"
        # translate integer traces to coords
        deltas = {
            0 : (1,1),
            1 : (1,0),
            2 : (0,1),
            3 : (0,0)
            }
        # compute col where alignment should start
        if nrows <= ncols:
            col = np.argmax( scoremat[nrows-1,:] )
            row = nrows-1
        else:
            col = ncols-1
            row = np.argmax( scoremat[:,ncols-1] )
        #DEBUG
        # col = np.argmax( scoremat[nrows-1,:] )
        # row = nrows-1
        # if row is coord in matrix, row-1 is coord in seq
        aln1 = seq1[row-1]
        aln2 = seq2[col-1]
        aln1end = row
        aln2end = col
        #DEBUG
        #while row-1 > 0:
        while (row-1 > 0) and (col-1 > 0):
            # compute direction of moves
            rowchange,colchange = deltas[ tracemat[row,col] ]
            # WORKS WITH PURE PYTHON alignNW trace return
            #rowchange = row-tracemat[row,col][0]
            #colchange = col-tracemat[row,col][1]
            # emit appropriate symbols
            if rowchange == 1:
                row -= 1
                aln1 = seq1[row-1] + aln1
            elif rowchange == 0:
                aln1 = '-' + aln1
            else:
                raise Exception, "Trace matrix contained jump of greater than one row/col."
            if colchange == 1:
                col -= 1
                aln2 = seq2[col-1] + aln2
            elif colchange == 0:
                aln2 = '-' + aln2
            else:
                raise Exception, "Trace matrix contained jump of greater than one row/col."
        aln1start = row-1
        aln2start = col-1
        return aln1, (aln1start,aln1end), aln2, (aln2start,aln2end)
@staticmethod
def scoreVJalign(scorematrix):
"""Computes score of V alignment given Needleman-Wunsch score matrix
ASSUMES num rows < num cols, i.e., refseq V seg is on vertical axis
"""
nrows,ncols = scorematrix.shape
if nrows <= ncols:
return np.max( scorematrix[nrows-1,:] )
else:
return np.max( scorematrix[:,ncols-1] )
#DEBUG
#OLD WAY
# nrows,ncols = scorematrix.shape
#
# if not nrows < ncols:
# raise Exception, "score matrix must have nrows < ncols"
#
# return np.max( scorematrix[nrows-1,:] )
@staticmethod
def scoreDalign(scorematrix):
"""Computes score of D alignment given Smith-Waterman score matrix
"""
return np.max( scorematrix )
@staticmethod
def dict2sorteddecreasingitemlist(dictionary,keyorvalue='value'):
pos = {'key':0, 'value':1}
di = dictionary.items()
di.sort(key=operator.itemgetter(pos[keyorvalue]))
di.reverse()
return di
@staticmethod
def seqdict2revcompseqdict(seqdict):
revcompdict = {}
for item in seqdict.iteritems():
revcompdict[item[0]] = sequtils.reverse_complement(item[1])
return revcompdict
The banded aligner is now actually linked in.
import time
import operator
import numpy as np
import subprocess
import vdj
import refseq
import sequtils
import alignmentcore
class vdj_aligner(object):
    def __init__(self, verbose=False):
        """Build the k-mer hash databases for the V/D/J reference segments
        and spawn the external banded aligner subprocess."""
        t0 = time.time()
        # number of candidate segments kept after the crude k-mer screen
        self.numCrudeVCandidates = 5
        self.numCrudeDCandidates = 10
        self.numCrudeJCandidates = 2
        # Define seed patterns
        # (spaced seeds: '1' marks the positions sampled from each window)
        patternA='111011001011010111'
        patternB='1111000100010011010111'
        patternC='111111111111'
        patternD='110100001100010101111'
        patternE='1110111010001111'
        self.seedpatterns = [patternA,patternB,patternC,patternD,patternE]
        # shorter seeds used for the (short) D segments
        self.miniseedpatterns = ['111011','110111']
        # contiguous 12-mer used for strand identification
        self.patternPos = '111111111111'
        # Generate hashes from reference data for sequence alignment
        self.Vseqlistannot,self.Vseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHV_seqs, self.seedpatterns )
        self.Dseqlistannotmini,self.Dseqlistkeysmini = vdj_aligner.seqdict2kmerannot( refseq.IGHD_seqs, self.miniseedpatterns )
        self.Jseqlistannot,self.Jseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHJ_seqs, self.seedpatterns )
        # Generate reference data for positive sequence ID
        posVseqlistannot,posVseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHV_seqs, [self.patternPos] )
        posJseqlistannot,posJseqlistkeys = vdj_aligner.seqdict2kmerannot( refseq.IGHJ_seqs, [self.patternPos] )
        negVseqlistannot,negVseqlistkeys = vdj_aligner.seqdict2kmerannot( vdj_aligner.seqdict2revcompseqdict(refseq.IGHV_seqs), [self.patternPos] )
        negJseqlistannot,negJseqlistkeys = vdj_aligner.seqdict2kmerannot( vdj_aligner.seqdict2revcompseqdict(refseq.IGHJ_seqs), [self.patternPos] )
        # collect possible keys
        posset = set([])
        for key in posVseqlistkeys.keys():
            posset.update(posVseqlistkeys[key][self.patternPos])
        for key in posJseqlistkeys.keys():
            posset.update(posJseqlistkeys[key][self.patternPos])
        negset = set([])
        for key in negVseqlistkeys.keys():
            negset.update(negVseqlistkeys[key][self.patternPos])
        for key in negJseqlistkeys.keys():
            negset.update(negJseqlistkeys[key][self.patternPos])
        # get keys unique to positive or negative versions of reference set
        possetnew = posset - negset
        negsetnew = negset - posset
        self.posset = possetnew
        self.negset = negsetnew
        t1 = time.time()
        if verbose: print "Database init:", t1-t0
        # spawn the aligner
        # NOTE(review): 'imgtvseq' is not defined anywhere in this module, so
        # this line raises NameError at construction time.  Confirm what
        # argument ./malign expects before fixing.
        self.aligner = subprocess.Popen(["./malign",imgtvseq], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def align_external(self, seq):
self.aligner.stdin.write(seq + "\n")
ch = self.aligner.stdout.read(1)
# burn off newlines if they exist
while ch == "\n":
ch = self.aligner.stdout.read(1)
# collect the output. There's a deadlock bug here preventing us from using readline()
while ch != "\n":
acc += ch
ch = self.aligner.stdout.read(1)
return acc
def align_seq(self,seq,verbose=False):
chain = vdj.ImmuneChain(descr='sequence',seq=seq)
self.align_chain(chain,verbose)
return chain
    def align_chain(self,chain,verbose=False):
        """Align chain.seq against the V and J references (and, when both
        succeed, the D references within the remaining junction).

        Mutates chain in place (chain.v/.d/.j, chain.junction, chain.cdr3,
        plus v_end_idx/j_start_idx tags) and returns
        (chain.v, chain.d, chain.j, chain.junction).
        """
        query = chain.seq
        t0 = time.time()
        # compute hashes from query seq
        queryannot,querykeys = vdj_aligner.seq2kmerannot(query,self.seedpatterns)
        t1 = time.time()
        Vscores_hash = {}
        # for each reference V segment and each pattern, how many shared k-mers are there?
        for Vseg in refseq.IGHV_seqs.keys():
            score = 0
            for pattern in self.seedpatterns:
                score += len( self.Vseqlistkeys[Vseg][pattern] & querykeys[pattern] )
            Vscores_hash[Vseg] = score
        # get numCrudeVCandidates highest scores in Vscores and store their names in descending order
        goodVseglist = [ seg[0] for seg in vdj_aligner.dict2sorteddecreasingitemlist(Vscores_hash,'value')[0:self.numCrudeVCandidates] ]
        if verbose:
            print goodVseglist
        t2 = time.time()
        bestVseg = ''
        bestVscore = 100 # derived from calibration data 20090710
        bestVscoremat = []
        bestVtracemat = []
        # perform Needleman-Wunsch on top V seg candidates and remember which had the highest score
        #for goodVseg in goodVseglist:
        #    # C implementation:
        #    # carve out memory
        #    # note that we are using zero initial conditions, so matrices are initialized too
        #    # notation is like Durbin p.29
        #    seq1 = refseq.IGHV_seqs[goodVseg]
        #    seq2 = query
        #    scores = np.zeros( [len(seq1)+1, len(seq2)+1] )
        #    Ix = np.zeros( [len(seq1)+1, len(seq2)+1] )
        #    Iy = np.zeros( [len(seq1)+1, len(seq2)+1] )
        #    trace = np.zeros( [len(seq1)+1, len(seq2)+1], dtype=np.int)
        #    alignmentcore.alignNW( scores, Ix, Iy, trace, seq1, seq2 )
        #
        #    currscore = vdj_aligner.scoreVJalign(scores)
        #    if currscore > bestVscore:
        #        bestVscore = currscore
        #        bestVseg = goodVseg
        #        bestVscoremat = scores
        #        bestVtracemat = trace
        #chain.v = bestVseg
        # NOTE(review): 'align_external' and 'seq' are unbound in this scope,
        # so this line raises NameError as written; it presumably should be
        # self.align_external(query).  Note also that bestVseg then stays '',
        # so the V-pruning branch below never runs -- confirm intent.
        chain.v = align_external(seq)
        t3 = time.time()
        # reconstruct the alignment and chop off V region through beginning of CDR3 (IMGT)
        v_end_idx = 0 # to ensure it gets defined (for processing j_start_idx below)
        if bestVseg != '':
            Valnref,Valnrefcoords,Valnquery,Valnquerycoords = vdj_aligner.construct_alignment( refseq.IGHV_seqs[bestVseg], query, bestVscoremat, bestVtracemat )
            query,v_end_idx = vdj_aligner.pruneVregion( Valnref, Valnrefcoords, Valnquery, Valnquerycoords, bestVseg, query )
            chain.add_tags('v_end_idx|%d'%v_end_idx)
        t4 = time.time()
        # compute hashes from (pruned) query seq (junction + J)
        queryannot,querykeys = vdj_aligner.seq2kmerannot(query,self.seedpatterns)
        t5 = time.time()
        Jscores_hash = {}
        # for each reference J segment and each pattern, how many shared k-mers are there?
        for Jseg in refseq.IGHJ_seqs.keys():
            score = 0
            for pattern in self.seedpatterns:
                score += len( self.Jseqlistkeys[Jseg][pattern] & querykeys[pattern] )
            Jscores_hash[Jseg] = score
        # get numCrudeJCandidates highest scores in Jscores and store their names in descending order
        goodJseglist = [ seg[0] for seg in vdj_aligner.dict2sorteddecreasingitemlist(Jscores_hash,'value')[0:self.numCrudeJCandidates] ]
        if verbose:
            print goodJseglist
        t6 = time.time()
        bestJseg = ''
        bestJscore = 13 # derived from calibration data 20090710
        bestJscoremat = []
        bestJtracemat = []
        # perform Needleman-Wunsch on top J seg candidates and remember which had the highest score
        for goodJseg in goodJseglist:
            # C implementation:
            # carve out memory
            # note that we are using zero initial conditions, so matrices are initialize too
            # notation is like Durbin p.29
            seq1 = refseq.IGHJ_seqs[goodJseg]
            seq2 = query
            scores = np.zeros( [len(seq1)+1, len(seq2)+1] )
            Ix = np.zeros( [len(seq1)+1, len(seq2)+1] )
            Iy = np.zeros( [len(seq1)+1, len(seq2)+1] )
            # NOTE(review): np.int was removed in NumPy >= 1.24; on modern
            # NumPy this needs to become int (or np.int64) -- confirm target
            trace = np.zeros( [len(seq1)+1, len(seq2)+1], dtype=np.int)
            alignmentcore.alignNW( scores, Ix, Iy, trace, seq1, seq2 )
            # pure python:
            #scores,trace = vdj_aligner.alignNW( refseq.IGHJ_seqs[goodJseg], query )
            currscore = vdj_aligner.scoreVJalign(scores)
            if currscore > bestJscore:
                bestJscore = currscore
                bestJseg = goodJseg
                bestJscoremat = scores
                bestJtracemat = trace
        chain.j = bestJseg
        t7 = time.time()
        # reconstruct the alignment and chop off J region after the TRP (IMGT)
        if bestJseg != '':
            Jalnref,Jalnrefcoords,Jalnquery,Jalnquerycoords = vdj_aligner.construct_alignment( refseq.IGHJ_seqs[bestJseg], query, bestJscoremat, bestJtracemat )
            (query,j_start_idx) = vdj_aligner.pruneJregion( Jalnref, Jalnrefcoords, Jalnquery, Jalnquerycoords, bestJseg, query )
            # the tag records the J start in coords of the original, unpruned
            # sequence, hence the v_end_idx offset
            chain.add_tags('j_start_idx|%d'%(v_end_idx+j_start_idx))
        t8 = time.time()
        # only attempt D alignment if both V and J were successful
        if bestVseg != '' and bestJseg != '':
            chain.junction = query
            # NOTE(review): this stores the junction *length*; confirm that
            # consumers of chain.cdr3 expect an int rather than the sequence
            chain.cdr3 = len(query)
            # compute hashes from junction sequence using mini seed patterns
            queryannot,querykeys = vdj_aligner.seq2kmerannot(query,self.miniseedpatterns)
            t9 = time.time()
            Dscores_hash = {}
            # for each reference D segment and each pattern, how many shared k-mers are there?
            for Dseg in refseq.IGHD_seqs.keys():
                score = 0
                for pattern in self.miniseedpatterns:
                    score += len( self.Dseqlistkeysmini[Dseg][pattern] & querykeys[pattern] )
                Dscores_hash[Dseg] = score
            # get numCrudeDCandidates highest scores in Dscores and store their names in descending order
            goodDseglist = [ seg[0] for seg in vdj_aligner.dict2sorteddecreasingitemlist(Dscores_hash,'value')[0:self.numCrudeDCandidates] ]
            if verbose:
                print goodDseglist
            t10 = time.time()
            bestDseg = ''
            bestDscore = 4 # derived from calibration data 20090710
            bestDscoremat = []
            bestDtracemat = []
            # perform Smith-Waterman on top D seg candidates and remember which had the highest score
            for goodDseg in goodDseglist:
                # C implementation:
                # carve out memory
                # note that we are using zero initial conditions, so matrices are initialize too
                # notation is like Durbin p.29
                seq1 = refseq.IGHD_seqs[goodDseg]
                seq2 = query
                scores = np.zeros( [len(seq1)+1, len(seq2)+1] )
                trace = np.zeros( [len(seq1)+1, len(seq2)+1], dtype=np.int)
                alignmentcore.alignSW( scores, trace, seq1, seq2 )
                # pure python:
                #scores,trace = vdj_aligner.alignSW( refseq.IGHD_seqs[goodDseg], query )
                currscore = vdj_aligner.scoreDalign(scores)
                if currscore > bestDscore:
                    bestDscore = currscore
                    bestDseg = goodDseg
                    bestDscoremat = scores
                    bestDtracemat = trace
            t11 = time.time()
            chain.d = bestDseg
        else:
            bestDseg = ''
            # no D stage ran; collapse its timing points
            t9 = t8
            t10 = t8
            t11 = t8
        if verbose:
            print t1-t0, "Full query hashes"
            print t2-t1, "Comparing hashes to V database"
            print t3-t2, "V seg NW alignment"
            print t4-t3, "Construct alignment and prune V region off"
            print t5-t4, "Pruned query hashes"
            print t6-t5, "Comparing hashes to J database"
            print t7-t6, "J seg NW alignment"
            print t8-t7, "Construct alignment and prune J region off"
            print t9-t8, "Pruned query hashes (junction only)"
            print t10-t9, "Comparing hashes to D database"
            print t11-t10, "D seg SW alignment"
            print t11-t0, "Total time"
        return chain.v, chain.d, chain.j, chain.junction
def seq2posstrand(self,seq):
seqannot,seqkeys = vdj_aligner.seq2kmerannot(seq,[self.patternPos])
seqwords = seqkeys[self.patternPos]
strandid = 1
if len(self.negset & seqwords) > len(self.posset & seqwords):
strandid = -1
return strandid
@staticmethod
def seq2kmerannot(seq,patterns):
"""Given sequence and patterns, for each pattern, compute all corresponding k-mers from sequence.
The result is seqannot[pattern][key]=[pos1,pos2,...,posN] in seq
seqkeys[pattern] = set([kmers])
"""
seqannot = {}
patlens = []
for pattern in patterns:
patlens.append(len(pattern))
seqannot[pattern] = {}
maxpatlen = max(patlens)
for i in xrange(len(seq)):
word = seq[i:i+maxpatlen]
for pattern in patterns:
patlen = len(pattern)
if len(word) >= patlen:
key = ''
for j in xrange(patlen):
if pattern[j] == '1':
key += word[j]
prevkmers = seqannot[pattern].get(key,[])
seqannot[pattern][key] = prevkmers + [i]
seqkeys = {}
for pattern in patterns:
seqkeys[pattern] = set( seqannot[pattern].keys() )
return seqannot,seqkeys
@staticmethod
def seqdict2kmerannot(seqdict,patterns):
seqlistannot = {}
seqlistkeys = {}
for seq in seqdict.iteritems():
seqannot,seqkeys = vdj_aligner.seq2kmerannot(seq[1],patterns)
seqlistannot[seq[0]] = {}
seqlistkeys[seq[0]] = {}
for pattern in patterns:
seqlistannot[seq[0]][pattern] = seqannot[pattern]
seqlistkeys[seq[0]][pattern] = seqkeys[pattern]
return seqlistannot,seqlistkeys
@staticmethod
    def pruneVregion( alnref, alnrefcoords, alnquery, alnquerycoords, refID, queryseq ):
        """Prune V region out of query sequence based on alignment.
        Given ref and query alignments of V region, refID, and the original
        query sequence, return a sequence with the V region cut out, leaving
        the 2nd-CYS. Also needs query alignment coords.

        Returns (pruned_query, v_end_idx): the suffix of queryseq starting at
        the putative 2nd-CYS position, and that start index itself.
        """
        #DEBUG
        # # check that alnref actually has the whole reference segment
        # # otherwise, I would need to pass something like alnrefcoords
        # if alnref.replace('-','') != refseq.IGHV_seqs[refID]:
        #     raise Exception, "Aligned reference segment is not equal to vdj.refseq reference segment."
        # candidate CYS position in alignment coords: reference offset shifted
        # by where the aligned reference region begins
        FR3end = refseq.IGHV_offset[refID] - alnrefcoords[0] # first candidate position
        #FR3end = refseq.IGHV_offset[refID] # first candidate position
        refgaps = alnref[:FR3end].count('-') # count gaps up to putative CYS pos
        seengaps = 0
        #while refgaps != 0: # iteratively find all gaps up to the CYS
        # each gap in the reference pushes the CYS further right in alignment
        # coords, which may expose yet more gaps; iterate until stable
        while refgaps > 0: # iteratively find all gaps up to the CYS
            seengaps += refgaps
            FR3end += refgaps # adjust if for gaps in ref alignment
            refgaps = alnref[:FR3end].count('-') - seengaps # any add'l gaps?
        querygaps = alnquery[:FR3end].count('-')
        # v_end_idx = idx of start of aln of query + distance into aln - # of gaps
        v_end_idx = alnquerycoords[0] + FR3end - querygaps
        return (queryseq[v_end_idx:], v_end_idx)
@staticmethod
    def pruneJregion( alnref, alnrefcoords, alnquery, alnquerycoords, refID, queryseq ):
        """Prune J region out of query sequence based on alignment.
        Given ref and query alignments of J region, refID, and the original
        query sequence, return a sequence with the J region cut out, leaving
        the J-TRP. Also needs query alignment coords.

        Returns (pruned_query, j_start_idx): the prefix of queryseq ending
        just after the J-TRP codon, and that end index.
        """
        #DEBUG
        # # check that alnref actually has the whole reference segment
        # # otherwise, I would need to pass something like alnrefcoords
        # if alnref.replace('-','') != refseq.IGHJ_seqs[refID]:
        #     raise Exception, "Aligned reference segment is not equal to vdj.refseq reference segment."
        FR4start = refseq.IGHJ_offset[refID] - alnrefcoords[0] # first candidate position of J-TRP start
        refgaps = alnref[:FR4start].count('-') # count gaps up to putative TRP pos
        seengaps = 0
        #while refgaps != 0: # iteratively find all gaps up to the TRP
        # each gap in the reference pushes the TRP further right in alignment
        # coords, which may expose yet more gaps; iterate until stable
        while refgaps > 0: # iteratively find all gaps up to the TRP
            seengaps += refgaps
            FR4start += refgaps # adjust for gaps in ref alignment
            refgaps = alnref[:FR4start].count('-') - seengaps # any add'l gaps?
        querygaps = alnquery[:FR4start].count('-')
        # j_start_idx = idx of start of aln of query + distance into aln - # of gaps + 3 nt for J-TRP
        j_start_idx = alnquerycoords[0] + FR4start - querygaps + 3
        return (queryseq[:j_start_idx],j_start_idx)
@staticmethod
def construct_alignment(seq1,seq2,scoremat,tracemat):
"""Construct alignment of ref segment to query from score and trace matrices."""
nrows,ncols = scoremat.shape
# do some error checking
if len(seq1)+1 != nrows or len(seq2)+1 != ncols:
raise Exception, "nrows and ncols must be equal to len(seq1)+1 and len(seq2)+1"
#DEBUG
# if not nrows <= ncols:
# raise Exception, "score matrix must have nrows < ncols"
# if not len(seq1) <= len(seq2):
# raise Exception, "len of seq1 must be smaller than seq2"
# translate integer traces to coords
deltas = {
0 : (1,1),
1 : (1,0),
2 : (0,1),
3 : (0,0)
}
# compute col where alignment should start
if nrows <= ncols:
col = np.argmax( scoremat[nrows-1,:] )
row = nrows-1
else:
col = ncols-1
row = np.argmax( scoremat[:,ncols-1] )
#DEBUG
# col = np.argmax( scoremat[nrows-1,:] )
# row = nrows-1
# if row is coord in matrix, row-1 is coord in seq
aln1 = seq1[row-1]
aln2 = seq2[col-1]
aln1end = row
aln2end = col
#DEBUG
#while row-1 > 0:
while (row-1 > 0) and (col-1 > 0):
# compute direction of moves
rowchange,colchange = deltas[ tracemat[row,col] ]
# WORKS WITH PURE PYTHON alignNW trace return
#rowchange = row-tracemat[row,col][0]
#colchange = col-tracemat[row,col][1]
# emit appropriate symbols
if rowchange == 1:
row -= 1
aln1 = seq1[row-1] + aln1
elif rowchange == 0:
aln1 = '-' + aln1
else:
raise Exception, "Trace matrix contained jump of greater than one row/col."
if colchange == 1:
col -= 1
aln2 = seq2[col-1] + aln2
elif colchange == 0:
aln2 = '-' + aln2
else:
raise Exception, "Trace matrix contained jump of greater than one row/col."
aln1start = row-1
aln2start = col-1
return aln1, (aln1start,aln1end), aln2, (aln2start,aln2end)
@staticmethod
def scoreVJalign(scorematrix):
"""Computes score of V alignment given Needleman-Wunsch score matrix
ASSUMES num rows < num cols, i.e., refseq V seg is on vertical axis
"""
nrows,ncols = scorematrix.shape
if nrows <= ncols:
return np.max( scorematrix[nrows-1,:] )
else:
return np.max( scorematrix[:,ncols-1] )
#DEBUG
#OLD WAY
# nrows,ncols = scorematrix.shape
#
# if not nrows < ncols:
# raise Exception, "score matrix must have nrows < ncols"
#
# return np.max( scorematrix[nrows-1,:] )
@staticmethod
def scoreDalign(scorematrix):
"""Computes score of D alignment given Smith-Waterman score matrix
"""
return np.max( scorematrix )
@staticmethod
def dict2sorteddecreasingitemlist(dictionary,keyorvalue='value'):
pos = {'key':0, 'value':1}
di = dictionary.items()
di.sort(key=operator.itemgetter(pos[keyorvalue]))
di.reverse()
return di
@staticmethod
def seqdict2revcompseqdict(seqdict):
revcompdict = {}
for item in seqdict.iteritems():
revcompdict[item[0]] = sequtils.reverse_complement(item[1])
return revcompdict
|
# -*- coding: utf-8 -*-
# TODO: Temporarily Disabled
# import os
# os.environ["DYLD_LIBRARY_PATH"] = "../lib/python2.7/site-packages/savReaderWriter/spssio/macos"
from collections import OrderedDict
# TODO: Temporarily Disabled
# from savReaderWriter import SavReader, SavHeaderReader
import csv
import pandas as pd
import numpy as np
import iribaker
import magic
import os
import traceback
from app import config
class Adapter(object):
    """Base class for dataset adapters.

    Derives the dataset name and IRI from the dataset's filename.  Concrete
    subclasses are expected to set self.data (a pandas DataFrame),
    self.header and self.metadata in their __init__.
    """
    def __init__(self, dataset):
        # dataset is a dict; 'filename' is required here, and subclasses
        # additionally read 'format', 'header' and (optionally) 'metadata'
        self.dataset = dataset
        (head, dataset_local_name) = os.path.split(dataset['filename'])
        (dataset_name, extension) = os.path.splitext(dataset_local_name)
        self.dataset_name = dataset_name
        # mint a dereferenceable IRI for the dataset under the configured base
        self.dataset_uri = iribaker.to_iri(config.QBR_BASE + dataset_name)
        print "Initialized adapter"
        return
    def get_data(self):
        # the pandas DataFrame loaded by the subclass
        return self.data
    def get_reader(self):
        # NOTE(review): self.reader is only ever set by the (disabled)
        # SavAdapter; the other adapters never define it -- confirm callers
        return self.reader
    def get_header(self):
        # list of column names, or None when the file has no header row
        return self.header
    def get_dataset_uri(self):
        return self.dataset_uri
    def get_dataset_name(self):
        return self.dataset_name
    def get_metadata(self):
        # return the metadata mapping, or None when it is unset/empty
        if self.metadata:
            return self.metadata
        else:
            return None
    def load_metadata(self):
        """Load the ';'-separated metadata file named by dataset['metadata'],
        or fall back to an identity mapping built from the header.

        Returns an OrderedDict (empty when neither source is available).
        """
        metadata = OrderedDict()
        if 'metadata' in self.dataset:
            print "Loading metadata..."
            metadata_filename = self.dataset['metadata']
            with open(metadata_filename, "r") as metadata_file:
                metadata_reader = csv.reader(metadata_file, delimiter=";", quotechar="\"")
                for l in metadata_reader:
                    # first column is the key, second column the value
                    metadata[l[0].strip()] = l[1].strip()
        elif self.header:
            print "No metadata... reconstructing from header"
            for h in self.header:
                metadata[h] = h
        else:
            print "No metadata or header"
        return metadata
    def validate_header(self):
        """Checks whether the header in the file and the metadata provided are exactly the same"""
        if self.header and self.metadata:
            # Find the difference between header and metadata keys
            diff = set(self.header).difference(set(self.metadata.keys()))
            if len(diff) > 0:
                print "Header and metadata do *not* correspond"
                # print zip(self.header,self.metadata.keys())
                return False
            else:
                print "Header and metadata are aligned"
                return True
        else:
            print "No header or no metadata present"
            return False
    def get_values(self):
        """
        Return all unique values, and converts it to samples for each column.

        Returns {column: variable-description dict}, where each description
        carries minted variable/value/codelist IRIs plus per-value counts.
        """
        # Get all unique values for each column
        stats = {}
        for col in self.data.columns:
            istats = []
            counts = self.data[col].value_counts()
            # print self.data[col][0]
            for i in counts.index:
                print col, i
                # The URI for the variable value
                i_uri = iribaker.to_iri(u"{}/value/{}/{}"
                                        .format(self.dataset_uri, col, i))
                # Capture the counts and label in a dictionary for the value
                stat = {
                    'original': {
                        'uri': i_uri,
                        'label': i
                    },
                    'label': i,
                    'uri': i_uri,
                    'count': counts[i]
                }
                # And append it to the list of variable values
                istats.append(stat)
            # The URI for the variable
            variable_uri = iribaker.to_iri("{}/variable/{}"
                                           .format(self.dataset_uri, col))
            # The URI for a (potential) codelist for the variable
            codelist_uri = iribaker.to_iri("{}/codelist/{}"
                                           .format(self.dataset_uri, col))
            codelist_label = "Codelist generated from the values for '{}'".format(col)
            codelist = {
                'original': {
                    'uri': codelist_uri,
                    'label': codelist_label
                },
                'uri': codelist_uri,
                'label': codelist_label
            }
            stats[col] = {
                'original': {
                    'uri': variable_uri,
                    'label': col
                },
                'uri': variable_uri,
                'label': col,
                'description': "The variable '{}' as taken "
                               "from the '{}' dataset."
                               .format(col, self.dataset_name),
                'category': 'identifier',
                'type': 'http://purl.org/linked-data/cube#DimensionProperty', # This is the default
                'values': istats,
                'codelist': codelist
            }
        return stats
# TODO: Temporarily Disabled
# class SavAdapter(Adapter):
#
# def __init__(self, dataset):
# super(SavAdapter, self).__init__(dataset)
#
# if not dataset['format'] == 'SPSS':
# raise Exception('This is an SPSS adapter, not {}'.format(dataset['format']))
#
# self.filename = dataset['filename']
#
# self.has_header = dataset['header']
#
# self.reader = SavReader(self.filename, ioLocale='en_US.UTF-8')
#
# if self.has_header:
# with SavHeaderReader(self.filename, ioLocale='en_US.UTF-8') as hr:
# self.header = hr.varNames
#
# else :
# self.header = None
#
# self.metadata = self.load_metadata()
#
# print self.validate_header()
# return
#
# def get_examples(self):
# """Returns first 10000 rows, and converts it to samples for each column."""
#
# # Get first 10000 rows
# rows = self.reader.head(10000)
#
# # Assume metadata keys are best (since if no metadata exists, the header
# # will be used to generate it)
# header = self.metadata.keys()
#
# # Convert the rows to a list of dictionaries with keys from the header
# data_dictionaries = [dict(zip(header, [v.strip() if type(v) == str
# else v for v in values ])) for values in rows]
#
# # Convert the list of dictionaries to a dictionary of sets
# data = defaultdict(set)
# for d in data_dictionaries:
# for k, v in d.items():
# data[k].add(v)
#
# json_ready_data = {}
# for k,v in data.items():
# json_ready_data[k] = list(v)[:250]
#
# return json_ready_data
class CsvAdapter(Adapter):
    def __init__(self, dataset):
        """Initializes an adapter for reading a CSV dataset"""
        super(CsvAdapter, self).__init__(dataset)
        if not dataset['format'] == 'text/csv':
            raise Exception('This is a CSV adapter, not {}'.format(dataset['format']))
        self.filename = dataset['filename']
        self.has_header = dataset['header']
        with open(self.filename, 'r') as fn:
            # index_col=False: do not silently promote the first column to the
            # row index -- every column in the file is a data column
            # (index_col=0 made the first column disappear from self.data)
            self.data = pd.read_csv(fn, index_col=False, parse_dates=True, encoding='utf-8')
        if self.has_header:
            self.header = list(self.data.columns)
        else:
            self.header = None
        # load_metadata() falls back to the header, so the header must be
        # determined before it runs; previously self.metadata was read here
        # before ever being assigned, raising AttributeError when the file
        # had no header.  Fall back to the metadata keys once available.
        self.metadata = self.load_metadata()
        if self.header is None and self.metadata:
            self.header = list(self.metadata.keys())
        # single-argument print() is valid in both Python 2 and 3
        print(self.validate_header())
        return
class ExcelAdapter(Adapter):
    def __init__(self, dataset, clio=True):
        """Initializes an adapter for reading an Excel dataset

        clio: when True the file is assumed to be ClioInfra-shaped -- the
        first two worksheet rows are skipped and the per-year columns are
        unpivoted into (year, GDPPC) rows.
        """
        super(ExcelAdapter, self).__init__(dataset)
        if not (dataset['format'] == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' or dataset['format'] == 'application/vnd.ms-excel'):
            raise Exception('This is an Excel adapter, not {}'.format(dataset['format']))
        self.filename = dataset['filename']
        self.has_header = dataset['header']
        # Excel files are binary: open in 'rb' (text mode corrupts the stream
        # on platforms where it differs from binary mode)
        with open(self.filename, 'rb') as fn:
            # If this is ClioInfra data, we skip the first two rows of the Worksheet
            if clio:
                skiprows = [0, 1]
                header = 0
            else:
                skiprows = None
                header = 0
            self.data = pd.read_excel(fn, skiprows=skiprows, header=header)
        if clio:
            # Unpivot the table, excluding the first 6 columns (webmapper ids, country, period)
            id_vars = [
                'Webmapper code',
                'Webmapper numeric code',
                'ccode',
                'country name',
                'start year',
                'end year'
            ]
            self.data = pd.melt(self.data, id_vars=id_vars, var_name='year', value_name='GDPPC')
            # drop rows whose unpivoted value is NaN or infinite
            self.data = self.data[np.isfinite(self.data['GDPPC'])]
        if self.has_header:
            self.header = list(self.data.columns)
        else:
            self.header = None
        # load_metadata() falls back to the header, so the header must be
        # determined before it runs; previously self.metadata was read here
        # before ever being assigned, raising AttributeError when the file
        # had no header.  Fall back to the metadata keys once available.
        self.metadata = self.load_metadata()
        if self.header is None and self.metadata:
            self.header = list(self.metadata.keys())
        # single-argument print() is valid in both Python 2 and 3
        print(self.validate_header())
        return
class TabAdapter(Adapter):
    def __init__(self, dataset):
        """Initializes an adapter for reading a Tab-delimited dataset"""
        super(TabAdapter, self).__init__(dataset)
        if dataset['format'] not in ['text/tab-separated-values', 'text/plain']:
            raise Exception('This is a Tab adapter, not {}'.format(dataset['format']))
        self.filename = dataset['filename']
        self.has_header = dataset['header']
        with open(self.filename, 'r') as fn:
            # pd.DataFrame.from_csv is deprecated (removed in pandas 1.0) and
            # silently used the first column as the index; read_csv with
            # index_col=False keeps every column as a data column, matching
            # the CsvAdapter behavior
            self.data = pd.read_csv(fn, sep='\t', index_col=False)
        if self.has_header:
            self.header = list(self.data.columns)
        else:
            self.header = None
        # load_metadata() falls back to the header, so the header must be
        # determined before it runs; previously self.metadata was read here
        # before ever being assigned, raising AttributeError when the file
        # had no header.  Fall back to the metadata keys once available.
        self.metadata = self.load_metadata()
        if self.header is None and self.metadata:
            self.header = list(self.metadata.keys())
        # single-argument print() is valid in both Python 2 and 3
        print(self.validate_header())
        return
# Maps dataset MIME types to the Adapter subclass that can read them
mappings = {
    # "SPSS": SavAdapter,
    "text/csv": CsvAdapter,
    "text/tab-separated-values": TabAdapter,
    "text/plain": TabAdapter,
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ExcelAdapter,
    "application/vnd.ms-excel": ExcelAdapter
}
def get_adapter(dataset):
    """Pick and instantiate the right Adapter subclass for a dataset dict.

    The MIME type is taken from dataset['format'] when present; otherwise it
    is guessed from the filename extension, then by sniffing the file
    contents, and the guess is written back to dataset['format'].

    Raises KeyError when no adapter exists for the MIME type, and re-raises
    (after printing a traceback) whatever the adapter constructor raises.
    """
    if 'format' in dataset:
        mimetype = dataset['format']
    elif dataset['filename'].endswith('.tsv') or dataset['filename'].endswith('.tab'):
        mimetype = 'text/tab-separated-values'
        # Make sure we set the guessed mimetype as format for the dataset
        dataset['format'] = mimetype
    elif dataset['filename'].endswith('.csv'):
        mimetype = 'text/csv'
        # Make sure we set the guessed mimetype as format for the dataset
        dataset['format'] = mimetype
    else:
        # Sniff the file contents; the with-block guarantees the handle is
        # closed (it was previously opened and never closed)
        with open(dataset['filename'], 'rb') as sniff_fileh:
            try:
                # Perform various checks on the dialect (e.g., lineseparator,
                # delimiter) to make sure it's sane
                dialect = csv.Sniffer().sniff(sniff_fileh.read(1024))
                if dialect.delimiter == ',' or dialect.delimiter == ';':
                    print("Detected CSV")
                    mimetype = 'text/csv'
                elif dialect.delimiter == '\t':
                    print("Detected TAB")
                    mimetype = 'text/tab-separated-values'
                else:
                    # Probably not very wise, but we'll default to the CSV mimetype
                    # and rely on Panda's ability to guess the separator
                    print("Fallback to CSV")
                    mimetype = 'text/csv'
            except csv.Error:
                # File appears not to be in CSV format; try libmagic (not very useful)
                mymagic = magic.Magic(mime=True)
                # close this handle too (it was previously leaked)
                with open(dataset['filename']) as sample_fileh:
                    mimetype = mymagic.from_buffer(sample_fileh.read(1024), mime=True)
        # Make sure we set the guessed mimetype as format for the dataset
        dataset['format'] = mimetype
    try:
        adapterClass = mappings[mimetype]
        adapter = adapterClass(dataset)
        return adapter
    except Exception as e:
        traceback.print_exc()
        raise(e)
    # raise(Exception("No adapter for this file type: '{}'".format(mimetype)))
Fix an issue where pd.DataFrame.from_csv silently takes the first column as the index.
# -*- coding: utf-8 -*-
# TODO: Temporarily Disabled
# import os
# os.environ["DYLD_LIBRARY_PATH"] = "../lib/python2.7/site-packages/savReaderWriter/spssio/macos"
from collections import OrderedDict
# TODO: Temporarily Disabled
# from savReaderWriter import SavReader, SavHeaderReader
import csv
import pandas as pd
import numpy as np
import iribaker
import magic
import os
import traceback
from app import config
class Adapter(object):
    """Base class for dataset adapters.

    Derives the dataset name and IRI from the dataset's filename.  Concrete
    subclasses are expected to set self.data (a pandas DataFrame),
    self.header and self.metadata in their __init__.
    """
    def __init__(self, dataset):
        # dataset is a dict; 'filename' is required here, and subclasses
        # additionally read 'format', 'header' and (optionally) 'metadata'
        self.dataset = dataset
        (head, dataset_local_name) = os.path.split(dataset['filename'])
        (dataset_name, extension) = os.path.splitext(dataset_local_name)
        self.dataset_name = dataset_name
        # mint a dereferenceable IRI for the dataset under the configured base
        self.dataset_uri = iribaker.to_iri(config.QBR_BASE + dataset_name)
        print "Initialized adapter"
        return
    def get_data(self):
        # the pandas DataFrame loaded by the subclass
        return self.data
    def get_reader(self):
        # NOTE(review): self.reader is only ever set by the (disabled)
        # SavAdapter; the other adapters never define it -- confirm callers
        return self.reader
    def get_header(self):
        # list of column names, or None when the file has no header row
        return self.header
    def get_dataset_uri(self):
        return self.dataset_uri
    def get_dataset_name(self):
        return self.dataset_name
    def get_metadata(self):
        # return the metadata mapping, or None when it is unset/empty
        if self.metadata:
            return self.metadata
        else:
            return None
    def load_metadata(self):
        """Load the ';'-separated metadata file named by dataset['metadata'],
        or fall back to an identity mapping built from the header.

        Returns an OrderedDict (empty when neither source is available).
        """
        metadata = OrderedDict()
        if 'metadata' in self.dataset:
            print "Loading metadata..."
            metadata_filename = self.dataset['metadata']
            with open(metadata_filename, "r") as metadata_file:
                metadata_reader = csv.reader(metadata_file, delimiter=";", quotechar="\"")
                for l in metadata_reader:
                    # first column is the key, second column the value
                    metadata[l[0].strip()] = l[1].strip()
        elif self.header:
            print "No metadata... reconstructing from header"
            for h in self.header:
                metadata[h] = h
        else:
            print "No metadata or header"
        return metadata
    def validate_header(self):
        """Checks whether the header in the file and the metadata provided are exactly the same"""
        if self.header and self.metadata:
            # Find the difference between header and metadata keys
            diff = set(self.header).difference(set(self.metadata.keys()))
            if len(diff) > 0:
                print "Header and metadata do *not* correspond"
                # print zip(self.header,self.metadata.keys())
                return False
            else:
                print "Header and metadata are aligned"
                return True
        else:
            print "No header or no metadata present"
            return False
    def get_values(self):
        """
        Return all unique values, and converts it to samples for each column.

        Returns {column: variable-description dict}, where each description
        carries minted variable/value/codelist IRIs plus per-value counts.
        """
        # Get all unique values for each column
        stats = {}
        for col in self.data.columns:
            istats = []
            counts = self.data[col].value_counts()
            # print self.data[col][0]
            for i in counts.index:
                print col, i
                # The URI for the variable value
                i_uri = iribaker.to_iri(u"{}/value/{}/{}"
                                        .format(self.dataset_uri, col, i))
                # Capture the counts and label in a dictionary for the value
                stat = {
                    'original': {
                        'uri': i_uri,
                        'label': i
                    },
                    'label': i,
                    'uri': i_uri,
                    'count': counts[i]
                }
                # And append it to the list of variable values
                istats.append(stat)
            # The URI for the variable
            variable_uri = iribaker.to_iri("{}/variable/{}"
                                           .format(self.dataset_uri, col))
            # The URI for a (potential) codelist for the variable
            codelist_uri = iribaker.to_iri("{}/codelist/{}"
                                           .format(self.dataset_uri, col))
            codelist_label = "Codelist generated from the values for '{}'".format(col)
            codelist = {
                'original': {
                    'uri': codelist_uri,
                    'label': codelist_label
                },
                'uri': codelist_uri,
                'label': codelist_label
            }
            stats[col] = {
                'original': {
                    'uri': variable_uri,
                    'label': col
                },
                'uri': variable_uri,
                'label': col,
                'description': "The variable '{}' as taken "
                               "from the '{}' dataset."
                               .format(col, self.dataset_name),
                'category': 'identifier',
                'type': 'http://purl.org/linked-data/cube#DimensionProperty', # This is the default
                'values': istats,
                'codelist': codelist
            }
        return stats
# TODO: Temporarily Disabled
# class SavAdapter(Adapter):
#
# def __init__(self, dataset):
# super(SavAdapter, self).__init__(dataset)
#
# if not dataset['format'] == 'SPSS':
# raise Exception('This is an SPSS adapter, not {}'.format(dataset['format']))
#
# self.filename = dataset['filename']
#
# self.has_header = dataset['header']
#
# self.reader = SavReader(self.filename, ioLocale='en_US.UTF-8')
#
# if self.has_header:
# with SavHeaderReader(self.filename, ioLocale='en_US.UTF-8') as hr:
# self.header = hr.varNames
#
# else :
# self.header = None
#
# self.metadata = self.load_metadata()
#
# print self.validate_header()
# return
#
# def get_examples(self):
# """Returns first 10000 rows, and converts it to samples for each column."""
#
# # Get first 10000 rows
# rows = self.reader.head(10000)
#
# # Assume metadata keys are best (since if no metadata exists, the header
# # will be used to generate it)
# header = self.metadata.keys()
#
# # Convert the rows to a list of dictionaries with keys from the header
# data_dictionaries = [dict(zip(header, [v.strip() if type(v) == str
# else v for v in values ])) for values in rows]
#
# # Convert the list of dictionaries to a dictionary of sets
# data = defaultdict(set)
# for d in data_dictionaries:
# for k, v in d.items():
# data[k].add(v)
#
# json_ready_data = {}
# for k,v in data.items():
# json_ready_data[k] = list(v)[:250]
#
# return json_ready_data
class CsvAdapter(Adapter):
    def __init__(self, dataset):
        """Initializes an adapter for reading a CSV dataset"""
        super(CsvAdapter, self).__init__(dataset)
        if not dataset['format'] == 'text/csv':
            raise Exception('This is a CSV adapter, not {}'.format(dataset['format']))
        self.filename = dataset['filename']
        self.has_header = dataset['header']
        with open(self.filename, 'r') as fn:
            # index_col=False: every column in the file is a data column; the
            # first column must not be promoted to the row index
            self.data = pd.read_csv(fn, index_col=False, parse_dates=True, encoding='utf-8')
        if self.has_header:
            self.header = list(self.data.columns)
        else:
            self.header = None
        # load_metadata() falls back to the header, so the header must be
        # determined before it runs; previously self.metadata was read here
        # before ever being assigned, raising AttributeError when the file
        # had no header.  Fall back to the metadata keys once available.
        self.metadata = self.load_metadata()
        if self.header is None and self.metadata:
            self.header = list(self.metadata.keys())
        # single-argument print() is valid in both Python 2 and 3
        print(self.validate_header())
        return
class ExcelAdapter(Adapter):
    """Adapter that loads an Excel (.xls/.xlsx) dataset into a pandas DataFrame."""

    def __init__(self, dataset, clio=True):
        """Initializes an adapter for reading an Excel dataset.

        When ``clio`` is True (the default) the sheet is assumed to come
        from ClioInfra: the first two rows are skipped and the wide
        per-year columns are unpivoted into a long table.
        """
        super(ExcelAdapter, self).__init__(dataset)
        # Guard: accept both the OOXML and the legacy Excel MIME type.
        if not (dataset['format'] == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' or dataset['format'] == 'application/vnd.ms-excel'):
            raise Exception('This is an Excel adapter, not {}'.format(dataset['format']))
        self.filename = dataset['filename']
        self.has_header = dataset['header']
        with open(self.filename, 'r') as fn:
            # If this is ClioInfra data, we skip the first two rows of the Worksheet
            if clio:
                skiprows = [0, 1]
                header = 0
            else:
                skiprows = None
                header = 0
            self.data = pd.read_excel(fn, skiprows=skiprows, header=header)
        if clio:
            # Unpivot the table, excluding the first 6 columns (webmapper ids, country, period)
            id_vars = [
                'Webmapper code',
                'Webmapper numeric code',
                'ccode',
                'country name',
                'start year',
                'end year'
            ]
            # NOTE(review): value_name='GDPPC' hard-codes one indicator;
            # other ClioInfra sheets presumably carry other variables --
            # confirm whether this is intentional.
            self.data = pd.melt(self.data, id_vars=id_vars, var_name='year', value_name='GDPPC')
            # Drop rows whose value is NaN/inf (np.isfinite is False).
            self.data = self.data[np.isfinite(self.data['GDPPC'])]
        if self.has_header:
            self.header = list(self.data.columns)
        elif self.metadata:
            # NOTE(review): self.metadata is read before being assigned
            # below; presumably the Adapter base class initializes it -- confirm.
            self.header = self.metadata.keys()
        else:
            self.header = None
        self.metadata = self.load_metadata()
        # Python 2 print statement: report header validation result.
        print self.validate_header()
        return
class TabAdapter(Adapter):
def __init__(self, dataset):
"""Initializes an adapter for reading a Tab-delimited dataset"""
super(TabAdapter, self).__init__(dataset)
if dataset['format'] not in ['text/tab-separated-values', 'text/plain']:
raise Exception('This is a Tab adapter, not {}'.format(dataset['format']))
self.filename = dataset['filename']
self.has_header = dataset['header']
with open(self.filename, 'r') as fn:
self.data = pd.DataFrame.from_csv(fn, index_col=False, sep='\t')
if self.has_header:
self.header = list(self.data.columns)
elif self.metadata:
self.header = self.metadata.keys()
else:
self.header = None
self.metadata = self.load_metadata()
print self.validate_header()
return
# Maps dataset MIME types to the Adapter subclass that can read them;
# consumed by get_adapter() below.
mappings = {
    # "SPSS": SavAdapter,
    "text/csv": CsvAdapter,
    "text/tab-separated-values": TabAdapter,
    "text/plain": TabAdapter,
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ExcelAdapter,
    "application/vnd.ms-excel": ExcelAdapter
}
def get_adapter(dataset):
if 'format' in dataset:
mimetype = dataset['format']
elif dataset['filename'].endswith('.tsv') or dataset['filename'].endswith('.tab'):
mimetype = 'text/tab-separated-values'
# Make sure we set the guessed mimetype as format for the dataset
dataset['format'] = mimetype
elif dataset['filename'].endswith('.csv'):
mimetype = 'text/csv'
# Make sure we set the guessed mimetype as format for the dataset
dataset['format'] = mimetype
else:
csv_fileh = open(dataset['filename'], 'rb')
try:
dialect = csv.Sniffer().sniff(csv_fileh.read(1024))
# Perform various checks on the dialect (e.g., lineseparator,
# delimiter) to make sure it's sane
# Don't forget to reset the read position back to the start of
# the file before reading any entries.
csv_fileh.seek(0)
if dialect.delimiter == ',' or dialect.delimiter == ';':
print "Detected CSV"
mimetype = 'text/csv'
elif dialect.delimiter == '\t':
print "Detected TAB"
mimetype = 'text/tab-separated-values'
else:
# Probably not very wise, but we'll default to the CSV mimetype
# and rely on Panda's ability to guess the separator
print "Fallback to CSV"
mimetype = 'text/csv'
except csv.Error:
# File appears not to be in CSV format; try libmagic (not very useful)
mymagic = magic.Magic(mime=True)
mimetype = mymagic.from_buffer(open(dataset['filename']).read(1024), mime=True)
# Make sure we set the guessed mimetype as format for the dataset
dataset['format'] = mimetype
try:
adapterClass = mappings[mimetype]
adapter = adapterClass(dataset)
return adapter
except Exception as e:
traceback.print_exc()
raise(e)
# raise(Exception("No adapter for this file type: '{}'".format(mimetype)))
|
#
# Components
# ==========
#
# %%LICENSE%%
#
import os;
from dep import config, opts, scm
from dep.helpers import *
class BasicComponent:
    """Base node of the dependency tree.

    Holds the name/path bookkeeping and the tree-walking logic; the
    config/SCM operations are no-ops here and are overridden by
    RealComponent / LinkComponent.
    """
    def __init__(self, name, path, parent):
        self.name = name
        # Path relative to the parent component.
        self.rel_path = path
        self.abs_path = os.path.join(parent.abs_path, path) if parent else path
        self.parent = parent
        # The root component points at itself.
        self.root = parent.root if parent else self
        self.children = []
    def __str__(self):
        return "{} '{}' at {}".format(self.__class__.__name__, self.name, self.abs_path)
    def _read_config(self):
        # Overridden by subclasses that have a config file.
        pass
    def _write_config(self):
        pass
    def _refresh_work_dir(self):
        pass
    def _record_to_parent_config(self):
        pass
    def _get_child_config_sections(self):
        return None
    def add_child(self, child):
        self.children.append(child)
    def _create_children_from_config(self):
        """Rebuilds self.children from the config's 'dep' sections."""
        self.children = []
        for section in self._get_child_config_sections():
            new_dep = self.root._find_or_create_component(section=section, parent=self)
    def _build_dep_tree_recurse(self, refresh=False):
        """Reads config and descends into children, optionally refreshing
        each child's working directory first."""
        self._read_config()
        self._create_children_from_config()
        for child in self.children:
            if refresh:
                child._refresh_work_dir()
            child._build_dep_tree_recurse(refresh)
    def read_dep_tree(self):
        self._build_dep_tree_recurse()
        self.debug_dump("read: ")
    def refresh_dep_tree(self):
        self._build_dep_tree_recurse(True)
        self.debug_dump("refresh: ")
    def _record_dep_tree_recurse(self):
        # Children first, so a parent records its children's final state.
        for child in self.children:
            child._record_dep_tree_recurse()
        self._record_to_parent_config()
    def record_dep_tree(self):
        self._record_dep_tree_recurse()
        self.debug_dump("record: ")
    def write_dep_tree_config(self):
        for child in self.children:
            child.write_dep_tree_config()
        self._write_config()
    def run_command(self, cmd):
        """Runs a shell command in this component's directory, temporarily
        un-quieting output so the command's output is visible."""
        status("##===================================================================================================")
        status("## {}:", self)
        old_quiet = opts.args.quiet
        opts.args.quiet = False
        run(*cmd, shell=True, cwd=self.abs_path)
        opts.args.quiet = old_quiet
    def _debug_dump_content(self, prefix):
        pass
    def debug_dump(self, prefix=""):
        """Dumps this node and (recursively) its children when --debug is
        on and --quiet is off."""
        if not opts.args.debug or opts.args.quiet:
            return
        debug("{}--- {} ---", prefix, repr(self))
        debug("{}name = {}", prefix, self.name)
        debug("{}rel_path = {}", prefix, self.rel_path)
        debug("{}abs_path = {}", prefix, self.abs_path)
        debug("{}parent = {}", prefix, repr(self.parent))
        debug("{}root = {}", prefix, repr(self.root))
        self._debug_dump_content(prefix)
        debug("{}children[] = {{", prefix)
        for i, c in enumerate(self.children):
            if i > 0:
                debug("{},".format(prefix))
            c.debug_dump("{}[{}] ".format(prefix, i))
        debug("{}}}", prefix)
class RealComponent(BasicComponent):
    """A component with its own .depconfig file and SCM repository."""
    def __init__(self, name, path, parent, url=None):
        BasicComponent.__init__(self, name, path, parent)
        # Lazily rebuilt by the parent_section property.
        self._parent_section = None
        self.config = config.Config(os.path.join(self.abs_path, ".depconfig"))
        self.repository = scm.Repository.create(self.abs_path, url)
    @property
    def parent_section(self):
        """The 'dep.<name>' section in the parent's config, or None."""
        if self._parent_section is None:
            self._rebuild_parent_section()
        return self._parent_section
    def _rebuild_parent_section(self):
        if self.parent is None:
            return
        if not self.parent.config.has_section("dep", self.name):
            return
        self._parent_section = self.parent.config["dep.{}".format(self.name)]
    def _read_config(self):
        # need_read guards against re-reading an already-loaded config.
        if self.config.need_read:
            if self.config.exists():
                self.config.read()
            self._read_repository_state_from_parent_config()
    def _write_config(self):
        if self.config.need_write:
            # pre/post_edit let the SCM track the config file change.
            self.repository.pre_edit(self.config.path)
            self.config.write()
            self.repository.post_edit(self.config.path)
            self.debug_dump("write: ")
    def _has_repo(self):
        # "file" repositories are plain directories, not real SCM checkouts.
        return self.repository is not None and self.repository.vcs != "file"
    def _validate_has_repo(self):
        if self._has_repo():
            return
        error("{} does not have a non-file based SCM repository", self)
    def _add_to_parent_config(self):
        """Creates this component's 'dep' section in the parent config."""
        if self.parent_section:
            error("Cannot add {} to {}, already exists", self, self.parent)
            return
        self._parent_section = self.parent.config.add_section("dep", self.name)
        self.parent_section["relpath"] = self.rel_path
    def _read_repository_state_from_parent_config(self):
        if self.parent_section is None:
            return
        self.repository.read_state_from_config_section(self.parent_section)
    def _refresh_work_dir(self):
        self.repository.refresh()
    def _record_to_parent_config(self):
        if not self.parent_section:
            return
        self.repository.record()
        self.repository.write_state_to_config_section(self.parent_section)
    def _get_child_config_sections(self):
        return self.config.sections_named("dep")
    def initialize_new_config(self):
        """Creates a fresh .depconfig with default settings ('dep init')."""
        verbose("Initializing {}", self)
        validate_file_notexists(self.config.path)
        core = self.config.add_section("core")
        core["default-dep-dir"] = "dep"
        self.config.need_read = False
        self._write_config()
        self.debug_dump("init post: ")
    def add_new_dependency(self, url):
        """Clones url as a new dependency and records it ('dep add')."""
        self._validate_has_repo()
        self.read_dep_tree()
        new_dep = self.root._find_or_create_component(url=url, parent=self)
        verbose("Adding dependency {} to {}", new_dep, self)
        new_dep._add_to_parent_config()
        new_dep.repository.refresh()
        new_dep._record_to_parent_config()
        self.debug_dump("add post: ")
        # Keep the new checkout out of this repository's own SCM.
        self.repository.add_ignore(new_dep.rel_path)
        self.refresh_dep_tree()
        self.record_dep_tree()
        self.write_dep_tree_config()
    def refresh_dependencies(self):
        self._validate_has_repo()
        verbose("Refreshing dependencies under {}", self)
        self.refresh_dep_tree()
    def record_dependencies(self):
        self._validate_has_repo()
        verbose("Recording dependencies under {}", self)
        self.read_dep_tree()
        self.record_dep_tree()
        self.write_dep_tree_config()
    def list_dependencies(self):
        # Python 2 print statements: top components first, root last.
        self._validate_has_repo()
        self.read_dep_tree()
        for top in self.root.top_components:
            print top.name
        print self.root.name
    def foreach_dependency(self, cmd):
        """Runs cmd in each top component, then in the root."""
        self._validate_has_repo()
        self.read_dep_tree()
        for top in self.root.top_components:
            top.run_command(cmd)
        self.root.run_command(cmd)
    def status_dependencies(self):
        """Prints a one-line SCM status per component."""
        self._validate_has_repo()
        self.read_dep_tree()
        status("M Branch Commit Ahead Behind Path")
        status("-- --------------- ---------------------------------------- ------ ------ -----------------------")
        for top in self.root.top_components:
            top.repository.status_brief(top.rel_path)
        self.root.repository.status_brief(".")
    def _debug_dump_content(self, prefix=""):
        debug("{}parent_section = {}", prefix, self.parent_section)
        self.config.debug_dump(prefix)
        self.repository.debug_dump(prefix)
class RootComponent(RealComponent):
    """The top of the dependency tree: the project the user invoked dep in.

    Owns top_components, the flat list of real (TopComponent) checkouts;
    every other occurrence of a dependency is a LinkComponent pointing at
    one of these.
    """
    def __init__(self):
        if opts.args.local:
            error("--local flag not yet supported")
        path = find_root_work_dir()
        if path is None:
            # Not inside an existing work dir; treat cwd as the root.
            path = os.getcwd()
        name = scm.Repository.determine_name_from_url(path)
        RealComponent.__init__(self, name, path, None)
        self.top_components = []
    def _find_top_component(self, name):
        return next((c for c in self.top_components if c.name == name), None)
    def _create_top_component(self, name, section, url):
        parent = self
        if section:
            path = section["relpath"]
            url = section["url"]
        else:
            dep_dir = parent.config["core"]["default-dep-dir"]
            path = os.path.join(dep_dir, name)
        return TopComponent(name, path, parent, url)
    def _create_link_component(self, name, section, parent, top_component):
        if section:
            path = section["relpath"]
        else:
            dep_dir = parent.config["core"]["default-dep-dir"]
            path = os.path.join(dep_dir, name)
        return LinkComponent(name, path, parent, top_component)
    def _find_or_create_component(self, section=None, url=None, parent=None):
        """Returns the component for a config section or url, creating the
        TopComponent (and, for non-root parents, a LinkComponent) on demand.
        Exactly one of section/url must be given.
        """
        if parent is None:
            error("Must pass parent to _find_or_create_component")
        if (not section and not url) or (section and url):
            error("Must pass section or url to _find_or_create_component")
        if section:
            name = section.subname
        else:
            name = scm.Repository.determine_name_from_url(url)
        top = self._find_top_component(name)
        if top is None:
            top = self._create_top_component(name, section, url)
        if parent is self:
            comp = top
        else:
            comp = self._create_link_component(name, section, parent, top)
        parent.add_child(comp)
        return comp
    def _debug_dump_content(self, prefix=""):
        RealComponent._debug_dump_content(self, prefix)
        debug("{}top_components[] = {{", prefix)
        for i, c in enumerate(self.top_components):
            if i > 0:
                debug("{},".format(prefix))
            c.debug_dump("{}[{}] ".format(prefix, i))
        debug("{}}}", prefix)
class TopComponent(RealComponent):
    """A real dependency checkout registered on the root's top list."""

    def __init__(self, name, path, parent, url):
        RealComponent.__init__(self, name, path, parent, url)
        # Newly created components go to the front of the root's list.
        top_list = parent.root.top_components
        top_list.insert(0, self)
class LinkComponent(BasicComponent):
    """A symlink placeholder for a dependency shared via the root.

    Delegates config/record operations to its TopComponent and creates a
    relative symlink in the parent's dep dir pointing at the real
    working directory.
    """
    def __init__(self, name, path, parent, top_component):
        BasicComponent.__init__(self, name, path, parent)
        self.top_component = top_component
    def _read_config(self):
        self.top_component._read_config()
    def _write_config(self):
        self.top_component._write_config()
    def _refresh_work_dir(self):
        self.top_component._refresh_work_dir()
        # Create the symlink only if nothing exists at our path yet.
        if not os.path.isdir(self.abs_path):
            status("Linking {} to {}", self.top_component, self)
            make_relative_symlink(self.top_component.abs_path, self.abs_path)
    def _record_to_parent_config(self):
        self.top_component._record_to_parent_config()
    def _get_child_config_sections(self):
        return self.top_component._get_child_config_sections()
    def _debug_dump_content(self, prefix=""):
        BasicComponent._debug_dump_content(self, prefix)
        debug("{}top_component = {}", prefix, repr(self.top_component))
Ensure that top_components is built in dependency order; move a link component's top component to the front of the list as the link is added.
#
# Components
# ==========
#
# %%LICENSE%%
#
import os;
from dep import config, opts, scm
from dep.helpers import *
class BasicComponent:
    """Base node of the dependency tree.

    Holds the name/path bookkeeping and the tree-walking logic; the
    config/SCM operations are no-ops here and are overridden by
    RealComponent / LinkComponent.
    """
    def __init__(self, name, path, parent):
        self.name = name
        # Path relative to the parent component.
        self.rel_path = path
        self.abs_path = os.path.join(parent.abs_path, path) if parent else path
        self.parent = parent
        # The root component points at itself.
        self.root = parent.root if parent else self
        self.children = []
    def __str__(self):
        return "{} '{}' at {}".format(self.__class__.__name__, self.name, self.abs_path)
    def _read_config(self):
        # Overridden by subclasses that have a config file.
        pass
    def _write_config(self):
        pass
    def _refresh_work_dir(self):
        pass
    def _record_to_parent_config(self):
        pass
    def _get_child_config_sections(self):
        return None
    def add_child(self, child):
        self.children.append(child)
    def _create_children_from_config(self):
        """Rebuilds self.children from the config's 'dep' sections."""
        self.children = []
        for section in self._get_child_config_sections():
            new_dep = self.root._find_or_create_component(section=section, parent=self)
    def _build_dep_tree_recurse(self, refresh=False):
        """Reads config and descends into children, optionally refreshing
        each child's working directory first."""
        self._read_config()
        self._create_children_from_config()
        for child in self.children:
            if refresh:
                child._refresh_work_dir()
            child._build_dep_tree_recurse(refresh)
    def read_dep_tree(self):
        self._build_dep_tree_recurse()
        self.debug_dump("read: ")
    def refresh_dep_tree(self):
        self._build_dep_tree_recurse(True)
        self.debug_dump("refresh: ")
    def _record_dep_tree_recurse(self):
        # Children first, so a parent records its children's final state.
        for child in self.children:
            child._record_dep_tree_recurse()
        self._record_to_parent_config()
    def record_dep_tree(self):
        self._record_dep_tree_recurse()
        self.debug_dump("record: ")
    def write_dep_tree_config(self):
        for child in self.children:
            child.write_dep_tree_config()
        self._write_config()
    def run_command(self, cmd):
        """Runs a shell command in this component's directory, temporarily
        un-quieting output so the command's output is visible."""
        status("##===================================================================================================")
        status("## {}:", self)
        old_quiet = opts.args.quiet
        opts.args.quiet = False
        run(*cmd, shell=True, cwd=self.abs_path)
        opts.args.quiet = old_quiet
    def _debug_dump_content(self, prefix):
        pass
    def debug_dump(self, prefix=""):
        """Dumps this node and (recursively) its children when --debug is
        on and --quiet is off."""
        if not opts.args.debug or opts.args.quiet:
            return
        debug("{}--- {} ---", prefix, repr(self))
        debug("{}name = {}", prefix, self.name)
        debug("{}rel_path = {}", prefix, self.rel_path)
        debug("{}abs_path = {}", prefix, self.abs_path)
        debug("{}parent = {}", prefix, repr(self.parent))
        debug("{}root = {}", prefix, repr(self.root))
        self._debug_dump_content(prefix)
        debug("{}children[] = {{", prefix)
        for i, c in enumerate(self.children):
            if i > 0:
                debug("{},".format(prefix))
            c.debug_dump("{}[{}] ".format(prefix, i))
        debug("{}}}", prefix)
class RealComponent(BasicComponent):
    """A component with its own .depconfig file and SCM repository."""
    def __init__(self, name, path, parent, url=None):
        BasicComponent.__init__(self, name, path, parent)
        # Lazily rebuilt by the parent_section property.
        self._parent_section = None
        self.config = config.Config(os.path.join(self.abs_path, ".depconfig"))
        self.repository = scm.Repository.create(self.abs_path, url)
    @property
    def parent_section(self):
        """The 'dep.<name>' section in the parent's config, or None."""
        if self._parent_section is None:
            self._rebuild_parent_section()
        return self._parent_section
    def _rebuild_parent_section(self):
        if self.parent is None:
            return
        if not self.parent.config.has_section("dep", self.name):
            return
        self._parent_section = self.parent.config["dep.{}".format(self.name)]
    def _read_config(self):
        # need_read guards against re-reading an already-loaded config.
        if self.config.need_read:
            if self.config.exists():
                self.config.read()
            self._read_repository_state_from_parent_config()
    def _write_config(self):
        if self.config.need_write:
            # pre/post_edit let the SCM track the config file change.
            self.repository.pre_edit(self.config.path)
            self.config.write()
            self.repository.post_edit(self.config.path)
            self.debug_dump("write: ")
    def _has_repo(self):
        # "file" repositories are plain directories, not real SCM checkouts.
        return self.repository is not None and self.repository.vcs != "file"
    def _validate_has_repo(self):
        if self._has_repo():
            return
        error("{} does not have a non-file based SCM repository", self)
    def _add_to_parent_config(self):
        """Creates this component's 'dep' section in the parent config."""
        if self.parent_section:
            error("Cannot add {} to {}, already exists", self, self.parent)
            return
        self._parent_section = self.parent.config.add_section("dep", self.name)
        self.parent_section["relpath"] = self.rel_path
    def _read_repository_state_from_parent_config(self):
        if self.parent_section is None:
            return
        self.repository.read_state_from_config_section(self.parent_section)
    def _refresh_work_dir(self):
        self.repository.refresh()
    def _record_to_parent_config(self):
        if not self.parent_section:
            return
        self.repository.record()
        self.repository.write_state_to_config_section(self.parent_section)
    def _get_child_config_sections(self):
        return self.config.sections_named("dep")
    def initialize_new_config(self):
        """Creates a fresh .depconfig with default settings ('dep init')."""
        verbose("Initializing {}", self)
        validate_file_notexists(self.config.path)
        core = self.config.add_section("core")
        core["default-dep-dir"] = "dep"
        self.config.need_read = False
        self._write_config()
        self.debug_dump("init post: ")
    def add_new_dependency(self, url):
        """Clones url as a new dependency and records it ('dep add')."""
        self._validate_has_repo()
        self.read_dep_tree()
        new_dep = self.root._find_or_create_component(url=url, parent=self)
        verbose("Adding dependency {} to {}", new_dep, self)
        new_dep._add_to_parent_config()
        new_dep.repository.refresh()
        new_dep._record_to_parent_config()
        self.debug_dump("add post: ")
        # Keep the new checkout out of this repository's own SCM.
        self.repository.add_ignore(new_dep.rel_path)
        self.refresh_dep_tree()
        self.record_dep_tree()
        self.write_dep_tree_config()
    def refresh_dependencies(self):
        self._validate_has_repo()
        verbose("Refreshing dependencies under {}", self)
        self.refresh_dep_tree()
    def record_dependencies(self):
        self._validate_has_repo()
        verbose("Recording dependencies under {}", self)
        self.read_dep_tree()
        self.record_dep_tree()
        self.write_dep_tree_config()
    def list_dependencies(self):
        # Python 2 print statements: top components first, root last.
        self._validate_has_repo()
        self.read_dep_tree()
        for top in self.root.top_components:
            print top.name
        print self.root.name
    def foreach_dependency(self, cmd):
        """Runs cmd in each top component, then in the root."""
        self._validate_has_repo()
        self.read_dep_tree()
        for top in self.root.top_components:
            top.run_command(cmd)
        self.root.run_command(cmd)
    def status_dependencies(self):
        """Prints a one-line SCM status per component."""
        self._validate_has_repo()
        self.read_dep_tree()
        status("M Branch Commit Ahead Behind Path")
        status("-- --------------- ---------------------------------------- ------ ------ -----------------------")
        for top in self.root.top_components:
            top.repository.status_brief(top.rel_path)
        self.root.repository.status_brief(".")
    def _debug_dump_content(self, prefix=""):
        debug("{}parent_section = {}", prefix, self.parent_section)
        self.config.debug_dump(prefix)
        self.repository.debug_dump(prefix)
class RootComponent(RealComponent):
    """The top of the dependency tree: the project the user invoked dep in.

    Owns top_components, the flat list of real (TopComponent) checkouts;
    every other occurrence of a dependency is a LinkComponent pointing at
    one of these.
    """
    def __init__(self):
        if opts.args.local:
            error("--local flag not yet supported")
        path = find_root_work_dir()
        if path is None:
            # Not inside an existing work dir; treat cwd as the root.
            path = os.getcwd()
        name = scm.Repository.determine_name_from_url(path)
        RealComponent.__init__(self, name, path, None)
        self.top_components = []
    def _find_top_component(self, name):
        return next((c for c in self.top_components if c.name == name), None)
    def _create_top_component(self, name, section, url):
        parent = self
        if section:
            path = section["relpath"]
            url = section["url"]
        else:
            dep_dir = parent.config["core"]["default-dep-dir"]
            path = os.path.join(dep_dir, name)
        return TopComponent(name, path, parent, url)
    def _create_link_component(self, name, section, parent, top_component):
        if section:
            path = section["relpath"]
        else:
            dep_dir = parent.config["core"]["default-dep-dir"]
            path = os.path.join(dep_dir, name)
        return LinkComponent(name, path, parent, top_component)
    def _find_or_create_component(self, section=None, url=None, parent=None):
        """Returns the component for a config section or url, creating the
        TopComponent (and, for non-root parents, a LinkComponent) on demand.
        Exactly one of section/url must be given.
        """
        if parent is None:
            error("Must pass parent to _find_or_create_component")
        if (not section and not url) or (section and url):
            error("Must pass section or url to _find_or_create_component")
        if section:
            name = section.subname
        else:
            name = scm.Repository.determine_name_from_url(url)
        top = self._find_top_component(name)
        if top is None:
            top = self._create_top_component(name, section, url)
        if parent is self:
            comp = top
        else:
            comp = self._create_link_component(name, section, parent, top)
        parent.add_child(comp)
        return comp
    def _debug_dump_content(self, prefix=""):
        RealComponent._debug_dump_content(self, prefix)
        debug("{}top_components[] = {{", prefix)
        for i, c in enumerate(self.top_components):
            if i > 0:
                debug("{},".format(prefix))
            c.debug_dump("{}[{}] ".format(prefix, i))
        debug("{}}}", prefix)
class TopComponent(RealComponent):
    """A real dependency checkout registered on the root's top list."""

    def __init__(self, name, path, parent, url):
        RealComponent.__init__(self, name, path, parent, url)
        # Newly created components go to the front of the root's list.
        top_list = parent.root.top_components
        top_list.insert(0, self)
class LinkComponent(BasicComponent):
    """A symlink placeholder for a dependency shared via the root.

    Delegates config/record operations to its TopComponent and creates a
    relative symlink in the parent's dep dir pointing at the real
    working directory.
    """
    def __init__(self, name, path, parent, top_component):
        BasicComponent.__init__(self, name, path, parent)
        self.top_component = top_component
        self._move_top_component_to_front()
    def _move_top_component_to_front(self):
        # Keep top_components in dependency order: a component that gets
        # linked (depended upon) again moves to the front so it is
        # processed before its dependents.
        self.root.top_components.remove(self.top_component)
        self.root.top_components.insert(0, self.top_component)
    def _read_config(self):
        self.top_component._read_config()
    def _write_config(self):
        self.top_component._write_config()
    def _refresh_work_dir(self):
        self.top_component._refresh_work_dir()
        # Create the symlink only if nothing exists at our path yet.
        if not os.path.isdir(self.abs_path):
            status("Linking {} to {}", self.top_component, self)
            make_relative_symlink(self.top_component.abs_path, self.abs_path)
    def _record_to_parent_config(self):
        self.top_component._record_to_parent_config()
    def _get_child_config_sections(self):
        return self.top_component._get_child_config_sections()
    def _debug_dump_content(self, prefix=""):
        BasicComponent._debug_dump_content(self, prefix)
        debug("{}top_component = {}", prefix, repr(self.top_component))
|
#!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, struct
# Section header type for string-table sections.
SHT_STRTAB = 3
# .dynamic entry tags used below, per the ELF specification.
DT_NEEDED = 1
DT_RPATH = 15
DT_STRTAB = 5
DT_SONAME = 14
def init_datasizes(self, ptrsize):
    """Attach struct format codes and byte sizes for the ELF primitive
    types to *self*, for a 32- or 64-bit (ptrsize) file."""
    self.Half, self.HalfSize = 'h', 2
    self.Word, self.WordSize = 'I', 4
    self.Sword, self.SwordSize = 'i', 4
    if ptrsize == 64:
        self.Addr, self.AddrSize = 'Q', 8
        self.Off, self.OffSize = 'Q', 8
        self.XWord, self.XWordSize = 'Q', 8
        self.Sxword, self.SxwordSize = 'q', 8
    else:
        # 32-bit files have no XWord/Sxword; those attributes stay unset.
        self.Addr, self.AddrSize = 'I', 4
        self.Off, self.OffSize = 'I', 4
class DynamicEntry():
    """One .dynamic section entry: a tag (d_tag) and its value (val)."""

    def __init__(self, ifile, ptrsize):
        init_datasizes(self, ptrsize)
        if ptrsize == 64:
            tag_fmt, tag_size = self.Sxword, self.SxwordSize
            val_fmt, val_size = self.XWord, self.XWordSize
        else:
            tag_fmt, tag_size = self.Sword, self.SwordSize
            val_fmt, val_size = self.Word, self.WordSize
        self.d_tag = struct.unpack(tag_fmt, ifile.read(tag_size))[0]
        self.val = struct.unpack(val_fmt, ifile.read(val_size))[0]
class SectionHeader():
    """Parses one ELF section header from *ifile* at its current offset."""

    def __init__(self, ifile, ptrsize):
        init_datasizes(self, ptrsize)
        is_64 = (ptrsize == 64)

        def read_word():
            return struct.unpack(self.Word, ifile.read(self.WordSize))[0]

        def read_xword():
            # Xword fields are 64-bit in ELF64 but plain words in ELF32.
            if is_64:
                return struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
            return read_word()

        self.sh_name = read_word()
        self.sh_type = read_word()
        self.sh_flags = read_xword()
        self.sh_addr = struct.unpack(self.Addr, ifile.read(self.AddrSize))[0]
        self.sh_offset = struct.unpack(self.Off, ifile.read(self.OffSize))[0]
        self.sh_size = read_xword()
        self.sh_link = read_word()
        self.sh_info = read_word()
        self.sh_addralign = read_xword()
        self.sh_entsize = read_xword()
class Elf():
    """Minimal ELF reader/editor for inspecting and shortening DT_RPATH
    and DT_NEEDED entries in place.

    The file is opened read/write; the fix_* methods overwrite strings
    inside the existing .dynstr table, so replacements must never be
    longer than the originals.
    """

    def __init__(self, bfile):
        self.bfile = bfile
        # Opened read/write so the fix_* methods can patch in place.
        self.bf = open(bfile, 'r+b')
        self.ptrsize = self.detect_elf_type()
        init_datasizes(self, self.ptrsize)
        self.parse_header()
        self.parse_sections()
        self.parse_dynamic()

    def detect_elf_type(self):
        """Returns 32 or 64 from the ELF class byte; exits on non-ELF input."""
        data = self.bf.read(5)
        if data[1:4] != b'ELF':
            print('File "%s" is not an ELF file.' % self.bfile)
            sys.exit(0)
        if data[4] == 1:
            return 32
        if data[4] == 2:
            return 64
        print('File "%s" has unknown ELF class.' % self.bfile)
        sys.exit(1)

    def parse_header(self):
        """Reads the fixed-size ELF file header fields."""
        self.bf.seek(0)
        self.e_ident = struct.unpack('16s', self.bf.read(16))[0]
        self.e_type = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_machine = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_version = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
        self.e_entry = struct.unpack(self.Addr, self.bf.read(self.AddrSize))[0]
        self.e_phoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
        self.e_shoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
        self.e_flags = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
        self.e_ehsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_phentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_phnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shstrndx = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]

    def parse_sections(self):
        """Reads all e_shnum section headers starting at e_shoff."""
        self.bf.seek(self.e_shoff)
        self.sections = []
        for i in range(self.e_shnum):
            self.sections.append(SectionHeader(self.bf, self.ptrsize))

    def read_str(self):
        """Reads a NUL-terminated byte string at the current file offset."""
        arr = []
        x = self.bf.read(1)
        while x != b'\0':
            arr.append(x)
            x = self.bf.read(1)
            if x == b'':
                raise RuntimeError('Tried to read past the end of the file')
        return b''.join(arr)

    def find_section(self, target_name):
        """Returns the section header whose name equals target_name
        (a bytes object), or None when no such section exists."""
        section_names = self.sections[self.e_shstrndx]
        for i in self.sections:
            self.bf.seek(section_names.sh_offset + i.sh_name)
            name = self.read_str()
            if name == target_name:
                return i

    def parse_dynamic(self):
        """Reads .dynamic entries up to the terminating DT_NULL tag."""
        sec = self.find_section(b'.dynamic')
        self.dynamic = []
        # Fully static binaries have no .dynamic section; the original
        # crashed here with AttributeError on sec.sh_offset.
        if sec is None:
            return
        self.bf.seek(sec.sh_offset)
        while True:
            e = DynamicEntry(self.bf, self.ptrsize)
            self.dynamic.append(e)
            if e.d_tag == 0:
                break

    def print_section_names(self):
        """Prints the name of every section."""
        section_names = self.sections[self.e_shstrndx]
        for i in self.sections:
            self.bf.seek(section_names.sh_offset + i.sh_name)
            name = self.read_str()
            print(name.decode())

    def print_soname(self):
        """Prints DT_SONAME, or a notice when the file has none."""
        soname = None
        strtab = None
        for i in self.dynamic:
            if i.d_tag == DT_SONAME:
                soname = i
            if i.d_tag == DT_STRTAB:
                strtab = i
        # Files without a soname (e.g. executables) crashed the original
        # with AttributeError on None.val.
        if soname is None or strtab is None:
            print("This file does not have a soname.")
            return
        self.bf.seek(strtab.val + soname.val)
        print(self.read_str())

    def get_rpath_offset(self):
        """Returns the file offset of the DT_RPATH string, or None."""
        sec = self.find_section(b'.dynstr')
        for i in self.dynamic:
            if i.d_tag == DT_RPATH:
                return sec.sh_offset + i.val
        return None

    def print_rpath(self):
        offset = self.get_rpath_offset()
        if offset is None:
            print("This file does not have an rpath.")
        else:
            self.bf.seek(offset)
            print(self.read_str())

    def print_deps(self):
        """Prints every DT_NEEDED entry (shared library dependencies)."""
        sec = self.find_section(b'.dynstr')
        deps = []
        for i in self.dynamic:
            if i.d_tag == DT_NEEDED:
                deps.append(i)
        for i in deps:
            offset = sec.sh_offset + i.val
            self.bf.seek(offset)
            name = self.read_str()
            print(name)

    def fix_deps(self, prefix):
        """Strips the directory part from DT_NEEDED entries that start
        with *prefix*, NUL-padding to keep string offsets intact."""
        sec = self.find_section(b'.dynstr')
        deps = []
        for i in self.dynamic:
            if i.d_tag == DT_NEEDED:
                deps.append(i)
        for i in deps:
            offset = sec.sh_offset + i.val
            self.bf.seek(offset)
            name = self.read_str()
            if name.startswith(prefix):
                basename = name.split(b'/')[-1]
                padding = b'\0'*(len(name) - len(basename))
                newname = basename + padding
                assert(len(newname) == len(name))
                self.bf.seek(offset)
                self.bf.write(newname)

    def fix_rpath(self, new_rpath):
        """Overwrites DT_RPATH in place with new_rpath (bytes), padding
        the rest of the old string with NULs."""
        rp_off = self.get_rpath_offset()
        if rp_off is None:
            print('File does not have rpath. It should be a fully static executable.')
            return
        self.bf.seek(rp_off)
        old_rpath = self.read_str()
        if len(old_rpath) < len(new_rpath):
            # Writing a longer string would overrun into the next .dynstr
            # entry; the original fell through here and corrupted the file.
            print("New rpath must not be longer than the old one.")
            return
        self.bf.seek(rp_off)
        self.bf.write(new_rpath)
        self.bf.write(b'\0'*(len(old_rpath) - len(new_rpath) + 1))
if __name__ == '__main__':
    # CLI: with one argument, print the current rpath; with two, replace
    # the rpath with the second argument (must not be longer).
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print('This application resets target rpath.')
        print('Don\'t run this unless you know what you are doing.')
        print('%s: <binary file> <prefix>' % sys.argv[0])
        exit(1)
    e = Elf(sys.argv[1])
    if len(sys.argv) == 2:
        e.print_rpath()
    else:
        new_rpath = sys.argv[2]
        e.fix_rpath(new_rpath.encode('utf8'))
    #e.fix_deps(prefix.encode())
Detect the endianness of ELF files so that cross-compiling to platforms with a different endianness works.
#!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, struct
# Section header type for string-table sections.
SHT_STRTAB = 3
# .dynamic entry tags used below, per the ELF specification.
DT_NEEDED = 1
DT_RPATH = 15
DT_STRTAB = 5
DT_SONAME = 14
def init_datasizes(self, ptrsize, is_le):
    """Attach struct format codes and byte sizes for the ELF primitive
    types to *self*; is_le selects little- vs big-endian unpacking."""
    prefix = '<' if is_le else '>'
    self.Half, self.HalfSize = prefix + 'h', 2
    self.Word, self.WordSize = prefix + 'I', 4
    self.Sword, self.SwordSize = prefix + 'i', 4
    if ptrsize == 64:
        self.Addr, self.AddrSize = prefix + 'Q', 8
        self.Off, self.OffSize = prefix + 'Q', 8
        self.XWord, self.XWordSize = prefix + 'Q', 8
        self.Sxword, self.SxwordSize = prefix + 'q', 8
    else:
        # 32-bit files have no XWord/Sxword; those attributes stay unset.
        self.Addr, self.AddrSize = prefix + 'I', 4
        self.Off, self.OffSize = prefix + 'I', 4
class DynamicEntry():
    """One .dynamic section entry: a tag (d_tag) and its value (val)."""

    def __init__(self, ifile, ptrsize, is_le):
        init_datasizes(self, ptrsize, is_le)
        if ptrsize == 64:
            tag_fmt, tag_size = self.Sxword, self.SxwordSize
            val_fmt, val_size = self.XWord, self.XWordSize
        else:
            tag_fmt, tag_size = self.Sword, self.SwordSize
            val_fmt, val_size = self.Word, self.WordSize
        self.d_tag = struct.unpack(tag_fmt, ifile.read(tag_size))[0]
        self.val = struct.unpack(val_fmt, ifile.read(val_size))[0]
class SectionHeader():
    """Parses one ELF section header from *ifile* at its current offset."""

    def __init__(self, ifile, ptrsize, is_le):
        init_datasizes(self, ptrsize, is_le)
        is_64 = (ptrsize == 64)

        def read_word():
            return struct.unpack(self.Word, ifile.read(self.WordSize))[0]

        def read_xword():
            # Xword fields are 64-bit in ELF64 but plain words in ELF32.
            if is_64:
                return struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
            return read_word()

        self.sh_name = read_word()
        self.sh_type = read_word()
        self.sh_flags = read_xword()
        self.sh_addr = struct.unpack(self.Addr, ifile.read(self.AddrSize))[0]
        self.sh_offset = struct.unpack(self.Off, ifile.read(self.OffSize))[0]
        self.sh_size = read_xword()
        self.sh_link = read_word()
        self.sh_info = read_word()
        self.sh_addralign = read_xword()
        self.sh_entsize = read_xword()
class Elf():
    """Parse an ELF binary and patch its DT_RPATH / DT_NEEDED strings in place.

    The file is opened read-write and kept open for the object's lifetime.
    Mutating methods (fix_rpath, fix_deps) only overwrite bytes inside the
    existing dynamic string table; they never change the file's size.
    """

    def __init__(self, bfile):
        self.bfile = bfile
        self.bf = open(bfile, 'r+b')
        (self.ptrsize, self.is_le) = self.detect_elf_type()
        init_datasizes(self, self.ptrsize, self.is_le)
        self.parse_header()
        self.parse_sections()
        self.parse_dynamic()

    def detect_elf_type(self):
        """Read e_ident and return (ptrsize, is_le); exits for non-ELF input."""
        data = self.bf.read(6)
        if data[1:4] != b'ELF':
            print('File "%s" is not an ELF file.' % self.bfile)
            sys.exit(1)
        # EI_CLASS: 1 = 32-bit, 2 = 64-bit.
        if data[4] == 1:
            ptrsize = 32
        elif data[4] == 2:
            ptrsize = 64
        else:
            print('File "%s" has unknown ELF class.' % self.bfile)
            sys.exit(1)
        # EI_DATA: 1 = little-endian, 2 = big-endian.
        if data[5] == 1:
            is_le = True
        elif data[5] == 2:
            is_le = False
        else:
            print('File "%s" has unknown ELF endianness.' % self.bfile)
            sys.exit(1)
        return (ptrsize, is_le)

    def parse_header(self):
        """Parse the fixed ELF header fields, starting at offset 0."""
        self.bf.seek(0)
        self.e_ident = struct.unpack('16s', self.bf.read(16))[0]
        self.e_type = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_machine = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_version = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
        self.e_entry = struct.unpack(self.Addr, self.bf.read(self.AddrSize))[0]
        self.e_phoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
        self.e_shoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
        self.e_flags = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
        self.e_ehsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_phentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_phnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
        self.e_shstrndx = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]

    def parse_sections(self):
        """Read all e_shnum section headers from the section header table."""
        self.bf.seek(self.e_shoff)
        self.sections = []
        for i in range(self.e_shnum):
            self.sections.append(SectionHeader(self.bf, self.ptrsize, self.is_le))

    def read_str(self):
        """Read a NUL-terminated byte string at the current file position."""
        arr = []
        x = self.bf.read(1)
        while x != b'\0':
            arr.append(x)
            x = self.bf.read(1)
            if x == b'':
                raise RuntimeError('Tried to read past the end of the file')
        return b''.join(arr)

    def find_section(self, target_name):
        """Return the section named target_name (bytes), or None if absent."""
        section_names = self.sections[self.e_shstrndx]
        for i in self.sections:
            self.bf.seek(section_names.sh_offset + i.sh_name)
            name = self.read_str()
            if name == target_name:
                return i

    def parse_dynamic(self):
        """Read .dynamic entries up to and including the DT_NULL terminator.

        NOTE(review): assumes a .dynamic section exists; for a fully static
        binary find_section returns None and this crashes — confirm callers
        only pass dynamically linked files.
        """
        sec = self.find_section(b'.dynamic')
        self.dynamic = []
        self.bf.seek(sec.sh_offset)
        while True:
            e = DynamicEntry(self.bf, self.ptrsize, self.is_le)
            self.dynamic.append(e)
            if e.d_tag == 0:  # DT_NULL terminates the table
                break

    def print_section_names(self):
        """Print every section name, one per line."""
        section_names = self.sections[self.e_shstrndx]
        for i in self.sections:
            self.bf.seek(section_names.sh_offset + i.sh_name)
            name = self.read_str()
            print(name.decode())

    def print_soname(self):
        """Print the DT_SONAME string, if the file has one."""
        soname = None
        strtab = None
        for i in self.dynamic:
            if i.d_tag == DT_SONAME:
                soname = i
            if i.d_tag == DT_STRTAB:
                strtab = i
        # Robustness fix: executables usually have no DT_SONAME; the
        # original dereferenced None here and raised TypeError.
        if soname is None or strtab is None:
            return
        self.bf.seek(strtab.val + soname.val)
        print(self.read_str())

    def get_rpath_offset(self):
        """Return the file offset of the rpath string, or None if absent."""
        sec = self.find_section(b'.dynstr')
        for i in self.dynamic:
            if i.d_tag == DT_RPATH:
                return sec.sh_offset + i.val
        return None

    def print_rpath(self):
        """Print the rpath string, or a notice when the file has none."""
        offset = self.get_rpath_offset()
        if offset is None:
            print("This file does not have an rpath.")
        else:
            self.bf.seek(offset)
            print(self.read_str())

    def print_deps(self):
        """Print the name of every DT_NEEDED dependency."""
        sec = self.find_section(b'.dynstr')
        deps = []
        for i in self.dynamic:
            if i.d_tag == DT_NEEDED:
                deps.append(i)
        for i in deps:
            offset = sec.sh_offset + i.val
            self.bf.seek(offset)
            name = self.read_str()
            print(name)

    def fix_deps(self, prefix):
        """Strip *prefix* from DT_NEEDED entries, keeping only the basename.

        The shortened name is NUL-padded to the old length so string-table
        offsets of subsequent strings stay valid.
        """
        sec = self.find_section(b'.dynstr')
        deps = []
        for i in self.dynamic:
            if i.d_tag == DT_NEEDED:
                deps.append(i)
        for i in deps:
            offset = sec.sh_offset + i.val
            self.bf.seek(offset)
            name = self.read_str()
            if name.startswith(prefix):
                basename = name.split(b'/')[-1]
                padding = b'\0'*(len(name) - len(basename))
                newname = basename + padding
                assert(len(newname) == len(name))
                self.bf.seek(offset)
                self.bf.write(newname)

    def fix_rpath(self, new_rpath):
        """Overwrite the rpath string with new_rpath (bytes, must not be longer)."""
        rp_off = self.get_rpath_offset()
        if rp_off is None:
            print('File does not have rpath. It should be a fully static executable.')
            return
        self.bf.seek(rp_off)
        old_rpath = self.read_str()
        if len(old_rpath) < len(new_rpath):
            # BUG FIX: the original printed this message but then wrote the
            # longer rpath anyway, overrunning the old string and corrupting
            # the dynamic string table. Refuse instead.
            print("New rpath must not be longer than the old one.")
            return
        self.bf.seek(rp_off)
        self.bf.write(new_rpath)
        # Zero the remainder of the old string, including its terminator.
        self.bf.write(b'\0'*(len(old_rpath) - len(new_rpath) + 1))
if __name__ == '__main__':
    # CLI: with one argument print the rpath; with two, rewrite it.
    nargs = len(sys.argv)
    if not 2 <= nargs <= 3:
        print('This application resets target rpath.')
        print('Don\'t run this unless you know what you are doing.')
        print('%s: <binary file> <prefix>' % sys.argv[0])
        exit(1)
    elf = Elf(sys.argv[1])
    if nargs == 2:
        elf.print_rpath()
    else:
        elf.fix_rpath(sys.argv[2].encode('utf8'))
    #e.fix_deps(prefix.encode())
|
import bisect
import http
import logging
import ujson
from collections import defaultdict, namedtuple
from .constants import _CONTENT_ENC, _COLON, _WILDCARD, _ENC, _SLASH
from .request import Request
from .response import Response
from .router import Router
logger = logging.getLogger(__name__)
class App(Router):
    """WSGI application with sorted-route dispatch and path parameters.

    NOTE(review): Router.__init__ is never invoked here — confirm Router
    needs no initialisation of its own.
    """

    def __init__(self):
        # verb -> sorted list of (url_pattern_bytes, handler, params_type)
        self.verbs = defaultdict(lambda: [])

    def __call__(self, env, start_response):
        # WSGI entry point: dispatch, run the handler, serialise to JSON.
        try:
            fn, path_params = self.dispatch(env['REQUEST_METHOD'], env['SCRIPT_NAME'] + env['PATH_INFO'])
            if fn is not None:
                req = Request(env, path_params)
                result = fn(req)
                headers = req.response.headers
                # A tuple result carries an explicit (status, payload) pair;
                # NOTE(review): assumes result[0] behaves like an
                # http.HTTPStatus (has .value/.phrase) — confirm handlers.
                if isinstance(result, tuple):
                    code, content = result[0], [bytes(ujson.dumps(result[1]), _CONTENT_ENC)]
                else:
                    code, content = http.HTTPStatus.OK, [bytes(ujson.dumps(result), _CONTENT_ENC)]
            else:
                code, content, headers = Response.NOT_FOUND
        except Exception:
            # Any handler failure becomes a 500 with a logged traceback.
            logger.exception('Internal Server Error')
            code, content, headers = Response.INTERNAL_ERR
        start_response(f"{code.value} {code.phrase}", headers)
        return content

    def dispatch(self, verb, url):
        # Scan the verb's sorted route list byte-by-byte. A _WILDCARD byte in
        # a route pattern captures one path segment into param_stack; when a
        # candidate route stops matching, we advance to the next route and
        # rewind the cursors from the last captured parameter (backtracking).
        # Returns (handler, (params_type, param_stack)) or (None, reason).
        routes = self.verbs[verb]
        routes_len = len(routes)
        route_index = 0
        route_char_index = 0
        url_char_index = 0
        url_bytes = bytes(url, _ENC)
        param_stack = []
        item = None  # in-progress capture: (bytes, url rewind, route rewind)
        while True:
            try:
                if url_bytes[url_char_index] == routes[route_index][0][route_char_index]:
                    route_char_index += 1
                    url_char_index += 1
                elif routes[route_index][0][route_char_index] == _WILDCARD:
                    # Capture the parameter value up to the next '/'.
                    item = (bytearray(), url_char_index, route_char_index)
                    route_char_index += 1
                    while url_bytes[url_char_index] != _SLASH:
                        item[0].append(url_bytes[url_char_index])
                        url_char_index += 1
                    param_stack.append(item)
                    item = None
                elif url_bytes[url_char_index] > routes[route_index][0][route_char_index]:
                    # Routes are sorted: move to the next candidate and rewind
                    # to where the last parameter capture started.
                    route_index += 1
                    if len(param_stack) > 0:
                        _, url_char_index, route_char_index = param_stack.pop()
                else:
                    return None, "Smaller than current (but we've already looked through all smaller than current)"
            except IndexError:
                # Ran off the end of the url or of the route pattern.
                if route_index >= routes_len:
                    return None, "End of routes"
                if item is not None:
                    # The wildcard capture was cut short by end-of-url; keep it.
                    param_stack.append(item)
                if len(routes[route_index][0]) == route_char_index and len(url_bytes) == url_char_index:
                    # Both pattern and url fully consumed: match.
                    return routes[route_index][1], (routes[route_index][2], param_stack)
                else:
                    route_index += 1
                    if len(param_stack) > 0:
                        _, url_char_index, route_char_index = param_stack.pop()

    @staticmethod
    def parse_url(url):
        # Compile a route like '/users/:id' into pattern bytes where each
        # ':name' segment becomes a single _WILDCARD byte, plus the ordered
        # list of parameter names.
        res = bytearray()
        b_url = bytes(url, _ENC)
        params = []
        l = len(b_url)
        i = 0
        while i < l:
            if b_url[i] == _COLON and b_url[i - 1] == _SLASH:
                params.append("")
                i += 1
                while i < l and b_url[i] != _SLASH:
                    # NOTE(review): indexes the str with a byte offset —
                    # assumes ASCII-only route declarations; confirm.
                    params[-1] += url[i]
                    i += 1
                res.append(_WILDCARD)
            else:
                res.append(b_url[i])
                i += 1
        return res, params

    @staticmethod
    def normalize_verb(verb: str):
        # HTTP verbs are stored and matched upper-cased.
        return verb.upper()

    def _route(self, verbs, url, handler):
        # Register the handler for every verb, keeping each route list sorted
        # so dispatch can scan it in order.
        url, params = self.parse_url(url)
        params_type = namedtuple('PathParams', params)
        for verb in verbs:
            bisect.insort(self.verbs[self.normalize_verb(verb)], (url, handler, params_type))

    def route(self, verbs, url):
        # Decorator form of _route.
        def decorator(fn):
            self._route(verbs, url, fn)
            return fn
        return decorator
Improve performance.
import bisect
import http
import logging
import ujson
from collections import defaultdict, namedtuple
from .constants import _CONTENT_ENC, _COLON, _WILDCARD, _ENC, _SLASH
from .request import Request
from .response import Response
from .router import Router
logger = logging.getLogger(__name__)
class App(Router):
    """WSGI application with sorted-route dispatch and path parameters.

    NOTE(review): Router.__init__ is never invoked here — confirm Router
    needs no initialisation of its own.
    """

    def __init__(self):
        # verb -> sorted list of (url_pattern_bytes, handler, params_type)
        self.verbs = defaultdict(lambda: [])

    def __call__(self, env, start_response):
        # WSGI entry point: dispatch, run the handler, serialise to JSON.
        try:
            fn, path_params = self.dispatch(env['REQUEST_METHOD'], env['SCRIPT_NAME'] + env['PATH_INFO'])
            if fn is not None:
                req = Request(env, path_params)
                result = fn(req)
                headers = req.response.headers
                # A tuple result carries an explicit (status, payload) pair;
                # NOTE(review): assumes result[0] behaves like an
                # http.HTTPStatus (has .value/.phrase) — confirm handlers.
                if isinstance(result, tuple):
                    code, content = result[0], [bytes(ujson.dumps(result[1]), _CONTENT_ENC)]
                else:
                    code, content = http.HTTPStatus.OK, [bytes(ujson.dumps(result), _CONTENT_ENC)]
            else:
                code, content, headers = Response.NOT_FOUND
        except Exception:
            # Any handler failure becomes a 500 with a logged traceback.
            logger.exception('Internal Server Error')
            code, content, headers = Response.INTERNAL_ERR
        start_response(f"{code.value} {code.phrase}", headers)
        return content

    def dispatch(self, verb, url):
        # Scan the verb's sorted route list byte-by-byte. A _WILDCARD byte in
        # a route pattern captures one path segment; the capture tuple is
        # pushed onto param_stack *before* it is filled, so an IndexError
        # raised mid-capture still leaves it recorded. On a mismatch we
        # advance to the next route and rewind the cursors from the last
        # captured parameter (backtracking).
        # Returns (handler, (params_type, param_stack)) or (None, reason).
        routes = self.verbs[verb]
        routes_len = len(routes)
        route_index = 0
        route_char_index = 0
        url_char_index = 0
        url_bytes = bytes(url, _ENC)
        url_bytes_len = len(url_bytes)  # hoisted: used on every end-check
        param_stack = []
        while True:
            try:
                if url_bytes[url_char_index] == routes[route_index][0][route_char_index]:
                    route_char_index += 1
                    url_char_index += 1
                elif routes[route_index][0][route_char_index] == _WILDCARD:
                    # Capture the parameter value up to the next '/'.
                    item = (bytearray(), url_char_index, route_char_index)
                    param_stack.append(item)
                    route_char_index += 1
                    while url_bytes[url_char_index] != _SLASH:
                        item[0].append(url_bytes[url_char_index])
                        url_char_index += 1
                elif url_bytes[url_char_index] > routes[route_index][0][route_char_index]:
                    # Routes are sorted: move to the next candidate and rewind
                    # to where the last parameter capture started.
                    route_index += 1
                    if len(param_stack) > 0:
                        _, url_char_index, route_char_index = param_stack.pop()
                else:
                    return None, "Smaller than current (but we've already looked through all smaller than current)"
            except IndexError:
                # Ran off the end of the url or of the route pattern.
                if route_index >= routes_len:
                    return None, "End of routes"
                if url_bytes_len == url_char_index and len(routes[route_index][0]) == route_char_index:
                    # Both pattern and url fully consumed: match.
                    return routes[route_index][1], (routes[route_index][2], param_stack)
                else:
                    route_index += 1
                    if len(param_stack) > 0:
                        _, url_char_index, route_char_index = param_stack.pop()

    @staticmethod
    def parse_url(url):
        # Compile a route like '/users/:id' into pattern bytes where each
        # ':name' segment becomes a single _WILDCARD byte, plus the ordered
        # list of parameter names.
        res = bytearray()
        b_url = bytes(url, _ENC)
        params = []
        l = len(b_url)
        i = 0
        while i < l:
            if b_url[i] == _COLON and b_url[i - 1] == _SLASH:
                params.append("")
                i += 1
                while i < l and b_url[i] != _SLASH:
                    # NOTE(review): indexes the str with a byte offset —
                    # assumes ASCII-only route declarations; confirm.
                    params[-1] += url[i]
                    i += 1
                res.append(_WILDCARD)
            else:
                res.append(b_url[i])
                i += 1
        return res, params

    @staticmethod
    def normalize_verb(verb: str):
        # HTTP verbs are stored and matched upper-cased.
        return verb.upper()

    def _route(self, verbs, url, handler):
        # Register the handler for every verb, keeping each route list sorted
        # so dispatch can scan it in order.
        url, params = self.parse_url(url)
        params_type = namedtuple('PathParams', params)
        for verb in verbs:
            bisect.insort(self.verbs[self.normalize_verb(verb)], (url, handler, params_type))

    def route(self, verbs, url):
        # Decorator form of _route.
        def decorator(fn):
            self._route(verbs, url, fn)
            return fn
        return decorator
|
import os
import pickle
import csv
import logging
import sys
import coloredlogs
import matplotlib.pyplot as plt
import numpy as np
from area_predictor import AreaPredictor
from image import Image
from utils import Utils
plt.switch_backend('Qt5Agg')
csv.field_size_limit(sys.maxsize)
DATA_DIRECTORY = './data/'
DATA_THREEBAND = DATA_DIRECTORY + 'three_band/'
DATA_GRIDS_FILE = DATA_DIRECTORY + 'grid_sizes.csv'
DATA_AREAS_WKT = DATA_DIRECTORY + 'train_wkt_v4.csv'
def get_logger():
    """Create and return the 'dstl' logger with colored console output."""
    # TODO: Change field colors
    # TODO: Add fixed width across module+funcname+lineno
    log_format = '%(asctime)s %(module)s.%(funcName)s:%(lineno)d %(levelname)-8s %(message)s'
    logger = logging.getLogger('dstl')
    coloredlogs.install(level='DEBUG')
    handler = logging.StreamHandler()
    handler.setFormatter(coloredlogs.ColoredFormatter(log_format))
    logger.propagate = False
    # Replace whatever handlers a previous call may have attached.
    logger.handlers = [handler]
    logger.setLevel(logging.DEBUG)
    return logger
def load_grid_sizes():
    """Read grid_sizes.csv and return {image_id: (x_max, y_min)}."""
    log.info('Retrieving grid sizes')
    with open(DATA_GRIDS_FILE) as grid_file:
        reader = csv.reader(grid_file)
        next(reader, None)  # skip header
        # store x_max/y_min for each image
        return {image_id: (float(x), float(y)) for image_id, x, y in reader}
def scale_percentile(matrix):
    """Contrast-stretch an image array into [0, 1].

    Per channel, the 1st and 99th percentiles are mapped to 0 and 1 and the
    result is clipped.  matrix: ndarray of shape (w, h, d); returns the same
    shape as float64.
    """
    w, h, d = matrix.shape
    flat = np.reshape(matrix, [w * h, d]).astype(np.float64)
    lo = np.percentile(flat, 1, axis=0)
    span = np.percentile(flat, 99, axis=0) - lo
    stretched = (flat - lo[None, :]) / span[None, :]
    return np.reshape(stretched, [w, h, d]).clip(0, 1)
def compare_area_class(area_class, alpha=0.5):
    """Overlay the ground-truth and predicted masks for one area class.

    NOTE(review): relies on a global `area_classes` that is not defined in
    this module — presumably provided by an interactive session; confirm
    before calling from scripts.
    """
    plt.imshow(area_classes.classes[area_class].mask_image, alpha=alpha)
    plt.imshow(area_classes.classes[area_class].predicted_mask_image, alpha=alpha)
    plt.show()
def evaluate_jaccard(area_class_index):
    """Print the Jaccard index (intersection over union) for one area class.

    NOTE(review): relies on a global `area_classes` that is not defined in
    this module — presumably provided by an interactive session; confirm
    before calling from scripts.
    """
    area_class = area_classes.classes[area_class_index]
    intersection = area_class.predicted_submission_polygons.intersection(area_class.areas).area
    union = area_class.predicted_submission_polygons.union(area_class.areas).area
    # union == 0 guard avoids ZeroDivisionError for empty classes.
    print('Jaccard for area class {}: {:.3g} ({:.3g}/{:.3g})'.format( \
        area_class_index, 0 if union == 0 else intersection / union, intersection, union))
def load_area_data():
    """Read train_wkt_v4.csv into {image_id: {area_class: wkt_string}}."""
    with open(DATA_AREAS_WKT) as csv_file:
        data = {}
        reader = csv.reader(csv_file)
        # skip header
        next(reader, None)
        for image_id, area_class, areas in reader:
            # setdefault replaces the non-idiomatic `if not image_id in data`
            # membership check + manual insert.
            data.setdefault(image_id, {})[area_class] = areas
        return data
def visualize_prediction(predictions, predictor, image):
    """Convert raw predictions to polygons and plot truth vs. prediction.

    NOTE: mutates the *predictions* dict in place (raw masks are replaced by
    polygon lists) and registers them on image.area_classes.
    """
    # visualize prediction
    for area_class, prediction in predictions.items():
        binary_prediction = predictor.prediction_to_binary_prediction(prediction)
        prediction_polygons = predictor.prediction_mask_to_polygons(binary_prediction)
        predictions[area_class] = prediction_polygons
    image.area_classes.add_predictions(predictions)
    # plot training mask
    plt.imshow(scale_percentile(image.raw_data))
    plt.imshow(image.area_classes.image_mask, alpha=0.5)
    # plot predicted mask
    plt.figure()
    plt.imshow(scale_percentile(image.raw_data))
    plt.imshow(image.area_classes.prediction_image_mask, alpha=0.5)
    plt.show()
def create_and_train_predictor():
    """Build an AreaPredictor and train it on every annotated image.

    Reads module globals `area_data` and `grid_sizes`; returns the trained
    predictor.
    """
    # Area class labels as used in the training WKT file.
    LABELS = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
    predictor = AreaPredictor(LABELS)
    for image_id in area_data.keys():
        image = Image(image_id, grid_sizes[image_id], DATA_DIRECTORY)
        image.load_image()
        image.load_areas(area_data[image_id])
        # train on each annotated image (no prediction happens here)
        predictor.train(image)
    return predictor
def load_trained_predictor(file_name='predictors.p'):
    """Load pre-trained per-class predictors from a pickle file.

    SECURITY NOTE: pickle.load executes arbitrary code from the file —
    only load predictor files produced by a trusted run.
    """
    with open(file_name, 'rb') as predictors_file:
        predictor_data = pickle.load(predictors_file)
        predictor = AreaPredictor(predictors=predictor_data)
    return predictor
def create_submission(entries, file_name='submission.csv'):
    """Write (ImageId, ClassType, MultipolygonWKT) rows to a submission CSV.

    Arguments:
        entries: sequence of 3-tuples, already in submission order.
        file_name: output path (overwritten).
    """
    # newline='' is required by the csv module when writing, otherwise extra
    # blank lines appear on platforms with \r\n line endings.
    with open(file_name, 'w', newline='') as submission_file:
        writer = csv.writer(submission_file)
        writer.writerow(['ImageId', 'ClassType', 'MultipolygonWKT'])
        log.warning('Creating submission in {} with {} entries...'.format(file_name, len(entries)))
        for entry in entries:
            writer.writerow(entry)
        log.info('... done!')
# def main():
# setup
# NOTE(review): main() is commented out, so everything below runs at import
# time; importing this module triggers the full prediction pipeline.
log = get_logger()
grid_sizes = load_grid_sizes()
area_data = load_area_data()
predictor = load_trained_predictor()
# process all images
entries = []
image_ids = []
training_images_to_predict = []  # NOTE(review): never used below — confirm
# './correct-order' lists the image ids in the required submission order.
with open('./correct-order', 'r') as fh:
    reader = csv.reader(fh)
    for row in reader:
        image_ids.append(row[0])
for i, image_id in enumerate(image_ids):
    log.debug('Processing image {}/{} ({})'.format(i + 1, len(image_ids), image_id))
    image = Image(image_id, grid_sizes[image_id], DATA_DIRECTORY)
    image.load_image()
    # Training images have ground-truth areas; test images get None.
    areas = area_data[image_id] if image_id in area_data else None
    image.load_areas(areas)
    predictions = predictor.predict(image)
    image.area_classes.add_predictions(predictions)
    for area_id, area_class in image.area_classes.classes.items():
        entries.append((image_id, area_id, area_class.predicted_submission_polygons))
create_submission(entries)
# The triple-quoted strings below are disabled scratch code kept as notes.
"""
# process single image
IMAGE_ID = '6120_2_2'
image = Image(IMAGE_ID, grid_sizes[IMAGE_ID], DATA_DIRECTORY)
image.load_image()
image.load_areas(area_data[IMAGE_ID])
predictions = predictor.predict(image)
image.area_classes.add_predictions(predictions)
create_submission([image])
"""
"""
# train and predict same image
predictor.train(image)
predictions = predictor.predict(image)
# visualize prediction
for area_class, prediction in predictions.items():
    binary_prediction = predictor.prediction_to_binary_prediction(prediction)
    prediction_polygons = predictor.prediction_mask_to_polygons(binary_prediction)
    predictions[area_class] = prediction_polygons
image.area_classes.add_predictions(predictions)
# plot training mask
plt.imshow(scale_percentile(image.raw_data))
plt.imshow(image.area_classes.image_mask, alpha=0.5)
# plot predicted mask
plt.figure()
plt.imshow(scale_percentile(image.raw_data))
plt.imshow(image.area_classes.prediction_image_mask, alpha=0.5)
plt.show()
"""
# if __name__ == '__main__':
#     main()
logging.shutdown()
Add method to pickle the entries
import os
import pickle
import csv
import logging
import sys
import coloredlogs
import matplotlib.pyplot as plt
import numpy as np
from area_predictor import AreaPredictor
from image import Image
from utils import Utils
plt.switch_backend('Qt5Agg')
csv.field_size_limit(sys.maxsize)
DATA_DIRECTORY = './data/'
DATA_THREEBAND = DATA_DIRECTORY + 'three_band/'
DATA_GRIDS_FILE = DATA_DIRECTORY + 'grid_sizes.csv'
DATA_AREAS_WKT = DATA_DIRECTORY + 'train_wkt_v4.csv'
def get_logger():
    """Create and return the 'dstl' logger with colored console output."""
    # TODO: Change field colors
    # TODO: Add fixed width across module+funcname+lineno
    log_format = '%(asctime)s %(module)s.%(funcName)s:%(lineno)d %(levelname)-8s %(message)s'
    logger = logging.getLogger('dstl')
    coloredlogs.install(level='DEBUG')
    handler = logging.StreamHandler()
    handler.setFormatter(coloredlogs.ColoredFormatter(log_format))
    logger.propagate = False
    # Replace whatever handlers a previous call may have attached.
    logger.handlers = [handler]
    logger.setLevel(logging.DEBUG)
    return logger
def load_grid_sizes():
    """Read grid_sizes.csv and return {image_id: (x_max, y_min)}."""
    log.info('Retrieving grid sizes')
    with open(DATA_GRIDS_FILE) as grid_file:
        reader = csv.reader(grid_file)
        next(reader, None)  # skip header
        # store x_max/y_min for each image
        return {image_id: (float(x), float(y)) for image_id, x, y in reader}
def scale_percentile(matrix):
    """Contrast-stretch an image array into [0, 1].

    Per channel, the 1st and 99th percentiles are mapped to 0 and 1 and the
    result is clipped.  matrix: ndarray of shape (w, h, d); returns the same
    shape as float64.
    """
    w, h, d = matrix.shape
    flat = np.reshape(matrix, [w * h, d]).astype(np.float64)
    lo = np.percentile(flat, 1, axis=0)
    span = np.percentile(flat, 99, axis=0) - lo
    stretched = (flat - lo[None, :]) / span[None, :]
    return np.reshape(stretched, [w, h, d]).clip(0, 1)
def compare_area_class(area_class, alpha=0.5):
    """Overlay the ground-truth and predicted masks for one area class.

    NOTE(review): relies on a global `area_classes` that is not defined in
    this module — presumably provided by an interactive session; confirm
    before calling from scripts.
    """
    plt.imshow(area_classes.classes[area_class].mask_image, alpha=alpha)
    plt.imshow(area_classes.classes[area_class].predicted_mask_image, alpha=alpha)
    plt.show()
def evaluate_jaccard(area_class_index):
    """Print the Jaccard index (intersection over union) for one area class.

    NOTE(review): relies on a global `area_classes` that is not defined in
    this module — presumably provided by an interactive session; confirm
    before calling from scripts.
    """
    area_class = area_classes.classes[area_class_index]
    intersection = area_class.predicted_submission_polygons.intersection(area_class.areas).area
    union = area_class.predicted_submission_polygons.union(area_class.areas).area
    # union == 0 guard avoids ZeroDivisionError for empty classes.
    print('Jaccard for area class {}: {:.3g} ({:.3g}/{:.3g})'.format( \
        area_class_index, 0 if union == 0 else intersection / union, intersection, union))
def load_area_data():
    """Read train_wkt_v4.csv into {image_id: {area_class: wkt_string}}."""
    with open(DATA_AREAS_WKT) as csv_file:
        data = {}
        reader = csv.reader(csv_file)
        # skip header
        next(reader, None)
        for image_id, area_class, areas in reader:
            # setdefault replaces the non-idiomatic `if not image_id in data`
            # membership check + manual insert.
            data.setdefault(image_id, {})[area_class] = areas
        return data
def visualize_prediction(predictions, predictor, image):
    """Convert raw predictions to polygons and plot truth vs. prediction.

    NOTE: mutates the *predictions* dict in place (raw masks are replaced by
    polygon lists) and registers them on image.area_classes.
    """
    # visualize prediction
    for area_class, prediction in predictions.items():
        binary_prediction = predictor.prediction_to_binary_prediction(prediction)
        prediction_polygons = predictor.prediction_mask_to_polygons(binary_prediction)
        predictions[area_class] = prediction_polygons
    image.area_classes.add_predictions(predictions)
    # plot training mask
    plt.imshow(scale_percentile(image.raw_data))
    plt.imshow(image.area_classes.image_mask, alpha=0.5)
    # plot predicted mask
    plt.figure()
    plt.imshow(scale_percentile(image.raw_data))
    plt.imshow(image.area_classes.prediction_image_mask, alpha=0.5)
    plt.show()
def create_and_train_predictor():
    """Build an AreaPredictor and train it on every annotated image.

    Reads module globals `area_data` and `grid_sizes`; returns the trained
    predictor.
    """
    # Area class labels as used in the training WKT file.
    LABELS = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
    predictor = AreaPredictor(LABELS)
    for image_id in area_data.keys():
        image = Image(image_id, grid_sizes[image_id], DATA_DIRECTORY)
        image.load_image()
        image.load_areas(area_data[image_id])
        # train on each annotated image (no prediction happens here)
        predictor.train(image)
    return predictor
def load_trained_predictor(file_name='predictors.p'):
    """Load pre-trained per-class predictors from a pickle file.

    SECURITY NOTE: pickle.load executes arbitrary code from the file —
    only load predictor files produced by a trusted run.
    """
    with open(file_name, 'rb') as predictors_file:
        predictor_data = pickle.load(predictors_file)
        predictor = AreaPredictor(predictors=predictor_data)
    return predictor
def create_submission(entries, file_name='submission.csv'):
    """Write (ImageId, ClassType, MultipolygonWKT) rows to a submission CSV.

    Arguments:
        entries: sequence of 3-tuples, already in submission order.
        file_name: output path (overwritten).
    """
    # newline='' is required by the csv module when writing, otherwise extra
    # blank lines appear on platforms with \r\n line endings.
    with open(file_name, 'w', newline='') as submission_file:
        writer = csv.writer(submission_file)
        writer.writerow(['ImageId', 'ClassType', 'MultipolygonWKT'])
        log.warning('Creating submission in {} with {} entries...'.format(file_name, len(entries)))
        for entry in entries:
            writer.writerow(entry)
        log.info('... done!')
def save_entries(entries, file_name='predictions.p'):
    """Pickle the submission entries to *file_name* for later reuse."""
    log.warning('Saving entries to {}...'.format(file_name))
    with open(file_name, 'wb') as fh:
        pickle.dump(entries, fh)
# def main():
# setup
# NOTE(review): main() is commented out, so everything below runs at import
# time; importing this module triggers the full prediction pipeline.
log = get_logger()
grid_sizes = load_grid_sizes()
area_data = load_area_data()
predictor = load_trained_predictor()
# process all images
entries = []
image_ids = []
training_images_to_predict = []  # NOTE(review): never used below — confirm
# './correct-order' lists the image ids in the required submission order.
with open('./correct-order', 'r') as fh:
    reader = csv.reader(fh)
    for row in reader:
        image_ids.append(row[0])
for i, image_id in enumerate(image_ids):
    log.debug('Processing image {}/{} ({})'.format(i + 1, len(image_ids), image_id))
    image = Image(image_id, grid_sizes[image_id], DATA_DIRECTORY)
    image.load_image()
    # Training images have ground-truth areas; test images get None.
    areas = area_data[image_id] if image_id in area_data else None
    image.load_areas(areas)
    predictions = predictor.predict(image)
    image.area_classes.add_predictions(predictions)
    for area_id, area_class in image.area_classes.classes.items():
        entries.append((image_id, area_id, area_class.predicted_submission_polygons))
create_submission(entries)
save_entries(entries)
# The triple-quoted strings below are disabled scratch code kept as notes.
"""
# process single image
IMAGE_ID = '6120_2_2'
image = Image(IMAGE_ID, grid_sizes[IMAGE_ID], DATA_DIRECTORY)
image.load_image()
image.load_areas(area_data[IMAGE_ID])
predictions = predictor.predict(image)
image.area_classes.add_predictions(predictions)
create_submission([image])
"""
"""
# train and predict same image
predictor.train(image)
predictions = predictor.predict(image)
# visualize prediction
for area_class, prediction in predictions.items():
    binary_prediction = predictor.prediction_to_binary_prediction(prediction)
    prediction_polygons = predictor.prediction_mask_to_polygons(binary_prediction)
    predictions[area_class] = prediction_polygons
image.area_classes.add_predictions(predictions)
# plot training mask
plt.imshow(scale_percentile(image.raw_data))
plt.imshow(image.area_classes.image_mask, alpha=0.5)
# plot predicted mask
plt.figure()
plt.imshow(scale_percentile(image.raw_data))
plt.imshow(image.area_classes.prediction_image_mask, alpha=0.5)
plt.show()
"""
# if __name__ == '__main__':
#     main()
logging.shutdown()
|
Change None comparison to [] comparison in hmax function
|
#!/usr/bin/env python3
# file: dvd2webm.py
# vim:fileencoding=utf-8:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2016-02-10 22:42:09 +0100
# Last modified: 2018-01-28 17:27:23 +0100
#
# To the extent possible under law, R.F. Smith has waived all copyright and
# related or neighboring rights to dvd2webm.py. This work is published
# from the Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""
Convert an mpeg stream from a DVD to a webm file, using constrained rate VP9
encoding for video and libvorbis for audio.
It uses the first video stream and the first audio stream, unless otherwise
indicated.
"""
from collections import Counter
from datetime import datetime
import argparse
import logging
import os
import re
import subprocess as sp
import sys
__version__ = '0.10.0'
def main(argv):
    """Entry point for dvd2webm.py.

    Parses the command line, determines cropping and subtitle handling, and
    runs (or, with --dummy, just prints) the two-pass encode.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--log',
        default='info',
        choices=['debug', 'info', 'warning', 'error'],
        help="logging level (defaults to 'info')")
    parser.add_argument(
        '-v', '--version', action='version', version=__version__)
    parser.add_argument(
        '-s',
        '--start',
        type=str,
        default=None,
        help="time (hh:mm:ss) at which to start encoding")
    parser.add_argument('-c', '--crop', type=str, help="crop (w:h:x:y) to use")
    parser.add_argument('-d', '--dummy', action="store_true",
                        help="print commands but do not run them")
    parser.add_argument(
        '-t', '--subtitle', type=str, help="srt file or dvdsub track number")
    ahelp = "number of the audio track to use (default: 0; first audio track)"
    parser.add_argument('-a', '--audio', type=int, default=0, help=ahelp)
    parser.add_argument('fn', metavar='filename', help='MPEG file to process')
    args = parser.parse_args(argv)
    logging.basicConfig(
        level=getattr(logging, args.log.upper(), None),
        format='%(levelname)s: %(message)s')
    logging.debug('command line arguments = {}'.format(argv))
    logging.debug('parsed arguments = {}'.format(args))
    logging.info("processing '{}'.".format(args.fn))
    starttime = datetime.now()
    logging.info('started at {}.'.format(str(starttime)[:-7]))
    logging.info('using audio stream {}.'.format(args.audio))
    if not args.crop:
        logging.info('looking for cropping.')
        args.crop = findcrop(args.fn)
        width, height, _, _ = args.crop.split(':')
        # Full PAL frames need no cropping.
        if width in ['720', '704'] and height == '576':
            logging.info('standard format, no cropping necessary.')
            args.crop = None
    if args.crop:
        logging.info('using cropping ' + args.crop)
    subtrack, srtfile = None, None
    if args.subtitle:
        # A numeric value selects a dvdsub track, anything else is an SRT path.
        try:
            subtrack = str(int(args.subtitle))
            logging.info('using subtitle track ' + subtrack)
        except ValueError:
            srtfile = args.subtitle
            logging.info('using subtitle file ' + srtfile)
    a1 = mkargs(args.fn, 1, crop=args.crop, start=args.start, subf=srtfile,
                subt=subtrack, atrack=args.audio)
    a2 = mkargs(args.fn, 2, crop=args.crop, start=args.start, subf=srtfile,
                subt=subtrack, atrack=args.audio)
    # BUG FIX: the original unconditionally computed the encoding speed from
    # origbytes, which is undefined in dummy mode (NameError) and unavailable
    # when encode() fails and returns None (TypeError on unpacking).
    origbytes = None
    if not args.dummy:
        sizes = encode(a1, a2)
        if sizes:
            origbytes, _newbytes = sizes
    else:
        # (A second logging.basicConfig call here was a no-op and was removed;
        # logging is already configured above.)
        logging.info('first pass: ' + ' '.join(a1))
        logging.info('second pass: ' + ' '.join(a2))
    stoptime = datetime.now()
    logging.info('ended at {}.'.format(str(stoptime)[:-7]))
    runtime = stoptime - starttime
    logging.info('total running time {}.'.format(str(runtime)[:-7]))
    # Only report speed when an encode actually ran, succeeded, and took a
    # measurable amount of time (guards division by zero).
    if origbytes and runtime.seconds:
        encspeed = origbytes/(runtime.seconds*1000)
        logging.info('average input encoding speed {:.2f} kB/s.'.format(encspeed))
def findcrop(path, start='00:10:00', duration='00:00:01'):
    """
    Find the cropping of the video file.

    Arguments:
        path: location of the file to query.
        start: A string that defines where in the movie to start scanning.
            Defaults to 10 minutes from the start. Format HH:MM:SS.
        duration: A string defining how much of the movie to scan. Defaults to
            one second. Format HH:MM:SS.

    Returns:
        A string containing the cropping to use with ffmpeg.
    """
    args = [
        'ffmpeg',
        '-hide_banner',
        '-ss',
        start,  # Start at 10 minutes in.
        '-t',
        duration,  # Parse for one second.
        '-i',
        path,  # Path to the input file.
        '-vf',
        'cropdetect',  # Use the crop detect filter.
        '-an',  # Disable audio output.
        '-y',  # Overwrite output without asking.
        '-f',
        'rawvideo',  # write raw video output.
        '/dev/null'  # Write output to /dev/null
    ]
    proc = sp.run(
        args, universal_newlines=True, stdout=sp.DEVNULL, stderr=sp.PIPE)
    # Raw string for the pattern: '\d' in a plain literal is an invalid
    # escape sequence (DeprecationWarning since Python 3.6).
    # The most frequently detected crop wins.
    rv = Counter(re.findall(r'crop=(\d+:\d+:\d+:\d+)', proc.stderr))
    return rv.most_common(1)[0][0]
def reporttime(p, start, end):
    """Log how long a pass took.

    Arguments:
        p: number of the pass.
        start: datetime.datetime instance.
        end: datetime.datetime instance.
    """
    elapsed = str(end - start)[:-7]  # strip the microseconds
    logging.info('pass {} took {}.'.format(p, elapsed))
def mkargs(fn, npass, crop=None, start=None, subf=None, subt=None,
           atrack=0):
    """Create argument list for constrained quality VP9/vorbis encoding.

    Arguments:
        fn: String containing the path of the input file
        npass: Number of the pass. Must be 1 or 2.
        crop: Optional string containing the cropping to use. Must be in the
            format W:H:X:Y, where W, H, X and Y are numbers.
        start: Optional string containing the start time for the conversion.
            Must be in the format HH:MM:SS, where H, M and S are digits.
        subf: Optional string containing the name of the SRT file to use.
        subt: Optional string containing the index of the dvdsub stream to use.
        atrack: Optional number of the audio track to use. Defaults to 0.

    Returns:
        A list of strings suitable for calling a subprocess.
    """
    if npass not in (1, 2):
        raise ValueError('npass must be 1 or 2')
    # Raw strings for the patterns: '\d' in a plain literal is an invalid
    # escape sequence (DeprecationWarning since Python 3.6).
    if crop and not re.search(r'\d+:\d+:\d+:\d+', crop):
        raise ValueError('cropping must be in the format W:H:X:Y')
    if start and not re.search(r'\d{2}:\d{2}:\d{2}', start):
        raise ValueError('starting time must be in the format HH:MM:SS')
    numthreads = str(os.cpu_count() - 1)  # leave one core free
    basename = fn.rsplit('.', 1)[0]
    args = ['ffmpeg', '-loglevel', 'quiet']
    if start:
        args += ['-ss', start]
    args += ['-i', fn, '-passlogfile', basename]
    # The analysis pass can run at a faster (less thorough) speed setting.
    speed = '2'
    if npass == 1:
        speed = '4'
    # BUG FIX: the original had '--row-mt=1' '-threads' (missing comma),
    # which concatenated the adjacent literals into the single bogus token
    # '--row-mt=1-threads'. The correct ffmpeg/libvpx-vp9 option is
    # '-row-mt 1' (row-based multithreading).
    args += ['-c:v', 'libvpx-vp9', '-row-mt', '1', '-threads', numthreads,
             '-pass', str(npass), '-b:v', '1400k', '-crf', '33', '-g', '250',
             '-speed', speed, '-tile-columns', '1']
    if npass == 2:
        # Alternate reference frames and lookahead only help the final pass.
        args += ['-auto-alt-ref', '1', '-lag-in-frames', '25']
    args += ['-sn']  # no subtitle stream is copied; subs get burned in below
    if npass == 1:
        args += ['-an']  # audio is only needed in the final pass
    elif npass == 2:
        args += ['-c:a', 'libvorbis', '-q:a', '3']
    args += ['-f', 'webm']
    if not subt:  # SRT file (or no subtitles at all)
        args += ['-map', '0:v', '-map', '0:a:{}'.format(atrack)]
        vf = []
        if subf:
            vf = ['subtitles={}'.format(subf)]
        if crop:
            vf.append('crop={}'.format(crop))
        if vf:
            args += ['-vf', ','.join(vf)]
    else:
        # dvdsub track: overlay the bitmap subtitles, then crop.
        # NOTE(review): without crop the filter graph never labels its output
        # '[v]' yet '-map [v]' is always used — confirm this path is only
        # exercised together with cropping.
        fc = '[0:v][0:s:{}]overlay'.format(subt)
        if crop:
            fc += ',crop={}[v]'.format(crop)
        args += ['-filter_complex', fc, '-map', '[v]', '-map',
                 '0:a:{}'.format(atrack)]
    if npass == 1:
        outname = '/dev/null'
    else:
        outname = basename + '.webm'
    args += ['-y', outname]
    return args
def encode(args1, args2):
    """
    Run the encoding subprocesses.

    Arguments:
        args1: Commands to run the first encoding step as a subprocess.
        args2: Commands to run the second encoding step as a subprocess.

    Returns:
        (original size, new size) in bytes, or None when pass 1 fails.
    """
    logging.info('running pass 1...')
    logging.debug('pass 1: {}'.format(' '.join(args1)))
    start = datetime.utcnow()
    proc = sp.run(args1, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    end = datetime.utcnow()
    if proc.returncode:
        logging.error('pass 1 returned {}.'.format(proc.returncode))
        return
    else:
        reporttime(1, start, end)
    logging.info('running pass 2...')
    logging.debug('pass 2: {}'.format(' '.join(args2)))
    start = datetime.utcnow()
    proc = sp.run(args2, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    end = datetime.utcnow()
    if proc.returncode:
        logging.error('pass 2 returned {}.'.format(proc.returncode))
    else:
        reporttime(2, start, end)
    # The input path is the argument after '-i'.
    oidx = args2.index('-i') + 1
    origsize = os.path.getsize(args2[oidx])
    newsize = os.path.getsize(args2[-1])
    percentage = int(100 * newsize / origsize)
    sz = "the size of '{}' is {}% of the size of '{}'."
    # BUG FIX: the original formatted args2[4] into the message, which is the
    # '-ss' start time (not the input file name) whenever a start time is
    # given; use the computed input index instead.
    logging.info(sz.format(args2[-1], percentage, args2[oidx]))
    return origsize, newsize  # both in bytes.
if __name__ == '__main__':
    # Pass everything after the script name to the CLI entry point.
    main(sys.argv[1:])
Fix bug with row-mt.
#!/usr/bin/env python3
# file: dvd2webm.py
# vim:fileencoding=utf-8:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2016-02-10 22:42:09 +0100
# Last modified: 2018-02-25 03:04:55 +0100
#
# To the extent possible under law, R.F. Smith has waived all copyright and
# related or neighboring rights to dvd2webm.py. This work is published
# from the Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""
Convert an mpeg stream from a DVD to a webm file, using constrained rate VP9
encoding for video and libvorbis for audio.
It uses the first video stream and the first audio stream, unless otherwise
indicated.
"""
from collections import Counter
from datetime import datetime
import argparse
import logging
import os
import re
import subprocess as sp
import sys
__version__ = '0.10.0'
def main(argv):
    """Entry point for dvd2webm.py.

    Arguments:
        argv: Command line arguments, excluding the program name.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--log',
        default='info',
        choices=['debug', 'info', 'warning', 'error'],
        help="logging level (defaults to 'info')")
    parser.add_argument(
        '-v', '--version', action='version', version=__version__)
    parser.add_argument(
        '-s',
        '--start',
        type=str,
        default=None,
        help="time (hh:mm:ss) at which to start encoding")
    parser.add_argument('-c', '--crop', type=str, help="crop (w:h:x:y) to use")
    parser.add_argument('-d', '--dummy', action="store_true",
                        help="print commands but do not run them")
    parser.add_argument(
        '-t', '--subtitle', type=str, help="srt file or dvdsub track number")
    ahelp = "number of the audio track to use (default: 0; first audio track)"
    parser.add_argument('-a', '--audio', type=int, default=0, help=ahelp)
    parser.add_argument('fn', metavar='filename', help='MPEG file to process')
    args = parser.parse_args(argv)
    logging.basicConfig(
        level=getattr(logging, args.log.upper(), None),
        format='%(levelname)s: %(message)s')
    logging.debug('command line arguments = {}'.format(argv))
    logging.debug('parsed arguments = {}'.format(args))
    logging.info("processing '{}'.".format(args.fn))
    starttime = datetime.now()
    logging.info('started at {}.'.format(str(starttime)[:-7]))
    logging.info('using audio stream {}.'.format(args.audio))
    if not args.crop:
        logging.info('looking for cropping.')
        args.crop = findcrop(args.fn)
        width, height, _, _ = args.crop.split(':')
        # Full PAL frames need no cropping.
        if width in ['720', '704'] and height == '576':
            logging.info('standard format, no cropping necessary.')
            args.crop = None
    if args.crop:
        logging.info('using cropping ' + args.crop)
    subtrack, srtfile = None, None
    if args.subtitle:
        # A numeric argument selects a dvdsub track, anything else is
        # treated as an SRT file name.
        try:
            subtrack = str(int(args.subtitle))
            logging.info('using subtitle track ' + subtrack)
        except ValueError:
            srtfile = args.subtitle
            logging.info('using subtitle file ' + srtfile)
    a1 = mkargs(args.fn, 1, crop=args.crop, start=args.start, subf=srtfile,
                subt=subtrack, atrack=args.audio)
    a2 = mkargs(args.fn, 2, crop=args.crop, start=args.start, subf=srtfile,
                subt=subtrack, atrack=args.audio)
    origbytes = None
    if not args.dummy:
        origbytes, _ = encode(a1, a2)
    else:
        # Logging was already configured above; the original's second
        # basicConfig call here was a no-op and has been dropped.
        logging.info('first pass: ' + ' '.join(a1))
        logging.info('second pass: ' + ' '.join(a2))
    stoptime = datetime.now()
    logging.info('ended at {}.'.format(str(stoptime)[:-7]))
    runtime = stoptime - starttime
    logging.info('total running time {}.'.format(str(runtime)[:-7]))
    # Bug fix: 'origbytes' was referenced unconditionally, raising
    # NameError after --dummy runs and TypeError after failed encodes;
    # also guard against a zero-second runtime.
    if origbytes and runtime.seconds:
        encspeed = origbytes / (runtime.seconds * 1000)
        logging.info(
            'average input encoding speed {:.2f} kB/s.'.format(encspeed))
def findcrop(path, start='00:10:00', duration='00:00:01'):
    """
    Find the cropping of the video file.

    Arguments:
        path: location of the file to query.
        start: A string that defines where in the movie to start scanning.
            Defaults to 10 minutes from the start. Format HH:MM:SS.
        duration: A string defining how much of the movie to scan. Defaults to
            one second. Format HH:MM:SS.

    Returns:
        A string containing the cropping to use with ffmpeg.

    Raises:
        ValueError: when ffmpeg's cropdetect filter reported no cropping.
    """
    args = [
        'ffmpeg',
        '-hide_banner',
        '-ss',
        start,  # Where in the movie to start scanning.
        '-t',
        duration,  # How long to scan.
        '-i',
        path,  # Path to the input file.
        '-vf',
        'cropdetect',  # Use the crop detect filter.
        '-an',  # Disable audio output.
        '-y',  # Overwrite output without asking.
        '-f',
        'rawvideo',  # write raw video output.
        '/dev/null'  # Write output to /dev/null
    ]
    proc = sp.run(
        args, universal_newlines=True, stdout=sp.DEVNULL, stderr=sp.PIPE)
    # Raw string so '\d' is a regex digit class, not an invalid escape.
    matches = re.findall(r'crop=(\d+:\d+:\d+:\d+)', proc.stderr)
    if not matches:
        # Bug fix: most_common() on an empty Counter raised an opaque
        # IndexError; raise a descriptive error instead.
        raise ValueError("no cropping detected in '{}'".format(path))
    return Counter(matches).most_common(1)[0][0]
def reporttime(p, start, end):
    """
    Log the elapsed wall-clock time of an encoding pass.

    Arguments:
        p: number of the pass.
        start: datetime.datetime instance marking the beginning.
        end: datetime.datetime instance marking the end.
    """
    elapsed = str(end - start)[:-7]
    logging.info('pass {} took {}.'.format(p, elapsed))
def mkargs(fn, npass, crop=None, start=None, subf=None, subt=None,
           atrack=0):
    """Create argument list for constrained quality VP9/vorbis encoding.

    Arguments:
        fn: String containing the path of the input file
        npass: Number of the pass. Must be 1 or 2.
        crop: Optional string containing the cropping to use. Must be in the
            format W:H:X:Y, where W, H, X and Y are numbers.
        start: Optional string containing the start time for the conversion.
            Must be in the format HH:MM:SS, where H, M and S are digits.
        subf: Optional string containing the name of the SRT file to use.
        subt: Optional string containing the index of the dvdsub stream to use.
        atrack: Optional number of the audio track to use. Defaults to 0.

    Returns:
        A list of strings suitable for calling a subprocess.

    Raises:
        ValueError: when npass, crop or start is invalid.
    """
    if npass not in (1, 2):
        raise ValueError('npass must be 1 or 2')
    # Raw strings so '\d' is interpreted as a regex digit class.
    if crop and not re.search(r'\d+:\d+:\d+:\d+', crop):
        raise ValueError('cropping must be in the format W:H:X:Y')
    if start and not re.search(r'\d{2}:\d{2}:\d{2}', start):
        raise ValueError('starting time must be in the format HH:MM:SS')
    numthreads = str(os.cpu_count() - 1)
    basename = fn.rsplit('.', 1)[0]
    args = ['ffmpeg', '-loglevel', 'quiet']
    if start:
        args += ['-ss', start]
    args += ['-i', fn, '-passlogfile', basename]
    # The analysis pass may use a faster (less thorough) encoder speed.
    speed = '4' if npass == 1 else '2'
    # Bug fix: "'-row-mt=1' '-threads'" was two adjacent string literals
    # that Python concatenated into the single bogus option
    # '-row-mt=1-threads'; row-mt takes its value as a separate argument.
    args += ['-c:v', 'libvpx-vp9', '-row-mt', '1', '-threads', numthreads,
             '-pass', str(npass), '-b:v', '1400k', '-crf', '33', '-g', '250',
             '-speed', speed, '-tile-columns', '1']
    if npass == 2:
        args += ['-auto-alt-ref', '1', '-lag-in-frames', '25']
    args += ['-sn']  # Subtitles are burned in via filters, not copied.
    if npass == 1:
        args += ['-an']  # The analysis pass does not need audio.
    elif npass == 2:
        args += ['-c:a', 'libvorbis', '-q:a', '3']
    args += ['-f', 'webm']
    if not subt:  # No dvdsub track; optionally burn in an SRT file.
        args += ['-map', '0:v', '-map', '0:a:{}'.format(atrack)]
        vf = []
        if subf:
            vf = ['subtitles={}'.format(subf)]
        if crop:
            vf.append('crop={}'.format(crop))
        if vf:
            args += ['-vf', ','.join(vf)]
    else:  # Overlay the dvdsub stream on the video.
        fc = '[0:v][0:s:{}]overlay'.format(subt)
        if crop:
            fc += ',crop={}[v]'.format(crop)
        # NOTE(review): when subt is set but crop is not, the '[v]'
        # output label is never attached to the filter graph, yet it is
        # mapped below — confirm ffmpeg accepts this combination.
        args += ['-filter_complex', fc, '-map', '[v]', '-map',
                 '0:a:{}'.format(atrack)]
    outname = '/dev/null' if npass == 1 else basename + '.webm'
    args += ['-y', outname]
    return args
def encode(args1, args2):
    """
    Run the two ffmpeg encoding passes as subprocesses.

    Arguments:
        args1: Commands to run the first encoding step as a subprocess.
        args2: Commands to run the second encoding step as a subprocess.

    Returns:
        A (origsize, newsize) tuple in bytes, or (None, None) when a
        pass failed.
    """
    logging.info('running pass 1...')
    logging.debug('pass 1: {}'.format(' '.join(args1)))
    start = datetime.utcnow()
    proc = sp.run(args1, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    end = datetime.utcnow()
    if proc.returncode:
        logging.error('pass 1 returned {}.'.format(proc.returncode))
        return None, None
    else:
        reporttime(1, start, end)
    logging.info('running pass 2...')
    logging.debug('pass 2: {}'.format(' '.join(args2)))
    start = datetime.utcnow()
    proc = sp.run(args2, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    end = datetime.utcnow()
    if proc.returncode:
        logging.error('pass 2 returned {}.'.format(proc.returncode))
        # Bug fix: do not fall through to os.path.getsize on the
        # (possibly missing) output file after a failed pass.
        return None, None
    reporttime(2, start, end)
    # The input file name follows the '-i' option; a fixed index such as
    # args2[4] is wrong whenever a '-ss' option precedes '-i'.
    oidx = args2.index('-i') + 1
    origsize = os.path.getsize(args2[oidx])
    newsize = os.path.getsize(args2[-1])
    percentage = int(100 * newsize / origsize)
    sz = "the size of '{}' is {}% of the size of '{}'."
    logging.info(sz.format(args2[-1], percentage, args2[oidx]))
    return origsize, newsize  # both in bytes.
if __name__ == '__main__':
    # Invoke the CLI entry point with the arguments after the program name.
    main(sys.argv[1:])
|
import SCons.SConf
import re
import os
import os.path
import hashlib
def _run_prog(context, src, suffix):
    """Compile and run a test program, bypassing SCons's flawed cache.

    Arguments:
        context: SCons configure context.
        src: source text of the test program.
        suffix: file suffix selecting the language (e.g. '.c').

    Returns:
        The (error, output) pair from context.RunProg().
    """
    # Workaround for a SCons bug.
    # RunProg uses a global incrementing counter for temporary .c file names. The
    # file name depends on the number of invocations of that function, but not on
    # the file contents. When the user subsequently invokes scons with different
    # options, the sequence of file contents passed to RunProg may vary. However,
    # RunProg may incorrectly use cached results from a previous run saved for
    # different file contents but the same invocation number. To prevent this, we
    # monkey patch its global counter with a hashsum of the file contents.
    SCons.SConf._ac_build_counter = int(hashlib.md5(src.encode()).hexdigest(), 16)
    return context.RunProg(src, suffix)
def CheckLibWithHeaderExt(context, libs, headers, language, expr='1', run=True):
    """Check that libraries can be linked and (optionally) used at runtime.

    Builds a small program including the given headers that prints the
    value of ``expr``. When ``run`` is True the program is executed and an
    output of '0' counts as failure. On success the new libraries are
    appended to the environment's LIBS.

    Returns True on success, False otherwise.
    """
    if not isinstance(headers, list):
        headers = [headers]
    if not isinstance(libs, list):
        libs = [libs]
    # The first library gives the check its display name.
    name = libs[0]
    # Only add libraries that are not already configured.
    libs = [l for l in libs if not l in context.env['LIBS']]
    suffix = '.%s' % language.lower()
    includes = '\n'.join(['#include <%s>' % h for h in ['stdio.h'] + headers])
    src = """
%s
int main() {
    printf("%%d\\n", (int)(%s));
    return 0;
}
""" % (includes, expr)
    context.Message("Checking for %s library %s... " % (
        language.upper(), name))
    if run:
        err, out = _run_prog(context, src, suffix)
        # A program that prints 0 indicates the feature is absent.
        if out.strip() == '0':
            err = True
    else:
        err = context.CompileProg(src, suffix)
    if not err:
        context.Result('yes')
        context.env.Append(LIBS=libs)
        return True
    else:
        context.Result('no')
        return False
def CheckProg(context, prog):
    """Check whether *prog* is available as an executable in the PATH."""
    context.Message("Checking for executable %s... " % prog)
    found = context.env.Which(prog)
    if not found:
        context.Result('not found')
        return False
    context.Result(found[0])
    return True
def CheckCanRunProgs(context):
    """Check that the build host can execute freshly compiled binaries
    (fails e.g. when cross-compiling without an emulator)."""
    context.Message("Checking whether we can run compiled executables... ")
    src = """
int main() {
    return 0;
}
"""
    err, out = _run_prog(context, src, '.c')
    if err:
        context.Result('no')
        return False
    context.Result('yes')
    return True
def FindTool(context, var, toolchain, version, commands, prepend_path=[]):
    """Locate a build tool and store its path in env[var].

    Arguments:
        context: SCons configure context.
        var: environment variable to set (e.g. 'CC').
        toolchain: optional cross-compilation prefix for the tool name.
        version: optional version tuple the tool must match.
        commands: candidate commands; each is a name or [name, flags...].
        prepend_path: extra directories searched before PATH
            (read-only here, so the mutable default is harmless).

    Dies when no candidate is found or the found tool's version does not
    match; otherwise returns True.
    """
    env = context.env
    context.Message("Searching %s executable... " % var)
    # An explicit command-line assignment wins outright.
    if env.HasArg(var):
        context.Result(env[var])
        return True
    for tool_cmd in commands:
        if isinstance(tool_cmd, list):
            tool_name = tool_cmd[0]
            tool_flags = tool_cmd[1:]
        else:
            tool_name = tool_cmd
            tool_flags = []
        if not toolchain:
            tool = tool_name
        else:
            tool = '%s-%s' % (toolchain, tool_name)
        if version:
            # Prefer the most specific versioned name, e.g. 'cc-1.2.3'
            # over 'cc-1.2' over 'cc-1'.
            search_versions = [
                version[:3],
                version[:2],
                version[:1],
            ]
            default_ver = env.ParseCompilerVersion(tool)
            if default_ver and default_ver[:len(version)] == version:
                search_versions += [default_ver]
            for ver in reversed(sorted(set(search_versions))):
                versioned_tool = '%s-%s' % (tool, '.'.join(map(str, ver)))
                if env.Which(versioned_tool, prepend_path):
                    tool = versioned_tool
                    break
        tool_path = env.Which(tool, prepend_path)
        if tool_path:
            env[var] = tool_path[0]
            if tool_flags:
                env['%sFLAGS' % var] = ' '.join(tool_flags)
            break
    else:
        # No candidate matched at all.
        env.Die("can't detect %s: looked for any of: %s" % (
            var,
            ', '.join([' '.join(c) if isinstance(c, list) else c for c in commands])))
    if version:
        # Verify that the tool we found actually has the requested version.
        actual_ver = env.ParseCompilerVersion(env[var])
        if actual_ver:
            actual_ver = actual_ver[:len(version)]
        if actual_ver != version:
            env.Die(
                "can't detect %s: '%s' not found in PATH, '%s' version is %s" % (
                    var,
                    '%s-%s' % (tool, '.'.join(map(str, version))),
                    env[var],
                    '.'.join(map(str, actual_ver))))
    message = env[var]
    realpath = os.path.realpath(env[var])
    # Show the symlink target too when the tool path is a symlink.
    if realpath != env[var]:
        message += ' (%s)' % realpath
    context.Result(message)
    return True
def FindLLVMDir(context, version):
    """Search well-known install locations for an LLVM toolchain of the
    given version and append the first match to PATH.

    Always returns True: not finding a directory is non-fatal because
    the tools may still be reachable through the existing PATH.
    """
    context.Message(
        "Searching PATH for llvm %s... " % '.'.join(map(str, version)))

    def macos_dirs():
        # Xcode and Command Line Tools install locations.
        return [
            '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin',
            '/Library/Developer/CommandLineTools/usr/bin',
        ]

    def linux_dirs():
        # Version-suffixed directories first, most specific first, then
        # the unversioned fallback.
        suffixes = []
        for n in [3, 2, 1]:
            v = '.'.join(map(str, version[:n]))
            suffixes += [
                '-' + v,
                '/' + v,
            ]
        suffixes += ['']
        ret = []
        for s in suffixes:
            ret.append('/usr/lib/llvm%s/bin' % s)
        return ret

    for llvmdir in macos_dirs() + linux_dirs():
        if os.path.isdir(llvmdir):
            context.env['ENV']['PATH'] += ':' + llvmdir
            context.Result(llvmdir)
            return True
    context.Result('not found')
    return True
def _libdirs(host):
dirs = ['lib/' + host]
if 'x86_64-pc-linux-gnu' == host:
dirs += ['lib/x86_64-linux-gnu']
if 'x86_64' in host:
dirs += ['lib64']
dirs += ['lib']
return dirs
def _isprefix(prefix, subdir):
prefix = os.path.abspath(prefix)
subdir = os.path.abspath(subdir)
return subdir.startswith(prefix + os.sep)
def FindLibDir(context, prefix, host):
    """Pick the system library directory under *prefix* for *host*.

    Probes _libdirs() candidates in order and falls back to the last
    one ('lib') when none exists on disk. Stores the result in
    ROC_SYSTEM_LIBDIR and always returns True.
    """
    for d in _libdirs(host):
        libdir = os.path.join(prefix, d)
        if os.path.isdir(libdir):
            break
    context.env['ROC_SYSTEM_LIBDIR'] = libdir
    context.Result(libdir)
    return True
def FindPulseDir(context, prefix, build, host, version):
    """Locate the PulseAudio modules directory.

    When building natively, ask the installed pulseaudio daemon for its
    module search path; otherwise (or on failure) probe the library
    directories for 'pulse-<version>/modules'. Stores the result in
    ROC_PULSE_MODULEDIR and always returns True.
    """
    context.Message("Searching for PulseAudio modules directory... ")
    if build == host:
        pa_ver = context.env.CommandOutput(['pulseaudio', '--version'])
        # Bug fix: extract the version number with a regex; the previous
        # exact word match ('version in pa_ver.split()') failed when the
        # output decorates the number.
        m = re.search(r'([0-9.]+)', pa_ver or '')
        if m and m.group(1) == version:
            pa_conf = context.env.CommandOutput(['pulseaudio', '--dump-conf'])
            if pa_conf:
                for line in pa_conf.splitlines():
                    m = re.match(r'^\s*dl-search-path\s*=\s*(.*)$', line)
                    if m:
                        pa_dir = m.group(1)
                        # Only accept module paths inside our prefix.
                        if _isprefix(prefix, pa_dir):
                            context.env['ROC_PULSE_MODULEDIR'] = pa_dir
                            context.Result(pa_dir)
                            return True
    # Probe for an existing versioned modules directory.
    for d in _libdirs(host):
        pa_dir = os.path.join(prefix, d, 'pulse-' + version, 'modules')
        if os.path.isdir(pa_dir):
            context.env['ROC_PULSE_MODULEDIR'] = pa_dir
            context.Result(pa_dir)
            return True
    # Nothing exists yet: synthesize the path under the system libdir.
    for d in _libdirs(host):
        libdir = os.path.join(prefix, d)
        if os.path.isdir(libdir):
            break
    pa_dir = os.path.join(libdir, 'pulse-' + version, 'modules')
    context.env['ROC_PULSE_MODULEDIR'] = pa_dir
    context.Result(pa_dir)
    return True
def FindConfigGuess(context):
    """Locate an executable 'config.guess' script in common install
    locations (gnuconfig, automake, libtool, php build dirs).

    Returns True and stores the path in CONFIG_GUESS on success,
    False otherwise.
    """
    context.Message('Searching CONFIG_GUESS script... ')
    # An explicit command-line assignment wins outright.
    if context.env.HasArg('CONFIG_GUESS'):
        context.Result(context.env['CONFIG_GUESS'])
        return True
    prefixes = [
        '/usr',
        '/usr/local',
        '/usr/local/Cellar',
    ]
    dirs = [
        'share/gnuconfig',
        'share/misc',
        'share/automake-*',
        'automake/*/share/automake-*',
        'share/libtool/build-aux',
        'libtool/*/share/libtool/build-aux',
        'lib/php/build',
        'lib/php/*/build',
    ]
    for p in prefixes:
        for d in dirs:
            for f in context.env.Glob(os.path.join(p, d, 'config.guess')):
                path = str(f)
                # The script must be executable to be usable.
                if not os.access(path, os.X_OK):
                    continue
                context.env['CONFIG_GUESS'] = path
                context.Result(path)
                return True
    context.Result('not found')
    return False
def init(env):
    """Register the custom configure checks with the SCons environment."""
    env.CustomTests = {
        'CheckLibWithHeaderExt': CheckLibWithHeaderExt,
        'CheckProg': CheckProg,
        'CheckCanRunProgs': CheckCanRunProgs,
        'FindTool': FindTool,
        'FindLLVMDir': FindLLVMDir,
        'FindLibDir': FindLibDir,
        'FindPulseDir': FindPulseDir,
        'FindConfigGuess': FindConfigGuess,
    }
Fix the PulseAudio version parser: extract the version number from the `pulseaudio --version` output with a regular expression instead of matching exact whitespace-separated words.
import SCons.SConf
import re
import os
import os.path
import hashlib
def _run_prog(context, src, suffix):
    """Compile and run a test program; returns context.RunProg's
    (error, output) pair. The counter patch below defeats a stale-cache
    bug in SCons."""
    # Workaround for a SCons bug.
    # RunProg uses a global incrementing counter for temporary .c file names. The
    # file name depends on the number of invocations of that function, but not on
    # the file contents. When the user subsequently invokes scons with different
    # options, the sequence of file contents passed to RunProg may vary. However,
    # RunProg may incorrectly use cached results from a previous run saved for
    # different file contents but the same invocation number. To prevent this, we
    # monkey patch its global counter with a hashsum of the file contents.
    SCons.SConf._ac_build_counter = int(hashlib.md5(src.encode()).hexdigest(), 16)
    return context.RunProg(src, suffix)
def CheckLibWithHeaderExt(context, libs, headers, language, expr='1', run=True):
    """Check that libraries link and (when run=True) that a test program
    printing ``expr`` runs with non-'0' output; appends the libraries to
    LIBS on success and returns True/False."""
    if not isinstance(headers, list):
        headers = [headers]
    if not isinstance(libs, list):
        libs = [libs]
    name = libs[0]
    # Only add libraries that are not already configured.
    libs = [l for l in libs if not l in context.env['LIBS']]
    suffix = '.%s' % language.lower()
    includes = '\n'.join(['#include <%s>' % h for h in ['stdio.h'] + headers])
    src = """
%s
int main() {
    printf("%%d\\n", (int)(%s));
    return 0;
}
""" % (includes, expr)
    context.Message("Checking for %s library %s... " % (
        language.upper(), name))
    if run:
        err, out = _run_prog(context, src, suffix)
        # A program that prints 0 indicates the feature is absent.
        if out.strip() == '0':
            err = True
    else:
        err = context.CompileProg(src, suffix)
    if not err:
        context.Result('yes')
        context.env.Append(LIBS=libs)
        return True
    else:
        context.Result('no')
        return False
def CheckProg(context, prog):
    """Check whether *prog* is available as an executable in the PATH."""
    context.Message("Checking for executable %s... " % prog)
    path = context.env.Which(prog)
    if path:
        context.Result(path[0])
        return True
    else:
        context.Result('not found')
        return False
def CheckCanRunProgs(context):
    """Check that compiled test executables can be executed on this host
    (fails e.g. when cross-compiling without an emulator)."""
    context.Message("Checking whether we can run compiled executables... ")
    src = """
int main() {
    return 0;
}
"""
    err, out = _run_prog(context, src, '.c')
    if not err:
        context.Result('yes')
        return True
    else:
        context.Result('no')
        return False
def FindTool(context, var, toolchain, version, commands, prepend_path=[]):
    """Locate a build tool among *commands* (optionally toolchain-prefixed
    and version-suffixed), store its path in env[var], and die when no
    matching tool of the requested version is found."""
    env = context.env
    context.Message("Searching %s executable... " % var)
    # An explicit command-line assignment wins outright.
    if env.HasArg(var):
        context.Result(env[var])
        return True
    for tool_cmd in commands:
        if isinstance(tool_cmd, list):
            tool_name = tool_cmd[0]
            tool_flags = tool_cmd[1:]
        else:
            tool_name = tool_cmd
            tool_flags = []
        if not toolchain:
            tool = tool_name
        else:
            tool = '%s-%s' % (toolchain, tool_name)
        if version:
            # Prefer the most specific versioned tool name.
            search_versions = [
                version[:3],
                version[:2],
                version[:1],
            ]
            default_ver = env.ParseCompilerVersion(tool)
            if default_ver and default_ver[:len(version)] == version:
                search_versions += [default_ver]
            for ver in reversed(sorted(set(search_versions))):
                versioned_tool = '%s-%s' % (tool, '.'.join(map(str, ver)))
                if env.Which(versioned_tool, prepend_path):
                    tool = versioned_tool
                    break
        tool_path = env.Which(tool, prepend_path)
        if tool_path:
            env[var] = tool_path[0]
            if tool_flags:
                env['%sFLAGS' % var] = ' '.join(tool_flags)
            break
    else:
        env.Die("can't detect %s: looked for any of: %s" % (
            var,
            ', '.join([' '.join(c) if isinstance(c, list) else c for c in commands])))
    if version:
        # Verify the found tool actually has the requested version.
        actual_ver = env.ParseCompilerVersion(env[var])
        if actual_ver:
            actual_ver = actual_ver[:len(version)]
        if actual_ver != version:
            env.Die(
                "can't detect %s: '%s' not found in PATH, '%s' version is %s" % (
                    var,
                    '%s-%s' % (tool, '.'.join(map(str, version))),
                    env[var],
                    '.'.join(map(str, actual_ver))))
    message = env[var]
    realpath = os.path.realpath(env[var])
    # Show the symlink target too when the tool path is a symlink.
    if realpath != env[var]:
        message += ' (%s)' % realpath
    context.Result(message)
    return True
def FindLLVMDir(context, version):
    """Append the first existing well-known LLVM bin directory of the
    given version to PATH; always returns True (non-fatal search)."""
    context.Message(
        "Searching PATH for llvm %s... " % '.'.join(map(str, version)))

    def macos_dirs():
        # Xcode and Command Line Tools install locations.
        return [
            '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin',
            '/Library/Developer/CommandLineTools/usr/bin',
        ]

    def linux_dirs():
        # Version-suffixed directories first, most specific first.
        suffixes = []
        for n in [3, 2, 1]:
            v = '.'.join(map(str, version[:n]))
            suffixes += [
                '-' + v,
                '/' + v,
            ]
        suffixes += ['']
        ret = []
        for s in suffixes:
            ret.append('/usr/lib/llvm%s/bin' % s)
        return ret

    for llvmdir in macos_dirs() + linux_dirs():
        if os.path.isdir(llvmdir):
            context.env['ENV']['PATH'] += ':' + llvmdir
            context.Result(llvmdir)
            return True
    context.Result('not found')
    return True
def _libdirs(host):
dirs = ['lib/' + host]
if 'x86_64-pc-linux-gnu' == host:
dirs += ['lib/x86_64-linux-gnu']
if 'x86_64' in host:
dirs += ['lib64']
dirs += ['lib']
return dirs
def _isprefix(prefix, subdir):
prefix = os.path.abspath(prefix)
subdir = os.path.abspath(subdir)
return subdir.startswith(prefix + os.sep)
def FindLibDir(context, prefix, host):
    """Pick the system library directory under *prefix* for *host*,
    falling back to the last candidate ('lib'); stores the result in
    ROC_SYSTEM_LIBDIR and always returns True."""
    for d in _libdirs(host):
        libdir = os.path.join(prefix, d)
        if os.path.isdir(libdir):
            break
    context.env['ROC_SYSTEM_LIBDIR'] = libdir
    context.Result(libdir)
    return True
def FindPulseDir(context, prefix, build, host, version):
    """Locate the PulseAudio modules directory, preferring the running
    daemon's own dl-search-path on native builds; stores the result in
    ROC_PULSE_MODULEDIR and always returns True."""
    context.Message("Searching for PulseAudio modules directory... ")
    if build == host:
        pa_ver = context.env.CommandOutput(['pulseaudio', '--version'])
        # Extract the version number with a regex; the '--version'
        # output decorates the number with other words.
        m = re.search(r'([0-9.]+)', pa_ver or '')
        if m and m.group(1) == version:
            pa_conf = context.env.CommandOutput(['pulseaudio', '--dump-conf'])
            if pa_conf:
                for line in pa_conf.splitlines():
                    m = re.match(r'^\s*dl-search-path\s*=\s*(.*)$', line)
                    if m:
                        pa_dir = m.group(1)
                        # Only accept module paths inside our prefix.
                        if _isprefix(prefix, pa_dir):
                            context.env['ROC_PULSE_MODULEDIR'] = pa_dir
                            context.Result(pa_dir)
                            return True
    # Probe for an existing versioned modules directory.
    for d in _libdirs(host):
        pa_dir = os.path.join(prefix, d, 'pulse-' + version, 'modules')
        if os.path.isdir(pa_dir):
            context.env['ROC_PULSE_MODULEDIR'] = pa_dir
            context.Result(pa_dir)
            return True
    # Nothing exists yet: synthesize the path under the system libdir.
    for d in _libdirs(host):
        libdir = os.path.join(prefix, d)
        if os.path.isdir(libdir):
            break
    pa_dir = os.path.join(libdir, 'pulse-' + version, 'modules')
    context.env['ROC_PULSE_MODULEDIR'] = pa_dir
    context.Result(pa_dir)
    return True
def FindConfigGuess(context):
    """Locate an executable 'config.guess' script in common install
    locations; stores it in CONFIG_GUESS and returns True/False."""
    context.Message('Searching CONFIG_GUESS script... ')
    if context.env.HasArg('CONFIG_GUESS'):
        context.Result(context.env['CONFIG_GUESS'])
        return True
    prefixes = [
        '/usr',
        '/usr/local',
        '/usr/local/Cellar',
    ]
    dirs = [
        'share/gnuconfig',
        'share/misc',
        'share/automake-*',
        'automake/*/share/automake-*',
        'share/libtool/build-aux',
        'libtool/*/share/libtool/build-aux',
        'lib/php/build',
        'lib/php/*/build',
    ]
    for p in prefixes:
        for d in dirs:
            for f in context.env.Glob(os.path.join(p, d, 'config.guess')):
                path = str(f)
                # The script must be executable to be usable.
                if not os.access(path, os.X_OK):
                    continue
                context.env['CONFIG_GUESS'] = path
                context.Result(path)
                return True
    context.Result('not found')
    return False
def init(env):
    """Register the custom configure checks with the SCons environment."""
    env.CustomTests = {
        'CheckLibWithHeaderExt': CheckLibWithHeaderExt,
        'CheckProg': CheckProg,
        'CheckCanRunProgs': CheckCanRunProgs,
        'FindTool': FindTool,
        'FindLLVMDir': FindLLVMDir,
        'FindLibDir': FindLibDir,
        'FindPulseDir': FindPulseDir,
        'FindConfigGuess': FindConfigGuess,
    }
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import re
import copy
import tempfile
from collections import OrderedDict
import xml.etree.cElementTree as et
import yaml
import numpy as np
import scipy.optimize
from scipy.interpolate import UnivariateSpline
from scipy.optimize import brentq
import scipy.special as special
from numpy.core import defchararray
from astropy.extern import six
def init_matplotlib_backend(backend=None):
    """Initialize the matplotlib backend.  When no DISPLAY is
    available the backend is automatically set to 'Agg'.

    Parameters
    ----------
    backend : str
        matplotlib backend name.
    """
    import matplotlib
    if 'DISPLAY' not in os.environ:
        # Headless environment: force the non-interactive backend.
        matplotlib.use('Agg')
    elif backend is not None:
        matplotlib.use(backend)
def unicode_representer(dumper, uni):
    """Represent unicode values as plain YAML strings (avoids
    python-specific unicode tags in the output)."""
    node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
    return node


# Register the representer so text strings dump cleanly (Python 2's
# unicode type via six.text_type).
yaml.add_representer(six.text_type, unicode_representer)
def load_yaml(infile, **kwargs):
    """Load a python object from the YAML file at *infile*.

    Keyword arguments are forwarded to yaml.load. (NOTE(review):
    yaml.load without an explicit Loader is unsafe on untrusted input
    and deprecated in PyYAML >= 5.1 — callers should pass Loader=.)
    """
    # Bug fix: close the file handle deterministically instead of
    # leaving the open() result to the garbage collector.
    with open(infile) as f:
        return yaml.load(f, **kwargs)
def write_yaml(o, outfile, **kwargs):
    """Write the object *o* (converted via tolist) to the YAML file at
    *outfile*; keyword arguments are forwarded to yaml.dump."""
    # Bug fix: close the file handle deterministically instead of
    # leaving the open() result to the garbage collector.
    with open(outfile, 'w') as f:
        yaml.dump(tolist(o), f, **kwargs)
def load_npy(infile):
    """Load a python data structure stored as the single element of a
    numpy file."""
    arr = np.load(infile)
    return arr.flat[0]
def load_data(infile, workdir=None):
    """Load python data structure from either a YAML or numpy file.

    The supplied extension is ignored; the function probes for an
    existing '.npy' then '.yaml' file with the same stem.

    Returns the (resolved file path, loaded object) tuple.
    """
    infile = resolve_path(infile, workdir=workdir)
    # Strip whatever extension was supplied; the stem decides the probe.
    infile, ext = os.path.splitext(infile)
    if os.path.isfile(infile + '.npy'):
        infile += '.npy'
    elif os.path.isfile(infile + '.yaml'):
        infile += '.yaml'
    else:
        raise Exception('Input file does not exist.')
    ext = os.path.splitext(infile)[1]
    if ext == '.npy':
        return infile, load_npy(infile)
    elif ext == '.yaml':
        return infile, load_yaml(infile)
    else:
        raise Exception('Unrecognized extension.')
def resolve_path(path, workdir=None):
    """Return an absolute version of *path*, resolving a relative path
    against *workdir* (or the current directory when workdir is None)."""
    if os.path.isabs(path):
        return path
    if workdir is None:
        return os.path.abspath(path)
    return os.path.join(workdir, path)
def resolve_file_path(path, **kwargs):
    """Return *path* when it is an absolute, existing file; otherwise
    search for it under each directory in kwargs['search_dirs'].

    Environment variables are expanded for the existence tests, but the
    un-expanded path is what gets returned.  Raises an Exception when
    the file cannot be located.
    """
    dirs = kwargs.get('search_dirs', [])
    if os.path.isabs(os.path.expandvars(path)) and \
            os.path.isfile(os.path.expandvars(path)):
        return path
    for d in dirs:
        if not os.path.isdir(os.path.expandvars(d)):
            continue
        p = os.path.join(d, path)
        if os.path.isfile(os.path.expandvars(p)):
            return p
    raise Exception('Failed to resolve file path: %s' % path)
def resolve_file_path_list(pathlist, workdir, prefix='',
                           randomize=False):
    """Resolve the path of each file name in the file ``pathlist`` and
    write the updated paths to a new file.

    Parameters
    ----------
    pathlist : str
        Path to a text file with one file name per line.
    workdir : str
        Directory that non-existing relative paths are joined to, and
        where the output file is written.
    prefix : str
        Stem (or mkstemp prefix when randomize=True) of the output file.
    randomize : bool
        Use a unique temporary name for the output file.

    Returns the path of the written file.
    """
    # Bug fix: close the input file deterministically.
    with open(pathlist, 'r') as pf:
        files = [line.strip() for line in pf]
    newfiles = []
    for f in files:
        f = os.path.expandvars(f)
        if os.path.isfile(f):
            newfiles += [f]
        else:
            newfiles += [os.path.join(workdir, f)]
    if randomize:
        fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=workdir)
        # Bug fix: mkstemp returns an open descriptor; close it to
        # avoid leaking it (the file is re-opened below).
        os.close(fd)
    else:
        tmppath = os.path.join(workdir, prefix)
    tmppath += '.txt'
    with open(tmppath, 'w') as tmpfile:
        tmpfile.write("\n".join(newfiles))
    return tmppath
def is_fits_file(path):
    """Return True when *path* carries a FITS extension, gzipped or not."""
    return path.endswith(('.fit', '.fits', '.fit.gz', '.fits.gz'))
def collect_dirs(path, max_depth=1, followlinks=True):
    """Recursively find directories under the given path.

    Arguments:
        path: root of the search.
        max_depth: how many levels below *path* to descend.
        followlinks: when False, symlinked directories are listed but
            not descended into.

    Returns a de-duplicated (unordered) list that includes *path*
    itself, or an empty list when *path* is not a directory.
    """
    if not os.path.isdir(path):
        return []
    o = [path]
    if max_depth == 0:
        return o
    for subdir in os.listdir(path):
        subdir = os.path.join(path, subdir)
        if not os.path.isdir(subdir):
            continue
        o += [subdir]
        if os.path.islink(subdir) and not followlinks:
            continue
        if max_depth > 0:
            o += collect_dirs(subdir, max_depth=max_depth - 1)
    return list(set(o))
def match_regex_list(patterns, string):
    """Perform a regex match of a string against a list of patterns.
    Returns true if the string matches at least one pattern in the
    list."""
    return any(re.findall(pattern, string) for pattern in patterns)
def find_rows_by_string(tab, names, colnames=['assoc']):
    """Find the rows in a table ``tab`` that match at least one of the
    strings in ``names``. This method ignores whitespace and case
    when matching strings.

    Parameters
    ----------
    tab : `astropy.table.Table`
        Table that will be searched.

    names : list
        List of strings.

    colnames : list
        Names of the table columns that will be searched for matching
        strings.

    Returns
    -------
    mask : `~numpy.ndarray`
        Boolean mask for rows with matching strings.
    """
    # Start with an all-False mask and OR in matches per column.
    mask = np.empty(len(tab), dtype=bool); mask.fill(False)
    names = [name.lower().replace(' ', '') for name in names]
    for colname in colnames:
        if colname not in tab.columns:
            continue
        # Work on a copy so the caller's table is not modified by the
        # case/whitespace normalization below.
        col = tab[[colname]].copy()
        col[colname] = defchararray.replace(defchararray.lower(col[colname]),
                                            ' ', '')
        for name in names:
            mask |= col[colname] == name
    return mask
def join_strings(strings, sep='_'):
    """Join the non-empty entries of *strings* with *sep*.

    A single string is treated as a one-element list; None yields ''.
    """
    if strings is None:
        return ''
    if not isinstance(strings, list):
        strings = [strings]
    return sep.join(s for s in strings if s)
def format_filename(outdir, basename, prefix=None, extension=None):
    """Build the path outdir/[prefix_]basename[.extension]."""
    stem = join_strings([join_strings(prefix), basename])
    if extension is not None:
        if extension.startswith('.'):
            stem += extension
        else:
            stem += '.' + extension
    return os.path.join(outdir, stem)
def strip_suffix(filename, suffix):
    """Remove any of the given extensions from the end of *filename*."""
    for ext in suffix:
        pattern = r'\.%s$' % ext
        filename = re.sub(pattern, '', filename)
    return filename
def met_to_mjd(time):
    """Convert mission elapsed time (seconds) to mean julian date."""
    ref_mjd = 54682.65        # MJD at the MET reference epoch
    ref_met = 239557414.0     # reference MET in seconds
    return ref_mjd + (time - ref_met) / 86400.
# Equatorial (J2000) position of the north galactic pole and the galactic
# longitude of the celestial pole, converted to radians for the
# gal2eq/eq2gal transforms below.
RA_NGP = np.radians(192.8594812065348)
DEC_NGP = np.radians(27.12825118085622)
L_CP = np.radians(122.9319185680026)
def gal2eq(l, b):
    """Convert galactic (l, b) to equatorial J2000 (ra, dec).

    Inputs are in degrees; scalars are promoted to 1-d arrays.
    Returns (ra, dec) in degrees with ra wrapped to [0, 360) and dec
    to [-90, 90).
    """
    L_0 = L_CP - np.pi / 2.
    RA_0 = RA_NGP + np.pi / 2.
    l = np.array(l, ndmin=1)
    b = np.array(b, ndmin=1)
    l, b = np.radians(l), np.radians(b)
    sind = np.sin(b) * np.sin(DEC_NGP) + np.cos(b) * np.cos(DEC_NGP) * np.sin(
        l - L_0)
    dec = np.arcsin(sind)
    cosa = np.cos(l - L_0) * np.cos(b) / np.cos(dec)
    sina = (np.cos(b) * np.sin(DEC_NGP) * np.sin(l - L_0) - np.sin(b) * np.cos(
        DEC_NGP)) / np.cos(dec)
    dec = np.degrees(dec)
    # Clamp rounding errors so arccos stays inside its domain.
    cosa[cosa < -1.0] = -1.0
    cosa[cosa > 1.0] = 1.0
    ra = np.arccos(cosa)
    # Pick the correct quadrant from the sign of sin(alpha).
    ra[np.where(sina < 0.)] = -ra[np.where(sina < 0.)]
    ra = np.degrees(ra + RA_0)
    ra = np.mod(ra, 360.)
    dec = np.mod(dec + 90., 180.) - 90.
    return ra, dec
def eq2gal(ra, dec):
    """Convert equatorial J2000 (ra, dec) to galactic (l, b).

    Inputs are in degrees; scalars are promoted to 1-d arrays.
    Returns (l, b) in degrees with l wrapped to [0, 360) and b to
    [-90, 90).
    """
    L_0 = L_CP - np.pi / 2.
    RA_0 = RA_NGP + np.pi / 2.
    DEC_0 = np.pi / 2. - DEC_NGP
    ra = np.array(ra, ndmin=1)
    dec = np.array(dec, ndmin=1)
    ra, dec = np.radians(ra), np.radians(dec)
    # Bug fix: this value was assigned to the attribute ``np.sinb``,
    # silently monkey-patching the numpy module object instead of
    # binding a local variable.
    sinb = np.sin(dec) * np.cos(DEC_0) - np.cos(dec) * np.sin(
        ra - RA_0) * np.sin(DEC_0)
    b = np.arcsin(sinb)
    cosl = np.cos(dec) * np.cos(ra - RA_0) / np.cos(b)
    sinl = (np.sin(dec) * np.sin(DEC_0) + np.cos(dec) * np.sin(
        ra - RA_0) * np.cos(DEC_0)) / np.cos(b)
    b = np.degrees(b)
    # Clamp rounding errors so arccos stays inside its domain.
    cosl[cosl < -1.0] = -1.0
    cosl[cosl > 1.0] = 1.0
    l = np.arccos(cosl)
    # Pick the correct quadrant from the sign of sin(l).
    l[np.where(sinl < 0.)] = -l[np.where(sinl < 0.)]
    l = np.degrees(l + L_0)
    l = np.mod(l, 360.)
    b = np.mod(b + 90., 180.) - 90.
    return l, b
def xyz_to_lonlat(*args):
    """Convert cartesian (x, y, z) to spherical (lon, lat) in radians.

    Accepts either one array-like of three components or three separate
    arguments.
    """
    if len(args) == 1:
        x, y, z = args[0][0], args[0][1], args[0][2]
    else:
        x, y, z = args[0], args[1], args[2]
    lon = np.arctan2(y, x)
    lat = np.pi / 2. - np.arctan2(np.sqrt(x ** 2 + y ** 2), z)
    return lon, lat
def lonlat_to_xyz(lon, lat):
    """Convert spherical (lon, lat) in radians to a cartesian unit
    vector."""
    phi = lon
    theta = np.pi / 2. - lat
    sin_theta = np.sin(theta)
    return np.array([sin_theta * np.cos(phi),
                     sin_theta * np.sin(phi),
                     np.cos(theta)])
def project(lon0, lat0, lon1, lat1):
    """This function performs a stereographic projection on the unit
    vector (lon1,lat1) with the pole defined at the reference unit
    vector (lon0,lat0)."""
    # Precompute sines/cosines of the reference colatitude and longitude
    # for the rotation that carries (lon0, lat0) onto the z axis.
    costh = np.cos(np.pi / 2. - lat0)
    cosphi = np.cos(lon0)
    sinth = np.sin(np.pi / 2. - lat0)
    sinphi = np.sin(lon0)
    xyz = lonlat_to_xyz(lon1, lat1)
    x1 = xyz[0]
    y1 = xyz[1]
    z1 = xyz[2]
    # Rotate the target vector into the reference frame.
    x1p = x1 * costh * cosphi + y1 * costh * sinphi - z1 * sinth
    y1p = -x1 * sinphi + y1 * cosphi
    z1p = x1 * sinth * cosphi + y1 * sinth * sinphi + z1 * costh
    # Angular distance from the pole and azimuth in the rotated frame.
    r = np.arctan2(np.sqrt(x1p ** 2 + y1p ** 2), z1p)
    phi = np.arctan2(y1p, x1p)
    # NOTE(review): r is used directly as the radial plane coordinate
    # (azimuthal equidistant), not 2*tan(r/2) as a true stereographic
    # projection would use — confirm the intended projection.
    return r * np.cos(phi), r * np.sin(phi)
def scale_parameter(p):
    """Split a positive number into (value, scale) where scale is a
    power of ten chosen so that value is of order unity.

    String input is converted to float first (isstr is a project
    helper).  Non-positive input is returned unscaled as (p, 1.0).
    """
    if isstr(p):
        p = float(p)
    if p > 0:
        scale = 10 ** -np.round(np.log10(1. / p))
        return p / scale, scale
    else:
        return p, 1.0
def update_bounds(val, bounds):
    """Return *bounds* widened, if needed, so that *val* lies inside."""
    lower = min(val, bounds[0])
    upper = max(val, bounds[1])
    return lower, upper
def apply_minmax_selection(val, val_minmax):
    """Test whether *val* passes the (min, max) cuts in *val_minmax*.

    A None bound (or a None range) disables that cut; non-finite values
    fail any active bound.
    """
    if val_minmax is None:
        return True
    lo, hi = val_minmax[0], val_minmax[1]
    min_cut = lo is None or (np.isfinite(val) and val >= lo)
    max_cut = hi is None or (np.isfinite(val) and val <= hi)
    return bool(min_cut and max_cut)
def create_source_name(skydir):
    """Build a 'PS JHHMM.m+DDMM'-style source name from a sky direction.

    skydir is expected to expose astropy-style .icrs.ra.hms and
    .icrs.dec.dms attributes.
    """
    hms = skydir.icrs.ra.hms
    dms = skydir.icrs.dec.dms
    # The declination sign is carried by the degrees field via %+03.f;
    # the arcminutes use abs() so they do not repeat the sign.
    return 'PS J%02.f%04.1f%+03.f%02.f' % (hms.h,
                                           hms.m + hms.s / 60.,
                                           dms.d,
                                           np.abs(dms.m + dms.s / 60.))
def create_model_name(src):
    """Generate a name for a source object given its spatial/spectral
    properties.

    Parameters
    ----------
    src : `~fermipy.roi_model.Source`
        A source object.

    Returns
    -------
    name : str
        A source name.
    """
    o = ''
    spatial_type = src['SpatialModel'].lower()
    o += spatial_type
    # Gaussian models encode their width in the name.
    if spatial_type == 'gaussian':
        o += '_s%04.2f' % src['SpatialWidth']
    # Power laws also encode the spectral index; other spectral types
    # just append their lowercase name.
    if src['SpectrumType'] == 'PowerLaw':
        o += '_powerlaw_%04.2f' % float(src.spectral_pars['Index']['value'])
    else:
        o += '_%s' % (src['SpectrumType'].lower())
    return o
def cov_to_correlation(cov):
    """Compute the correlation matrix corresponding to a covariance
    matrix."""
    sigma = np.sqrt(np.diag(cov))
    inv_sigma = 1 / sigma
    return np.array(cov) * np.outer(inv_sigma, inv_sigma)
def twosided_cl_to_dlnl(cl):
    """Compute the delta-loglikehood value that corresponds to a
    two-sided interval of the given confidence level.

    Parameters
    ----------
    cl : float
        Confidence level.

    Returns
    -------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.
    """
    z = np.sqrt(2.) * special.erfinv(cl)
    return 0.5 * z ** 2
def twosided_dlnl_to_cl(dlnl):
    """Compute the confidence level that corresponds to a two-sided
    interval with a given change in the loglikelihood value.

    Parameters
    ----------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.

    Returns
    -------
    cl : float
        Confidence level.
    """
    return special.erf(np.sqrt(dlnl))
def onesided_cl_to_dlnl(cl):
"""Compute the delta-loglikehood values that corresponds to an
upper limit of the given confidence level.
Parameters
----------
cl : float
Confidence level.
Returns
-------
dlnl : float
Delta-loglikelihood value with respect to the maximum of the
likelihood function.
"""
alpha = 1.0 - cl
return 0.5 * np.power(np.sqrt(2.) * special.erfinv(1 - 2 * alpha), 2.)
def onesided_dlnl_to_cl(dlnl):
    """Compute the confidence level that corresponds to an upper limit
    with a given change in the loglikelihood value.

    Parameters
    ----------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.

    Returns
    -------
    cl : float
        Confidence level.
    """
    alpha = (1.0 - special.erf(np.sqrt(dlnl))) / 2.0
    return 1.0 - alpha
def interpolate_function_min(x, y):
    """Locate the minimum of a sampled function by finding the root of
    the first derivative of a quadratic interpolating spline.

    When the derivative does not change sign across the sampled range,
    the boundary toward which the function decreases is returned.
    """
    tck = scipy.interpolate.splrep(x, y, k=2, s=0)

    def deriv(t):
        return scipy.interpolate.splev(t, tck, der=1)

    s_lo = np.sign(deriv(x[0]))
    s_hi = np.sign(deriv(x[-1]))
    if s_lo == s_hi:
        # Monotonic over the range: the minimum sits on a boundary.
        return x[-1] if s_lo == -1 else x[0]
    return scipy.optimize.brentq(deriv, x[0], x[-1],
                                 xtol=1e-10 * np.median(x))
def find_function_root(fn, x0, xb, delta=0.0):
    """Find the root of a function: f(x)+delta in the interval encompassed
    by x0 and xb.

    Parameters
    ----------
    fn : function
        Python function.
    x0 : float
        Fixed bound for the root search.  Used as the lower or upper
        bound depending on the relative value of xb.
    xb : float
        Free bound for the root search.  Doubled (or halved, when
        below x0) until the sign of fn+delta changes.

    Returns
    -------
    float
        Root position, or NaN when no root could be bracketed.
    """
    if x0 == xb:
        return np.nan

    g = lambda t: fn(t) + delta

    # Expand/contract the free bound (at most 10 times) until the
    # interval brackets a sign change.
    for _ in range(10):
        if np.sign(g(xb)) != np.sign(g(x0)):
            break
        xb = xb * 0.5 if xb < x0 else xb * 2.0

    if np.sign(g(xb)) == np.sign(g(x0)):
        # No sign change found within the search range.
        return np.nan

    xtol = 1e-10 * xb if x0 == 0 else 1e-10 * (xb + x0)
    return brentq(g, x0, xb, xtol=xtol)
def get_parameter_limits(xval, loglike, ul_confidence=0.95, tol=1E-3):
    """Compute upper/lower limits, peak position, and 1-sigma errors
    from a 1-D likelihood function.  This function uses the
    delta-loglikelihood method to evaluate parameter limits by
    searching for the point at which the change in the log-likelihood
    value with respect to the maximum equals a specific value.  A
    parabolic spline fit to the log-likelihood values is used to
    improve the accuracy of the calculation.

    Parameters
    ----------
    xval : `~numpy.ndarray`
        Array of parameter values.

    loglike : `~numpy.ndarray`
        Array of log-likelihood values.

    ul_confidence : float
        Confidence level to use for limit calculation.

    tol : float
        Tolerance parameter for spline.

    Returns
    -------
    dict
        Keys: x0 (peak), ul/ll (limits), err_lo/err_hi/err (1-sigma
        errors), lnlmax (loglike at the peak).  All NaN when the
        spline fit fails.
    """
    deltalnl = onesided_cl_to_dlnl(ul_confidence)

    # Spline construction raises when xval/loglike are invalid (e.g.
    # contain NaN); return NaN-filled results instead of propagating
    # the exception.  (Mirrors the guard in the later revision of this
    # function, but with a narrowed except clause.)
    try:
        spline = UnivariateSpline(xval, loglike, k=2, s=tol)
    except Exception:
        return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,
                'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,
                'lnlmax': np.nan}

    sd = spline.derivative()
    imax = np.argmax(loglike)
    ilo = max(imax - 2, 0)
    ihi = min(imax + 2, len(xval) - 1)

    # Find the peak
    x0 = xval[imax]

    # Refine the peak position when the derivative changes sign in the
    # bracketing interval.
    if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):
        x0 = find_function_root(sd, xval[ilo], xval[ihi])

    lnlmax = float(spline(x0))

    fn = lambda t: spline(t) - lnlmax
    ul = find_function_root(fn, x0, xval[-1], deltalnl)
    ll = find_function_root(fn, x0, xval[0], deltalnl)
    err_lo = np.abs(x0 - find_function_root(fn, x0, xval[0], 0.5))
    err_hi = np.abs(x0 - find_function_root(fn, x0, xval[-1], 0.5))

    # Symmetrize the error when both sides are defined; otherwise fall
    # back to the upper error.
    if np.isfinite(err_lo):
        err = 0.5 * (err_lo + err_hi)
    else:
        err = err_hi

    o = {'x0': x0, 'ul': ul, 'll': ll,
         'err_lo': err_lo, 'err_hi': err_hi, 'err': err,
         'lnlmax': lnlmax}
    return o
def poly_to_parabola(coeff):
    """Convert quadratic polynomial coefficients (highest power first)
    to parabola parameters: vertex position, width, and offset."""
    a, b, c = coeff[0], coeff[1], coeff[2]
    x0 = -b / (2 * a)
    sigma = np.sqrt(1. / np.abs(2.0 * a))
    y0 = (1. - (b ** 2 - 4 * a * c)) / (4 * a)
    return x0, sigma, y0
def parabola(xy, amplitude, x0, y0, sx, sy, theta):
    """Evaluate a rotated 2D parabola:

    f(x,y) = amplitude - (1/2) d^T R Sigma R^T d

    with d = [(x - x0), (y - y0)], R a rotation by ``theta`` and Sigma
    the diagonal inverse-variance matrix diag(1/sx^2, 1/sy^2).

    Parameters
    ----------
    xy : tuple
        (x, y) arrays at which the parabola is evaluated.
    amplitude : float
        Constant offset value.
    x0, y0 : float
        Centroid coordinates.
    sx, sy : float
        Widths along the first/second principal axis (x/y for theta=0).
    theta : float
        Rotation angle in radians.

    Returns
    -------
    vals : `~numpy.ndarray`
        Parabola values at the input points.
    """
    x, y = xy[0], xy[1]
    dx = x - x0
    dy = y - y0

    cth = np.cos(theta)
    sth = np.sin(theta)
    s2th = np.sin(2 * theta)

    # Standard quadratic-form coefficients of a rotated ellipse.
    a = (cth ** 2) / (2 * sx ** 2) + (sth ** 2) / (2 * sy ** 2)
    b = -s2th / (4 * sx ** 2) + s2th / (4 * sy ** 2)
    c = (sth ** 2) / (2 * sx ** 2) + (cth ** 2) / (2 * sy ** 2)

    return amplitude - (a * dx ** 2 + 2 * b * dx * dy + c * dy ** 2)
def fit_parabola(z, ix, iy, dpix=2, zmin=None):
    """Fit a parabola to a 2D numpy array. This function will fit a
    parabola with the functional form described in
    `~fermipy.utils.parabola` to a 2D slice of the input array `z`.
    The boundaries of the fit region within z are set with the pixel
    centroid (`ix` and `iy`) and region size (`dpix`).

    Parameters
    ----------
    z : `~numpy.ndarray`

    ix : int
        X index of center pixel of fit region in array `z`.

    iy : int
        Y index of center pixel of fit region in array `z`.

    dpix : int
        Size of fit region expressed as a pixel offset with respect the
        centroid.  The size of the sub-array will be (dpix*2 + 1) x
        (dpix*2 + 1).

    zmin : float, optional
        When set, only pixels with z > zmin are included in the fit.

    Returns
    -------
    dict
        Fit results: centroid (x0, y0), widths (sigmax/sigmay/sigma),
        peak value (z0), rotation angle (theta), eccentricities, the
        per-point mean squared residual (rchi2), the raw parameter
        vector (popt), and a fit_success flag.
    """
    # Clip the fit window to the array bounds.
    xmin = max(0, ix - dpix)
    xmax = min(z.shape[0], ix + dpix + 1)
    ymin = max(0, iy - dpix)
    ymax = min(z.shape[1], iy + dpix + 1)
    sx = slice(xmin, xmax)
    sy = slice(ymin, ymax)
    nx = sx.stop - sx.start
    ny = sy.stop - sy.start
    # 2D coordinate grids (in array-index units) over the fit window.
    x = np.arange(sx.start, sx.stop)
    y = np.arange(sy.start, sy.stop)
    x = x[:, np.newaxis] * np.ones((nx, ny))
    y = y[np.newaxis, :] * np.ones((nx, ny))
    # Seed the fit with independent 1D quadratic fits along each axis
    # through the center pixel.
    coeffx = poly_to_parabola(np.polyfit(
        np.arange(sx.start, sx.stop), z[sx, iy], 2))
    coeffy = poly_to_parabola(np.polyfit(
        np.arange(sy.start, sy.stop), z[ix, sy], 2))
    p0 = [coeffx[2], coeffx[0], coeffy[0], coeffx[1], coeffy[1], 0.0]
    m = np.isfinite(z[sx, sy])
    if zmin is not None:
        # NOTE(review): this REPLACES the finite-value mask rather than
        # combining with it -- non-finite pixels above zmin would pass;
        # confirm this is intended.
        m = z[sx, sy] > zmin
    o = {'fit_success': True, 'p0': p0}
    # curve_fit requires a flat output array; wrap parabola accordingly.
    def curve_fit_fn(*args):
        return np.ravel(parabola(*args))
    try:
        popt, pcov = scipy.optimize.curve_fit(curve_fit_fn,
                                              (np.ravel(x[m]), np.ravel(y[m])),
                                              np.ravel(z[sx, sy][m]), p0)
    except Exception:
        # Fall back to the initial-guess parameters when the fit fails.
        popt = copy.deepcopy(p0)
        o['fit_success'] = False
    # Residuals of the (possibly failed) model evaluated on the masked
    # pixels.  NOTE(review): rchi2 is sum(df^2)/npoints with no error
    # weighting or degrees-of-freedom correction.
    fm = parabola((x[m], y[m]), *popt)
    df = fm - z[sx, sy][m]
    rchi2 = np.sum(df ** 2) / len(fm)
    o['rchi2'] = rchi2
    o['x0'] = popt[1]
    o['y0'] = popt[2]
    o['sigmax'] = popt[3]
    o['sigmay'] = popt[4]
    o['sigma'] = np.sqrt(o['sigmax'] ** 2 + o['sigmay'] ** 2)
    o['z0'] = popt[0]
    o['theta'] = popt[5]
    o['popt'] = popt
    # Eccentricity from the major (a) and minor (b) widths.
    a = max(o['sigmax'], o['sigmay'])
    b = min(o['sigmax'], o['sigmay'])
    o['eccentricity'] = np.sqrt(1 - b ** 2 / a ** 2)
    o['eccentricity2'] = np.sqrt(a ** 2 / b ** 2 - 1)
    return o
def split_bin_edges(edges, npts=2):
    """Subdivide an array of bins by splitting each bin into ``npts``
    subintervals.

    Parameters
    ----------
    edges : `~numpy.ndarray`
        Bin edge array.
    npts : int
        Number of intervals into which each bin will be subdivided.

    Returns
    -------
    edges : `~numpy.ndarray`
        Subdivided bin edge array.
    """
    if npts < 2:
        return edges

    lo = edges[:-1, None]
    hi = edges[1:, None]
    frac = np.linspace(0.0, 1.0, npts + 1)[None, :]
    pts = lo + (hi - lo) * frac
    return np.unique(np.ravel(pts))
def center_to_edge(center):
    """Compute bin edges from bin centers; the outermost edges are
    extrapolated using the widths of the adjacent bins.  A single
    center is given unit width."""
    if len(center) == 1:
        width = np.array(1.0, ndmin=1)
    else:
        width = center[1:] - center[:-1]
    inner = 0.5 * (center[1:] + center[:-1])
    first = center[0] - 0.5 * width[0]
    last = center[-1] + 0.5 * width[-1]
    return np.append(np.insert(inner, 0, first), last)
def edge_to_center(edges):
    """Compute bin centers from an array of bin edges."""
    return (edges[:-1] + edges[1:]) * 0.5
def edge_to_width(edges):
    """Compute bin widths from an array of bin edges."""
    widths = edges[1:] - edges[:-1]
    return widths
def val_to_bin(edges, x):
    """Convert axis coordinate to bin index."""
    vals = np.array(x, ndmin=1)
    return np.digitize(vals, edges) - 1
def val_to_pix(center, x):
    """Convert axis coordinate to a (fractional) pixel coordinate by
    linear interpolation on the bin centers."""
    pix = np.arange(len(center)).astype(float)
    return np.interp(x, center, pix)
def val_to_edge(edges, x):
    """Convert axis coordinate to the index of the nearest bin edge."""
    edges = np.array(edges)
    # Shift each edge down by half the local bin width so that
    # digitize snaps values to the closest edge.
    widths = edges[1:] - edges[:-1]
    widths = np.insert(widths, 0, widths[0])
    idx = np.digitize(np.array(x, ndmin=1), edges - 0.5 * widths) - 1
    idx[idx < 0] = 0
    return idx
def val_to_bin_bounded(edges, x):
    """Convert axis coordinate to bin index, clipping out-of-range
    values into the first/last bin."""
    nbins = len(edges) - 1
    ibin = np.digitize(np.array(x, ndmin=1), edges) - 1
    np.clip(ibin, 0, nbins - 1, out=ibin)
    return ibin
def extend_array(edges, binsz, lo, hi):
    """Extend an array to encompass lo and hi values."""
    out = copy.deepcopy(edges)
    nlo = int(np.ceil((out[0] - lo) / binsz))
    nhi = int(np.ceil((hi - out[-1]) / binsz))
    if nlo > 0:
        below = np.linspace(out[0] - nlo * binsz, out[0], nlo + 1)
        out = np.concatenate((below[:-1], out))
    if nhi > 0:
        above = np.linspace(out[-1], out[-1] + nhi * binsz, nhi + 1)
        out = np.concatenate((out, above[1:]))
    return out
def mkdir(dir):
    """Create directory ``dir`` (including parents) if it does not
    exist and return its path.

    Uses try/except instead of an exists-then-create check so that two
    processes racing to create the same directory do not raise.
    """
    try:
        os.makedirs(dir)
    except OSError:
        # Swallow the error only if the directory now exists (it was
        # created concurrently or already present); otherwise re-raise.
        if not os.path.isdir(dir):
            raise
    return dir
def fits_recarray_to_dict(table):
    """Convert a FITS recarray to a python dictionary.

    Each column becomes an entry keyed by column name; values are
    numpy arrays coerced to native float/int/str dtypes based on the
    type of the first element.
    """
    cols = {}
    for icol, col in enumerate(table.columns.names):
        col_data = table.data[col]
        # NOTE(review): exact type comparisons are fragile and
        # np.string_ was removed in NumPy 2.0 -- consider isinstance
        # checks against np.floating/np.integer instead.
        if type(col_data[0]) == np.float32:
            cols[col] = np.array(col_data, dtype=float)
        elif type(col_data[0]) == np.float64:
            cols[col] = np.array(col_data, dtype=float)
        elif type(col_data[0]) == str:
            cols[col] = np.array(col_data, dtype=str)
        elif type(col_data[0]) == np.string_:
            cols[col] = np.array(col_data, dtype=str)
        elif type(col_data[0]) == np.int16:
            cols[col] = np.array(col_data, dtype=int)
        elif type(col_data[0]) == np.ndarray:
            cols[col] = np.array(col_data)
        else:
            # Unsupported element type: fail loudly rather than guess.
            raise Exception(
                'Unrecognized column type: %s %s' % (col, str(type(col_data))))
    return cols
def unicode_to_str(args):
    """Return a copy of dictionary ``args`` with text values converted
    to ``str``; all other values are passed through unchanged.

    The original referenced the ``unicode`` builtin directly, which
    raises NameError on Python 3 (where all strings are ``str``);
    resolve the text type once with a fallback instead.
    """
    try:
        text_type = unicode
    except NameError:
        text_type = str
    o = {}
    for k, v in args.items():
        if isinstance(v, text_type):
            o[k] = str(v)
        else:
            o[k] = v
    return o
def isstr(s):
    """String instance testing method that works under both Python 2.X
    and 3.X.  Returns true if the input is a string."""
    try:
        text_types = (str, basestring)
    except NameError:
        # Python 3: basestring does not exist.
        text_types = (str,)
    return isinstance(s, text_types)
def xmlpath_to_path(path):
    """Convert XML-style environment variable references to shell
    style, e.g. ``$(VAR)/file`` -> ``$VAR/file``.  None passes
    through."""
    if path is None:
        return None
    return re.sub(r'\$\(([a-zA-Z\_]+)\)', r'$\1', path)
def path_to_xmlpath(path):
    """Convert shell-style environment variable references to XML
    style, e.g. ``$VAR/file`` -> ``$(VAR)/file``.  None passes
    through."""
    if path is None:
        return None
    return re.sub(r'\$([a-zA-Z\_]+)', r'$(\1)', path)
def create_xml_element(root, name, attrib):
    """Append a child element ``name`` to ``root`` and populate its
    attributes from the dictionary ``attrib``.

    Booleans are written as '0'/'1', strings verbatim, and numeric
    values only when finite (NaN/inf attributes are silently dropped).

    ``dict.iteritems`` was removed in Python 3; ``items`` works on
    both Python 2 and 3.
    """
    el = et.SubElement(root, name)
    for k, v in attrib.items():
        if isinstance(v, bool):
            el.set(k, str(int(v)))
        elif isstr(v):
            el.set(k, v)
        elif np.isfinite(v):
            el.set(k, str(v))
    return el
def load_xml_elements(root, path):
    """Collect attribute dictionaries of all elements matching ``path``
    under ``root``.  Named elements are keyed by their 'name'
    attribute; attributes of anonymous elements are merged directly
    into the result."""
    out = {}
    for node in root.findall(path):
        attrs = node.attrib
        if 'name' in attrs:
            out[attrs['name']] = copy.deepcopy(attrs)
        else:
            out.update(attrs)
    return out
def prettify_xml(elem):
    """Return a pretty-printed XML string for the Element.
    """
    from xml.dom import minidom
    # xml.etree.cElementTree was deprecated in Python 3.3 and removed
    # in 3.9; the plain ElementTree module is the supported equivalent.
    import xml.etree.ElementTree as et
    rough_string = et.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent="  ")
def arg_to_list(arg):
    """Normalize ``arg`` to a list: None becomes [], a list is returned
    unchanged, and any other value is wrapped in a one-element list."""
    if arg is None:
        return []
    if isinstance(arg, list):
        return arg
    return [arg]
def update_keys(input_dict, key_map):
    """Return a copy of ``input_dict`` with keys renamed according to
    ``key_map``, recursing into nested dictionaries."""
    out = {}
    for key, val in input_dict.items():
        new_key = key_map.get(key, key)
        if isinstance(val, dict):
            out[new_key] = update_keys(val, key_map)
        else:
            out[new_key] = val
    return out
def create_dict(d0, **kwargs):
    """Return a deep copy of ``d0`` merged with the keyword arguments
    (new keys are added; see merge_dict)."""
    o = copy.deepcopy(d0)
    o = merge_dict(o,kwargs,add_new_keys=True)
    return o
def merge_dict(d0, d1, add_new_keys=False, append_arrays=False):
    """Recursively merge the contents of python dictionary d0 with
    the contents of another python dictionary, d1.

    Parameters
    ----------
    d0 : dict
        The input dictionary.
    d1 : dict
        Dictionary to be merged with the input dictionary.
    add_new_keys : bool
        Do not skip keys that only exist in d1.
    append_arrays : bool
        If an element is a numpy array set the value of that element by
        concatenating the two arrays.

    Returns
    -------
    dict
        A new dictionary; the inputs are not modified in place.
    """
    # Treat a missing dictionary as "nothing to merge".
    if d1 is None:
        return d0
    elif d0 is None:
        return d1
    elif d0 is None and d1 is None:
        # NOTE(review): unreachable -- both None cases are caught above.
        return {}
    od = {}
    for k, v in d0.items():
        t0 = None
        t1 = None
        if k in d0:
            t0 = type(d0[k])
        if k in d1:
            t1 = type(d1[k])
        if k not in d1:
            # Key only in d0: keep a deep copy of the original value.
            od[k] = copy.deepcopy(d0[k])
        elif isinstance(v, dict) and isinstance(d1[k], dict):
            od[k] = merge_dict(d0[k], d1[k], add_new_keys, append_arrays)
        elif isinstance(v, list) and isstr(d1[k]):
            # Allow a comma-separated string to override a list value.
            od[k] = d1[k].split(',')
        elif isinstance(v, dict) and d1[k] is None:
            od[k] = copy.deepcopy(d0[k])
        elif isinstance(v, np.ndarray) and append_arrays:
            od[k] = np.concatenate((v, d1[k]))
        elif (d0[k] is not None and d1[k] is not None) and t0 != t1:
            if t0 == dict or t0 == list:
                raise Exception('Conflicting types in dictionary merge for '
                                'key %s %s %s' % (k, t0, t1))
            # Coerce the d1 value to the type of the d0 value.
            od[k] = t0(d1[k])
        else:
            od[k] = copy.copy(d1[k])
    if add_new_keys:
        # Carry over keys that exist only in d1.
        for k, v in d1.items():
            if k not in d0:
                od[k] = copy.deepcopy(d1[k])
    return od
def tolist(x):
    """Convenience function that takes in a nested structure of lists
    and dictionaries and converts everything to its base objects.
    This is useful for dumping a file to yaml.

    (a) numpy arrays into python lists

        >>> type(tolist(np.asarray(123))) == int
        True
        >>> tolist(np.asarray([1,2,3])) == [1,2,3]
        True

    (b) numpy strings into python strings.

        >>> tolist([np.asarray('cat')])==['cat']
        True

    (c) an ordered dict to a dict

    (d) converts numbers & bools in strings to real representation,
    (i.e. '123' -> 123)

        >>> type(tolist('123')) == int
        True
        >>> tolist('False') == False
        True

    Fixes relative to the original: ``map`` returns a lazy iterator on
    Python 3 (callers got a map object instead of a list); the
    ``basestring`` builtin and the ``np.str`` alias no longer exist on
    Python 3 / modern numpy and raised at evaluation time.
    """
    if isinstance(x, list):
        # Materialize explicitly -- Python 3's map is lazy.
        return [tolist(xi) for xi in x]
    elif isinstance(x, dict):
        return dict((tolist(k), tolist(v)) for k, v in x.items())
    elif isinstance(x, np.ndarray) or isinstance(x, np.number):
        # note, call tolist again to convert strings of numbers to numbers
        return tolist(x.tolist())
    elif isinstance(x, OrderedDict):
        # NOTE: unreachable in practice (OrderedDict is a dict
        # subclass and is converted by the dict branch above).
        return dict(x)
    elif isinstance(x, np.bool_):
        return bool(x)
    elif isinstance(x, str):
        x = str(x)  # normalize unicode & numpy strings
        try:
            return int(x)
        except ValueError:
            try:
                return float(x)
            except ValueError:
                if x == 'True':
                    return True
                elif x == 'False':
                    return False
                return x
    else:
        return x
def create_hpx_disk_region_string(skyDir, coordsys, radius, inclusive=0):
    """Build a HEALPix DISK/DISK_INC region string centered on
    ``skyDir``.  Returns None for radii >= 90 deg (all-sky)."""
    if radius >= 90.:
        # Make an all-sky region
        return None

    if coordsys == "GAL":
        xref, yref = skyDir.galactic.l.deg, skyDir.galactic.b.deg
    elif coordsys == "CEL":
        xref, yref = skyDir.ra.deg, skyDir.dec.deg
    else:
        raise Exception("Unrecognized coordinate system %s" % coordsys)

    if inclusive:
        return "DISK_INC(%.3f,%.3f,%.3f,%i)" % (xref, yref, radius, inclusive)
    return "DISK(%.3f,%.3f,%.3f)" % (xref, yref, radius)
def convolve2d_disk(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    azimuthally symmetric function in two dimensions and g is a
    step function given by:

    g(r) = H(1-r/s)

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.
    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.
    sig : float
        Radius parameter of the step function.
    nstep : int
        Number of sampling point for numeric integration.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)
    # The disk centered at r only overlaps the annulus [r-sig, r+sig].
    rmin = r - sig
    rmax = r + sig
    rmin[rmin < 0] = 0
    delta = (rmax - rmin) / nstep
    # Radial integration grid: bin edges, centers, and widths.
    redge = rmin[:, np.newaxis] + \
        delta[:, np.newaxis] * np.linspace(0, nstep, nstep + 1)[np.newaxis, :]
    rp = 0.5 * (redge[:, 1:] + redge[:, :-1])
    dr = redge[:, 1:] - redge[:, :-1]
    fnv = fn(rp)
    r = r.reshape(r.shape + (1,))
    saxis = 1
    # cphi is the cosine of the half-opening angle of the arc of each
    # ring that lies inside the disk; -1 (full circle) when the ring
    # is fully enclosed or the evaluation point is at the origin.
    cphi = -np.ones(dr.shape)
    m = ((rp + r) / sig < 1) | (r == 0)
    rrp = r * rp
    sx = r ** 2 + rp ** 2 - sig ** 2
    cphi[~m] = sx[~m] / (2 * rrp[~m])
    dphi = 2 * np.arccos(cphi)
    # Ring contribution weighted by fn, normalized by the disk area
    # pi*sig^2.
    v = rp * fnv * dphi * dr / (np.pi * sig * sig)
    s = np.sum(v, axis=saxis)
    return s
def convolve2d_gauss(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    azimuthally symmetric function in two dimensions and g is a
    gaussian given by:

    g(r) = 1/(2*pi*s^2) Exp[-r^2/(2*s^2)]

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.
    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.
    sig : float
        Width parameter of the gaussian.
    nstep : int
        Number of sampling point for numeric integration.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)
    # Integrate over +/- 10 sigma around each evaluation point.
    rmin = r - 10 * sig
    rmax = r + 10 * sig
    rmin[rmin < 0] = 0
    delta = (rmax - rmin) / nstep
    # Radial integration grid: bin edges, centers, and widths.
    redge = (rmin[:, np.newaxis] +
             delta[:, np.newaxis] *
             np.linspace(0, nstep, nstep + 1)[np.newaxis, :])
    rp = 0.5 * (redge[:, 1:] + redge[:, :-1])
    dr = redge[:, 1:] - redge[:, :-1]
    fnv = fn(rp)
    r = r.reshape(r.shape + (1,))
    saxis = 1
    sig2 = sig * sig
    x = r * rp / (sig2)
    # Cache a spline approximation of the exponentially scaled Bessel
    # function I0 (scipy.special.ive) as a function attribute so it is
    # built only once on the first call.
    if 'je_fn' not in convolve2d_gauss.__dict__:
        t = 10 ** np.linspace(-8, 8, 1000)
        t = np.insert(t, 0, [0])
        je = special.ive(0, t)
        convolve2d_gauss.je_fn = UnivariateSpline(t, je, k=2, s=0)
    je = convolve2d_gauss.je_fn(x.flat).reshape(x.shape)
    v = (rp * fnv / (sig2) * je * np.exp(x - (r * r + rp * rp) /
                                         (2 * sig2)) * dr)
    s = np.sum(v, axis=saxis)
    return s
def make_pixel_offset(npix, xpix=0.0, ypix=0.0):
    """Make a 2D array with the distance of each pixel from a
    reference direction in pixel coordinates.  Pixel coordinates are
    defined such that (0,0) is located at the center of the coordinate
    grid."""
    pix = np.linspace(0, npix - 1, npix) - (npix - 1) / 2.
    dx = np.abs(pix - xpix)
    dy = np.abs(pix - ypix)
    return np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=0.0, ypix=0.0):
    """Make kernel for a 2D gaussian.

    Parameters
    ----------
    sigma : float
        68% containment radius in degrees.
    """
    # Convert the 68% containment radius to gaussian width in pixel
    # units (1.50959... is the 2D 68% containment factor).
    sig_pix = sigma / 1.5095921854516636 / cdelt

    # Pixel offset grid (inlined make_pixel_offset).
    pix = np.linspace(0, npix - 1, npix) - (npix - 1) / 2.
    dx = np.abs(pix - xpix)
    dy = np.abs(pix - ypix)
    dxy = np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)

    k = 1. / (2 * np.pi * sig_pix ** 2) * np.exp(
        -dxy ** 2 / (sig_pix ** 2 * 2.0))
    # Normalize to unit integral in sr.
    k /= (np.sum(k) * np.radians(cdelt) ** 2)
    return k
def make_disk_kernel(radius, npix=501, cdelt=0.01, xpix=0.0, ypix=0.0):
    """Make kernel for a 2D disk.

    Parameters
    ----------
    radius : float
        Disk radius in deg.
    """
    # Disk radius in pixel units.
    rad_pix = radius / cdelt

    # Pixel offset grid (inlined make_pixel_offset).
    pix = np.linspace(0, npix - 1, npix) - (npix - 1) / 2.
    dx = np.abs(pix - xpix)
    dy = np.abs(pix - ypix)
    dxy = np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)

    # Top-hat: 1 inside, 0 outside, 0.5 exactly on the boundary.
    k = 0.5 * (np.sign(rad_pix - dxy) + 1.0)
    # Normalize to unit integral in sr.
    k /= (np.sum(k) * np.radians(cdelt) ** 2)
    return k
def make_cdisk_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                      normalize=False):
    """Make a kernel for a PSF-convolved 2D disk.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    sigma : float
        68% containment radius in degrees.

    Returns
    -------
    `~numpy.ndarray`
        Kernel cube of shape (n_energy, npix, npix).
    """
    dtheta = psf.dtheta
    egy = psf.energies
    # Angular offset of each pixel from the kernel center (deg).
    x = make_pixel_offset(npix, xpix, ypix)
    x *= cdelt
    k = np.zeros((len(egy), npix, npix))
    # Convolve the disk with the PSF at each energy plane, then map the
    # radial profile onto the 2D pixel grid by interpolation.
    for i in range(len(egy)):
        fn = lambda t: psf.eval(i, t, scale_fn=psf_scale_fn)
        psfc = convolve2d_disk(fn, dtheta, sigma)
        k[i] = np.interp(np.ravel(x), dtheta, psfc).reshape(x.shape)
    if normalize:
        # NOTE(review): this sums over the energy axis (axis=0), not
        # the spatial axes -- confirm this normalization is intended.
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
def make_cgauss_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                       normalize=False):
    """Make a kernel for a PSF-convolved 2D gaussian.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    sigma : float
        68% containment radius in degrees.

    Returns
    -------
    `~numpy.ndarray`
        Kernel cube of shape (n_energy, npix, npix).
    """
    # Convert 68% containment radius to gaussian width.
    sigma /= 1.5095921854516636
    dtheta = psf.dtheta
    egy = psf.energies
    # Angular offset of each pixel from the kernel center (deg).
    x = make_pixel_offset(npix, xpix, ypix)
    x *= cdelt
    k = np.zeros((len(egy), npix, npix))
    # Convolve the gaussian with the PSF at each energy plane, then map
    # the radial profile onto the 2D pixel grid by interpolation.
    for i in range(len(egy)):
        fn = lambda t: psf.eval(i, t, scale_fn=psf_scale_fn)
        psfc = convolve2d_gauss(fn, dtheta, sigma)
        k[i] = np.interp(np.ravel(x), dtheta, psfc).reshape(x.shape)
    if normalize:
        # NOTE(review): sums over the energy axis (axis=0), not the
        # spatial axes -- confirm this normalization is intended.
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False):
    """
    Generate a kernel for a point-source.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    npix : int
        Number of pixels in X and Y dimensions.

    cdelt : float
        Pixel size in degrees.

    Returns
    -------
    `~numpy.ndarray`
        Kernel cube of shape (n_energy, npix, npix).
    """
    egy = psf.energies
    # Angular offset of each pixel from the kernel center (deg).
    x = make_pixel_offset(npix, xpix, ypix)
    x *= cdelt
    k = np.zeros((len(egy), npix, npix))
    # Evaluate the PSF directly on the offset grid for each energy.
    for i in range(len(egy)):
        k[i] = psf.eval(i, x, scale_fn=psf_scale_fn)
    if normalize:
        # NOTE(review): sums over the energy axis (axis=0), not the
        # spatial axes -- confirm this normalization is intended.
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
def rebin_map(k, nebin, npix, rebin):
    """Downsample a map cube of shape (nebin, npix*rebin, npix*rebin)
    to (nebin, npix, npix) by averaging rebin x rebin pixel blocks.

    NOTE(review): the division by rebin**2 is applied in place and is
    performed even when rebin <= 1, mutating the caller's array.
    """
    if rebin > 1:
        # Sum over rebin-sized groups along one spatial axis, swap the
        # spatial axes, repeat for the other axis, then swap back.
        k = np.sum(k.reshape((nebin, npix * rebin, npix, rebin)), axis=3)
        k = k.swapaxes(1, 2)
        k = np.sum(k.reshape(nebin, npix, npix, rebin), axis=3)
        k = k.swapaxes(1, 2)
    # Convert the block sums into block averages.
    k /= rebin ** 2
    return k
Added a try block in get_parameter_limits to handle the case of an undefined x axis.
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import re
import copy
import tempfile
from collections import OrderedDict
import xml.etree.cElementTree as et
import yaml
import numpy as np
import scipy.optimize
from scipy.interpolate import UnivariateSpline
from scipy.optimize import brentq
import scipy.special as special
from numpy.core import defchararray
from astropy.extern import six
def init_matplotlib_backend(backend=None):
    """This function initializes the matplotlib backend.  When no
    DISPLAY is available the backend is automatically set to 'Agg'.

    Parameters
    ----------
    backend : str
        matplotlib backend name.
    """
    import matplotlib
    if 'DISPLAY' not in os.environ:
        # Headless environment: force a non-interactive backend.
        matplotlib.use('Agg')
    elif backend is not None:
        matplotlib.use(backend)
def unicode_representer(dumper, uni):
    """Represent text values as plain YAML strings (no unicode tag)."""
    node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
    return node
# Register for six.text_type (unicode on py2, str on py3).
yaml.add_representer(six.text_type, unicode_representer)
def load_yaml(infile, **kwargs):
    """Load a python object from the YAML file ``infile``.

    The original left the file handle open; a context manager
    guarantees it is closed.

    NOTE(review): yaml.load without an explicit Loader can construct
    arbitrary python objects -- pass Loader=yaml.SafeLoader for
    untrusted input.
    """
    with open(infile) as f:
        return yaml.load(f, **kwargs)
def write_yaml(o, outfile, **kwargs):
    """Serialize ``o`` to YAML at ``outfile``, first converting numpy
    types to plain python objects (see tolist).

    The original left the output file handle open; a context manager
    guarantees it is flushed and closed.
    """
    with open(outfile, 'w') as f:
        yaml.dump(tolist(o), f, **kwargs)
def load_npy(infile):
    """Load a python object stored as a 0-d object array in a numpy
    file.

    ``allow_pickle=True`` is required since numpy 1.16.3 changed the
    default to False, which breaks loading object arrays.  Only load
    trusted files: unpickling can execute arbitrary code.
    """
    return np.load(infile, allow_pickle=True).flat[0]
def load_data(infile, workdir=None):
    """Load python data structure from either a YAML or numpy file.

    Returns
    -------
    tuple
        (resolved file path, loaded object).
    """
    infile = resolve_path(infile, workdir=workdir)
    # Strip any extension and probe for the .npy/.yaml variants.
    infile, ext = os.path.splitext(infile)
    if os.path.isfile(infile + '.npy'):
        infile += '.npy'
    elif os.path.isfile(infile + '.yaml'):
        infile += '.yaml'
    else:
        raise Exception('Input file does not exist.')
    ext = os.path.splitext(infile)[1]
    if ext == '.npy':
        return infile, load_npy(infile)
    elif ext == '.yaml':
        return infile, load_yaml(infile)
    else:
        raise Exception('Unrecognized extension.')
def resolve_path(path, workdir=None):
    """Resolve ``path`` to an absolute path.  Relative paths are
    interpreted with respect to ``workdir``, or the current working
    directory when ``workdir`` is None."""
    if os.path.isabs(path):
        return path
    if workdir is None:
        return os.path.abspath(path)
    return os.path.join(workdir, path)
def resolve_file_path(path, **kwargs):
    """Resolve ``path`` to an existing file, expanding environment
    variables and searching the optional ``search_dirs`` list; raise
    if no candidate exists."""
    expanded = os.path.expandvars(path)
    if os.path.isabs(expanded) and os.path.isfile(expanded):
        return path

    for dirname in kwargs.get('search_dirs', []):
        if not os.path.isdir(os.path.expandvars(dirname)):
            continue
        candidate = os.path.join(dirname, path)
        if os.path.isfile(os.path.expandvars(candidate)):
            return candidate

    raise Exception('Failed to resolve file path: %s' % path)
def resolve_file_path_list(pathlist, workdir, prefix='',
                           randomize=False):
    """Resolve the path of each file name in the file ``pathlist`` and
    write the updated paths to a new file.

    Returns
    -------
    tmppath : str
        Path of the output file containing the resolved paths.
    """
    # Read the input list with a context manager so the handle is
    # closed (the original left it open).
    with open(pathlist, 'r') as fin:
        files = [line.strip() for line in fin]

    newfiles = []
    for f in files:
        f = os.path.expandvars(f)
        if os.path.isfile(f):
            newfiles += [f]
        else:
            # Fall back to interpreting the entry relative to workdir.
            newfiles += [os.path.join(workdir, f)]

    if randomize:
        # Close the descriptor mkstemp returns (the original leaked it).
        fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=workdir)
        os.close(fd)
    else:
        tmppath = os.path.join(workdir, prefix)
    tmppath += '.txt'

    with open(tmppath, 'w') as tmpfile:
        tmpfile.write("\n".join(newfiles))
    return tmppath
def is_fits_file(path):
    """Return True when ``path`` has a FITS file extension
    (optionally gzipped)."""
    return path.endswith(('.fit', '.fits', '.fit.gz', '.fits.gz'))
def collect_dirs(path, max_depth=1, followlinks=True):
    """Recursively find directories under the given path."""
    if not os.path.isdir(path):
        return []
    found = [path]
    if max_depth == 0:
        return found
    # Only descend into subdirectories; symlinked directories are
    # listed but not followed when followlinks is False.
    entries = [os.path.join(path, e) for e in os.listdir(path)]
    for sub in entries:
        if not os.path.isdir(sub):
            continue
        found += [sub]
        if os.path.islink(sub) and not followlinks:
            continue
        if max_depth > 0:
            found += collect_dirs(sub, max_depth=max_depth - 1)
    return list(set(found))
def match_regex_list(patterns, string):
    """Perform a regex match of a string against a list of patterns.
    Returns true if the string matches at least one pattern in the
    list."""
    return any(re.findall(p, string) for p in patterns)
def find_rows_by_string(tab, names, colnames=['assoc']):
    """Find the rows in a table ``tab`` that match at least one of the
    strings in ``names``.  This method ignores whitespace and case
    when matching strings.

    Parameters
    ----------
    tab : `astropy.table.Table`
        Table that will be searched.

    names : list
        List of strings.

    colnames : list
        Names of the table columns that will be searched for matching
        strings.

    Returns
    -------
    mask : `~numpy.ndarray`
        Boolean mask for rows with matching strings.
    """
    # Start with an all-False mask over the table rows.
    mask = np.empty(len(tab),dtype=bool); mask.fill(False)
    # Normalize the search strings: lower case, whitespace removed.
    names = [name.lower().replace(' ', '') for name in names]
    for colname in colnames:
        if colname not in tab.columns:
            continue
        # Work on a single-column copy so the normalization below does
        # not modify the caller's table.
        col = tab[[colname]].copy()
        col[colname] = defchararray.replace(defchararray.lower(col[colname]),
                                            ' ', '')
        for name in names:
            mask |= col[colname] == name
    return mask
def join_strings(strings, sep='_'):
    """Join the non-empty entries of ``strings`` with ``sep``.  None
    yields ''; a scalar string is treated as a one-element list."""
    if strings is None:
        return ''
    if not isinstance(strings, list):
        strings = [strings]
    return sep.join(s for s in strings if s)
def format_filename(outdir, basename, prefix=None, extension=None):
    """Compose a path of the form
    ``outdir/<prefix>_<basename>[.<extension>]``; empty prefix pieces
    are dropped (see join_strings)."""
    filename = join_strings(prefix)
    filename = join_strings([filename, basename])
    if extension is not None:
        # Accept the extension with or without a leading dot.
        if extension.startswith('.'):
            filename += extension
        else:
            filename += '.' + extension
    return os.path.join(outdir, filename)
def strip_suffix(filename, suffix):
    """Remove any of the extensions listed in ``suffix`` (given
    without the dot) from the end of ``filename``."""
    for ext in suffix:
        filename = re.sub(r'\.%s$' % ext, '', filename)
    return filename
def met_to_mjd(time):
    """Convert mission elapsed time (seconds) to modified julian
    date."""
    return 54682.65 + (time - 239557414.0) / 86400.
# J2000 coordinates of the north galactic pole and the galactic
# longitude of the celestial pole.
RA_NGP = np.radians(192.8594812065348)
DEC_NGP = np.radians(27.12825118085622)
L_CP = np.radians(122.9319185680026)


def gal2eq(l, b):
    """Convert galactic (l, b) to equatorial (ra, dec) coordinates.
    Inputs and outputs are in degrees; scalars are promoted to
    length-1 arrays."""
    L_0 = L_CP - np.pi / 2.
    RA_0 = RA_NGP + np.pi / 2.
    l = np.array(l, ndmin=1)
    b = np.array(b, ndmin=1)
    l, b = np.radians(l), np.radians(b)
    sind = np.sin(b) * np.sin(DEC_NGP) + np.cos(b) * np.cos(DEC_NGP) * np.sin(
        l - L_0)
    dec = np.arcsin(sind)
    cosa = np.cos(l - L_0) * np.cos(b) / np.cos(dec)
    sina = (np.cos(b) * np.sin(DEC_NGP) * np.sin(l - L_0) - np.sin(b) * np.cos(
        DEC_NGP)) / np.cos(dec)
    dec = np.degrees(dec)
    # Clamp against rounding drift before arccos.
    cosa[cosa < -1.0] = -1.0
    cosa[cosa > 1.0] = 1.0
    ra = np.arccos(cosa)
    ra[np.where(sina < 0.)] = -ra[np.where(sina < 0.)]
    ra = np.degrees(ra + RA_0)
    ra = np.mod(ra, 360.)
    dec = np.mod(dec + 90., 180.) - 90.
    return ra, dec


def eq2gal(ra, dec):
    """Convert equatorial (ra, dec) to galactic (l, b) coordinates.
    Inputs and outputs are in degrees; scalars are promoted to
    length-1 arrays."""
    L_0 = L_CP - np.pi / 2.
    RA_0 = RA_NGP + np.pi / 2.
    DEC_0 = np.pi / 2. - DEC_NGP
    ra = np.array(ra, ndmin=1)
    dec = np.array(dec, ndmin=1)
    ra, dec = np.radians(ra), np.radians(dec)
    # BUG FIX: this was "np.sinb = ...", which stored the result as an
    # attribute on the numpy module instead of a local variable.
    sinb = np.sin(dec) * np.cos(DEC_0) - np.cos(dec) * np.sin(
        ra - RA_0) * np.sin(DEC_0)
    b = np.arcsin(sinb)
    cosl = np.cos(dec) * np.cos(ra - RA_0) / np.cos(b)
    sinl = (np.sin(dec) * np.sin(DEC_0) + np.cos(dec) * np.sin(
        ra - RA_0) * np.cos(DEC_0)) / np.cos(b)
    b = np.degrees(b)
    # Clamp against rounding drift before arccos.
    cosl[cosl < -1.0] = -1.0
    cosl[cosl > 1.0] = 1.0
    l = np.arccos(cosl)
    l[np.where(sinl < 0.)] = - l[np.where(sinl < 0.)]
    l = np.degrees(l + L_0)
    l = np.mod(l, 360.)
    b = np.mod(b + 90., 180.) - 90.
    return l, b
def xyz_to_lonlat(*args):
    """Convert cartesian (x, y, z) to spherical (lon, lat) in radians.
    Accepts either a single sequence or three separate arguments."""
    if len(args) == 1:
        x, y, z = args[0][0], args[0][1], args[0][2]
    else:
        x, y, z = args[0], args[1], args[2]
    lon = np.arctan2(y, x)
    lat = np.pi / 2. - np.arctan2(np.sqrt(x ** 2 + y ** 2), z)
    return lon, lat
def lonlat_to_xyz(lon, lat):
    """Convert spherical (lon, lat) in radians to a cartesian unit
    vector."""
    colat = np.pi / 2. - lat
    sin_c = np.sin(colat)
    return np.array([sin_c * np.cos(lon),
                     sin_c * np.sin(lon),
                     np.cos(colat)])
def project(lon0, lat0, lon1, lat1):
    """This function performs a stereographic projection on the unit
    vector (lon1,lat1) with the pole defined at the reference unit
    vector (lon0,lat0)."""
    costh = np.cos(np.pi / 2. - lat0)
    sinth = np.sin(np.pi / 2. - lat0)
    cosphi = np.cos(lon0)
    sinphi = np.sin(lon0)

    # Unit vector of the projected point (inlined lonlat_to_xyz).
    th1 = np.pi / 2. - lat1
    x1 = np.sin(th1) * np.cos(lon1)
    y1 = np.sin(th1) * np.sin(lon1)
    z1 = np.cos(th1)

    # Rotate into the frame whose z-axis points at the reference pole.
    x1p = x1 * costh * cosphi + y1 * costh * sinphi - z1 * sinth
    y1p = -x1 * sinphi + y1 * cosphi
    z1p = x1 * sinth * cosphi + y1 * sinth * sinphi + z1 * costh

    r = np.arctan2(np.sqrt(x1p ** 2 + y1p ** 2), z1p)
    phi = np.arctan2(y1p, x1p)
    return r * np.cos(phi), r * np.sin(phi)
def scale_parameter(p):
    """Split a positive value into (value, scale) where scale is a
    power of ten chosen from the value's magnitude; strings are
    converted to float first and non-positive values are returned with
    scale 1."""
    if isstr(p):
        p = float(p)
    if p > 0:
        scale = 10 ** -np.round(np.log10(1. / p))
        return p / scale, scale
    return p, 1.0
def update_bounds(val, bounds):
    """Expand the (lo, hi) pair ``bounds`` so that it includes
    ``val``."""
    lower = min(val, bounds[0])
    upper = max(val, bounds[1])
    return lower, upper
def apply_minmax_selection(val, val_minmax):
    """Test whether ``val`` lies inside the closed interval
    ``val_minmax``.  A None interval or a None endpoint is unbounded;
    non-finite values fail any bounded cut."""
    if val_minmax is None:
        return True

    lo, hi = val_minmax[0], val_minmax[1]

    if lo is None:
        pass_lo = True
    else:
        pass_lo = bool(np.isfinite(val)) and val >= lo

    if hi is None:
        pass_hi = True
    else:
        pass_hi = bool(np.isfinite(val)) and val <= hi

    return (pass_lo and pass_hi)
def create_source_name(skydir):
    """Build a J2000-style source name ('PS JHHMM.M+DDMM') from a sky
    direction.

    Parameters
    ----------
    skydir : object
        Sky direction; assumed to be an
        `~astropy.coordinates.SkyCoord` providing ``icrs.ra.hms`` and
        ``icrs.dec.dms`` -- TODO confirm against callers.
    """
    hms = skydir.icrs.ra.hms
    dms = skydir.icrs.dec.dms
    # Minutes are folded into a decimal value; seconds are folded into
    # the minutes.
    return 'PS J%02.f%04.1f%+03.f%02.f' % (hms.h,
                                           hms.m + hms.s / 60.,
                                           dms.d,
                                           np.abs(dms.m + dms.s / 60.))
def create_model_name(src):
    """Generate a name for a source object given its spatial/spectral
    properties.

    Parameters
    ----------
    src : `~fermipy.roi_model.Source`
        A source object.

    Returns
    -------
    name : str
        A source name.
    """
    o = ''
    spatial_type = src['SpatialModel'].lower()
    o += spatial_type
    # Gaussian models encode their width in the name.
    if spatial_type == 'gaussian':
        o += '_s%04.2f' % src['SpatialWidth']
    # Power-law models encode the spectral index; any other spectral
    # type just appends its lower-cased name.
    if src['SpectrumType'] == 'PowerLaw':
        o += '_powerlaw_%04.2f' % float(src.spectral_pars['Index']['value'])
    else:
        o += '_%s' % (src['SpectrumType'].lower())
    return o
def cov_to_correlation(cov):
    """Compute the correlation matrix corresponding to the covariance
    matrix ``cov``."""
    inv = 1 / np.sqrt(np.diag(cov))
    out = np.array(cov)
    out *= np.outer(inv, inv)
    return out
def twosided_cl_to_dlnl(cl):
    """Compute the delta-loglikelihood value that corresponds to a
    two-sided interval of the given confidence level ``cl``.

    Returns
    -------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.
    """
    return 0.5 * (np.sqrt(2.) * special.erfinv(cl)) ** 2
def twosided_dlnl_to_cl(dlnl):
    """Compute the confidence level of a two-sided interval for the
    loglikelihood change ``dlnl`` (relative to the maximum).

    Returns
    -------
    cl : float
        Confidence level.
    """
    nsigma = dlnl ** 0.5
    return special.erf(nsigma)
def onesided_cl_to_dlnl(cl):
    """Compute the delta-loglikelihood value that corresponds to an
    upper limit of the given confidence level ``cl``.

    Returns
    -------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.
    """
    alpha = 1.0 - cl
    nsigma = np.sqrt(2.) * special.erfinv(1 - 2 * alpha)
    return 0.5 * np.power(nsigma, 2.)
def onesided_dlnl_to_cl(dlnl):
    """Compute the confidence level of an upper limit for the
    loglikelihood change ``dlnl`` (relative to the maximum).

    Returns
    -------
    cl : float
        Confidence level.
    """
    alpha = (1.0 - special.erf(dlnl ** 0.5)) / 2.0
    cl = 1.0 - alpha
    return cl
def interpolate_function_min(x, y):
    """Return the x position minimizing y(x), found as the zero of the
    derivative of an interpolating quadratic spline; when the
    derivative never changes sign the appropriate endpoint is
    returned."""
    rep = scipy.interpolate.splrep(x, y, k=2, s=0)

    def slope(t):
        return scipy.interpolate.splev(t, rep, der=1)

    lo_sign = np.sign(slope(x[0]))
    if lo_sign == np.sign(slope(x[-1])):
        # No interior stationary point.
        if lo_sign == -1:
            return x[-1]
        return x[0]
    return scipy.optimize.brentq(slope, x[0], x[-1],
                                 xtol=1e-10 * np.median(x))
def find_function_root(fn, x0, xb, delta=0.0):
    """Find the root of a function: f(x)+delta in the interval encompassed
    by x0 and xb.

    Parameters
    ----------
    fn : function
        Python function.

    x0 : float
        Fixed bound for the root search. This will either be used as
        the lower or upper bound depending on the relative value of xb.

    xb : float
        Upper or lower bound for the root search. If a root is not
        found in the interval [x0,xb]/[xb,x0] this value will be
        increased/decreased until a change in sign is found.

    delta : float
        Constant offset added to fn when searching for the root.

    Returns
    -------
    float
        Root position, or NaN when no sign change could be bracketed.
    """
    # A degenerate interval cannot bracket a root.
    if x0 == xb:
        return np.nan
    # Grow (or shrink, when xb < x0) the free bound geometrically, at
    # most 10 times, until fn + delta changes sign across the interval.
    # NOTE(review): the halving/doubling implicitly assumes positive
    # coordinates -- confirm for negative-valued axes.
    for i in range(10):
        if np.sign(fn(xb) + delta) != np.sign(fn(x0) + delta):
            break
        if xb < x0:
            xb *= 0.5
        else:
            xb *= 2.0
    # Failed to find a root
    if np.sign(fn(xb) + delta) == np.sign(fn(x0) + delta):
        return np.nan
    # Scale the solver tolerance to the interval magnitude.
    if x0 == 0:
        xtol = 1e-10 * xb
    else:
        xtol = 1e-10 * (xb + x0)
    return brentq(lambda t: fn(t) + delta, x0, xb, xtol=xtol)
def get_parameter_limits(xval, loglike, ul_confidence=0.95, tol=1E-3):
    """Compute upper/lower limits, peak position, and 1-sigma errors
    from a 1-D likelihood function. This function uses the
    delta-loglikelihood method to evaluate parameter limits by
    searching for the point at which the change in the log-likelihood
    value with respect to the maximum equals a specific value. A
    parabolic spline fit to the log-likelihood values is used to
    improve the accuracy of the calculation.

    Parameters
    ----------
    xval : `~numpy.ndarray`
        Array of parameter values.
    loglike : `~numpy.ndarray`
        Array of log-likelihood values.
    ul_confidence : float
        Confidence level to use for limit calculation.
    tol : float
        Tolerance parameter for spline.

    Returns
    -------
    dict
        Keys: x0 (peak), ul/ll (limits at ``ul_confidence``),
        err_lo/err_hi/err (1-sigma errors), lnlmax (peak loglike).
        All NaN if the spline fit fails.
    """
    deltalnl = onesided_cl_to_dlnl(ul_confidence)
    # EAC FIXME, added try block here b/c sometimes xval is np.nan
    try:
        spline = UnivariateSpline(xval, loglike, k=2, s=tol)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are not swallowed.
        print("Failed to create spline: ", xval, loglike)
        return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,
                'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,
                'lnlmax': np.nan}

    sd = spline.derivative()
    imax = np.argmax(loglike)
    ilo = max(imax - 2, 0)
    ihi = min(imax + 2, len(xval) - 1)
    # Start from the grid maximum, then refine with the derivative root
    # if it changes sign in the neighborhood of the peak.
    x0 = xval[imax]
    if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):
        x0 = find_function_root(sd, xval[ilo], xval[ihi])
    lnlmax = float(spline(x0))

    fn = lambda t: spline(t) - lnlmax
    # Limits at the requested CL, and 1-sigma errors (delta-lnL = 0.5).
    ul = find_function_root(fn, x0, xval[-1], deltalnl)
    ll = find_function_root(fn, x0, xval[0], deltalnl)
    err_lo = np.abs(x0 - find_function_root(fn, x0, xval[0], 0.5))
    err_hi = np.abs(x0 - find_function_root(fn, x0, xval[-1], 0.5))
    # When the lower error is undefined (peak at boundary) fall back to
    # the upper error alone.
    if np.isfinite(err_lo):
        err = 0.5 * (err_lo + err_hi)
    else:
        err = err_hi

    return {'x0': x0, 'ul': ul, 'll': ll,
            'err_lo': err_lo, 'err_hi': err_hi, 'err': err,
            'lnlmax': lnlmax}
def poly_to_parabola(coeff):
    """Convert 2nd-order polynomial coefficients (highest power first,
    as returned by ``np.polyfit``) into parabola parameters: centroid,
    width (sigma), and peak value."""
    quad, lin, const = coeff[0], coeff[1], coeff[2]
    sigma = np.sqrt(1. / np.abs(2.0 * quad))
    x0 = -lin / (2 * quad)
    y0 = (1. - (lin ** 2 - 4 * quad * const)) / (4 * quad)
    return x0, sigma, y0
def parabola(xy, amplitude, x0, y0, sx, sy, theta):
    r"""Evaluate a 2D parabola given by:

    f(x,y) = f_0 - (1/2) * \delta^T * R * \Sigma * R^T * \delta

    where

    \delta = [(x - x_0), (y - y_0)]

    and R is the matrix for a 2D rotation by angle \theta and \Sigma
    is the covariance matrix:

    \Sigma = [[1/\sigma_x^2, 0           ],
              [0           , 1/\sigma_y^2]]

    Parameters
    ----------
    xy : tuple
        Tuple containing x and y arrays for the values at which the
        parabola will be evaluated.
    amplitude : float
        Constant offset value.
    x0 : float
        Centroid in x coordinate.
    y0 : float
        Centroid in y coordinate.
    sx : float
        Standard deviation along first axis (x-axis when theta=0).
    sy : float
        Standard deviation along second axis (y-axis when theta=0).
    theta : float
        Rotation angle in radians.

    Returns
    -------
    vals : `~numpy.ndarray`
        Values of the parabola evaluated at the points defined in the
        `xy` input tuple.
    """
    xc, yc = xy[0], xy[1]
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Coefficients of the rotated quadratic form.
    qa = (cos_t ** 2) / (2 * sx ** 2) + (sin_t ** 2) / (2 * sy ** 2)
    qb = -(np.sin(2 * theta)) / (4 * sx ** 2) + (np.sin(2 * theta)) / (
        4 * sy ** 2)
    qc = (sin_t ** 2) / (2 * sx ** 2) + (cos_t ** 2) / (2 * sy ** 2)
    dx = xc - x0
    dy = yc - y0
    return amplitude - (qa * (dx ** 2) + 2 * qb * dx * dy + qc * (dy ** 2))
def fit_parabola(z, ix, iy, dpix=2, zmin=None):
    """Fit a parabola to a 2D numpy array. This function will fit a
    parabola with the functional form described in
    `~fermipy.utils.parabola` to a 2D slice of the input array `z`.
    The boundaries of the fit region within z are set with the pixel
    centroid (`ix` and `iy`) and region size (`dpix`).

    Parameters
    ----------
    z : `~numpy.ndarray`
    ix : int
        X index of center pixel of fit region in array `z`.
    iy : int
        Y index of center pixel of fit region in array `z`.
    dpix : int
        Size of fit region expressed as a pixel offset with respect the
        centroid. The size of the sub-array will be (dpix*2 + 1) x
        (dpix*2 + 1).
    zmin : float, optional
        If given, only pixels with z > zmin are included in the fit.

    Returns
    -------
    dict
        Fit results: centroid (x0/y0), widths (sigmax/sigmay/sigma),
        peak (z0), rotation (theta), eccentricities, reduced chi-square,
        raw parameter vector (popt) and 'fit_success' flag.
    """
    # Clip the fit window to the array boundaries.
    xmin = max(0, ix - dpix)
    xmax = min(z.shape[0], ix + dpix + 1)
    ymin = max(0, iy - dpix)
    ymax = min(z.shape[1], iy + dpix + 1)
    sx = slice(xmin, xmax)
    sy = slice(ymin, ymax)
    nx = sx.stop - sx.start
    ny = sy.stop - sy.start
    # Pixel coordinate grids over the fit window.
    x = np.arange(sx.start, sx.stop)
    y = np.arange(sy.start, sy.stop)
    x = x[:, np.newaxis] * np.ones((nx, ny))
    y = y[np.newaxis, :] * np.ones((nx, ny))
    # Seed the fit with 1-D parabola fits along each axis through the
    # center pixel.
    coeffx = poly_to_parabola(np.polyfit(
        np.arange(sx.start, sx.stop), z[sx, iy], 2))
    coeffy = poly_to_parabola(np.polyfit(
        np.arange(sy.start, sy.stop), z[ix, sy], 2))
    # p0 = [amplitude, x0, y0, sx, sy, theta]
    p0 = [coeffx[2], coeffx[0], coeffy[0], coeffx[1], coeffy[1], 0.0]
    m = np.isfinite(z[sx, sy])
    if zmin is not None:
        # NOTE(review): this replaces (rather than ANDs with) the finite
        # mask above; NaN compares False against zmin so NaNs are still
        # excluded, but the intent looks like a combined mask -- confirm.
        m = z[sx, sy] > zmin
    o = {'fit_success': True, 'p0': p0}
    # curve_fit expects a flat residual vector.
    def curve_fit_fn(*args):
        return np.ravel(parabola(*args))
    try:
        popt, pcov = scipy.optimize.curve_fit(curve_fit_fn,
                                              (np.ravel(x[m]), np.ravel(y[m])),
                                              np.ravel(z[sx, sy][m]), p0)
    except Exception:
        # Fall back to the 1-D seed parameters if the 2-D fit fails.
        popt = copy.deepcopy(p0)
        o['fit_success'] = False
    # Evaluate goodness of fit on the masked pixels.
    fm = parabola((x[m], y[m]), *popt)
    df = fm - z[sx, sy][m]
    rchi2 = np.sum(df ** 2) / len(fm)
    o['rchi2'] = rchi2
    o['x0'] = popt[1]
    o['y0'] = popt[2]
    o['sigmax'] = popt[3]
    o['sigmay'] = popt[4]
    o['sigma'] = np.sqrt(o['sigmax'] ** 2 + o['sigmay'] ** 2)
    o['z0'] = popt[0]
    o['theta'] = popt[5]
    o['popt'] = popt
    # Eccentricity from the major/minor widths (two conventions).
    a = max(o['sigmax'], o['sigmay'])
    b = min(o['sigmax'], o['sigmay'])
    o['eccentricity'] = np.sqrt(1 - b ** 2 / a ** 2)
    o['eccentricity2'] = np.sqrt(a ** 2 / b ** 2 - 1)
    return o
def split_bin_edges(edges, npts=2):
    """Subdivide an array of bins by splitting each bin into ``npts``
    subintervals.

    Parameters
    ----------
    edges : `~numpy.ndarray`
        Bin edge array.
    npts : int
        Number of intervals into which each bin will be subdivided.

    Returns
    -------
    edges : `~numpy.ndarray`
        Subdivided bin edge array.
    """
    if npts < 2:
        return edges
    # Fractional positions of the new edges within each bin.
    frac = np.linspace(0.0, 1.0, npts + 1)[None, :]
    lo = edges[:-1, None]
    hi = edges[1:, None]
    subdivided = lo + (hi - lo) * frac
    # Shared interior edges appear twice; unique() also flattens/sorts.
    return np.unique(np.ravel(subdivided))
def center_to_edge(center):
    """Compute bin edges from bin centers. Interior edges are the
    midpoints between adjacent centers; the outermost edges are
    extrapolated using the first/last center spacing (unit spacing for
    a single center)."""
    if len(center) == 1:
        delta = np.array(1.0, ndmin=1)
    else:
        delta = np.diff(center)
    inner = 0.5 * (center[1:] + center[:-1])
    lower = center[0] - 0.5 * delta[0]
    upper = center[-1] + 0.5 * delta[-1]
    return np.concatenate(([lower], inner, [upper]))
def edge_to_center(edges):
    """Return the midpoints of the bins defined by ``edges``."""
    return (edges[1:] + edges[:-1]) * 0.5
def edge_to_width(edges):
    """Return the widths of the bins defined by ``edges``."""
    return np.diff(edges)
def val_to_bin(edges, x):
    """Convert axis coordinate to bin index."""
    # digitize returns 1-based bin numbers; shift to 0-based.
    return np.digitize(np.array(x, ndmin=1), edges) - 1
def val_to_pix(center, x):
    """Convert axis coordinate to a (fractional) pixel coordinate by
    linear interpolation against the pixel centers."""
    pix = np.arange(len(center), dtype=float)
    return np.interp(x, center, pix)
def val_to_edge(edges, x):
    """Convert axis coordinate to the index of the nearest edge
    (edges act as bin centers shifted by half a bin width)."""
    edges = np.array(edges)
    widths = edges[1:] - edges[:-1]
    # Duplicate the first width so every edge has an associated width.
    widths = np.insert(widths, 0, widths[0])
    shifted = edges - 0.5 * widths
    idx = np.digitize(np.array(x, ndmin=1), shifted) - 1
    # Clamp below-range values to the first edge.
    idx[idx < 0] = 0
    return idx
def val_to_bin_bounded(edges, x):
    """Convert axis coordinate to bin index, clipped to the valid
    range [0, nbins - 1]."""
    nbins = len(edges) - 1
    ibin = val_to_bin(edges, x)
    return np.clip(ibin, 0, nbins - 1)
def extend_array(edges, binsz, lo, hi):
    """Extend an array to encompass lo and hi values."""
    # Number of additional bins needed on each side.
    n_lo = int(np.ceil((edges[0] - lo) / binsz))
    n_hi = int(np.ceil((hi - edges[-1]) / binsz))
    edges = copy.deepcopy(edges)
    if n_lo > 0:
        prepend = np.linspace(edges[0] - n_lo * binsz, edges[0], n_lo + 1)
        edges = np.concatenate((prepend[:-1], edges))
    if n_hi > 0:
        append = np.linspace(edges[-1], edges[-1] + n_hi * binsz, n_hi + 1)
        edges = np.concatenate((edges, append[1:]))
    return edges
def mkdir(dir):
    """Create the directory *dir* (including any missing parents) if it
    does not already exist, and return its path.

    Uses EAFP instead of an ``os.path.exists`` pre-check so that two
    processes racing to create the same directory cannot both pass the
    check and then collide in ``makedirs``.
    """
    # (parameter name 'dir' kept for backward compatibility even though
    # it shadows the builtin)
    try:
        os.makedirs(dir)
    except OSError:
        # Re-raise unless the directory already exists.
        if not os.path.isdir(dir):
            raise
    return dir
def fits_recarray_to_dict(table):
    """Convert a FITS recarray to a python dictionary.

    Column element types are mapped to native python dtypes:
    float32/float64 -> float, str/bytes -> str, int16 -> int;
    ndarray-valued columns are copied as-is.

    Raises
    ------
    Exception
        If a column's element type is not one of the supported types.
    """
    cols = {}
    for col in table.columns.names:
        col_data = table.data[col]
        eltype = type(col_data[0])
        if eltype in (np.float32, np.float64):
            cols[col] = np.array(col_data, dtype=float)
        # np.string_ was removed in NumPy 2.0; np.bytes_ is the
        # canonical alias in both NumPy 1.x and 2.x.
        elif eltype in (str, np.bytes_):
            cols[col] = np.array(col_data, dtype=str)
        elif eltype == np.int16:
            cols[col] = np.array(col_data, dtype=int)
        elif eltype == np.ndarray:
            cols[col] = np.array(col_data)
        else:
            # Report the element type (the thing actually tested above),
            # not the type of the column array.
            raise Exception(
                'Unrecognized column type: %s %s' % (col, str(eltype)))
    return cols
def unicode_to_str(args):
    """Return a copy of dict *args* with unicode values converted to
    ``str``; all other values are passed through unchanged.

    On Python 3 the builtin ``unicode`` does not exist (all strings are
    unicode), so this degenerates to a shallow copy. The original
    implementation raised ``NameError`` on Python 3.
    """
    try:
        text_type = unicode  # Python 2
    except NameError:
        text_type = str  # Python 3: str(v) on a str is a no-op
    o = {}
    for k, v in args.items():
        if isinstance(v, text_type):
            o[k] = str(v)
        else:
            o[k] = v
    return o
def isstr(s):
    """String instance testing method that works under both Python 2.X
    and 3.X. Returns true if the input is a string."""
    try:
        string_types = basestring  # Python 2 (covers str and unicode)
    except NameError:
        string_types = str  # Python 3
    return isinstance(s, string_types)
def xmlpath_to_path(path):
    """Convert XML-style environment variable references ``$(VAR)`` in
    *path* to shell style ``$VAR``. None is passed through unchanged."""
    if path is None:
        return None
    return re.sub(r'\$\(([a-zA-Z\_]+)\)', r'$\1', path)
def path_to_xmlpath(path):
    """Convert shell-style environment variable references ``$VAR`` in
    *path* to XML style ``$(VAR)``. None is passed through unchanged."""
    if path is None:
        return None
    return re.sub(r'\$([a-zA-Z\_]+)', r'$(\1)', path)
def create_xml_element(root, name, attrib):
    """Create a subelement of *root* named *name* and set its
    attributes from the *attrib* dict: bools become '0'/'1', strings
    are set verbatim, finite numbers are stringified, and non-finite
    values are silently skipped.
    """
    el = et.SubElement(root, name)
    # dict.iteritems() does not exist on Python 3; items() works on both.
    for k, v in attrib.items():
        if isinstance(v, bool):
            el.set(k, str(int(v)))
        elif isstr(v):
            el.set(k, v)
        elif np.isfinite(v):
            el.set(k, str(v))
    return el
def load_xml_elements(root, path):
    """Collect the attribute dictionaries of all elements under *root*
    matching *path*. Elements with a 'name' attribute are keyed by that
    name; anonymous elements have their attributes merged into the
    result directly."""
    out = {}
    for node in root.findall(path):
        attrs = node.attrib
        if 'name' in attrs:
            out[attrs['name']] = copy.deepcopy(attrs)
        else:
            out.update(attrs)
    return out
def prettify_xml(elem):
    """Return a pretty-printed XML string for the Element.
    """
    from xml.dom import minidom
    # xml.etree.cElementTree was removed in Python 3.9; the pure
    # ElementTree module uses the C accelerator automatically.
    import xml.etree.ElementTree as et
    rough_string = et.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent=" ")
def arg_to_list(arg):
    """Normalize *arg* to a list: None -> [], a list is returned
    unchanged, anything else is wrapped in a single-element list."""
    if arg is None:
        return []
    return arg if isinstance(arg, list) else [arg]
def update_keys(input_dict, key_map):
    """Return a copy of *input_dict* with keys renamed according to
    *key_map*, recursing into nested dictionary values."""
    out = {}
    for key, val in input_dict.items():
        new_key = key_map.get(key, key)
        out[new_key] = update_keys(val, key_map) if isinstance(val, dict) else val
    return out
def create_dict(d0, **kwargs):
    """Return a deep copy of *d0* merged with the keyword arguments
    (new keys from kwargs are added)."""
    merged = merge_dict(copy.deepcopy(d0), kwargs, add_new_keys=True)
    return merged
def merge_dict(d0, d1, add_new_keys=False, append_arrays=False):
    """Recursively merge the contents of python dictionary d0 with
    the contents of another python dictionary, d1.

    Parameters
    ----------
    d0 : dict
        The input dictionary.
    d1 : dict
        Dictionary to be merged with the input dictionary.
    add_new_keys : str
        Do not skip keys that only exist in d1.
    append_arrays : bool
        If an element is a numpy array set the value of that element by
        concatenating the two arrays.

    Returns
    -------
    dict
        The merged dictionary (d0 or d1 is returned unchanged when the
        other argument is None).
    """
    # NOTE: a former 'd0 is None and d1 is None' branch was unreachable
    # (the d1-None check below already returned) and has been removed;
    # both-None still returns d0 (i.e. None), as before.
    if d1 is None:
        return d0
    elif d0 is None:
        return d1

    od = {}
    for k, v in d0.items():
        t0 = type(v)
        t1 = type(d1[k]) if k in d1 else None
        if k not in d1:
            od[k] = copy.deepcopy(v)
        elif isinstance(v, dict) and isinstance(d1[k], dict):
            # Both sides are dicts: merge recursively.
            od[k] = merge_dict(v, d1[k], add_new_keys, append_arrays)
        elif isinstance(v, list) and isstr(d1[k]):
            # A comma-separated string may override a list.
            od[k] = d1[k].split(',')
        elif isinstance(v, dict) and d1[k] is None:
            od[k] = copy.deepcopy(v)
        elif isinstance(v, np.ndarray) and append_arrays:
            od[k] = np.concatenate((v, d1[k]))
        elif (v is not None and d1[k] is not None) and t0 != t1:
            # Coerce d1's value to d0's type when they disagree.
            if t0 == dict or t0 == list:
                raise Exception('Conflicting types in dictionary merge for '
                                'key %s %s %s' % (k, t0, t1))
            od[k] = t0(d1[k])
        else:
            od[k] = copy.copy(d1[k])

    if add_new_keys:
        for k, v in d1.items():
            if k not in d0:
                od[k] = copy.deepcopy(v)
    return od
def tolist(x):
    """Convenience function that takes in a
    nested structure of lists and dictionaries
    and converts everything to its base objects.
    This is useful for dumping a file to yaml.

    (a) numpy arrays into python lists

        >>> type(tolist(np.asarray(123))) == int
        True
        >>> tolist(np.asarray([1,2,3])) == [1,2,3]
        True

    (b) numpy strings into python strings.

        >>> tolist([np.asarray('cat')])==['cat']
        True

    (c) an ordered dict to a dict

        >>> ordered=OrderedDict(a=1, b=2)
        >>> type(tolist(ordered)) == dict
        True

    (d) converts unicode to regular strings

        >>> type(tolist(u'a')) == str
        True

    (e) converts numbers & bools in strings to real representation,
        (i.e. '123' -> 123)

        >>> type(tolist(np.asarray('123'))) == int
        True
        >>> type(tolist('123')) == int
        True
        >>> tolist('False') == False
        True
    """
    try:
        string_types = basestring  # Python 2: covers str and unicode
    except NameError:
        string_types = str  # Python 3
    if isinstance(x, list):
        # A list comprehension instead of map(): map() is a lazy
        # iterator on Python 3 and would not yield a real list.
        return [tolist(xi) for xi in x]
    elif isinstance(x, dict):
        # This branch also handles OrderedDict (a dict subclass),
        # converting it to a plain dict while recursing into keys and
        # values; a separate OrderedDict branch was unreachable.
        return dict((tolist(k), tolist(v)) for k, v in x.items())
    elif isinstance(x, np.ndarray) or isinstance(x, np.number):
        # note, call tolist again to convert strings of numbers to numbers
        return tolist(x.tolist())
    elif isinstance(x, np.bool_):
        return bool(x)
    elif isinstance(x, string_types) or isinstance(x, np.str_):
        # np.str was removed in NumPy >= 1.20; np.str_ is the scalar type.
        x = str(x)  # convert unicode & numpy strings
        try:
            return int(x)
        except ValueError:
            try:
                return float(x)
            except ValueError:
                if x == 'True':
                    return True
                elif x == 'False':
                    return False
                return x
    else:
        return x
def create_hpx_disk_region_string(skyDir, coordsys, radius, inclusive=0):
    """Build a HEALPix disk region string centered on *skyDir* in the
    given coordinate system ('GAL' or 'CEL'). Returns None when the
    radius covers the whole sky (>= 90 deg)."""
    if radius >= 90.:
        # Make an all-sky region
        return None
    if coordsys == "GAL":
        xref, yref = skyDir.galactic.l.deg, skyDir.galactic.b.deg
    elif coordsys == "CEL":
        xref, yref = skyDir.ra.deg, skyDir.dec.deg
    else:
        raise Exception("Unrecognized coordinate system %s" % coordsys)
    if inclusive:
        return "DISK_INC(%.3f,%.3f,%.3f,%i)" % (xref, yref, radius, inclusive)
    return "DISK(%.3f,%.3f,%.3f)" % (xref, yref, radius)
def convolve2d_disk(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    azimuthally symmetric function in two dimensions and g is a
    step function given by:

    g(r) = H(1-r/s)

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.
    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.
    sig : float
        Radius parameter of the step function.
    nstep : int
        Number of sampling point for numeric integration.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)
    # The disk only overlaps annuli with radii in [r - sig, r + sig];
    # clip the lower integration bound at zero.
    rmin = r - sig
    rmax = r + sig
    rmin[rmin < 0] = 0
    delta = (rmax - rmin) / nstep
    # redge: per-point grid of annulus edges; rp/dr are the midpoints
    # and widths used for midpoint-rule integration.
    redge = rmin[:, np.newaxis] + \
        delta[:, np.newaxis] * np.linspace(0, nstep, nstep + 1)[np.newaxis, :]
    rp = 0.5 * (redge[:, 1:] + redge[:, :-1])
    dr = redge[:, 1:] - redge[:, :-1]
    fnv = fn(rp)
    r = r.reshape(r.shape + (1,))
    saxis = 1
    # cphi: cosine of the half-angle of the arc of the annulus at radius
    # rp that lies inside the disk (law of cosines below).
    cphi = -np.ones(dr.shape)
    # Annuli fully contained in the disk (or evaluation at the origin)
    # keep cphi = -1, i.e. the full 2*pi azimuthal range.
    m = ((rp + r) / sig < 1) | (r == 0)
    rrp = r * rp
    sx = r ** 2 + rp ** 2 - sig ** 2
    cphi[~m] = sx[~m] / (2 * rrp[~m])
    dphi = 2 * np.arccos(cphi)
    # Integrand: arc length inside the disk times f, normalized by the
    # disk area pi*sig^2.
    v = rp * fnv * dphi * dr / (np.pi * sig * sig)
    s = np.sum(v, axis=saxis)
    return s
def convolve2d_gauss(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    azimuthally symmetric function in two dimensions and g is a
    gaussian given by:

    g(r) = 1/(2*pi*s^2) Exp[-r^2/(2*s^2)]

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.
    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.
    sig : float
        Width parameter of the gaussian.
    nstep : int
        Number of sampling point for numeric integration.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)
    # Integrate over [r - 10*sig, r + 10*sig]; the gaussian is
    # negligible beyond that range.
    rmin = r - 10 * sig
    rmax = r + 10 * sig
    rmin[rmin < 0] = 0
    delta = (rmax - rmin) / nstep
    # Midpoint-rule radial grid: rp are annulus midpoints, dr widths.
    redge = (rmin[:, np.newaxis] +
             delta[:, np.newaxis] *
             np.linspace(0, nstep, nstep + 1)[np.newaxis, :])
    rp = 0.5 * (redge[:, 1:] + redge[:, :-1])
    dr = redge[:, 1:] - redge[:, :-1]
    fnv = fn(rp)
    r = r.reshape(r.shape + (1,))
    saxis = 1
    sig2 = sig * sig
    x = r * rp / (sig2)
    # Lazily build and memoize (as an attribute on the function object)
    # a spline of the exponentially scaled modified Bessel function
    # I0e(t) = exp(-t)*I0(t) -- presumably a faster stand-in for calling
    # special.ive directly (see the commented-out je2 line below).
    if 'je_fn' not in convolve2d_gauss.__dict__:
        t = 10 ** np.linspace(-8, 8, 1000)
        t = np.insert(t, 0, [0])
        je = special.ive(0, t)
        convolve2d_gauss.je_fn = UnivariateSpline(t, je, k=2, s=0)
    je = convolve2d_gauss.je_fn(x.flat).reshape(x.shape)
    # je2 = special.ive(0,x)
    # Azimuthal integral of the gaussian over each annulus; the exp(x)
    # factor undoes the exponential scaling baked into I0e.
    v = (rp * fnv / (sig2) * je * np.exp(x - (r * r + rp * rp) /
                                         (2 * sig2)) * dr)
    s = np.sum(v, axis=saxis)
    return s
def make_pixel_offset(npix, xpix=0.0, ypix=0.0):
    """Make a 2D array with the distance of each pixel from a
    reference direction in pixel coordinates. Pixel coordinates are
    defined such that (0,0) is located at the center of the coordinate
    grid."""
    # Signed offsets of each pixel from the grid center, shifted by the
    # reference position.
    centered = np.linspace(0, npix - 1, npix) - (npix - 1) / 2.
    dx = np.abs(centered - xpix)
    dy = np.abs(centered - ypix)
    # Broadcast to an (npix, npix) radial-distance map.
    return np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=0.0, ypix=0.0):
    """Make kernel for a 2D gaussian.

    Parameters
    ----------
    sigma : float
        68% containment radius in degrees.
    """
    # Convert the 68% containment radius to a gaussian sigma, then to
    # pixel units.
    s = sigma / 1.5095921854516636 / cdelt
    dxy = make_pixel_offset(npix, xpix, ypix)
    k = 1. / (2 * np.pi * s ** 2) * np.exp(-dxy ** 2 / (s ** 2 * 2.0))
    # Normalize to unit integral over solid angle.
    k /= (np.sum(k) * np.radians(cdelt) ** 2)
    return k
def make_disk_kernel(radius, npix=501, cdelt=0.01, xpix=0.0, ypix=0.0):
    """Make kernel for a 2D disk.

    Parameters
    ----------
    radius : float
        Disk radius in deg.
    """
    r_pix = radius / cdelt
    dxy = make_pixel_offset(npix, xpix, ypix)
    # Heaviside step: 1 inside the disk, 0 outside.
    k = 0.5 * (np.sign(r_pix - dxy) + 1.0)
    # Normalize to unit integral over solid angle.
    k /= (np.sum(k) * np.radians(cdelt) ** 2)
    return k
def make_cdisk_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                      normalize=False):
    """Make a kernel for a PSF-convolved 2D disk.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`
    sigma : float
        68% containment radius in degrees.
    """
    dtheta = psf.dtheta
    energies = psf.energies
    # Angular offset of each pixel from the kernel center, in degrees.
    pix_off = make_pixel_offset(npix, xpix, ypix)
    pix_off *= cdelt
    k = np.zeros((len(energies), npix, npix))
    for idx in range(len(energies)):
        fn = lambda t: psf.eval(idx, t, scale_fn=psf_scale_fn)
        profile = convolve2d_disk(fn, dtheta, sigma)
        k[idx] = np.interp(np.ravel(pix_off), dtheta,
                           profile).reshape(pix_off.shape)
    if normalize:
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
def make_cgauss_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                       normalize=False):
    """Make a kernel for a PSF-convolved 2D gaussian.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`
    sigma : float
        68% containment radius in degrees.
    """
    # Convert the 68% containment radius to a gaussian sigma.
    sig = sigma / 1.5095921854516636
    dtheta = psf.dtheta
    energies = psf.energies
    # Angular offset of each pixel from the kernel center, in degrees.
    pix_off = make_pixel_offset(npix, xpix, ypix)
    pix_off *= cdelt
    k = np.zeros((len(energies), npix, npix))
    for idx in range(len(energies)):
        fn = lambda t: psf.eval(idx, t, scale_fn=psf_scale_fn)
        profile = convolve2d_gauss(fn, dtheta, sig)
        k[idx] = np.interp(np.ravel(pix_off), dtheta,
                           profile).reshape(pix_off.shape)
    if normalize:
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False):
    """
    Generate a kernel for a point-source.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`
    npix : int
        Number of pixels in X and Y dimensions.
    cdelt : float
        Pixel size in degrees.
    """
    energies = psf.energies
    # Angular offset of each pixel from the kernel center, in degrees.
    pix_off = make_pixel_offset(npix, xpix, ypix)
    pix_off *= cdelt
    k = np.zeros((len(energies), npix, npix))
    for idx in range(len(energies)):
        k[idx] = psf.eval(idx, pix_off, scale_fn=psf_scale_fn)
    if normalize:
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
def rebin_map(k, nebin, npix, rebin):
    """Spatially downsample map *k* of shape (nebin, npix*rebin,
    npix*rebin) by a factor *rebin*, averaging over each rebin x rebin
    block. Returns *k* unchanged when rebin <= 1."""
    if rebin > 1:
        # Collapse the fast axis in blocks of 'rebin', swap, repeat for
        # the other spatial axis, then swap back.
        out = np.sum(k.reshape((nebin, npix * rebin, npix, rebin)), axis=3)
        out = out.swapaxes(1, 2)
        out = np.sum(out.reshape(nebin, npix, npix, rebin), axis=3)
        out = out.swapaxes(1, 2)
        # Divide by the block size to average rather than sum.
        out /= rebin ** 2
        return out
    return k
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from skbio.parse.sequences import (MinimalRfamParser, RfamFinder,
ChangedSequence, is_empty_or_html)
from skbio.core.alignment import Alignment
#from cogent.struct.rna2d import WussStructure
#from cogent.core.moltype import BYTES
#Sequence = BYTES.Sequence
from skbio.core.exception import FastqParseError, RecordError
from skbio.parse.sequences import parse_fastq
from skbio.parse.sequences import parse_fasta
from unittest import TestCase, main
class GenericFastaTest(TestCase):
    """Setup data for all the various FASTA parsers."""
    def setUp(self):
        """standard files"""
        # Each fixture is a list of lines (split on '\n') since the
        # parsers accept any line iterable, not just file objects.
        self.labels = '>abc\n>def\n>ghi\n'.split('\n')  # labels only, no sequences
        self.oneseq = '>abc\nUCAG\n'.split('\n')  # single one-line record
        self.multiline = '>xyz\nUUUU\nCC\nAAAAA\nG'.split('\n')  # seq spans lines
        self.threeseq = '>123\na\n> \t abc \t \ncag\ngac\n>456\nc\ng'.split(
            '\n')
        self.twogood = '>123\n\n> \t abc \t \ncag\ngac\n>456\nc\ng'.split(
            '\n')  # first record has an empty sequence (bad)
        self.oneX = '>123\nX\n> \t abc \t \ncag\ngac\n>456\nc\ng'.split('\n')
        self.nolabels = 'GJ>DSJGSJDF\nSFHKLDFS>jkfs\n'.split('\n')  # no FASTA labels
        self.empty = []
class ParseFastaTests(GenericFastaTest):
    """Tests of parse_fasta: returns (label, seq) tuples."""
    def test_empty(self):
        """parse_fasta should return empty list from 'file' w/o labels
        """
        self.assertEqual(list(parse_fasta(self.empty)), [])
        self.assertEqual(list(parse_fasta(self.nolabels, strict=False)),
                         [])
        # In strict mode, label-less input is an error.
        self.assertRaises(RecordError, list, parse_fasta(self.nolabels))
    def test_no_labels(self):
        """parse_fasta should return empty list from file w/o seqs"""
        # should fail if strict (the default)
        self.assertRaises(RecordError, list,
                          parse_fasta(self.labels, strict=True))
        # if not strict, should skip the records
        self.assertEqual(list(parse_fasta(self.labels, strict=False)),
                         [])
    def test_single(self):
        """parse_fasta should read single record as (label, seq) tuple
        """
        f = list(parse_fasta(self.oneseq))
        self.assertEqual(len(f), 1)
        a = f[0]
        self.assertEqual(a, ('abc', 'UCAG'))
        # Multi-line sequences are concatenated into a single string.
        f = list(parse_fasta(self.multiline))
        self.assertEqual(len(f), 1)
        a = f[0]
        self.assertEqual(a, ('xyz', 'UUUUCCAAAAAG'))
    def test_gt_bracket_in_seq(self):
        """parse_fasta handles alternate finder function
        this test also illustrates how to use the parse_fasta
        to handle "sequences" that start with a > symbol, which can
        happen when we abuse the parse_fasta to parse
        fasta-like sequence quality files.
        """
        oneseq_w_gt = '>abc\n>CAG\n'.split('\n')
        def get_two_line_records(infile):
            # Pair up consecutive lines as (label, sequence) records so
            # a leading '>' on the second line is not read as a new label.
            line1 = None
            for line in infile:
                if line1 is None:
                    line1 = line
                else:
                    yield (line1, line)
                    line1 = None
        f = list(parse_fasta(oneseq_w_gt, finder=get_two_line_records))
        self.assertEqual(len(f), 1)
        a = f[0]
        self.assertEqual(a, ('abc', '>CAG'))
    def test_multiple(self):
        """parse_fasta should read multiline records correctly"""
        f = list(parse_fasta(self.threeseq))
        self.assertEqual(len(f), 3)
        a, b, c = f
        self.assertEqual(a, ('123', 'a'))
        # Label whitespace is stripped; sequence lines are joined.
        self.assertEqual(b, ('abc', 'caggac'))
        self.assertEqual(c, ('456', 'cg'))
    def test_multiple_bad(self):
        """parse_fasta should complain or skip bad records"""
        self.assertRaises(RecordError, list, parse_fasta(self.twogood))
        # Non-strict mode drops the empty-sequence record.
        f = list(parse_fasta(self.twogood, strict=False))
        self.assertEqual(len(f), 2)
        a, b = f
        self.assertEqual(a, ('abc', 'caggac'))
class ParseFastqTests(TestCase):
    """Tests of parse_fastq on well-formed and malformed input."""
    def setUp(self):
        """ Initialize variables to be used by the tests """
        # FASTQ_EXAMPLE and FASTQ_EXAMPLE_2 are module-level fixtures
        # defined elsewhere in this file (not visible in this chunk);
        # they are split into line lists for the parser.
        self.FASTQ_EXAMPLE = FASTQ_EXAMPLE.split('\n')
        self.FASTQ_EXAMPLE_2 = FASTQ_EXAMPLE_2.split('\n')
    def test_parse(self):
        """sequence and info objects should correctly match"""
        # DATA presumably maps label -> {'seq': ..., 'qual': ...}; it is
        # defined at module level outside this chunk -- verify below.
        for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE):
            self.assertTrue(label in DATA)
            self.assertEqual(seq, DATA[label]["seq"])
            self.assertEqual(qual, DATA[label]["qual"])
    def test_parse_error(self):
        """Does this raise a FastqParseError with incorrect input?"""
        with self.assertRaises(FastqParseError):
            list(parse_fastq(self.FASTQ_EXAMPLE_2, strict=True))
class RfamParserTests(TestCase):
    """ Tests components of the rfam parser, in the rfam.py file """
    def setUp(self):
        """ Construct some fake data for testing purposes """
        # Fixtures below are module-level Stockholm-format strings
        # defined near the end of this file, split into line lists.
        self._fake_headers = []
        temp = list(fake_headers.split('\n'))
        for line in temp:
            self._fake_headers.append(line.strip())
        del temp
        self._fake_record_no_headers =\
            list(fake_record_no_headers.split('\n'))
        self._fake_record_no_sequences =\
            list(fake_record_no_sequences.split('\n'))
        self._fake_record_no_structure =\
            list(fake_record_no_structure.split('\n'))
        self._fake_two_records =\
            list(fake_two_records.split('\n'))
        self._fake_record =\
            list(fake_record.split('\n'))
        self._fake_record_bad_header_1 =\
            list(fake_record_bad_header_1.split('\n'))
        self._fake_record_bad_header_2 =\
            list(fake_record_bad_header_2.split('\n'))
        self._fake_record_bad_sequence_1 =\
            list(fake_record_bad_sequence_1.split('\n'))
        self._fake_record_bad_structure_1 =\
            list(fake_record_bad_structure_1.split('\n'))
        self._fake_record_bad_structure_2 =\
            list(fake_record_bad_structure_2.split('\n'))
        # NOTE(review): single_family is not visible in this part of the
        # file -- confirm it is defined at module level below.
        self.single_family = single_family.split('\n')
    def test_is_empty_or_html(self):
        """is_empty_or_html: should ignore empty and HTML line"""
        line = ' '
        self.assertEqual(is_empty_or_html(line), True)
        line = '\n\n'
        self.assertEqual(is_empty_or_html(line), True)
        line = '<pre>'
        self.assertEqual(is_empty_or_html(line), True)
        line = '</pre>\n\n'
        self.assertEqual(is_empty_or_html(line), True)
        # A line with non-HTML content must not be skipped.
        line = '\t<//\n'
        self.assertEqual(is_empty_or_html(line), False)
    def test_MinimalRfamParser_strict_missing_fields(self):
        """MinimalRfamParser: toggle strict functions w/ missing fields"""
        # strict = True
        self.assertRaises(RecordError,list,\
            MinimalRfamParser(self._fake_record_no_sequences))
        self.assertRaises(RecordError,list,\
            MinimalRfamParser(self._fake_record_no_structure))
        # strict = False
        # no header shouldn't be a problem
        self.assertEqual(list(MinimalRfamParser(self._fake_record_no_headers,\
            strict=False)), [([],{'Z11765.1/1-89':'GGUC'},'............>>>')])
        # should get empty on missing sequence or missing structure
        self.assertEqual(list(MinimalRfamParser(self._fake_record_no_sequences,\
            strict=False)), [])
        self.assertEqual(list(MinimalRfamParser(self._fake_record_no_structure,\
            strict=False)), [])
    def test_MinimalRfamParser_strict_invalid_sequence(self):
        """MinimalRfamParser: toggle strict functions w/ invalid seq
        """
        #strict = True
        self.assertRaises(RecordError,list,\
            MinimalRfamParser(self._fake_record_bad_sequence_1))
        # strict = False
        # you expect to get back as much information as possible, also
        # half records or sequences
        result = MinimalRfamParser(self._fake_record_bad_sequence_1,strict=False)
        self.assertEqual(len(list(MinimalRfamParser(\
            self._fake_record_bad_sequence_1,strict=False))[0][1].NamedSeqs), 3)
    def test_MinimalRfamParser_strict_invalid_structure(self):
        """MinimalRfamParser: toggle strict functions w/ invalid structure
        """
        #strict = True
        self.assertRaises(RecordError,list,\
            MinimalRfamParser(self._fake_record_bad_structure_1))
        # strict = False
        # A malformed structure line yields None for the structure slot.
        self.assertEqual(list(MinimalRfamParser(\
            self._fake_record_bad_structure_1,strict=False))[0][2],None)
    def test_MinimalRfamParser_w_valid_data(self):
        """MinimalRfamParser: integrity of output """
        # Some ugly constructions here, but this is what the output of
        # parsing fake_two_records should be
        headers = ['#=GF AC RF00014','#=GF AU Mifsud W']
        sequences =\
        {'U17136.1/898-984':\
        ''.join(['AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA',\
        'AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU']),\
        'M15749.1/155-239':\
        ''.join(['AACGCAUCGGAUUUCCCGGUGUAACGAA-UUUUCAAGUGCUUCUUGCAUU',\
        'AGCAAGUUUGAUCCCGACUCCUG-CGAGUCGGGAUUU']),\
        'AF090431.1/222-139':\
        ''.join(['CUCACAUCAGAUUUCCUGGUGUAACGAA-UUUUCAAGUGCUUCUUGCAUA',\
        'AGCAAGUUUGAUCCCGACCCGU--AGGGCCGGGAUUU'])}
        # NOTE(review): WussStructure's import (cogent.struct.rna2d) is
        # commented out at the top of this file, so this test will raise
        # NameError as written -- confirm the intended dependency.
        structure = WussStructure(''.join(\
        ['...<<<<<<<.....>>>>>>>....................<<<<<...',\
        '.>>>>>....<<<<<<<<<<.....>>>>>>>>>>..']))
        data = []
        for r in MinimalRfamParser(self._fake_two_records, strict=False):
            data.append(r)
        self.assertEqual(data[0],(headers,sequences,structure))
        assert isinstance(data[0][1],Alignment)
        # This line tests that invalid entries are ignored when strict=False
        # Note, there are two records in self._fake_two_records, but 2nd is
        # invalid
        self.assertEqual(len(data),1)
    def test_RfamFinder(self):
        """RfamFinder: integrity of output """
        # Records are delimited by '//' terminator lines.
        fake_record = ['a','//','b','b','//']
        num_records = 0
        data = []
        for r in RfamFinder(fake_record):
            data.append(r)
            num_records += 1
        self.assertEqual(num_records, 2)
        self.assertEqual(data[0], ['a','//'])
        self.assertEqual(data[1], ['b','b','//'])
    def test_ChangedSequence(self):
        """ChangedSequence: integrity of output"""
        # Made up input, based on a line that would look like:
        # U17136.1/898-984 AACA..CAU..CAGAUUUCCU..GGUGUAA.CGAA
        s_in = 'AACA..CAU..CAGAUUUCCU..GGUGUAA.CGAA'
        s_out = 'AACA--CAU--CAGAUUUCCU--GGUGUAA-CGAA'
        sequence = ChangedSequence(s_in)
        self.assertEqual(sequence, s_out)
        # test some extremes on the seq
        # sequence of all blanks
        s_in = '.' * 5
        s_out = '-' * 5
        sequence = ChangedSequence(s_in)
        self.assertEqual(sequence, s_out)
        # sequence of no blanks
        s_in = 'U' * 5
        s_out = 'U' * 5
        sequence = ChangedSequence(s_in)
        self.assertEqual(sequence, s_out)
# This is an altered version of some header info from Rfam.seed modified to
# incorporate different cases for testing
# fake_headers: assorted #=GF annotation lines (including a made-up 'PK'
# tag) exercised by the header-related tests above.
fake_headers = """#=GF AC RF00001
#=GF AU Griffiths-Jones SR
#=GF ID 5S_rRNA
#=GF RT 5S Ribosomal RNA Database.
#=GF DR URL; http://oberon.fvms.ugent.be:8080/rRNA/ssu/index.html;
#=GF DR URL; http://rdp.cme.msu.edu/html/;
#=GF CC This is a short
#=GF CC comment
#=GF SQ 606
#=GF PK not real"""
# Minimal Stockholm-like records, each missing one section (headers,
# sequences, or structure); used by the strict-mode error tests.
fake_record_no_headers ="""Z11765.1/1-89 GGUC
#=GC SS_cons ............>>>
//"""
fake_record_no_sequences ="""#=GF AC RF00006
#=GC SS_cons ............>
//"""
fake_record_no_structure ="""#=GF AC RF00006
Z11765.1/1-89 GGUCAGC
//"""
# fake_two_records: one valid record followed by an invalid one (headers
# only); the second is expected to be skipped when strict=False.
fake_two_records ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//
#=GF AC RF00015
//"""
# fake_record: the same valid record on its own (the well-formed case).
fake_record ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# fake_record_bad_header_1: header line '#=GF AUMifsud W' is missing the
# whitespace between the tag and its value.
fake_record_bad_header_1 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AUMifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# fake_record_bad_header_2: header line '#=GFAUMifsud W' has no whitespace
# after '#=GF' at all.
fake_record_bad_header_2 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GFAUMifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
fake_record_bad_sequence_1 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
fake_record_bad_structure_1 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
fake_record_bad_structure_2 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<!!!
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
single_family=\
"""K02120.1/628-682 AUGGGAAAUUCCCCCUCCUAUAACCCCCCCGCUGGUAUCUCCCCCUCAGA
D00647.1/629-683 AUGGGAAACUCCCCCUCCUAUAACCCCCCCGCUGGCAUCUCCCCCUCAGA
#=GC SS_cons <<<<<<.........>>>>>>.........<<<<<<.............>
K02120.1/628-682 CUGGC
D00647.1/629-683 CUGGC
#=GC SS_cons >>>>>
//"""
DATA = {
"GAPC_0015:6:1:1259:10413#0/1":
dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
qual=r'````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF'),
"GAPC_0015:6:1:1283:11957#0/1":
dict(seq='TATGTATATATAACATATACATATATACATACATA',
qual=r']KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb'),
"GAPC_0015:6:1:1284:10484#0/1":
dict(seq='TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG',
qual=r'UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_'),
"GAPC_0015:6:1:1287:17135#0/1":
dict(seq='TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA',
qual=r'^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K'),
"GAPC_0015:6:1:1293:3171#0/1":
dict(seq="AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA",
qual=r"b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c"),
"GAPC_0015:6:1:1297:10729#0/1":
dict(seq="TAATGCCAAAGAAATATTTCCAAACTACATGCTTA",
qual=r"T\ccLbb``bacc]_cacccccLccc\ccTccYL^"),
"GAPC_0015:6:1:1299:5940#0/1":
dict(seq="AATCAAGAAATGAAGATTTATGTATGTGAAGAATA",
qual=r"dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde"),
"GAPC_0015:6:1:1308:6996#0/1":
dict(seq="TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA",
qual=r"a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc"),
"GAPC_0015:6:1:1314:13295#0/1":
dict(seq="AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT",
qual=r"cLcc\\dddddaaYd`T```bLYT\`a```bZccc"),
"GAPC_0015:6:1:1317:3403#0/1":
dict(seq="TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG",
# had to add space in qual line
qual=r"\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc ".strip())
}
FASTQ_EXAMPLE = r"""@GAPC_0015:6:1:1259:10413#0/1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+GAPC_0015:6:1:1259:10413#0/1
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@GAPC_0015:6:1:1283:11957#0/1
TATGTATATATAACATATACATATATACATACATA
+GAPC_0015:6:1:1283:11957#0/1
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
@GAPC_0015:6:1:1284:10484#0/1
TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG
+GAPC_0015:6:1:1284:10484#0/1
UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_
@GAPC_0015:6:1:1287:17135#0/1
TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA
+GAPC_0015:6:1:1287:17135#0/1
^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K
@GAPC_0015:6:1:1293:3171#0/1
AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA
+GAPC_0015:6:1:1293:3171#0/1
b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c
@GAPC_0015:6:1:1297:10729#0/1
TAATGCCAAAGAAATATTTCCAAACTACATGCTTA
+GAPC_0015:6:1:1297:10729#0/1
T\ccLbb``bacc]_cacccccLccc\ccTccYL^
@GAPC_0015:6:1:1299:5940#0/1
AATCAAGAAATGAAGATTTATGTATGTGAAGAATA
+GAPC_0015:6:1:1299:5940#0/1
dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde
@GAPC_0015:6:1:1308:6996#0/1
TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA
+GAPC_0015:6:1:1308:6996#0/1
a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc
@GAPC_0015:6:1:1314:13295#0/1
AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT
+GAPC_0015:6:1:1314:13295#0/1
cLcc\\dddddaaYd`T```bLYT\`a```bZccc
@GAPC_0015:6:1:1317:3403#0/1
TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG
+GAPC_0015:6:1:1317:3403#0/1
\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc"""
FASTQ_EXAMPLE_2 = r"""@GAPC_0017:6:1:1259:10413#0/1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+GAPC_0015:6:1:1259:10413#0/1
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@GAPC_0015:6:1:1283:11957#0/1
TATGTATATATAACATATACATATATACATACATA
+GAPC_0015:6:1:1283:11957#0/1
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
@GAPC_0015:6:1:1284:10484#0/1
"""
if __name__ == "__main__":
main()
TST: Add tests for utility functions in Rfam
The following functions were untested in skbio but were tested in
PyCogent; the test cases were ported over and a few minor changes were
made to take advantage of the TestCase object:
- is_rfam_header_line
- is_rfam_structure_line
- is_rfam_seq_line
Five failures remain.
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import division
from skbio.parse.sequences import (MinimalRfamParser, RfamFinder,
ChangedSequence, is_empty_or_html,
is_rfam_header_line, is_rfam_seq_line,
is_rfam_structure_line)
from skbio.core.alignment import Alignment
#from cogent.struct.rna2d import WussStructure
#from cogent.core.moltype import BYTES
#Sequence = BYTES.Sequence
from skbio.core.exception import FastqParseError, RecordError
from skbio.parse.sequences import parse_fastq
from skbio.parse.sequences import parse_fasta
from unittest import TestCase, main
class GenericFastaTest(TestCase):
    """Shared fixture data for the various FASTA parser tests."""

    def setUp(self):
        """Build the standard in-memory 'files' (lists of lines)."""
        # Each fixture is written out as the explicit list of lines that
        # '...'.split('\n') would produce, so the expected shape is visible.
        self.labels = ['>abc', '>def', '>ghi', '']
        self.oneseq = ['>abc', 'UCAG', '']
        self.multiline = ['>xyz', 'UUUU', 'CC', 'AAAAA', 'G']
        self.threeseq = ['>123', 'a', '> \t abc \t ', 'cag', 'gac',
                         '>456', 'c', 'g']
        self.twogood = ['>123', '', '> \t abc \t ', 'cag', 'gac',
                        '>456', 'c', 'g']
        self.oneX = ['>123', 'X', '> \t abc \t ', 'cag', 'gac',
                     '>456', 'c', 'g']
        self.nolabels = ['GJ>DSJGSJDF', 'SFHKLDFS>jkfs', '']
        self.empty = []
class ParseFastaTests(GenericFastaTest):
    """Tests of parse_fasta: yields (label, seq) tuples."""

    def test_empty(self):
        """parse_fasta should return empty list from 'file' w/o labels"""
        self.assertEqual(list(parse_fasta(self.empty)), [])
        self.assertEqual(list(parse_fasta(self.nolabels, strict=False)), [])
        self.assertRaises(RecordError, list, parse_fasta(self.nolabels))

    def test_no_labels(self):
        """parse_fasta should return empty list from file w/o seqs"""
        # strict (the default) raises on label-only records ...
        self.assertRaises(RecordError, list,
                          parse_fasta(self.labels, strict=True))
        # ... while non-strict mode simply skips them
        self.assertEqual(list(parse_fasta(self.labels, strict=False)), [])

    def test_single(self):
        """parse_fasta should read single record as (label, seq) tuple"""
        records = list(parse_fasta(self.oneseq))
        self.assertEqual(records, [('abc', 'UCAG')])
        # multi-line sequences are joined into one string
        records = list(parse_fasta(self.multiline))
        self.assertEqual(records, [('xyz', 'UUUUCCAAAAAG')])

    def test_gt_bracket_in_seq(self):
        """parse_fasta handles alternate finder function

        This also illustrates how to use parse_fasta to handle
        "sequences" that start with a > symbol, which can happen when we
        abuse parse_fasta to parse fasta-like sequence quality files.
        """
        oneseq_w_gt = '>abc\n>CAG\n'.split('\n')

        def pair_up(infile):
            # Yield successive (label, data) pairs of raw lines.
            pending = None
            for line in infile:
                if pending is None:
                    pending = line
                else:
                    yield (pending, line)
                    pending = None

        records = list(parse_fasta(oneseq_w_gt, finder=pair_up))
        self.assertEqual(records, [('abc', '>CAG')])

    def test_multiple(self):
        """parse_fasta should read multiline records correctly"""
        records = list(parse_fasta(self.threeseq))
        self.assertEqual(records,
                         [('123', 'a'), ('abc', 'caggac'), ('456', 'cg')])

    def test_multiple_bad(self):
        """parse_fasta should complain or skip bad records"""
        self.assertRaises(RecordError, list, parse_fasta(self.twogood))
        records = list(parse_fasta(self.twogood, strict=False))
        self.assertEqual(len(records), 2)
        self.assertEqual(records[0], ('abc', 'caggac'))
class ParseFastqTests(TestCase):
    """Tests of parse_fastq against the module-level FASTQ fixtures."""

    def setUp(self):
        """Split the raw FASTQ fixtures into line lists."""
        self.FASTQ_EXAMPLE = FASTQ_EXAMPLE.split('\n')
        self.FASTQ_EXAMPLE_2 = FASTQ_EXAMPLE_2.split('\n')

    def test_parse(self):
        """sequence and info objects should correctly match"""
        for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE):
            self.assertTrue(label in DATA)
            expected = DATA[label]
            self.assertEqual(seq, expected["seq"])
            self.assertEqual(qual, expected["qual"])

    def test_parse_error(self):
        """Does this raise a FastqParseError with incorrect input?"""
        with self.assertRaises(FastqParseError):
            list(parse_fastq(self.FASTQ_EXAMPLE_2, strict=True))
class RfamParserTests(TestCase):
    """Tests components of the Rfam parser, in the rfam.py file."""

    def setUp(self):
        """Construct some fake data for testing purposes."""
        # Header lines are stripped because the parser receives them
        # without surrounding whitespace.
        self._fake_headers = [line.strip()
                              for line in fake_headers.split('\n')]
        # str.split already returns a list, so the redundant list()
        # wrappers from the original were dropped.
        self._fake_record_no_headers = fake_record_no_headers.split('\n')
        self._fake_record_no_sequences = fake_record_no_sequences.split('\n')
        self._fake_record_no_structure = fake_record_no_structure.split('\n')
        self._fake_two_records = fake_two_records.split('\n')
        self._fake_record = fake_record.split('\n')
        self._fake_record_bad_header_1 = fake_record_bad_header_1.split('\n')
        self._fake_record_bad_header_2 = fake_record_bad_header_2.split('\n')
        self._fake_record_bad_sequence_1 = \
            fake_record_bad_sequence_1.split('\n')
        self._fake_record_bad_structure_1 = \
            fake_record_bad_structure_1.split('\n')
        self._fake_record_bad_structure_2 = \
            fake_record_bad_structure_2.split('\n')
        self.single_family = single_family.split('\n')

    def test_is_empty_or_html(self):
        """is_empty_or_html: should ignore empty and HTML line"""
        line = ' '
        self.assertEqual(is_empty_or_html(line), True)
        line = '\n\n'
        self.assertEqual(is_empty_or_html(line), True)
        line = '<pre>'
        self.assertEqual(is_empty_or_html(line), True)
        line = '</pre>\n\n'
        self.assertEqual(is_empty_or_html(line), True)
        line = '\t<//\n'
        self.assertEqual(is_empty_or_html(line), False)

    def test_is_rfam_header_line(self):
        """is_rfam_header_line: functions correctly w/ various lines"""
        self.assertEqual(is_rfam_header_line('#=GF'), True)
        self.assertEqual(is_rfam_header_line('#=GF AC RF00001'), True)
        self.assertEqual(is_rfam_header_line(
            '#=GF CC until it is required for transcription. '), True)
        self.assertEqual(is_rfam_header_line(''), False)
        self.assertEqual(is_rfam_header_line('X07545.1/505-619 '), False)
        self.assertEqual(is_rfam_header_line('#=G'), False)
        self.assertEqual(is_rfam_header_line('=GF'), False)
        self.assertEqual(is_rfam_header_line('#=GC SS_cons'), False)

    def test_is_rfam_seq_line(self):
        """is_rfam_seq_line: functions correctly w/ various lines"""
        s = ('X07545.1/505-619 .'
             '.ACCCGGC.CAUA...GUGGCCG.GGCAA.CAC.CCGG.U.C..UCGUU')
        # Bug fix: the original passed the literal string 's' instead of
        # the prepared sequence line bound to the variable s above.
        self.assertTrue(is_rfam_seq_line(s))
        self.assertTrue(is_rfam_seq_line('X07545.1/505-619'))
        self.assertTrue(is_rfam_seq_line('M21086.1/8-123'))
        self.assertFalse(is_rfam_seq_line(''))
        self.assertFalse(is_rfam_seq_line('#GF='))
        self.assertFalse(is_rfam_seq_line('//blah'))

    def test_is_rfam_structure_line(self):
        """is_rfam_structure_line: functions correctly w/ various lines"""
        s = ('#=GC SS_cons '
             '<<<<<<<<<........<<.<<<<.<...<.<...<<<<.<.<.......')
        self.assertEqual(is_rfam_structure_line(s), True)
        self.assertEqual(is_rfam_structure_line('#=GC SS_cons'), True)
        self.assertEqual(is_rfam_structure_line('#=GC SS_cons  '), True)
        self.assertEqual(is_rfam_structure_line(''), False)
        self.assertEqual(is_rfam_structure_line(' '), False)
        self.assertEqual(is_rfam_structure_line('#=GF AC RF00001'), False)
        self.assertEqual(is_rfam_structure_line('X07545.1/505-619'), False)
        self.assertEqual(is_rfam_structure_line('=GC SS_cons'), False)
        self.assertEqual(is_rfam_structure_line('#=GC'), False)
        self.assertEqual(is_rfam_structure_line('#=GC RF'), False)

    def test_MinimalRfamParser_strict_missing_fields(self):
        """MinimalRfamParser: toggle strict functions w/ missing fields"""
        # strict = True: missing sequences or structure is an error
        self.assertRaises(RecordError, list,
                          MinimalRfamParser(self._fake_record_no_sequences))
        self.assertRaises(RecordError, list,
                          MinimalRfamParser(self._fake_record_no_structure))
        # strict = False
        # no header shouldn't be a problem
        self.assertEqual(
            list(MinimalRfamParser(self._fake_record_no_headers,
                                   strict=False)),
            [([], {'Z11765.1/1-89': 'GGUC'}, '............>>>')])
        # should get empty on missing sequence or missing structure
        self.assertEqual(
            list(MinimalRfamParser(self._fake_record_no_sequences,
                                   strict=False)), [])
        self.assertEqual(
            list(MinimalRfamParser(self._fake_record_no_structure,
                                   strict=False)), [])

    def test_MinimalRfamParser_strict_invalid_sequence(self):
        """MinimalRfamParser: toggle strict functions w/ invalid seq"""
        # strict = True
        self.assertRaises(RecordError, list,
                          MinimalRfamParser(self._fake_record_bad_sequence_1))
        # strict = False: you expect to get back as much information as
        # possible, also half records or sequences.
        # NOTE(review): NamedSeqs is the PyCogent Alignment API — confirm
        # the skbio Alignment exposes it (one of the known failures).
        self.assertEqual(
            len(list(MinimalRfamParser(self._fake_record_bad_sequence_1,
                                       strict=False))[0][1].NamedSeqs), 3)

    def test_MinimalRfamParser_strict_invalid_structure(self):
        """MinimalRfamParser: toggle strict functions w/ invalid structure"""
        # strict = True
        self.assertRaises(RecordError, list,
                          MinimalRfamParser(self._fake_record_bad_structure_1))
        # strict = False: the unparsable structure comes back as None
        self.assertEqual(
            list(MinimalRfamParser(self._fake_record_bad_structure_1,
                                   strict=False))[0][2], None)

    def test_MinimalRfamParser_w_valid_data(self):
        """MinimalRfamParser: integrity of output"""
        # Expected output of parsing fake_two_records: the headers, the
        # joined/degapped sequences, and the consensus structure.
        headers = ['#=GF AC RF00014', '#=GF AU Mifsud W']
        sequences = {
            'U17136.1/898-984':
                ''.join(['AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA',
                         'AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU']),
            'M15749.1/155-239':
                ''.join(['AACGCAUCGGAUUUCCCGGUGUAACGAA-UUUUCAAGUGCUUCUUGCAUU',
                         'AGCAAGUUUGAUCCCGACUCCUG-CGAGUCGGGAUUU']),
            'AF090431.1/222-139':
                ''.join(['CUCACAUCAGAUUUCCUGGUGUAACGAA-UUUUCAAGUGCUUCUUGCAUA',
                         'AGCAAGUUUGAUCCCGACCCGU--AGGGCCGGGAUUU'])}
        # NOTE(review): WussStructure comes from PyCogent and its import is
        # commented out at the top of this module, so this test currently
        # raises NameError (one of the known failures).
        structure = WussStructure(''.join(
            ['...<<<<<<<.....>>>>>>>....................<<<<<...',
             '.>>>>>....<<<<<<<<<<.....>>>>>>>>>>..']))
        data = list(MinimalRfamParser(self._fake_two_records, strict=False))
        self.assertEqual(data[0], (headers, sequences, structure))
        assert isinstance(data[0][1], Alignment)
        # Invalid entries are ignored when strict=False: fake_two_records
        # holds two records but the second one is invalid.
        self.assertEqual(len(data), 1)

    def test_RfamFinder(self):
        """RfamFinder: integrity of output"""
        fake_record = ['a', '//', 'b', 'b', '//']
        num_records = 0
        data = []
        for r in RfamFinder(fake_record):
            data.append(r)
            num_records += 1
        self.assertEqual(num_records, 2)
        self.assertEqual(data[0], ['a', '//'])
        self.assertEqual(data[1], ['b', 'b', '//'])

    def test_ChangedSequence(self):
        """ChangedSequence: integrity of output"""
        # Made up input, based on a line that would look like:
        # U17136.1/898-984 AACA..CAU..CAGAUUUCCU..GGUGUAA.CGAA
        s_in = 'AACA..CAU..CAGAUUUCCU..GGUGUAA.CGAA'
        s_out = 'AACA--CAU--CAGAUUUCCU--GGUGUAA-CGAA'
        sequence = ChangedSequence(s_in)
        self.assertEqual(sequence, s_out)
        # extremes: a sequence of all blanks ...
        s_in = '.' * 5
        s_out = '-' * 5
        sequence = ChangedSequence(s_in)
        self.assertEqual(sequence, s_out)
        # ... and a sequence with no blanks at all
        s_in = 'U' * 5
        s_out = 'U' * 5
        sequence = ChangedSequence(s_in)
        self.assertEqual(sequence, s_out)
# This is an altered version of some header info from Rfam.seed modified to
# incorporate different cases for testing
fake_headers = """#=GF AC RF00001
#=GF AU Griffiths-Jones SR
#=GF ID 5S_rRNA
#=GF RT 5S Ribosomal RNA Database.
#=GF DR URL; http://oberon.fvms.ugent.be:8080/rRNA/ssu/index.html;
#=GF DR URL; http://rdp.cme.msu.edu/html/;
#=GF CC This is a short
#=GF CC comment
#=GF SQ 606
#=GF PK not real"""
# Record with a sequence and structure line but no #=GF header lines.
fake_record_no_headers ="""Z11765.1/1-89 GGUC
#=GC SS_cons ............>>>
//"""
# Record with a header and structure line but no sequence lines.
fake_record_no_sequences ="""#=GF AC RF00006
#=GC SS_cons ............>
//"""
# Record with a header and sequence line but no #=GC SS_cons structure.
fake_record_no_structure ="""#=GF AC RF00006
Z11765.1/1-89 GGUCAGC
//"""
# Two records: a complete, well-formed one followed by an invalid
# (header-only) one; used to check that strict=False skips the bad record.
fake_two_records ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//
#=GF AC RF00015
//"""
# A single complete, well-formed Stockholm record.
fake_record ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# Malformed header: no separator between the AU tag and its value.
fake_record_bad_header_1 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AUMifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# Malformed header: no separators at all after the #=GF marker.
fake_record_bad_header_2 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GFAUMifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# Malformed sequence: no separator between the first label and its sequence.
fake_record_bad_sequence_1 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# Malformed structure: no separator between #=GC SS_cons and the structure.
fake_record_bad_structure_1 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons...<<<<<<<.....>>>>>>>....................<<<<<...
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# Malformed structure: contains '!' characters that are not legal structure
# symbols.
fake_record_bad_structure_2 ="""# STOCKHOLM 1.0
#=GF AC RF00014
#=GF AU Mifsud W
U17136.1/898-984 AACACAUCAGAUUUCCUGGUGUAACGAAUUUUUUAAGUGCUUCUUGCUUA
M15749.1/155-239 AACGCAUCGGAUUUCCCGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUU
AF090431.1/222-139 CUCACAUCAGAUUUCCUGGUGUAACGAA.UUUUCAAGUGCUUCUUGCAUA
#=GC SS_cons ...<<<<<<<.....>>>>>>>....................<<<<<!!!
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
U17136.1/898-984 AGCAAGUUUCAUCCCGACCCCCUCAGGGUCGGGAUUU
M15749.1/155-239 AGCAAGUUUGAUCCCGACUCCUG.CGAGUCGGGAUUU
AF090431.1/222-139 AGCAAGUUUGAUCCCGACCCGU..AGGGCCGGGAUUU
#=GC SS_cons .>>>>>....<<<<<<<<<<.....>>>>>>>>>>..
#=GC RF xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//"""
# A minimal single-family alignment (two sequences plus consensus structure).
single_family=\
"""K02120.1/628-682 AUGGGAAAUUCCCCCUCCUAUAACCCCCCCGCUGGUAUCUCCCCCUCAGA
D00647.1/629-683 AUGGGAAACUCCCCCUCCUAUAACCCCCCCGCUGGCAUCUCCCCCUCAGA
#=GC SS_cons <<<<<<.........>>>>>>.........<<<<<<.............>
K02120.1/628-682 CUGGC
D00647.1/629-683 CUGGC
#=GC SS_cons >>>>>
//"""
# Expected (seq, qual) pair for every read in FASTQ_EXAMPLE, keyed by the
# read label; raw strings keep the backslashes in the quality lines intact.
DATA = {
    "GAPC_0015:6:1:1259:10413#0/1":
        dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
             qual=r'````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF'),
    "GAPC_0015:6:1:1283:11957#0/1":
        dict(seq='TATGTATATATAACATATACATATATACATACATA',
             qual=r']KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb'),
    "GAPC_0015:6:1:1284:10484#0/1":
        dict(seq='TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG',
             qual=r'UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_'),
    "GAPC_0015:6:1:1287:17135#0/1":
        dict(seq='TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA',
             qual=r'^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K'),
    "GAPC_0015:6:1:1293:3171#0/1":
        dict(seq="AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA",
             qual=r"b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c"),
    "GAPC_0015:6:1:1297:10729#0/1":
        dict(seq="TAATGCCAAAGAAATATTTCCAAACTACATGCTTA",
             qual=r"T\ccLbb``bacc]_cacccccLccc\ccTccYL^"),
    "GAPC_0015:6:1:1299:5940#0/1":
        dict(seq="AATCAAGAAATGAAGATTTATGTATGTGAAGAATA",
             qual=r"dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde"),
    "GAPC_0015:6:1:1308:6996#0/1":
        dict(seq="TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA",
             qual=r"a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc"),
    "GAPC_0015:6:1:1314:13295#0/1":
        dict(seq="AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT",
             qual=r"cLcc\\dddddaaYd`T```bLYT\`a```bZccc"),
    "GAPC_0015:6:1:1317:3403#0/1":
        dict(seq="TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG",
             # had to add space in qual line
             qual=r"\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc ".strip())
}
# Ten valid FASTQ records matching the DATA table above.
FASTQ_EXAMPLE = r"""@GAPC_0015:6:1:1259:10413#0/1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+GAPC_0015:6:1:1259:10413#0/1
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@GAPC_0015:6:1:1283:11957#0/1
TATGTATATATAACATATACATATATACATACATA
+GAPC_0015:6:1:1283:11957#0/1
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
@GAPC_0015:6:1:1284:10484#0/1
TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG
+GAPC_0015:6:1:1284:10484#0/1
UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_
@GAPC_0015:6:1:1287:17135#0/1
TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA
+GAPC_0015:6:1:1287:17135#0/1
^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K
@GAPC_0015:6:1:1293:3171#0/1
AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA
+GAPC_0015:6:1:1293:3171#0/1
b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c
@GAPC_0015:6:1:1297:10729#0/1
TAATGCCAAAGAAATATTTCCAAACTACATGCTTA
+GAPC_0015:6:1:1297:10729#0/1
T\ccLbb``bacc]_cacccccLccc\ccTccYL^
@GAPC_0015:6:1:1299:5940#0/1
AATCAAGAAATGAAGATTTATGTATGTGAAGAATA
+GAPC_0015:6:1:1299:5940#0/1
dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde
@GAPC_0015:6:1:1308:6996#0/1
TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA
+GAPC_0015:6:1:1308:6996#0/1
a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc
@GAPC_0015:6:1:1314:13295#0/1
AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT
+GAPC_0015:6:1:1314:13295#0/1
cLcc\\dddddaaYd`T```bLYT\`a```bZccc
@GAPC_0015:6:1:1317:3403#0/1
TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG
+GAPC_0015:6:1:1317:3403#0/1
\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc"""
# Truncated FASTQ input (third record cut short) used to trigger
# FastqParseError in strict mode.
FASTQ_EXAMPLE_2 = r"""@GAPC_0017:6:1:1259:10413#0/1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+GAPC_0015:6:1:1259:10413#0/1
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@GAPC_0015:6:1:1283:11957#0/1
TATGTATATATAACATATACATATATACATACATA
+GAPC_0015:6:1:1283:11957#0/1
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
@GAPC_0015:6:1:1284:10484#0/1
"""
# Run the whole suite when executed as a script.
if __name__ == "__main__":
    main()
|
"""
Individual pages
"""
from dominate import document
from dominate.tags import *
import urllib2
from bs4 import BeautifulSoup
from pyteaser import SummarizeUrl
from flask import (
Blueprint,
g,
render_template)
# Blueprint under which the analytics routes below are registered.
analytics = Blueprint('analytics', __name__)

# Front pages whose feeds are summarized by analytics_check().
urls = [
    'http://www.wired.com/',
    'http://www.nytimes.com/',
    'http://www.technologyreview.com/lists/technologies/2014/'
]

# NOTE(review): disabled prototype code kept as a bare string expression so
# it never executes; preserved verbatim below.
'''
from collections import defaultdict
from string import punctuation
import urllib2
from bs4 import BeautifulSoup
from summarytool import SummaryTool
global fs
fs = SummaryTool()
print fs.__dict__
@analytics.route('/')
def index():
"""main index page"""
return render_template('index2.html', pages=g.pages.sorted[:3])
'''
def get_only_text(url):
    """Fetch *url* and return (title, text).

    The text is every <p> element's content joined with single spaces.
    """
    raw = urllib2.urlopen(url).read()
    soup = BeautifulSoup(raw.decode('utf8'))
    paragraphs = soup.find_all('p')
    text = ' '.join(p.text for p in paragraphs)
    return soup.title.text, text
@analytics.route('/analytics/')
def analytics_check():
"""about page"""
for url in urls:
feed_xml = urllib2.urlopen(url).read()
feed = BeautifulSoup(feed_xml.decode('utf8'))
to_summarize = map(lambda p: p.text, feed.find_all('guid'))
print 'length is %s'%(len(to_summarize))
for article_url in to_summarize[:5]:
head1, text = get_only_text(article_url)
#headlines='\n'.join(str(line.encode('ascii', 'ignore')) for line in summaries)
sentences_dic = st.get_senteces_ranks(text)
headlines=st.get_summary(title, content, sentences_dic)
with document(title='Analytics') as doc:
h1(head1)
#print headlines
h2(headlines)
with open('templates/analytics.html', 'w') as f:
f.write(doc.render())
return render_template('analytics.html')
Added changes9
"""
Individual pages
"""
from dominate import document
from dominate.tags import *
import urllib2
from bs4 import BeautifulSoup
from pyteaser import SummarizeUrl
from flask import (
Blueprint,
g,
render_template)
# Blueprint under which the analytics routes below are registered.
analytics = Blueprint('analytics', __name__)

# Front pages whose feeds are summarized by analytics_check().
urls = [
    'http://www.wired.com/',
    'http://www.nytimes.com/',
    'http://www.technologyreview.com/lists/technologies/2014/'
]

# NOTE(review): disabled prototype code kept as a bare string expression so
# it never executes; preserved verbatim below.
'''
from collections import defaultdict
from string import punctuation
import urllib2
from bs4 import BeautifulSoup
from summarytool import SummaryTool
global fs
fs = SummaryTool()
print fs.__dict__
@analytics.route('/')
def index():
"""main index page"""
return render_template('index2.html', pages=g.pages.sorted[:3])
'''
def get_only_text(url):
    """Fetch *url* and return (title, text).

    The text is every <p> element's content joined with single spaces.
    """
    page_bytes = urllib2.urlopen(url).read()
    soup = BeautifulSoup(page_bytes.decode('utf8'))
    body_text = ' '.join(p.text for p in soup.find_all('p'))
    return soup.title.text, body_text
@analytics.route('/analytics/')
def analytics_check():
"""about page"""
for url in urls:
feed_xml = urllib2.urlopen(url).read()
print feed_xml
feed = BeautifulSoup(feed_xml.decode('utf8'))
to_summarize = map(lambda p: p.text, feed.find_all('guid'))
for article_url in to_summarize[:5]:
head1, text = get_only_text(article_url)
#headlines='\n'.join(str(line.encode('ascii', 'ignore')) for line in summaries)
sentences_dic = st.get_senteces_ranks(text)
headlines=st.get_summary(title, content, sentences_dic)
with document(title='Analytics') as doc:
h1(head1)
#print headlines
h2(headlines)
with open('templates/analytics.html', 'w') as f:
f.write(doc.render())
return render_template('analytics.html')
|
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from threading import Lock
from flask import Flask, Response, jsonify, request, json
#from simplejson import JSONDecodeError
from sqlalchemy import *
from sqlalchemy.exc import *
from flasgger import Swagger
# Create Flask application
app = Flask(__name__)

# Configure Swagger before initializing it; flasgger reads this dict to
# serve the API spec at /v1/spec.
app.config['SWAGGER'] = {
    "swagger_version": "2.0",
    "specs": [
        {
            "version": "1.0.0",
            "title": "DevOps Swagger Recommendations App",
            "description": "This is a sample server Recommendations server.",
            "endpoint": 'v1_spec',
            "route": '/v1/spec'
        }
    ]
}

# Initialize Swagger after configuring it
Swagger(app)

# HTTP status codes returned by the handlers below.
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_204_NO_CONTENT = 204
HTTP_400_BAD_REQUEST = 400
HTTP_404_NOT_FOUND = 404
HTTP_409_CONFLICT = 409

# Runtime configuration from the environment: DEBUG defaults to off,
# PORT defaults to 5000.
debug = (os.getenv('DEBUG', 'False') == 'True')
port = os.getenv('PORT', '5000')

# Lock for thread-safe counter increment
lock = Lock()
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
    """Root URL: return the service name, version, and resource URL."""
    payload = {
        'name': 'Recommendations REST API Service',
        'version': '0.1',
        'url': request.base_url + "recommendations",
    }
    return jsonify(**payload), HTTP_200_OK
######################################################################
# LIST ALL PRODUCT RECOMMENDATIONS
# CAN USE type AND product-id KEYWORDS TO QUERY THE LIST
######################################################################
@app.route('/recommendations', methods=['GET'])
def list_recommendations():
    """
    Retrieve a list of Recommendations
    This endpoint will return all Recommendations unless a query parameter is specified
    ---
    tags:
      - Recommendations
    description: The Recommendations endpoint allows you to query Recommendations
    parameters:
      - name: type
        in: query
        description: the type of Recommendation you are looking for
        required: false
        type: string
      - name: product-id
        in: query
        description: the id of the product you would like recommendations for
        required: false
        type: int
    responses:
      200:
        description: An array of Recommendations
        schema:
          type: array
          items:
            schema:
              id: Recommendation
              properties:
                id:
                  type: integer
                  description: unique id assigned internally by service
                parent_product_id:
                  type: integer
                  description: unique id of the parent product
                related_product_id:
                  type: integer
                  description: unique id of the recommended product
                type:
                  type: string
                  description: the category of recommendation (e.g., up-sell, x-sell, etc.)
                priority:
                  type: integer
                  description: the priority of the recommendation (a lower number means higher priority)
    """
    message = []
    request_type = request.args.get('type')
    request_product_id = request.args.get('product-id')
    # Bug/security fix: the previous version interpolated the raw query-string
    # values straight into the SQL text, which both produced invalid SQL for
    # string-valued types (no quoting) and allowed SQL injection. Build a
    # parameterized query instead; `text` comes from the sqlalchemy import.
    # To mitigate query limit issues consider using session in sqlalchemy.
    query_str = 'SELECT * FROM recommendations'
    conditions = []
    params = {}
    if request_type:
        conditions.append('type = :rec_type')
        params['rec_type'] = request_type
    if request_product_id:
        conditions.append('parent_product_id = :product_id')
        params['product_id'] = request_product_id
    if conditions:
        query_str += ' WHERE ' + ' AND '.join(conditions)
    try:
        # `conn` is the module-level DB connection created elsewhere in
        # this file.
        results = conn.execute(text(query_str), **params)
    except OperationalError:
        results = []
    for rec in results:
        message.append({'id': rec[0],
                        'parent_product_id': rec[1],
                        'related_product_id': rec[2],
                        'type': rec[3],
                        'priority': rec[4]})
    return reply(message, HTTP_200_OK)
######################################################################
# RETRIEVE Recommendations for a given recommendations ID
######################################################################
@app.route('/recommendations/<int:id>', methods=['GET'])
def get_recommendations(id):
    """
    Retrieve a single Recommendation given a ID
    This endpoint will return a Recommendation based on it's id
    ---
    tags:
      - Recommendations
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of Recommendation to retrieve
        type: integer
        required: true
    responses:
      200:
        description: Recommendation returned
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
      404:
        description: Recommendation not found
    """
    # retrieve_by_id returns {} when no row matches the id.
    record = retrieve_by_id(id)
    if record:
        return reply(record, HTTP_200_OK)
    not_found = {'error': 'Recommendation with id: %s was not found' % str(id)}
    return reply(not_found, HTTP_404_NOT_FOUND)
######################################################################
# ADD A NEW PRODUCT RECOMMENDATION RELATIONSHIP
######################################################################
@app.route('/recommendations', methods=['POST'])
def create_recommendations():
    """
    Create a new product recommendation relationship
    This endpoint will create a recommendation based the data in the body that is posted
    ---
    tags:
      - Recommendation
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - in: body
        name: body
        required: true
        schema:
          id: data
          required:
            - priority
            - related_product_id
            - type
            - parent_product_id
          properties:
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
    responses:
      201:
        description: Recommendation created
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
      400:
        description: Bad Request (the posted data was not valid)
    """
    message, valid = is_valid(request.get_data())
    if valid:
        payload = json.loads(request.get_data())
        id = next_index()
        # Parameterized INSERT: the driver escapes and quotes each value,
        # preventing SQL injection via the posted payload (the original
        # interpolated the values straight into the statement).
        conn.execute("INSERT INTO recommendations VALUES (%s, %s, %s, %s, %s)",
                     (id,
                      payload['parent_product_id'],
                      payload['related_product_id'],
                      payload['type'],
                      payload['priority']))
        message = retrieve_by_id(id)
        rc = HTTP_201_CREATED
    else:
        # is_valid already produced the error message for the client.
        rc = HTTP_400_BAD_REQUEST
    return reply(message, rc)
######################################################################
# UPDATE AN EXISTINT RECOMMENDATION RELATIONSHIP
######################################################################
@app.route('/recommendations/<int:id>', methods=['PUT'])
def update_recommendations(id):
    """
    Given a Recommendation ID, update the columns as from the payload
    This endpoint will update a Recommendation based the body that is posted
    ---
    tags:
      - Recommendations
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of recommendation to retrieve
        type: integer
        required: true
      - in: body
        name: body
        required: true
        schema:
          id: data
          required:
            - priority
            - related_product_id
            - type
            - parent_product_id
          properties:
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
    responses:
      200:
        description: Recommendation Updated
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
      400:
        description: Bad Request (the posted data was not valid)
    """
    # 404 before validating the body, matching the GET endpoint's behavior.
    if get_recommendations(id).status_code == 404:
        message = {'error': 'Recommendation with id: %s was not found' % str(id)}
        return reply(message, HTTP_404_NOT_FOUND)
    message, valid = is_valid(request.get_data())
    if not valid:
        return reply(message, HTTP_400_BAD_REQUEST)
    payload = json.loads(request.get_data())
    # Parameterized UPDATE: values are bound by the driver, preventing SQL
    # injection and avoiding the original's MySQL-incompatible "..." quoting.
    conn.execute("UPDATE recommendations SET type=%s, priority=%s "
                 "WHERE parent_product_id=%s AND related_product_id=%s AND id=%s",
                 (payload['type'],
                  payload['priority'],
                  payload['parent_product_id'],
                  payload['related_product_id'],
                  id))
    # Return the freshly-updated row (reuses the GET endpoint's response).
    return get_recommendations(id)
######################################################################
# DELETE A PRODUCT RECOMMENDATION
######################################################################
@app.route('/recommendations/<int:id>', methods=['DELETE'])
def delete_recommendations(id):
    """
    Delete a single recommendation
    This endpoint will delete a recommendation based on the id that is specificed
    ---
    tags:
      - Recommendations
    description: Delete a recommendation
    parameters:
      - name: id
        in: path
        description: ID of recommendation to be delete_recommendations
        type: integer
        required: true
    responses:
      204:
        description: recommendation deleted
    """
    # DELETE is idempotent: a missing id still yields 204 with no body.
    exists = (get_recommendations(id).status_code == 200)
    if exists:
        conn.execute("DELETE FROM recommendations WHERE id=%d" % id)
    return '', HTTP_204_NO_CONTENT
######################################################################
# ACTION - UPDATE PRIORITY WHEN RECOMMENDATION IS CLICKED
######################################################################
@app.route('/recommendations/<int:id>/clicked', methods=['PUT'])
def increase_priority(id):
    """
    Raise the priority of the recommendation that was clicked.

    Decrements the priority number (a lower number means a higher
    priority) of the recommendation with the given id, never going
    below 1.  Returns 404 when the id does not exist.
    """
    if get_recommendations(id).status_code == 404:
        message = {'error': 'Recommendation with id: %s was not found' % str(id)}
        return reply(message, HTTP_404_NOT_FOUND)
    # Bound parameter keeps the statement style consistent with the other
    # endpoints; priority>1 guards the floor of 1.
    conn.execute("UPDATE recommendations "
                 "SET priority = priority - 1 "
                 "WHERE id=%s AND priority>1",
                 (id,))
    return reply(None, HTTP_200_OK)
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def next_index():
    """Return the next unused recommendation id (1 when the table is empty)."""
    max_id_result = conn.execute("select max(id) from recommendations")
    max_id = list(max_id_result)[0][0]
    # MAX(id) is NULL (None in Python) when the table has no rows; the
    # original crashed here with "None + 1".
    return 1 if max_id is None else max_id + 1
def reply(message, rc):
    """Serialize *message* as JSON and wrap it in a Response with status *rc*."""
    body = json.dumps(message)
    response = Response(body)
    response.status_code = rc
    response.headers['Content-Type'] = 'application/json'
    return response
def is_valid(raw_data):
    """
    Validate a raw JSON payload for a recommendation.

    Returns a (message, valid) tuple: ("", True) on success, or
    ({'error': ...}, False) describing the first problem found.
    """
    try:
        data = json.loads(raw_data)
    except (ValueError, TypeError):
        # ValueError covers malformed JSON; TypeError covers non-string
        # input.  (The original bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit.)
        return {'error': 'JSON decoding error'}, False
    if set(data.keys()) != set(['priority', 'related_product_id', 'parent_product_id', 'type']):
        return {'error': 'key set does not match'}, False
    try:
        # Value check only: int 1 and string "1" are both accepted,
        # matching the original behavior.
        int(data['priority'])
        int(data['related_product_id'])
        int(data['parent_product_id'])
    except ValueError as err:
        return {'error': 'Data value error: %s' % err}, False
    except TypeError as err:
        return {'error': 'Data type error: %s' % err}, False
    return "", True
def retrieve_by_id(id):
    """Fetch one recommendation row by id as a dict ({} when absent)."""
    result = conn.execute("SELECT * FROM recommendations WHERE id=%d" % (int(id)))
    record = {}
    # id is the primary key, so at most one row is produced.
    for row in result:
        record = {"id": row[0],
                  "parent_product_id": row[1],
                  "related_product_id": row[2],
                  "type": row[3],
                  "priority": row[4]}
    return record
######################################################################
# Connect to MySQL and catch connection exceptions
######################################################################
def connect_mysql(user, passwd, server, port, database):
    """Open and return a SQLAlchemy connection to the given MySQL database."""
    url = "mysql://%s:%s@%s:%s/%s" % (user, passwd, server, port, database)
    return create_engine(url, echo=False).connect()
######################################################################
# INITIALIZE MySQL
# This method will work in the following conditions:
# 1) In Bluemix with cleardb bound through VCAP_SERVICES
# 2) With MySQL --linked in a Docker container in virtual machine
######################################################################
def initialize_mysql(test=False):
    """
    Establish the global MySQL connection `conn`.

    Works in two environments:
      1) Bluemix, with ClearDB bound through VCAP_SERVICES
      2) locally, against a --linked Docker MySQL (hostname 'mysql')
         or 127.0.0.1 as a fallback
    When test=True, (re)creates a fresh `recommendations` table in the
    'tdd' database instead of using 'nyudevops'.
    """
    global conn
    conn = None
    # Get the credentials from the Bluemix environment, if bound.
    if 'VCAP_SERVICES' in os.environ:
        print("Using VCAP_SERVICES...")
        VCAP_SERVICES = os.environ['VCAP_SERVICES']
        services = json.loads(VCAP_SERVICES)
        creds = services['cleardb'][0]['credentials']
        print("Conecting to Mysql on host %s port %s" % (creds['hostname'], creds['port']))
        conn = connect_mysql(creds['username'], creds['password'], creds['hostname'], creds['port'], creds['name'])
    else:
        print("VCAP_SERVICES not found, checking localhost for MySQL")
        # A reachable 'mysql' host means we are inside a Docker network.
        response = os.system("ping -c 1 mysql")
        if response == 0:
            mysql_hostname = 'mysql'
        else:
            mysql_hostname = '127.0.0.1'
        if test:
            engine = create_engine("mysql://%s:%s@%s:%s/%s" % ('root', '', mysql_hostname, 3306, 'tdd'), echo=False)
            meta = MetaData()
            recommendations = Table('recommendations', meta,
                                    Column('id', Integer, nullable=False, primary_key=True),
                                    Column('parent_product_id', Integer, nullable=False),
                                    Column('related_product_id', Integer, nullable=False),
                                    Column('type', String(20), nullable=False),
                                    Column('priority', Integer, nullable=True)
                                    )
            try:
                # Best-effort drop of a leftover table; narrowed from a bare
                # "except:" so Ctrl-C/SystemExit are no longer swallowed.
                recommendations.drop(engine, checkfirst=True)
            except Exception:
                pass
            recommendations.create(engine, checkfirst=True)
            conn = engine.connect()
        else:
            conn = connect_mysql('root', '', mysql_hostname, 3306, 'nyudevops')
######################################################################
# M A I N
######################################################################
if __name__ == "__main__":
    # print() runs on both Python 2 and 3; the bare print statement used
    # before is a SyntaxError on Python 3 (the rest of the file already
    # uses print()).
    print("Recommendations Service Starting...")
    initialize_mysql()
    # Pull options from environment
    app.run(host='0.0.0.0', port=int(port), debug=debug)
# added swagger docs for clicked recommendation
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from threading import Lock
from flask import Flask, Response, jsonify, request, json
#from simplejson import JSONDecodeError
from sqlalchemy import *
from sqlalchemy.exc import *
from flasgger import Swagger
# Create Flask application
app = Flask(__name__)
# Configure Swagger before initializing it
app.config['SWAGGER'] = {
    "swagger_version": "2.0",
    "specs": [
        {
            "version": "1.0.0",
            "title": "DevOps Swagger Recommendations App",
            "description": "This is a sample server Recommendations server.",
            "endpoint": 'v1_spec',
            "route": '/v1/spec'
        }
    ]
}
# Initialize Swagger after configuring it
Swagger(app)
# Status Codes returned by the REST endpoints below
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_204_NO_CONTENT = 204
HTTP_400_BAD_REQUEST = 400
HTTP_404_NOT_FOUND = 404
HTTP_409_CONFLICT = 409
# Runtime options pulled from the environment (strings by default)
debug = (os.getenv('DEBUG', 'False') == 'True')
port = os.getenv('PORT', '5000')
# Lock for thread-safe counter increment
lock = Lock()
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
    """Root endpoint: report the service name, version, and collection URL."""
    collection_url = request.base_url + "recommendations"
    body = jsonify(name='Recommendations REST API Service',
                   version='0.1',
                   url=collection_url)
    return body, HTTP_200_OK
######################################################################
# LIST ALL PRODUCT RECOMMENDATIONS
# CAN USE type AND product-id KEYWORDS TO QUERY THE LIST
######################################################################
@app.route('/recommendations', methods=['GET'])
def list_recommendations():
    """
    Retrieve a list of Recommendations
    This endpoint will return all Recommendations unless a query parameter is specificed
    ---
    tags:
      - Recommendations
    description: The Recommendations endpoint allows you to query Recommendations
    parameters:
      - name: type
        in: query
        description: the type of Recommendation you are looking for
        required: false
        type: string
      - name: product-id
        in: query
        description: the id of the product you would like recommendations for
        required: false
        type: int
    responses:
      200:
        description: An array of Recommendations
        schema:
          type: array
          items:
            schema:
              id: Recommendation
              properties:
                id:
                  type: integer
                  description: unique id assigned internally by service
                parent_product_id:
                  type: integer
                  description: unique id of the parent product
                related_product_id:
                  type: integer
                  description: unique id of the recommended product
                type:
                  type: string
                  description: the category of recommendation (e.g., up-sell, x-sell, etc.)
                priority:
                  type: integer
                  description: the priority of the recommendation (a lower number means higher priority)
    """
    message = []
    request_type = request.args.get('type')
    request_product_id = request.args.get('product-id')
    # Build the WHERE clause with bound parameters: the driver quotes and
    # escapes the values, which both prevents SQL injection and fixes the
    # original bug where an unquoted string value produced invalid SQL.
    query_str = 'SELECT * FROM recommendations'
    filters = []
    params = []
    if request_type:
        filters.append('type=%s')
        params.append(request_type)
    if request_product_id:
        filters.append('parent_product_id=%s')
        params.append(request_product_id)
    if filters:
        query_str += ' WHERE ' + ' AND '.join(filters)
    # To mitigate query limit issues consider using session in sqlalchemy
    try:
        if params:
            results = conn.execute(query_str, tuple(params))
        else:
            results = conn.execute(query_str)
    except OperationalError:
        results = []
    for rec in results:
        message.append({'id': rec[0],
                        'parent_product_id': rec[1],
                        'related_product_id': rec[2],
                        'type': rec[3],
                        'priority': rec[4]})
    return reply(message, HTTP_200_OK)
######################################################################
# RETRIEVE Recommendations for a given recommendations ID
######################################################################
@app.route('/recommendations/<int:id>', methods=['GET'])
def get_recommendations(id):
    """
    Retrieve a single Recommendation given a ID
    This endpoint will return a Recommendation based on it's id
    ---
    tags:
      - Recommendations
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of Recommendation to retrieve
        type: integer
        required: true
    responses:
      200:
        description: Recommendation returned
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
      404:
        description: Recommendation not found
    """
    # retrieve_by_id returns {} when no row matches the id.
    record = retrieve_by_id(id)
    if record:
        return reply(record, HTTP_200_OK)
    not_found = {'error': 'Recommendation with id: %s was not found' % str(id)}
    return reply(not_found, HTTP_404_NOT_FOUND)
######################################################################
# ADD A NEW PRODUCT RECOMMENDATION RELATIONSHIP
######################################################################
@app.route('/recommendations', methods=['POST'])
def create_recommendations():
    """
    Create a new product recommendation relationship
    This endpoint will create a recommendation based the data in the body that is posted
    ---
    tags:
      - Recommendation
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - in: body
        name: body
        required: true
        schema:
          id: data
          required:
            - priority
            - related_product_id
            - type
            - parent_product_id
          properties:
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
    responses:
      201:
        description: Recommendation created
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
      400:
        description: Bad Request (the posted data was not valid)
    """
    message, valid = is_valid(request.get_data())
    if valid:
        payload = json.loads(request.get_data())
        # Serialize id allocation + insert so two concurrent POSTs cannot
        # pick the same id (the module-level lock exists for this purpose).
        with lock:
            id = next_index()
            # Parameterized INSERT: the driver escapes and quotes each value,
            # preventing SQL injection via the posted payload (the original
            # interpolated the values straight into the statement).
            conn.execute("INSERT INTO recommendations VALUES (%s, %s, %s, %s, %s)",
                         (id,
                          payload['parent_product_id'],
                          payload['related_product_id'],
                          payload['type'],
                          payload['priority']))
        message = retrieve_by_id(id)
        rc = HTTP_201_CREATED
    else:
        # is_valid already produced the error message for the client.
        rc = HTTP_400_BAD_REQUEST
    return reply(message, rc)
######################################################################
# UPDATE AN EXISTINT RECOMMENDATION RELATIONSHIP
######################################################################
@app.route('/recommendations/<int:id>', methods=['PUT'])
def update_recommendations(id):
    """
    Given a Recommendation ID, update the columns as from the payload
    This endpoint will update a Recommendation based the body that is posted
    ---
    tags:
      - Recommendations
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of recommendation to retrieve
        type: integer
        required: true
      - in: body
        name: body
        required: true
        schema:
          id: data
          required:
            - priority
            - related_product_id
            - type
            - parent_product_id
          properties:
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
    responses:
      200:
        description: Recommendation Updated
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            parent_product_id:
              type: integer
              description: unique id of the parent product
            related_product_id:
              type: integer
              description: unique id of the recommended product
            type:
              type: string
              description: the category of recommendation (e.g., up-sell, x-sell, etc.)
            priority:
              type: integer
              description: the priority of the recommendation (a lower number means higher priority)
      400:
        description: Bad Request (the posted data was not valid)
    """
    # 404 before validating the body, matching the GET endpoint's behavior.
    if get_recommendations(id).status_code == 404:
        message = {'error': 'Recommendation with id: %s was not found' % str(id)}
        return reply(message, HTTP_404_NOT_FOUND)
    message, valid = is_valid(request.get_data())
    if not valid:
        return reply(message, HTTP_400_BAD_REQUEST)
    payload = json.loads(request.get_data())
    # Parameterized UPDATE: values are bound by the driver, preventing SQL
    # injection and avoiding the original's MySQL-incompatible "..." quoting.
    conn.execute("UPDATE recommendations SET type=%s, priority=%s "
                 "WHERE parent_product_id=%s AND related_product_id=%s AND id=%s",
                 (payload['type'],
                  payload['priority'],
                  payload['parent_product_id'],
                  payload['related_product_id'],
                  id))
    # Return the freshly-updated row (reuses the GET endpoint's response).
    return get_recommendations(id)
######################################################################
# DELETE A PRODUCT RECOMMENDATION
######################################################################
@app.route('/recommendations/<int:id>', methods=['DELETE'])
def delete_recommendations(id):
    """
    Delete a single recommendation
    This endpoint will delete a recommendation based on the id that is specificed
    ---
    tags:
      - Recommendations
    description: Delete a recommendation
    parameters:
      - name: id
        in: path
        description: ID of recommendation to be delete_recommendations
        type: integer
        required: true
    responses:
      204:
        description: recommendation deleted
    """
    # DELETE is idempotent: a missing id still yields 204 with no body.
    exists = (get_recommendations(id).status_code == 200)
    if exists:
        conn.execute("DELETE FROM recommendations WHERE id=%d" % id)
    return '', HTTP_204_NO_CONTENT
######################################################################
# ACTION - UPDATE PRIORITY WHEN RECOMMENDATION IS CLICKED
######################################################################
@app.route('/recommendations/<int:id>/clicked', methods=['PUT'])
def increase_priority(id):
    """
    Given a recommendation ID, update the priority as from the payload
    This endpoint will increase the priority of a recommmendation based on the ID specificed
    ---
    tags:
      - Recommendations
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of recommendation to retrieve
        type: integer
        required: true
    responses:
      200:
        description: Recommendation priority Updated
        schema:
          id: Recommendation
          properties:
            id:
              type: integer
              description: unique id assigned to recommendation
            priority:
              type: integer
              description: priority of the recommendation (lower the number, higher the priority)
      404:
        description: Recommendation not found
    """
    if get_recommendations(id).status_code == 404:
        message = {'error': 'Recommendation with id: %s was not found' % str(id)}
        return reply(message, HTTP_404_NOT_FOUND)
    # Decrement the priority number (a lower number means a higher
    # priority), never going below 1.  The explanation previously sat in a
    # stray string literal after the 404 check, which was dead code.
    conn.execute("UPDATE recommendations "
                 "SET priority = priority - 1 "
                 "WHERE id=%s AND priority>1",
                 (id,))
    return reply(None, HTTP_200_OK)
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def next_index():
    """Return the next unused recommendation id (1 when the table is empty)."""
    max_id_result = conn.execute("select max(id) from recommendations")
    max_id = list(max_id_result)[0][0]
    # MAX(id) is NULL (None in Python) when the table has no rows; the
    # original crashed here with "None + 1".
    return 1 if max_id is None else max_id + 1
def reply(message, rc):
    """Serialize *message* as JSON and wrap it in a Response with status *rc*."""
    body = json.dumps(message)
    response = Response(body)
    response.status_code = rc
    response.headers['Content-Type'] = 'application/json'
    return response
def is_valid(raw_data):
    """
    Validate a raw JSON payload for a recommendation.

    Returns a (message, valid) tuple: ("", True) on success, or
    ({'error': ...}, False) describing the first problem found.
    """
    try:
        data = json.loads(raw_data)
    except (ValueError, TypeError):
        # ValueError covers malformed JSON; TypeError covers non-string
        # input.  (The original bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit.)
        return {'error': 'JSON decoding error'}, False
    if set(data.keys()) != set(['priority', 'related_product_id', 'parent_product_id', 'type']):
        return {'error': 'key set does not match'}, False
    try:
        # Value check only: int 1 and string "1" are both accepted,
        # matching the original behavior.
        int(data['priority'])
        int(data['related_product_id'])
        int(data['parent_product_id'])
    except ValueError as err:
        return {'error': 'Data value error: %s' % err}, False
    except TypeError as err:
        return {'error': 'Data type error: %s' % err}, False
    return "", True
def retrieve_by_id(id):
    """Fetch one recommendation row by id as a dict ({} when absent)."""
    result = conn.execute("SELECT * FROM recommendations WHERE id=%d" % (int(id)))
    record = {}
    # id is the primary key, so at most one row is produced.
    for row in result:
        record = {"id": row[0],
                  "parent_product_id": row[1],
                  "related_product_id": row[2],
                  "type": row[3],
                  "priority": row[4]}
    return record
######################################################################
# Connect to MySQL and catch connection exceptions
######################################################################
def connect_mysql(user, passwd, server, port, database):
    """Open and return a SQLAlchemy connection to the given MySQL database."""
    url = "mysql://%s:%s@%s:%s/%s" % (user, passwd, server, port, database)
    return create_engine(url, echo=False).connect()
######################################################################
# INITIALIZE MySQL
# This method will work in the following conditions:
# 1) In Bluemix with cleardb bound through VCAP_SERVICES
# 2) With MySQL --linked in a Docker container in virtual machine
######################################################################
def initialize_mysql(test=False):
    """
    Establish the global MySQL connection `conn`.

    Works in two environments:
      1) Bluemix, with ClearDB bound through VCAP_SERVICES
      2) locally, against a --linked Docker MySQL (hostname 'mysql')
         or 127.0.0.1 as a fallback
    When test=True, (re)creates a fresh `recommendations` table in the
    'tdd' database instead of using 'nyudevops'.
    """
    global conn
    conn = None
    # Get the credentials from the Bluemix environment, if bound.
    if 'VCAP_SERVICES' in os.environ:
        print("Using VCAP_SERVICES...")
        VCAP_SERVICES = os.environ['VCAP_SERVICES']
        services = json.loads(VCAP_SERVICES)
        creds = services['cleardb'][0]['credentials']
        print("Conecting to Mysql on host %s port %s" % (creds['hostname'], creds['port']))
        conn = connect_mysql(creds['username'], creds['password'], creds['hostname'], creds['port'], creds['name'])
    else:
        print("VCAP_SERVICES not found, checking localhost for MySQL")
        # A reachable 'mysql' host means we are inside a Docker network.
        response = os.system("ping -c 1 mysql")
        if response == 0:
            mysql_hostname = 'mysql'
        else:
            mysql_hostname = '127.0.0.1'
        if test:
            engine = create_engine("mysql://%s:%s@%s:%s/%s" % ('root', '', mysql_hostname, 3306, 'tdd'), echo=False)
            meta = MetaData()
            recommendations = Table('recommendations', meta,
                                    Column('id', Integer, nullable=False, primary_key=True),
                                    Column('parent_product_id', Integer, nullable=False),
                                    Column('related_product_id', Integer, nullable=False),
                                    Column('type', String(20), nullable=False),
                                    Column('priority', Integer, nullable=True)
                                    )
            try:
                # Best-effort drop of a leftover table; narrowed from a bare
                # "except:" so Ctrl-C/SystemExit are no longer swallowed.
                recommendations.drop(engine, checkfirst=True)
            except Exception:
                pass
            recommendations.create(engine, checkfirst=True)
            conn = engine.connect()
        else:
            conn = connect_mysql('root', '', mysql_hostname, 3306, 'nyudevops')
######################################################################
# M A I N
######################################################################
if __name__ == "__main__":
    # print() runs on both Python 2 and 3; the bare print statement used
    # before is a SyntaxError on Python 3 (the rest of the file already
    # uses print()).
    print("Recommendations Service Starting...")
    initialize_mysql()
    # Pull options from environment
    app.run(host='0.0.0.0', port=int(port), debug=debug)
# ----------------------------------------------------------------------
#!/usr/bin/env python
# from datetime import datetime, date
from time import sleep
from argparse import ArgumentParser
import logging
from pyepm import api, config, __version__
from bitcoin import * # NOQA
# Network identifiers understood by blockr / pybitcointools helpers.
BITCOIN_MAINNET = 'btc'
BITCOIN_TESTNET = 'testnet'
SLEEP_TIME = 5 * 60  # 5 mins. If changing, check retry logic
GAS_FOR_STORE_HEADERS = 1200000  # it should take less than 1M gas, but buffer to avoid running out
CHUNK_SIZE = 5  # number of headers to fetch at a time
CHUNK_RANGE = range(CHUNK_SIZE)
# Shared PyEPM API client; main() fills in the connection details from CLI args.
api_config = config.read_config()
instance = api.Api(api_config)
logging.basicConfig(format='%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Quiet PyEPM's own logger down to INFO.
pyepmLogger = logging.getLogger("pyepm")
pyepmLogger.setLevel(logging.INFO)
# instance.address = "0xcd2a3d9f938e13cd947ec05abc7fe734df8dd826"
# instance.relayContract = "0xba164d1e85526bd5e27fd15ad14b0eae91c45a93"
# TESTNET relay: 0x142f674e911cc55c226af81ac4d6de0a671d4abf
instance.walletContract = '0xdc056dc1a79f6a07b28d351ceb084852f88febbb'  # Morden
instance.ethDaily = int(1e18)  # 1 ETH
# When useWallet is set, the sender is expected to be an owner of walletContract.
useWallet = True
aWalletOwner = '0xd005c515db902b1b77beb98370ba1f16b3111d7b'
def main():
    """
    Entry point: parse CLI args, configure the relay API instance, and run
    the header fetcher either once or in a retrying daemon loop.
    """
    import sys  # used by the catch-all handler below; not imported at module level

    # logging.basicConfig(level=logging.DEBUG)
    logger.info("fetchd using PyEPM %s" % __version__)
    parser = ArgumentParser()
    parser.add_argument('-s', '--sender', required=True, help='sender of transaction')
    parser.add_argument('-r', '--relay', required=True, help='relay contract address')
    parser.add_argument('--rpcHost', default='127.0.0.1', help='RPC hostname')
    parser.add_argument('--rpcPort', default='8545', type=int, help='RPC port')
    parser.add_argument('--startBlock', default=0, type=int, help='block number to start fetching from')
    parser.add_argument('-w', '--waitFor', default=0, type=int, help='number of blocks to wait between fetches')
    parser.add_argument('--gasPrice', default=int(10e12), type=int, help='gas price')  # default 10 szabo
    parser.add_argument('--fetch', action='store_true', help='fetch blockheaders')
    parser.add_argument('-n', '--network', default=BITCOIN_TESTNET, choices=[BITCOIN_TESTNET, BITCOIN_MAINNET], help='Bitcoin network')
    parser.add_argument('-d', '--daemon', default=False, action='store_true', help='run as daemon')
    parser.add_argument('--feeVTX', default=0, type=int, help='fee to charge for verifications')
    parser.add_argument('--feeRecipient', help='address of fee recipient')
    args = parser.parse_args()

    instance.address = args.sender
    instance.relayContract = args.relay
    instance.rpcHost = args.rpcHost
    instance.rpcPort = args.rpcPort
    instance.jsonrpc_url = "http://%s:%s" % (instance.rpcHost, instance.rpcPort)
    instance.numBlocksToWait = args.waitFor  # for CPP eth as of Apr 28, 3 blocks seems reasonable. 0 seems to be fine for Geth
    # instance.gasPrice = args.gasPrice

    feeVerifyTx = args.feeVTX
    logger.info('feeVTX: %s' % feeVerifyTx)

    # Warn (but continue) when the configured sender cannot use the wallet.
    if useWallet and instance.address != aWalletOwner:
        logger.info('sender is not a wallet owner: %s' % instance.address)

    feeRecipient = args.feeRecipient or instance.address
    logger.info('feeRecipient: %s' % feeRecipient)

    # logger.info('@@@ rpc: %s' % instance.jsonrpc_url)
    # this can't be commented out easily since run() always does instance.heightToStartFetch = getLastBlockHeight() + 1 for retries
    # contractHeight = getLastBlockHeight()  # needs instance.relayContract to be set
    # logger.info('@@@ contract height: {0} gp: {1}').format(contractHeight, instance.gasPrice)
    # instance.heightToStartFetch = args.startBlock or contractHeight + 1

    # this will not handle exceptions or do retries. need to use -d switch if desired
    if not args.daemon:
        run(feeVerifyTx, feeRecipient, doFetch=args.fetch, network=args.network, startBlock=args.startBlock)
        return

    while True:
        # Up to 4 attempts per cycle: known exceptions retry after 1 minute;
        # anything else (e.g. KeyboardInterrupt) is logged and re-raised.
        for i in range(4):
            try:
                run(feeVerifyTx, feeRecipient, doFetch=args.fetch, network=args.network, startBlock=args.startBlock)
                sleep(SLEEP_TIME)
            except Exception as e:
                logger.info(e)
                logger.info('Retry in 1min')
                sleep(60)
                continue
            except:  # catch *all* exceptions
                # The original referenced sys without importing it, which
                # raised NameError here; sys is now imported above.
                e = sys.exc_info()[0]
                logger.info(e)
                logger.info('Rare exception')
                raise
            break
def run(feeVerifyTx, feeRecipient, doFetch=False, network=BITCOIN_TESTNET, startBlock=0):
    """One fetch/relay pass: reconcile the relay contract's HEAD with the
    Bitcoin chain reported by the block explorer (handling orphaned
    headers), then optionally fetch and store any missing headers.

    feeVerifyTx  -- fee to charge for verifications (0 disables fee mode)
    feeRecipient -- address that receives the fee
    doFetch      -- when True, actually store the missing headers
    network      -- Bitcoin network id passed to the blockr_* helpers
    startBlock   -- when non-zero, overrides the contract-reported height

    Raises ValueError when the contract returns an empty blockchain head.
    """
    chainHead = getBlockchainHead()
    if not chainHead:
        raise ValueError("Empty BlockchainHead returned.")
    chainHead = blockHashHex(chainHead)
    logger.info('BTC BlockchainHead: %s' % chainHead)

    # loop in case contract stored correct HEAD, but reorg in *Ethereum* chain
    # so that contract lost the correct HEAD. we try 3 times since it would
    # be quite unlucky for 5 Ethereum reorgs to coincide with storing the
    # non-orphaned Bitcoin block
    nTime = 5
    for i in range(nTime):
        # refetch if needed in case contract's HEAD was orphaned
        if startBlock:
            contractHeight = startBlock
        else:
            contractHeight = getLastBlockHeight()
        realHead = blockr_get_block_header_data(contractHeight, network=network)['hash']
        heightToRefetch = contractHeight
        # Walk back one header at a time until the contract's HEAD matches the
        # hash the explorer reports for that height.
        while chainHead != realHead:
            logger.info('@@@ chainHead: {0} realHead: {1}'.format(chainHead, realHead))
            fetchHeaders(heightToRefetch, 1, 1, feeVerifyTx, feeRecipient, network=network)
            # wait for some blocks because Geth has a delay (at least in RPC), of
            # returning the correct data. the non-orphaned header may already
            # be in the Ethereum blockchain, so we should give it a chance before
            # adjusting realHead to the previous parent
            #
            # realHead is adjusted to previous parent in the off-chance that
            # there is more than 1 orphan block
            # for j in range(4):
            instance.wait_for_next_block(from_block=instance.last_block(), verbose=True)
            chainHead = blockHashHex(getBlockchainHead())
            realHead = blockr_get_block_header_data(heightToRefetch, network=network)['hash']
            heightToRefetch -= 1

            if heightToRefetch < contractHeight - 10:
                if i == nTime - 1:
                    # this really shouldn't happen since 2 orphans are already
                    # rare, let alone 10
                    logger.info('@@@@ TERMINATING big reorg? {0}'.format(heightToRefetch))
                    sys.exit()
                else:
                    logger.info('@@@@ handle orphan did not succeed iteration {0}'.format(i))
                    break  # start the refetch again, this time ++i
        break  # chainHead is same realHead

    actualHeight = last_block_height(network)  # pybitcointools 1.1.33
    if startBlock:
        instance.heightToStartFetch = startBlock
    else:
        instance.heightToStartFetch = getLastBlockHeight() + 1
    logger.info('@@@ startFetch: {0} actualHeight: {1}'.format(instance.heightToStartFetch, actualHeight))

    # Split the remaining headers into CHUNK_SIZE batches plus a leftover tail.
    chunkSize = CHUNK_SIZE
    fetchNum = actualHeight - instance.heightToStartFetch + 1
    numChunk = fetchNum / chunkSize  # NOTE: Python 2 integer division
    leftoverToFetch = fetchNum % chunkSize
    logger.info('@@@ numChunk: {0} leftoverToFetch: {1}'.format(numChunk, fetchNum))
    logger.info('----------------------------------')

    if doFetch:
        fetchHeaders(instance.heightToStartFetch, chunkSize, numChunk, feeVerifyTx, feeRecipient, network=network)
        # Fetch the remaining (< chunkSize) headers one at a time.
        fetchHeaders(actualHeight - leftoverToFetch + 1, 1, leftoverToFetch, feeVerifyTx, feeRecipient, network=network)
    # sys.exit()
def fetchHeaders(chunkStartNum, chunkSize, numChunk, feeVerifyTx, feeRecipient, network=BITCOIN_TESTNET):
    """Fetch numChunk batches of chunkSize Bitcoin headers starting at height
    chunkStartNum and store each batch in the relay contract.

    Periodically (roughly every 100 headers) checks the sender's ETH balance
    and tops it up from the wallet contract when below 1 ETH.
    """
    for j in range(numChunk):
        strings = ""
        # Concatenate chunkSize serialized headers (hex strings) into one payload.
        for i in range(chunkSize):
            blockNum = chunkStartNum + i
            bhJson = blockr_get_block_header_data(blockNum, network=network)
            bhStr = serialize_header(bhJson)
            logger.info("@@@ {0}: {1}".format(blockNum, bhStr))
            logger.debug("Block header: %s" % repr(bhStr.decode('hex')))  # Python 2 hex codec
            strings += bhStr
        storeHeaders(strings.decode('hex'), chunkSize, feeVerifyTx, feeRecipient)

        chainHead = getBlockchainHead()
        logger.info('@@@ DONE hexHead: %s' % blockHashHex(chainHead))
        logger.info('==================================')

        chunkStartNum += chunkSize
        # average of 6*24=144 headers a day. So AROUND every 100 headers we check
        # the balance of sender and if it's less than 1 ETH, we ask for more ETH
        # from the wallet.
        # CHUNK_RANGE is used so that we ask for ETH if heightToStartFetch ends in
        # ????00, ????01, ????02 to ????04
        if chunkStartNum % 100 in CHUNK_RANGE and useWallet:
            myWei = instance.balance_at(instance.address)
            myBalance = myWei / 1e18  # wei -> ETH
            logger.info('myBalance ETH: %s' % myBalance)
            if myBalance < 1:
                logger.info('going to walletWithdraw')
                walletWithdraw()
                myWei = instance.balance_at(instance.address)
                myBalance = myWei / 1e18
                logger.info('topped up ETH balance: %s' % myBalance)
def storeHeaders(bhBytes, chunkSize, feeVerifyTx, feeRecipient):
    """Store chunkSize raw 80-byte headers (concatenated in bhBytes) in the
    relay contract, re-submitting each transaction until it is seen pending
    and then mined.

    When feeVerifyTx is 0 all headers go in a single bulkStoreHeader call;
    otherwise each header is stored individually via
    storeBlockWithFeeAndRecipient.
    """
    txCount = instance.transaction_count(defaultBlock='pending')
    logger.info('----------------------------------')
    logger.info('txCount: %s' % txCount)

    # Log the double-SHA256 hashes and raw hex of the first and last headers
    # (Bitcoin headers are fixed 80-byte records; hashes are byte-reversed).
    hashOne = blockHashHex(int(bin_dbl_sha256(bhBytes[:80])[::-1].encode('hex'), 16))
    hashLast = blockHashHex(int(bin_dbl_sha256(bhBytes[-80:])[::-1].encode('hex'), 16))
    logger.info('hashOne: %s' % hashOne)
    logger.info('hashLast: %s' % hashLast)

    firstH = bhBytes[:80].encode('hex')
    lastH = bhBytes[-80:].encode('hex')
    logger.info('firstH: %s' % firstH)
    logger.info('lastH: %s' % lastH)

    sig = 'bulkStoreHeader:[bytes,int256]:int256'
    data = [bhBytes, chunkSize]
    gas = GAS_FOR_STORE_HEADERS
    value = 0

    #
    # Store the headers
    #
    if feeVerifyTx != 0:
        sig = 'storeBlockWithFeeAndRecipient:[bytes,int256,int256]:int256'

    for i in range(chunkSize):
        if feeVerifyTx != 0:
            # Fee mode submits one 80-byte header per transaction.
            offset = 80*i
            data = [ bhBytes[offset:offset+80] , feeVerifyTx, feeRecipient]

        # Wait for the transaction and retry if failed
        txHash = instance.transact(instance.relayContract, sig=sig, data=data, gas=gas, value=value)
        logger.info("store header txHash: %s" % txHash)
        txResult = False
        while txResult is False:
            txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="pending", retry=30, verbose=True)
            if txResult is False:
                txHash = instance.transact(instance.relayContract, sig=sig, data=data, gas=gas, value=value)

        # Wait for the transaction to be mined and retry if failed
        txResult = False
        while txResult is False:
            txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="latest", retry=60, verbose=True)
            if txResult is False:
                txHash = instance.transact(instance.relayContract, sig=sig, data=data, gas=gas, value=value)

        if feeVerifyTx == 0:
            # Non-fee mode already sent everything in the single bulk call.
            break

    # Sanity check: the contract's new head should be the hash of the last
    # header just submitted.
    chainHead = getBlockchainHead()
    expHead = int(bin_dbl_sha256(bhBytes[-80:])[::-1].encode('hex'), 16)
    if chainHead != expHead:
        logger.info('@@@@@ MISMATCH chainHead: {0} expHead: {1}'.format(blockHashHex(chainHead), blockHashHex(expHead)))
        # sys.exit(1)
def walletWithdraw():
    """Withdraw instance.ethDaily wei from the wallet contract to the sender.

    Calls the wallet's execute(address, uint, bytes) and re-submits the
    transaction until it is seen pending and then mined.
    """
    # execute(address _to, uint _value, bytes _data)
    sig = 'execute:[address,uint256,bytes]:bytes32'
    data = [instance.address, instance.ethDaily, '']
    gas = 999000

    # Wait for the transaction retry if failed
    txHash = instance.transact(instance.walletContract, sig=sig, data=data, gas=gas)
    logger.info("walletWithdraw txHash: %s" % txHash)
    txResult = False
    while txResult is False:
        txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="pending", retry=30, verbose=True)
        if txResult is False:
            txHash = instance.transact(instance.walletContract, sig=sig, data=data, gas=gas)

    # Wait for the transaction to be mined and retry if failed
    txResult = False
    while txResult is False:
        txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="latest", retry=60, verbose=True)
        if txResult is False:
            txHash = instance.transact(instance.walletContract, sig=sig, data=data, gas=gas)
def getLastBlockHeight():
    """Read getLastBlockHeight() from the relay contract via a constant call."""
    # Temporarily quiet pyepm's logger around the RPC call.
    pyepmLogger.setLevel(logging.WARNING)
    result = instance.call(instance.relayContract,
                           sig='getLastBlockHeight:[]:int256',
                           data=[])
    pyepmLogger.setLevel(logging.INFO)
    logger.debug("RESULT %s" % result)
    # pyepm returns a list of decoded values; unwrap the single int256.
    return result[0] if len(result) else result
def getBlockchainHead():
    """Read getBlockchainHead() from the relay contract via a constant call."""
    # Temporarily quiet pyepm's logger around the RPC call.
    pyepmLogger.setLevel(logging.WARNING)
    result = instance.call(instance.relayContract,
                           sig='getBlockchainHead:[]:int256',
                           data=[])
    pyepmLogger.setLevel(logging.INFO)
    # pyepm returns a list of decoded values; unwrap the single int256.
    return result[0] if len(result) else result
def blockHashHex(number):
    """Return `number` as a 64-character, zero-padded, lowercase hex string.

    Uses a printf-style format instead of slicing ``hex()``: ``hex()`` only
    appends a trailing 'L' for Python 2 longs, so ``hex(number)[2:-1]``
    silently drops the last hex digit for plain ints (and for every int on
    Python 3). ``'%064x'`` is correct on both and pads in one step.
    """
    return '%064x' % number
# Script entry point. The stray `print txResult` that followed main() has been
# removed: txResult is never defined at module scope, so reaching it raised a
# NameError after main() returned.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# fetchd: fetches Bitcoin block headers and relays them to an Ethereum
# contract (btcrelay) via the pyepm JSON-RPC API.

# from datetime import datetime, date
from time import sleep
from argparse import ArgumentParser
import logging

from pyepm import api, config, __version__

from bitcoin import *  # NOQA

# NOTE(review): `sys` is used below (sys.exit, sys.exc_info) without an
# explicit import in this file -- presumably re-exported by
# `from bitcoin import *`; confirm before tightening the wildcard import.

BITCOIN_MAINNET = 'btc'
BITCOIN_TESTNET = 'testnet'
SLEEP_TIME = 5 * 60  # 5 mins. If changing, check retry logic
GAS_FOR_STORE_HEADERS = 1200000  # it should take less than 1M gas, but buffer to avoid running out
CHUNK_SIZE = 5  # number of headers to fetch at a time
CHUNK_RANGE = range(CHUNK_SIZE)

# Shared pyepm API instance; main() fills in address/relayContract/RPC fields.
api_config = config.read_config()
instance = api.Api(api_config)

logging.basicConfig(format='%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
pyepmLogger = logging.getLogger("pyepm")
pyepmLogger.setLevel(logging.INFO)

# instance.address = "0xcd2a3d9f938e13cd947ec05abc7fe734df8dd826"
# instance.relayContract = "0xba164d1e85526bd5e27fd15ad14b0eae91c45a93"
# TESTNET relay: 0x142f674e911cc55c226af81ac4d6de0a671d4abf

instance.walletContract = '0xdc056dc1a79f6a07b28d351ceb084852f88febbb'  # Morden
instance.ethDaily = int(1e18)  # 1 ETH
useWallet = True
aWalletOwner = '0xd005c515db902b1b77beb98370ba1f16b3111d7b'
def main():
    """Parse CLI options, configure the shared pyepm Api instance, and run
    the fetch/relay pass (once, or forever with retries when --daemon)."""
    # logging.basicConfig(level=logging.DEBUG)
    logger.info("fetchd using PyEPM %s" % __version__)

    parser = ArgumentParser()
    parser.add_argument('-s', '--sender', required=True, help='sender of transaction')
    parser.add_argument('-r', '--relay', required=True, help='relay contract address')
    parser.add_argument('--rpcHost', default='127.0.0.1', help='RPC hostname')
    parser.add_argument('--rpcPort', default='8545', type=int, help='RPC port')
    parser.add_argument('--startBlock', default=0, type=int, help='block number to start fetching from')
    parser.add_argument('-w', '--waitFor', default=0, type=int, help='number of blocks to wait between fetches')
    parser.add_argument('--gasPrice', default=int(10e12), type=int, help='gas price')  # default 10 szabo
    parser.add_argument('--fetch', action='store_true', help='fetch blockheaders')
    parser.add_argument('-n', '--network', default=BITCOIN_TESTNET, choices=[BITCOIN_TESTNET, BITCOIN_MAINNET], help='Bitcoin network')
    parser.add_argument('-d', '--daemon', default=False, action='store_true', help='run as daemon')
    parser.add_argument('--feeVTX', default=0, type=int, help='fee to charge for verifications')
    parser.add_argument('--feeRecipient', help='address of fee recipient')

    args = parser.parse_args()

    instance.address = args.sender
    instance.relayContract = args.relay
    instance.rpcHost = args.rpcHost
    instance.rpcPort = args.rpcPort
    instance.jsonrpc_url = "http://%s:%s" % (instance.rpcHost, instance.rpcPort)
    instance.numBlocksToWait = args.waitFor  # for CPP eth as of Apr 28, 3 blocks seems reasonable. 0 seems to be fine for Geth
    # instance.gasPrice = args.gasPrice

    feeVerifyTx = args.feeVTX
    logger.info('feeVTX: %s' % feeVerifyTx)

    if useWallet and instance.address != aWalletOwner:
        # Informational only; execution continues.
        logger.info('sender is not a wallet owner: %s' % instance.address)

    feeRecipient = args.feeRecipient or instance.address
    logger.info('feeRecipient: %s' % feeRecipient)

    # logger.info('@@@ rpc: %s' % instance.jsonrpc_url)

    # this can't be commented out easily since run() always does instance.heightToStartFetch = getLastBlockHeight() + 1 for retries
    # contractHeight = getLastBlockHeight()  # needs instance.relayContract to be set
    # logger.info('@@@ contract height: {0} gp: {1}').format(contractHeight, instance.gasPrice)
    # instance.heightToStartFetch = args.startBlock or contractHeight + 1

    # this will not handle exceptions or do retries. need to use -d switch if desired
    if not args.daemon:
        run(feeVerifyTx, feeRecipient, doFetch=args.fetch, network=args.network, startBlock=args.startBlock)
        return

    # Daemon mode: loop forever, sleeping SLEEP_TIME between passes and
    # retrying after 1 minute on ordinary exceptions (up to 4 attempts).
    while True:
        for i in range(4):
            try:
                run(feeVerifyTx, feeRecipient, doFetch=args.fetch, network=args.network, startBlock=args.startBlock)
                sleep(SLEEP_TIME)
            except Exception as e:
                logger.info(e)
                logger.info('Retry in 1min')
                sleep(60)
                continue
            except:  # catch *all* exceptions (e.g. KeyboardInterrupt/SystemExit)
                e = sys.exc_info()[0]
                logger.info(e)
                logger.info('Rare exception')
                raise
            break
def run(feeVerifyTx, feeRecipient, doFetch=False, network=BITCOIN_TESTNET, startBlock=0):
    """One fetch/relay pass: reconcile the relay contract's HEAD with the
    Bitcoin chain reported by the block explorer (handling orphaned
    headers), then optionally fetch and store any missing headers.

    feeVerifyTx  -- fee to charge for verifications (0 disables fee mode)
    feeRecipient -- address that receives the fee
    doFetch      -- when True, actually store the missing headers
    network      -- Bitcoin network id passed to the blockr_* helpers
    startBlock   -- when non-zero, overrides the contract-reported height

    Raises ValueError when the contract returns an empty blockchain head.
    """
    chainHead = getBlockchainHead()
    if not chainHead:
        raise ValueError("Empty BlockchainHead returned.")
    chainHead = blockHashHex(chainHead)
    logger.info('BTC BlockchainHead: %s' % chainHead)

    # loop in case contract stored correct HEAD, but reorg in *Ethereum* chain
    # so that contract lost the correct HEAD. we try 3 times since it would
    # be quite unlucky for 5 Ethereum reorgs to coincide with storing the
    # non-orphaned Bitcoin block
    nTime = 5
    for i in range(nTime):
        # refetch if needed in case contract's HEAD was orphaned
        if startBlock:
            contractHeight = startBlock
        else:
            contractHeight = getLastBlockHeight()
        realHead = blockr_get_block_header_data(contractHeight, network=network)['hash']
        heightToRefetch = contractHeight
        # Walk back one header at a time until the contract's HEAD matches the
        # hash the explorer reports for that height.
        while chainHead != realHead:
            logger.info('@@@ chainHead: {0} realHead: {1}'.format(chainHead, realHead))
            fetchHeaders(heightToRefetch, 1, 1, feeVerifyTx, feeRecipient, network=network)
            # wait for some blocks because Geth has a delay (at least in RPC), of
            # returning the correct data. the non-orphaned header may already
            # be in the Ethereum blockchain, so we should give it a chance before
            # adjusting realHead to the previous parent
            #
            # realHead is adjusted to previous parent in the off-chance that
            # there is more than 1 orphan block
            # for j in range(4):
            instance.wait_for_next_block(from_block=instance.last_block(), verbose=True)
            chainHead = blockHashHex(getBlockchainHead())
            realHead = blockr_get_block_header_data(heightToRefetch, network=network)['hash']
            heightToRefetch -= 1

            if heightToRefetch < contractHeight - 10:
                if i == nTime - 1:
                    # this really shouldn't happen since 2 orphans are already
                    # rare, let alone 10
                    logger.info('@@@@ TERMINATING big reorg? {0}'.format(heightToRefetch))
                    sys.exit()
                else:
                    logger.info('@@@@ handle orphan did not succeed iteration {0}'.format(i))
                    break  # start the refetch again, this time ++i
        break  # chainHead is same realHead

    actualHeight = last_block_height(network)  # pybitcointools 1.1.33
    if startBlock:
        instance.heightToStartFetch = startBlock
    else:
        instance.heightToStartFetch = getLastBlockHeight() + 1
    logger.info('@@@ startFetch: {0} actualHeight: {1}'.format(instance.heightToStartFetch, actualHeight))

    # Split the remaining headers into CHUNK_SIZE batches plus a leftover tail.
    chunkSize = CHUNK_SIZE
    fetchNum = actualHeight - instance.heightToStartFetch + 1
    numChunk = fetchNum / chunkSize  # NOTE: Python 2 integer division
    leftoverToFetch = fetchNum % chunkSize
    logger.info('@@@ numChunk: {0} leftoverToFetch: {1}'.format(numChunk, fetchNum))
    logger.info('----------------------------------')

    if doFetch:
        fetchHeaders(instance.heightToStartFetch, chunkSize, numChunk, feeVerifyTx, feeRecipient, network=network)
        # Fetch the remaining (< chunkSize) headers one at a time.
        fetchHeaders(actualHeight - leftoverToFetch + 1, 1, leftoverToFetch, feeVerifyTx, feeRecipient, network=network)
    # sys.exit()
def fetchHeaders(chunkStartNum, chunkSize, numChunk, feeVerifyTx, feeRecipient, network=BITCOIN_TESTNET):
    """Fetch numChunk batches of chunkSize Bitcoin headers starting at height
    chunkStartNum and store each batch in the relay contract.

    Periodically (roughly every 100 headers) checks the sender's ETH balance
    and tops it up from the wallet contract when below 1 ETH.
    """
    for j in range(numChunk):
        strings = ""
        # Concatenate chunkSize serialized headers (hex strings) into one payload.
        for i in range(chunkSize):
            blockNum = chunkStartNum + i
            bhJson = blockr_get_block_header_data(blockNum, network=network)
            bhStr = serialize_header(bhJson)
            logger.info("@@@ {0}: {1}".format(blockNum, bhStr))
            logger.debug("Block header: %s" % repr(bhStr.decode('hex')))  # Python 2 hex codec
            strings += bhStr
        storeHeaders(strings.decode('hex'), chunkSize, feeVerifyTx, feeRecipient)

        chainHead = getBlockchainHead()
        logger.info('@@@ DONE hexHead: %s' % blockHashHex(chainHead))
        logger.info('==================================')

        chunkStartNum += chunkSize
        # average of 6*24=144 headers a day. So AROUND every 100 headers we check
        # the balance of sender and if it's less than 1 ETH, we ask for more ETH
        # from the wallet.
        # CHUNK_RANGE is used so that we ask for ETH if heightToStartFetch ends in
        # ????00, ????01, ????02 to ????04
        if chunkStartNum % 100 in CHUNK_RANGE and useWallet:
            myWei = instance.balance_at(instance.address)
            myBalance = myWei / 1e18  # wei -> ETH
            logger.info('myBalance ETH: %s' % myBalance)
            if myBalance < 1:
                logger.info('going to walletWithdraw')
                walletWithdraw()
                myWei = instance.balance_at(instance.address)
                myBalance = myWei / 1e18
                logger.info('topped up ETH balance: %s' % myBalance)
def storeHeaders(bhBytes, chunkSize, feeVerifyTx, feeRecipient):
    """Store chunkSize raw 80-byte headers (concatenated in bhBytes) in the
    relay contract, re-submitting each transaction until it is seen pending
    and then mined.

    When feeVerifyTx is 0 all headers go in a single bulkStoreHeader call;
    otherwise each header is stored individually via
    storeBlockWithFeeAndRecipient.
    """
    txCount = instance.transaction_count(defaultBlock='pending')
    logger.info('----------------------------------')
    logger.info('txCount: %s' % txCount)

    # Log the double-SHA256 hashes and raw hex of the first and last headers
    # (Bitcoin headers are fixed 80-byte records; hashes are byte-reversed).
    hashOne = blockHashHex(int(bin_dbl_sha256(bhBytes[:80])[::-1].encode('hex'), 16))
    hashLast = blockHashHex(int(bin_dbl_sha256(bhBytes[-80:])[::-1].encode('hex'), 16))
    logger.info('hashOne: %s' % hashOne)
    logger.info('hashLast: %s' % hashLast)

    firstH = bhBytes[:80].encode('hex')
    lastH = bhBytes[-80:].encode('hex')
    logger.info('firstH: %s' % firstH)
    logger.info('lastH: %s' % lastH)

    sig = 'bulkStoreHeader:[bytes,int256]:int256'
    data = [bhBytes, chunkSize]
    gas = GAS_FOR_STORE_HEADERS
    value = 0

    #
    # Store the headers
    #
    if feeVerifyTx != 0:
        sig = 'storeBlockWithFeeAndRecipient:[bytes,int256,int256]:int256'

    for i in range(chunkSize):
        if feeVerifyTx != 0:
            # Fee mode submits one 80-byte header per transaction.
            offset = 80*i
            data = [ bhBytes[offset:offset+80] , feeVerifyTx, feeRecipient]

        # Wait for the transaction and retry if failed
        txHash = instance.transact(instance.relayContract, sig=sig, data=data, gas=gas, value=value)
        logger.info("store header txHash: %s" % txHash)
        txResult = False
        while txResult is False:
            txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="pending", retry=30, verbose=True)
            logger.info("store header pendingblock txResult: %s" % txResult)
            if txResult is False:
                txHash = instance.transact(instance.relayContract, sig=sig, data=data, gas=gas, value=value)

        # Wait for the transaction to be mined and retry if failed
        txResult = False
        while txResult is False:
            txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="latest", retry=60, verbose=True)
            logger.info("store header latestblock txResult: %s" % txResult)
            if txResult is False:
                txHash = instance.transact(instance.relayContract, sig=sig, data=data, gas=gas, value=value)

        if feeVerifyTx == 0:
            # Non-fee mode already sent everything in the single bulk call.
            break

    # Sanity check: the contract's new head should be the hash of the last
    # header just submitted.
    chainHead = getBlockchainHead()
    expHead = int(bin_dbl_sha256(bhBytes[-80:])[::-1].encode('hex'), 16)
    if chainHead != expHead:
        logger.info('@@@@@ MISMATCH chainHead: {0} expHead: {1}'.format(blockHashHex(chainHead), blockHashHex(expHead)))
        # sys.exit(1)
def walletWithdraw():
    """Withdraw instance.ethDaily wei from the wallet contract to the sender.

    Calls the wallet's execute(address, uint, bytes) and re-submits the
    transaction until it is seen pending and then mined.
    """
    # execute(address _to, uint _value, bytes _data)
    sig = 'execute:[address,uint256,bytes]:bytes32'
    data = [instance.address, instance.ethDaily, '']
    gas = 999000

    # Wait for the transaction retry if failed
    txHash = instance.transact(instance.walletContract, sig=sig, data=data, gas=gas)
    logger.info("walletWithdraw txHash: %s" % txHash)
    txResult = False
    while txResult is False:
        txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="pending", retry=30, verbose=True)
        if txResult is False:
            txHash = instance.transact(instance.walletContract, sig=sig, data=data, gas=gas)

    # Wait for the transaction to be mined and retry if failed
    txResult = False
    while txResult is False:
        txResult = instance.wait_for_transaction(transactionHash=txHash, defaultBlock="latest", retry=60, verbose=True)
        if txResult is False:
            txHash = instance.transact(instance.walletContract, sig=sig, data=data, gas=gas)
def getLastBlockHeight():
    """Read getLastBlockHeight() from the relay contract via a constant call."""
    # Temporarily quiet pyepm's logger around the RPC call.
    pyepmLogger.setLevel(logging.WARNING)
    result = instance.call(instance.relayContract,
                           sig='getLastBlockHeight:[]:int256',
                           data=[])
    pyepmLogger.setLevel(logging.INFO)
    logger.debug("RESULT %s" % result)
    # pyepm returns a list of decoded values; unwrap the single int256.
    return result[0] if len(result) else result
def getBlockchainHead():
    """Read getBlockchainHead() from the relay contract via a constant call."""
    # Temporarily quiet pyepm's logger around the RPC call.
    pyepmLogger.setLevel(logging.WARNING)
    result = instance.call(instance.relayContract,
                           sig='getBlockchainHead:[]:int256',
                           data=[])
    pyepmLogger.setLevel(logging.INFO)
    # pyepm returns a list of decoded values; unwrap the single int256.
    return result[0] if len(result) else result
def blockHashHex(number):
    """Return `number` as a 64-character, zero-padded, lowercase hex string.

    Uses a printf-style format instead of slicing ``hex()``: ``hex()`` only
    appends a trailing 'L' for Python 2 longs, so ``hex(number)[2:-1]``
    silently drops the last hex digit for plain ints (and for every int on
    Python 3). ``'%064x'`` is correct on both and pads in one step.
    """
    return '%064x' % number
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
Individual pages
"""
from dominate import document
from dominate.tags import *
import urllib2
from bs4 import BeautifulSoup
from pyteaser import SummarizeUrl
from flask import (
Blueprint,
g,
render_template)
analytics = Blueprint('analytics', __name__)
from summarytool import SummaryTool
fs = SummaryTool()
urls = [
'http://www.wired.com/',
'http://www.nytimes.com/',
'http://www.technologyreview.com/lists/technologies/2014/'
]
'''
from collections import defaultdict
@analytics.route('/')
def index():
"""main index page"""
return render_template('index2.html', pages=g.pages.sorted[:3])
'''
def get_only_text(url):
    """
    return the title and the text of the article
    at the specified url
    """
    html = urllib2.urlopen(url).read().decode('utf8')
    parsed = BeautifulSoup(html)
    # Join the text of every <p> element into one body string.
    body_text = ' '.join(p.text for p in parsed.find_all('p'))
    return parsed.title.text, body_text
@analytics.route('/analytics/')
def analytics_check():
    """Scrape each configured front page, summarize its paragraph text, and
    render the summaries into templates/analytics.html before serving it."""
    headlines = []
    for url in urls:
        page = urllib2.urlopen(url).read().decode('utf8')
        soup = BeautifulSoup(page)
        title = 'Analytics'
        # ASCII-encode each paragraph, silently dropping non-ASCII characters.
        content = ' '.join(map(lambda p: p.text.encode('ascii', 'ignore'), soup.find_all('p')))
        #headlines='\n'.join(str(line.encode('ascii', 'ignore')) for line in summaries)
        sentences_dic = fs.get_senteces_ranks(content)
        summary = fs.get_summary(title, content, sentences_dic)
        headlines.append(summary)
    # Build a static HTML page with dominate, write it into the templates
    # directory, then let Flask render it.
    with document(title='Analytics') as doc:
        h1('Title')
        #print headlines
        h2('\n'.join(str(line) for line in headlines))
    with open('templates/analytics.html', 'w') as f:
        f.write(doc.render())
    return render_template('analytics.html')
Added changes (revision 27)
"""
Individual pages
"""
from dominate import document
from dominate.tags import *
import urllib2
from bs4 import BeautifulSoup
from pyteaser import SummarizeUrl
from flask import (
Blueprint,
g,
render_template)
analytics = Blueprint('analytics', __name__)
from summarytool import SummaryTool
fs = SummaryTool()
urls = [
'http://www.wired.com/',
'http://www.nytimes.com/',
'http://www.technologyreview.com/lists/technologies/2014/'
]
'''
from collections import defaultdict
@analytics.route('/')
def index():
"""main index page"""
return render_template('index2.html', pages=g.pages.sorted[:3])
'''
def get_only_text(url):
    """
    return the title and the text of the article
    at the specified url
    """
    html = urllib2.urlopen(url).read().decode('utf8')
    parsed = BeautifulSoup(html)
    # Join the text of every <p> element into one body string.
    body_text = ' '.join(p.text for p in parsed.find_all('p'))
    return parsed.title.text, body_text
@analytics.route('/analytics/')
def analytics_check():
    """Scrape each configured front page, summarize its paragraph text, and
    render the summaries into templates/analytics.html before serving it."""
    headlines = []
    for url in urls:
        page = urllib2.urlopen(url).read().decode('utf8')
        soup = BeautifulSoup(page)
        title = 'Analytics'
        # ASCII-encode each paragraph, silently dropping non-ASCII characters.
        content = ' '.join(map(lambda p: p.text.encode('ascii', 'ignore'), soup.find_all('p')))
        #headlines='\n'.join(str(line.encode('ascii', 'ignore')) for line in summaries)
        sentences_dic = fs.get_senteces_ranks(content)
        summary = fs.get_summary(title, content, sentences_dic)
        headlines.append(summary)
    # Python 2 print statement: debug output of the collected headlines.
    print 'Headline is %s'%('\n'.join(str(line) for line in headlines))
    # Build a static HTML page with dominate, write it into the templates
    # directory, then let Flask render it.
    with document(title='Analytics') as doc:
        h1('Title')
        #print headlines
        h2('\n'.join(str(line) for line in headlines))
    with open('templates/analytics.html', 'w') as f:
        f.write(doc.render())
    return render_template('analytics.html')
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import itertools
from dataclasses import dataclass
from hashlib import sha256
from textwrap import dedent
from typing import Iterable, Optional, Tuple
import packaging
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import PythonSourceField
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.typecheck.mypy.subsystem import (
MyPy,
MyPyConfigFile,
MyPyFirstPartyPlugins,
)
from pants.backend.python.util_rules import partition, pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
PexResolveInfo,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.pex_requirements import PexRequirements
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.base.build_root import BuildRoot
from pants.core.goals.check import REPORT_DIR, CheckRequest, CheckResult, CheckResults
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.system_binaries import CpBinary, MkdirBinary, MvBinary
from pants.engine.collection import Collection
from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule, rule_helper
from pants.engine.target import CoarsenedTargets, FieldSet, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize, shell_quote
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
    """Field set selecting Python source targets for MyPy checking."""

    required_fields = (PythonSourceField,)

    sources: PythonSourceField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets that set the skip-MyPy field are excluded from the check.
        return tgt.get(SkipMyPyField).value
@dataclass(frozen=True)
class MyPyPartition:
    """A single MyPy run: the root field sets to check, their transitive
    closure, and the (resolve, interpreter constraints) pair the partition
    was keyed on."""

    root_field_sets: FrozenOrderedSet[MyPyFieldSet]
    closure: FrozenOrderedSet[Target]
    resolve_description: str | None
    interpreter_constraints: InterpreterConstraints

    def description(self) -> str:
        """Human-readable partition label, e.g. `my-resolve, ['==3.8.*']`."""
        ics = str(sorted(str(c) for c in self.interpreter_constraints))
        return f"{self.resolve_description}, {ics}" if self.resolve_description else ics
class MyPyPartitions(Collection[MyPyPartition]):
    """All partitions produced for a single MyPy request."""
    pass
class MyPyRequest(CheckRequest):
    """Check request wiring MyPy into the `check` goal."""
    field_set_type = MyPyFieldSet
    name = MyPy.options_scope
@rule_helper
async def _generate_argv(
    mypy: MyPy,
    *,
    pex: VenvPex,
    cache_dir: str,
    venv_python: str,
    file_list_path: str,
    python_version: Optional[str],
) -> Tuple[str, ...]:
    """Assemble the MyPy command line.

    Enables MyPy's SQLite cache (pointed at `cache_dir`) when the resolved
    MyPy version supports it; otherwise caching is disabled via /dev/null.
    The files to check are passed via an @-file at `file_list_path`.
    """
    args = [pex.pex.argv0, f"--python-executable={venv_python}", *mypy.args]
    if mypy.config:
        args.append(f"--config-file={mypy.config}")
    if python_version:
        args.append(f"--python-version={python_version}")

    mypy_pex_info = await Get(PexResolveInfo, VenvPex, pex)
    mypy_info = mypy_pex_info.find("mypy")
    assert mypy_info is not None
    if mypy_info.version > packaging.version.Version("0.700") and python_version is not None:
        # Skip mtime checks because we don't propagate mtime when materializing the sandbox,
        # so the mtime checks would always fail otherwise.
        args.append("--skip-cache-mtime-check")
        # See "__mypy_runner.sh" below for explanation
        args.append("--sqlite-cache")  # Added in v 0.660
        args.extend(("--cache-dir", cache_dir))
    else:
        # Don't bother caching
        args.append("--cache-dir=/dev/null")

    args.append(f"@{file_list_path}")
    return tuple(args)
def determine_python_files(files: Iterable[str]) -> Tuple[str, ...]:
    """We run over all .py and .pyi files, but .pyi files take precedence.

    MyPy will error if we say to run over the same module with both its .py and .pyi files, so we
    must be careful to only use the .pyi stub.
    """
    # Plain dicts preserve insertion order, so a dict with None values serves
    # as an insertion-ordered set without needing the OrderedSet utility.
    result: dict[str, None] = {}
    for f in files:
        if f.endswith(".pyi"):
            py_file = f[:-1]  # That is, strip the `.pyi` suffix to be `.py`.
            # Drop the module's .py entry (if already seen) so only the stub remains.
            result.pop(py_file, None)
            result[f] = None
        elif f.endswith(".py"):
            # Only include the .py file if its stub hasn't been seen already.
            pyi_file = f + "i"
            if pyi_file not in result:
                result[f] = None
    return tuple(result)
@rule
async def mypy_typecheck_partition(
    partition: MyPyPartition,
    config_file: MyPyConfigFile,
    first_party_plugins: MyPyFirstPartyPlugins,
    build_root: BuildRoot,
    mypy: MyPy,
    python_setup: PythonSetup,
    mkdir: MkdirBinary,
    cp: CpBinary,
    mv: MvBinary,
) -> CheckResult:
    """Run MyPy over one partition's files and return its CheckResult.

    Builds the MyPy PEX, a venv PEX with the partition's 3rd-party
    requirements and extra type stubs, and a shell wrapper that shuttles
    MyPy's SQLite cache in and out of a named cache.

    Fix: the wrapper's redirections were written `2>&1 > /dev/null`, which
    points stderr at the *current* stdout before stdout is redirected, so the
    mkdir/cp/mv errors were not actually silenced. The correct order is
    `> /dev/null 2>&1`.
    """
    # MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
    # and 3.7. However, typed-ast does not understand 3.8+, so instead we must run MyPy with
    # Python 3.8+ when relevant. We only do this if <3.8 can't be used, as we don't want a
    # loose requirement like `>=3.6` to result in requiring Python 3.8+, which would error if
    # 3.8+ is not installed on the machine.
    tool_interpreter_constraints = (
        partition.interpreter_constraints
        if (
            mypy.options.is_default("interpreter_constraints")
            and partition.interpreter_constraints.requires_python38_or_newer(
                python_setup.interpreter_versions_universe
            )
        )
        else mypy.interpreter_constraints
    )

    closure_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
    roots_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(fs.sources for fs in partition.root_field_sets),
    )

    # See `requirements_venv_pex` for how this will get wrapped in a `VenvPex`.
    requirements_pex_get = Get(
        Pex,
        RequirementsPexRequest(
            (fs.address for fs in partition.root_field_sets),
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
        ),
    )

    extra_type_stubs_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="extra_type_stubs.pex",
            internal_only=True,
            requirements=PexRequirements(mypy.extra_type_stubs),
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )

    mypy_pex_get = Get(
        VenvPex,
        PexRequest,
        mypy.to_pex_request(
            interpreter_constraints=tool_interpreter_constraints,
            extra_requirements=first_party_plugins.requirement_strings,
        ),
    )

    (
        closure_sources,
        roots_sources,
        mypy_pex,
        extra_type_stubs_pex,
        requirements_pex,
    ) = await MultiGet(
        closure_sources_get,
        roots_sources_get,
        mypy_pex_get,
        extra_type_stubs_pex_get,
        requirements_pex_get,
    )

    python_files = determine_python_files(roots_sources.snapshot.files)
    file_list_path = "__files.txt"
    file_list_digest_request = Get(
        Digest,
        CreateDigest([FileContent(file_list_path, "\n".join(python_files).encode())]),
    )

    # This creates a venv with all the 3rd-party requirements used by the code. We tell MyPy to
    # use this venv by setting `--python-executable`. Note that this Python interpreter is
    # different than what we run MyPy with.
    #
    # We could have directly asked the `PexFromTargetsRequest` to return a `VenvPex`, rather than
    # `Pex`, but that would mean missing out on sharing a cache with other goals like `test` and
    # `run`.
    requirements_venv_pex_request = Get(
        VenvPex,
        PexRequest(
            output_filename="requirements_venv.pex",
            internal_only=True,
            pex_path=[requirements_pex, extra_type_stubs_pex],
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )

    requirements_venv_pex, file_list_digest = await MultiGet(
        requirements_venv_pex_request, file_list_digest_request
    )

    py_version = config_file.python_version_to_autoset(
        partition.interpreter_constraints, python_setup.interpreter_versions_universe
    )
    # The named cache is keyed by build root so concurrent repos don't collide.
    named_cache_dir = f".cache/mypy_cache/{sha256(build_root.path.encode()).hexdigest()}"
    run_cache_dir = ".tmp_cache/mypy_cache"
    argv = await _generate_argv(
        mypy,
        pex=mypy_pex,
        venv_python=requirements_venv_pex.python.argv0,
        cache_dir=run_cache_dir,
        file_list_path=file_list_path,
        python_version=py_version,
    )
    # NOTE: redirection order matters below. `> /dev/null 2>&1` discards both
    # stdout and stderr; the reversed order (`2>&1 > /dev/null`) would leave
    # stderr attached to the console.
    script_runner_digest = await Get(
        Digest,
        CreateDigest(
            [
                FileContent(
                    "__mypy_runner.sh",
                    dedent(
                        f"""\
                        # We want to leverage the MyPy cache for fast incremental runs of MyPy.
                        # Pants exposes "append_only_caches" we can leverage, but with the caveat
                        # that it requires either only appending files, or multiprocess-safe access.
                        #
                        # MyPy guarantees neither, but there's workarounds!
                        #
                        # By default, MyPy uses 2 cache files per source file, which introduces a
                        # whole slew of race conditions. We can minimize the race conditions by
                        # using MyPy's SQLite cache. MyPy still has race conditions when using the
                        # db, as it issues at least 2 single-row queries per source file at different
                        # points in time (therefore SQLite's own safety guarantees don't apply).
                        #
                        # To workaround this we make a copy of the db from the append_only_cache,
                        # run MyPy on it, then move the updated cache back to the append_only_cache.
                        # This is multiprocess-safe as mv on the same filesystem is an atomic "rename",
                        # and any processes copying the "old" file will still have valid file
                        # descriptors for the "old" file.
                        #
                        # There is a chance of multiple processes thrashing on the cache, leaving
                        # it in a state that doesn't reflect reality at the current point in time,
                        # and forcing other processes to do potentially done work. This strategy
                        # still provides a net benefit because the cache is generally _mostly_
                        # valid (it includes entries for the standard library, and 3rdparty deps,
                        # among 1stparty sources), and even in the worst case
                        # (every single file has changed) the overhead of missing the cache each
                        # query should be small when compared to the work being done of typechecking.
                        #
                        # Lastly, we expect that since this is run through Pants which attempts
                        # to partition MyPy runs by python version (which the DB is independent
                        # for different versions) and uses a one-process-at-a-time daemon by default,
                        # multuple MyPy processes operating on a single db cache should be rare.
                        {mkdir.path} -p {run_cache_dir}/{py_version} > /dev/null 2>&1
                        {cp.path} {named_cache_dir}/{py_version}/cache.db {run_cache_dir}/{py_version}/cache.db > /dev/null 2>&1 || true
                        {' '.join((shell_quote(arg) for arg in argv))}
                        EXIT_CODE=$?
                        {mv.path} {run_cache_dir}/{py_version}/cache.db {named_cache_dir}/{py_version}/cache.db > /dev/null 2>&1 || true
                        exit $EXIT_CODE
                        """
                    ).encode(),
                    is_executable=True,
                )
            ]
        ),
    )

    merged_input_files = await Get(
        Digest,
        MergeDigests(
            [
                file_list_digest,
                first_party_plugins.sources_digest,
                closure_sources.source_files.snapshot.digest,
                requirements_venv_pex.digest,
                config_file.digest,
                script_runner_digest,
            ]
        ),
    )

    all_used_source_roots = sorted(
        set(itertools.chain(first_party_plugins.source_roots, closure_sources.source_roots))
    )
    env = {
        "PEX_EXTRA_SYS_PATH": ":".join(all_used_source_roots),
        "MYPYPATH": ":".join(all_used_source_roots),
    }

    process = await Get(
        Process,
        VenvPexProcess(
            mypy_pex,
            input_digest=merged_input_files,
            extra_env=env,
            output_directories=(REPORT_DIR,),
            description=f"Run MyPy on {pluralize(len(python_files), 'file')}.",
            level=LogLevel.DEBUG,
            append_only_caches={"mypy_cache": named_cache_dir},
        ),
    )
    # Swap the process argv so the wrapper script runs instead of MyPy directly.
    process = dataclasses.replace(process, argv=("__mypy_runner.sh",))
    result = await Get(FallibleProcessResult, Process, process)
    report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
    return CheckResult.from_fallible_process_result(
        result,
        partition_description=str(sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
@rule(desc="Determine if necessary to partition MyPy input", level=LogLevel.DEBUG)
async def mypy_determine_partitions(
    request: MyPyRequest, mypy: MyPy, python_setup: PythonSetup
) -> MyPyPartitions:
    """Split the requested field sets into one partition per (resolve, interpreter constraints)."""
    coarsened_by_key = await partition._by_interpreter_constraints_and_resolve(
        request.field_sets, python_setup
    )
    partitions = []
    # Sort so that partition order (and thus result order) is stable across runs.
    for (resolve, interpreter_constraints), (roots, root_cts) in sorted(
        coarsened_by_key.items()
    ):
        partitions.append(
            MyPyPartition(
                FrozenOrderedSet(roots),
                FrozenOrderedSet(CoarsenedTargets(root_cts).closure()),
                # Only surface the resolve name when the repo defines more than one.
                resolve if len(python_setup.resolves) > 1 else None,
                interpreter_constraints or mypy.interpreter_constraints,
            )
        )
    return MyPyPartitions(partitions)
# TODO(#10864): Improve performance, e.g. by leveraging the MyPy cache.
@rule(desc="Typecheck using MyPy", level=LogLevel.DEBUG)
async def mypy_typecheck(request: MyPyRequest, mypy: MyPy) -> CheckResults:
    """Entry point: fan out one MyPy run per partition and collect the results."""
    # Respect the `[mypy].skip` option: report nothing at all.
    if mypy.skip:
        return CheckResults([], checker_name=request.name)
    partitions = await Get(MyPyPartitions, MyPyRequest, request)
    results = await MultiGet(
        Get(CheckResult, MyPyPartition, p) for p in partitions
    )
    return CheckResults(results, checker_name=request.name)
def rules():
    """Register this backend's rules and union memberships with the engine."""
    registered = list(collect_rules())
    registered.append(UnionRule(CheckRequest, MyPyRequest))
    registered.extend(pex_from_targets.rules())
    return registered
Silence mkdir, cp, and mv commands with MyPy caching (#16340)
The current redirection order is swapped (`2>&1 > /dev/null` duplicates stderr onto the terminal's stdout before stdout is redirected), so error output is not actually silenced.
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import itertools
from dataclasses import dataclass
from hashlib import sha256
from textwrap import dedent
from typing import Iterable, Optional, Tuple
import packaging
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import PythonSourceField
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.typecheck.mypy.subsystem import (
MyPy,
MyPyConfigFile,
MyPyFirstPartyPlugins,
)
from pants.backend.python.util_rules import partition, pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
PexResolveInfo,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.pex_requirements import PexRequirements
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.base.build_root import BuildRoot
from pants.core.goals.check import REPORT_DIR, CheckRequest, CheckResult, CheckResults
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.system_binaries import CpBinary, MkdirBinary, MvBinary
from pants.engine.collection import Collection
from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule, rule_helper
from pants.engine.target import CoarsenedTargets, FieldSet, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize, shell_quote
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
    """The target information MyPy needs: any target carrying Python sources."""

    required_fields = (PythonSourceField,)

    sources: PythonSourceField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets marked with `skip_mypy=True` are excluded from checking.
        skip_field = tgt.get(SkipMyPyField)
        return skip_field.value
@dataclass(frozen=True)
class MyPyPartition:
    """A single MyPy run: the roots to check, their dependency closure, and how
    the interpreter for the run should be selected."""

    root_field_sets: FrozenOrderedSet[MyPyFieldSet]
    closure: FrozenOrderedSet[Target]
    resolve_description: str | None
    interpreter_constraints: InterpreterConstraints

    def description(self) -> str:
        """Human-readable partition label used in log and result descriptions."""
        constraints = sorted(str(c) for c in self.interpreter_constraints)
        if self.resolve_description:
            return f"{self.resolve_description}, {constraints}"
        return str(constraints)
class MyPyPartitions(Collection[MyPyPartition]):
    """All MyPy partitions derived from one check request; each is typechecked independently."""

    pass
class MyPyRequest(CheckRequest):
    """Check request routed to the MyPy typechecker via the `check` goal."""

    field_set_type = MyPyFieldSet
    name = MyPy.options_scope
@rule_helper
async def _generate_argv(
    mypy: MyPy,
    *,
    pex: VenvPex,
    cache_dir: str,
    venv_python: str,
    file_list_path: str,
    python_version: Optional[str],
) -> Tuple[str, ...]:
    """Assemble the MyPy command line.

    Caching flags are only emitted when both a `--python-version` is set and the
    resolved mypy distribution is new enough; otherwise the cache is disabled by
    pointing `--cache-dir` at /dev/null.

    :param pex: the venv PEX that provides the `mypy` entry point.
    :param cache_dir: sandbox-local cache directory (see `__mypy_runner.sh`).
    :param venv_python: interpreter of the requirements venv, passed as
        `--python-executable` so MyPy sees 3rd-party deps.
    :param file_list_path: path of the `@`-argument file listing sources to check.
    :param python_version: value for `--python-version`, or None to omit it.
    """
    args = [pex.pex.argv0, f"--python-executable={venv_python}", *mypy.args]
    if mypy.config:
        args.append(f"--config-file={mypy.config}")
    if python_version:
        args.append(f"--python-version={python_version}")
    mypy_pex_info = await Get(PexResolveInfo, VenvPex, pex)
    mypy_info = mypy_pex_info.find("mypy")
    assert mypy_info is not None
    if mypy_info.version > packaging.version.Version("0.700") and python_version is not None:
        # Skip mtime checks because we don't propagate mtime when materializing the sandbox, so the
        # mtime checks will always fail otherwise.
        args.append("--skip-cache-mtime-check")
        # See "__mypy_runner.sh" below for explanation
        args.append("--sqlite-cache")  # Added in v 0.660
        args.extend(("--cache-dir", cache_dir))
    else:
        # Don't bother caching
        args.append("--cache-dir=/dev/null")
    args.append(f"@{file_list_path}")
    return tuple(args)
def determine_python_files(files: Iterable[str]) -> Tuple[str, ...]:
    """Select the files to typecheck, preferring `.pyi` stubs over `.py` modules.

    MyPy errors if the same module is given as both its `.py` and `.pyi` file,
    so whenever a stub exists only the stub is kept. Input order is preserved;
    files with other extensions are dropped.
    """
    # A dict preserves insertion order and gives O(1) membership -- used as an
    # ordered set here.
    chosen: dict = {}
    for name in files:
        if name.endswith(".pyi"):
            # A stub wins: evict the matching .py if we already took it.
            chosen.pop(name[:-1], None)
            chosen[name] = None
        elif name.endswith(".py") and name + "i" not in chosen:
            chosen[name] = None
    return tuple(chosen)
@rule
async def mypy_typecheck_partition(
    partition: MyPyPartition,
    config_file: MyPyConfigFile,
    first_party_plugins: MyPyFirstPartyPlugins,
    build_root: BuildRoot,
    mypy: MyPy,
    python_setup: PythonSetup,
    mkdir: MkdirBinary,
    cp: CpBinary,
    mv: MvBinary,
) -> CheckResult:
    """Run MyPy once over a single partition.

    Builds the MyPy tool PEX, a requirements venv for `--python-executable`, a
    file list of sources, and a small wrapper shell script that copies the
    SQLite cache in and out of a named cache, then executes the wrapper and
    converts the process result into a `CheckResult`.
    """
    # MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
    # and 3.7. However, typed-ast does not understand 3.8+, so instead we must run MyPy with
    # Python 3.8+ when relevant. We only do this if <3.8 can't be used, as we don't want a
    # loose requirement like `>=3.6` to result in requiring Python 3.8+, which would error if
    # 3.8+ is not installed on the machine.
    tool_interpreter_constraints = (
        partition.interpreter_constraints
        if (
            mypy.options.is_default("interpreter_constraints")
            and partition.interpreter_constraints.requires_python38_or_newer(
                python_setup.interpreter_versions_universe
            )
        )
        else mypy.interpreter_constraints
    )
    # Kick off all expensive lookups concurrently; awaited together below.
    closure_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
    roots_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(fs.sources for fs in partition.root_field_sets),
    )
    # See `requirements_venv_pex` for how this will get wrapped in a `VenvPex`.
    requirements_pex_get = Get(
        Pex,
        RequirementsPexRequest(
            (fs.address for fs in partition.root_field_sets),
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    extra_type_stubs_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="extra_type_stubs.pex",
            internal_only=True,
            requirements=PexRequirements(mypy.extra_type_stubs),
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    mypy_pex_get = Get(
        VenvPex,
        PexRequest,
        mypy.to_pex_request(
            interpreter_constraints=tool_interpreter_constraints,
            extra_requirements=first_party_plugins.requirement_strings,
        ),
    )
    (
        closure_sources,
        roots_sources,
        mypy_pex,
        extra_type_stubs_pex,
        requirements_pex,
    ) = await MultiGet(
        closure_sources_get,
        roots_sources_get,
        mypy_pex_get,
        extra_type_stubs_pex_get,
        requirements_pex_get,
    )
    python_files = determine_python_files(roots_sources.snapshot.files)
    file_list_path = "__files.txt"
    file_list_digest_request = Get(
        Digest,
        CreateDigest([FileContent(file_list_path, "\n".join(python_files).encode())]),
    )
    # This creates a venv with all the 3rd-party requirements used by the code. We tell MyPy to
    # use this venv by setting `--python-executable`. Note that this Python interpreter is
    # different than what we run MyPy with.
    #
    # We could have directly asked the `PexFromTargetsRequest` to return a `VenvPex`, rather than
    # `Pex`, but that would mean missing out on sharing a cache with other goals like `test` and
    # `run`.
    requirements_venv_pex_request = Get(
        VenvPex,
        PexRequest(
            output_filename="requirements_venv.pex",
            internal_only=True,
            pex_path=[requirements_pex, extra_type_stubs_pex],
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    requirements_venv_pex, file_list_digest = await MultiGet(
        requirements_venv_pex_request, file_list_digest_request
    )
    py_version = config_file.python_version_to_autoset(
        partition.interpreter_constraints, python_setup.interpreter_versions_universe
    )
    # Key the named cache by build root so unrelated repos don't share a db.
    named_cache_dir = f".cache/mypy_cache/{sha256(build_root.path.encode()).hexdigest()}"
    run_cache_dir = ".tmp_cache/mypy_cache"
    argv = await _generate_argv(
        mypy,
        pex=mypy_pex,
        venv_python=requirements_venv_pex.python.argv0,
        cache_dir=run_cache_dir,
        file_list_path=file_list_path,
        python_version=py_version,
    )
    script_runner_digest = await Get(
        Digest,
        CreateDigest(
            [
                FileContent(
                    "__mypy_runner.sh",
                    dedent(
                        f"""\
                        # We want to leverage the MyPy cache for fast incremental runs of MyPy.
                        # Pants exposes "append_only_caches" we can leverage, but with the caveat
                        # that it requires either only appending files, or multiprocess-safe access.
                        #
                        # MyPy guarantees neither, but there's workarounds!
                        #
                        # By default, MyPy uses 2 cache files per source file, which introduces a
                        # whole slew of race conditions. We can minimize the race conditions by
                        # using MyPy's SQLite cache. MyPy still has race conditions when using the
                        # db, as it issues at least 2 single-row queries per source file at different
                        # points in time (therefore SQLite's own safety guarantees don't apply).
                        #
                        # To workaround this we make a copy of the db from the append_only_cache,
                        # run MyPy on it, then move the updated cache back to the append_only_cache.
                        # This is multiprocess-safe as mv on the same filesystem is an atomic "rename",
                        # and any processes copying the "old" file will still have valid file
                        # descriptors for the "old" file.
                        #
                        # There is a chance of multiple processes thrashing on the cache, leaving
                        # it in a state that doesn't reflect reality at the current point in time,
                        # and forcing other processes to do potentially done work. This strategy
                        # still provides a net benefit because the cache is generally _mostly_
                        # valid (it includes entries for the standard library, and 3rdparty deps,
                        # among 1stparty sources), and even in the worst case
                        # (every single file has changed) the overhead of missing the cache each
                        # query should be small when compared to the work being done of typechecking.
                        #
                        # Lastly, we expect that since this is run through Pants which attempts
                        # to partition MyPy runs by python version (which the DB is independent
                        # for different versions) and uses a one-process-at-a-time daemon by default,
                        # multuple MyPy processes operating on a single db cache should be rare.
                        {mkdir.path} -p {run_cache_dir}/{py_version} > /dev/null 2>&1 || true
                        {cp.path} {named_cache_dir}/{py_version}/cache.db {run_cache_dir}/{py_version}/cache.db > /dev/null 2>&1 || true
                        {' '.join((shell_quote(arg) for arg in argv))}
                        EXIT_CODE=$?
                        {mkdir.path} -p {named_cache_dir}/{py_version} > /dev/null 2>&1 || true
                        {mv.path} {run_cache_dir}/{py_version}/cache.db {named_cache_dir}/{py_version}/cache.db > /dev/null 2>&1 || true
                        exit $EXIT_CODE
                        """
                    ).encode(),
                    is_executable=True,
                )
            ]
        ),
    )
    merged_input_files = await Get(
        Digest,
        MergeDigests(
            [
                file_list_digest,
                first_party_plugins.sources_digest,
                closure_sources.source_files.snapshot.digest,
                requirements_venv_pex.digest,
                config_file.digest,
                script_runner_digest,
            ]
        ),
    )
    all_used_source_roots = sorted(
        set(itertools.chain(first_party_plugins.source_roots, closure_sources.source_roots))
    )
    env = {
        "PEX_EXTRA_SYS_PATH": ":".join(all_used_source_roots),
        "MYPYPATH": ":".join(all_used_source_roots),
    }
    process = await Get(
        Process,
        VenvPexProcess(
            mypy_pex,
            input_digest=merged_input_files,
            extra_env=env,
            output_directories=(REPORT_DIR,),
            description=f"Run MyPy on {pluralize(len(python_files), 'file')}.",
            level=LogLevel.DEBUG,
            append_only_caches={"mypy_cache": named_cache_dir},
        ),
    )
    # Swap the argv so the wrapper script (which invokes the real argv) runs instead.
    process = dataclasses.replace(process, argv=("__mypy_runner.sh",))
    result = await Get(FallibleProcessResult, Process, process)
    report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
    return CheckResult.from_fallible_process_result(
        result,
        partition_description=str(sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
@rule(desc="Determine if necessary to partition MyPy input", level=LogLevel.DEBUG)
async def mypy_determine_partitions(
    request: MyPyRequest, mypy: MyPy, python_setup: PythonSetup
) -> MyPyPartitions:
    """Group the requested field sets by (resolve, interpreter constraints)."""
    grouped = await partition._by_interpreter_constraints_and_resolve(
        request.field_sets, python_setup
    )

    def _make_partition(resolve, constraints, roots, root_cts):
        # Only report a resolve name when the repo actually defines several.
        described_resolve = resolve if len(python_setup.resolves) > 1 else None
        return MyPyPartition(
            FrozenOrderedSet(roots),
            FrozenOrderedSet(CoarsenedTargets(root_cts).closure()),
            described_resolve,
            constraints or mypy.interpreter_constraints,
        )

    # Sorted iteration keeps partition order deterministic between runs.
    return MyPyPartitions(
        _make_partition(resolve, constraints, roots, root_cts)
        for (resolve, constraints), (roots, root_cts) in sorted(grouped.items())
    )
# TODO(#10864): Improve performance, e.g. by leveraging the MyPy cache.
@rule(desc="Typecheck using MyPy", level=LogLevel.DEBUG)
async def mypy_typecheck(request: MyPyRequest, mypy: MyPy) -> CheckResults:
    """Top-level MyPy rule: honor `--skip`, otherwise typecheck every partition."""
    if mypy.skip:
        return CheckResults([], checker_name=request.name)
    all_partitions = await Get(MyPyPartitions, MyPyRequest, request)
    per_partition = await MultiGet(
        Get(CheckResult, MyPyPartition, part) for part in all_partitions
    )
    return CheckResults(per_partition, checker_name=request.name)
def rules():
    """Expose this backend's rules and union registrations to the Pants engine."""
    collected = [*collect_rules()]
    collected += [UnionRule(CheckRequest, MyPyRequest), *pex_from_targets.rules()]
    return collected
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import socket
import email
import os
import mimetypes
ADDR = ("127.0.0.1", 8000)
_CRLF = b"\r\n"
def setup():
    """Create a TCP socket bound to ADDR, start listening on it, and return it."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(ADDR)
    listener.listen(4)
    return listener
def response_ok(_body, _type):
    """Return a complete HTTP/1.1 200 response for the given body and type.

    Fixes: the Content-Length header previously contained the literal
    placeholder 'something', and the date line lacked its `Date:` field name.
    """
    _date = email.Utils.formatdate(usegmt=True)
    _RESPONSE_TEMPLATE = _CRLF.join([
        b"HTTP/1.1 200 OK",
        b"Date: {date}",
        b"Content-Type: {content_type}",
        b"Content-Length: {content_length}",
        b"",
        b"{content_body}",
        b"",
    ])
    return _RESPONSE_TEMPLATE.format(
        date=_date,
        content_type=_type,
        # len() is the byte count of the payload; anything else (e.g.
        # sys.getsizeof) would include Python object overhead.
        content_length=len(_body),
        content_body=_body
    )
def response_error(header, text):
    """Return a full HTTP error response (headers plus an HTML body).

    Fixes the previously malformed header block: the date now carries a
    `Date:` field name and a terminating CRLF, and a blank line separates
    the headers from the body as HTTP requires.
    """
    _date = email.Utils.formatdate(usegmt=True)
    response_headers = (
        header +
        b'Date: ' + _date + _CRLF +
        b'Content-Type: text/html\r\n'
        b'Content-Length:\r\n'
        b'\r\n')
    response_body = (
        b'<html><body>'
        b'<p>' + text + b'</p>'
        b'</body></html>')
    return response_headers + response_body
def parse_request(request):
    """Validate a raw HTTP request and return the requested URI.

    Raises:
        NotImplementedError: the method is not GET.
        NameError: the request line is malformed or the version is not HTTP/1.1.
        ValueError: the mandatory Host header is missing.

    NOTES:
    Headers are located by scanning each CRLF-separated line for "Host: ".
    The request line is split on single spaces into method / URI / version.
    """
    client_req = request.split('\r\n')
    meth = client_req[0].split(' ')
    host = ''
    for item in client_req:
        if "Host: " in item:
            host = item
    if 'GET' != meth[0]:
        raise NotImplementedError('That is not a valid GET request')
    # Guard against a truncated request line ("GET /uri HTTP/1.1" has three
    # parts); previously meth[2] raised an unhandled IndexError here.
    elif len(meth) < 3 or 'HTTP/1.1' != meth[2]:
        raise NameError('That is not a valid HTTP/1.1 request')
    elif 'Host: ' not in host:
        raise ValueError('The required Host header is not present')
    else:
        return meth[1]
def resolve_uri(parse):
    """Resolve a request URI against ./webroot.

    Directories produce an HTML listing; files are returned raw with a
    guessed content type. Raises OSError when nothing matches.
    """
    root = os.path.join(os.getcwd(), 'webroot')
    target = root + parse
    if os.path.isdir(target):
        items = ''.join('<li>' + name + '</li>' for name in os.listdir(target))
        body = '<!DOCTYPE html><html><body><ul>' + items + '</ul></body></html>'
        content_type = 'text/html'
    elif os.path.isfile(target):
        with open(target, 'rb') as handle:
            body = handle.read()
        content_type, encoding = mimetypes.guess_type(parse)
    else:
        raise OSError
    return (body, content_type)
def run_server():
    """
    Create new instance of server, and begin accepting, logging,
    and returning response.
    """
    server = setup()
    while True:
        try:
            conn, addr = server.accept()
            msg = ''
            while True:
                """
                If response in msg, can use this to return Ok or Error
                """
                msg_recv = conn.recv(4096)
                msg += msg_recv
                # A short read is taken to mean the client finished sending.
                # NOTE(review): this heuristic misfires when the request is an
                # exact multiple of 4096 bytes -- confirm acceptable.
                if len(msg_recv) < 4096:
                    try:
                        parsed_response = parse_request(msg)
                        body, content_type = resolve_uri(parsed_response)
                        server_response = response_ok(body, content_type)
                    except NotImplementedError:
                        # Non-GET method.
                        server_response = response_error(
                            b"HTTP/1.1 405 Method Not Allowed\r\n",
                            b"GET method required.\r\n"
                        )
                    except NameError:
                        # Not an HTTP/1.1 request line.
                        server_response = response_error(
                            b"HTTP/1.1 400 Bad Request\r\n",
                            b"Not a valid HTTP/1.1 request.\r\n"
                        )
                    except ValueError:
                        # Missing Host header.
                        server_response = response_error(
                            b"HTTP/1.1 406 Not Acceptable\r\n",
                            b"'Host' header required.\r\n"
                        )
                    except OSError:
                        # NOTE(review): bare text, not an HTTP response --
                        # presumably a 404 was intended; verify.
                        server_response = "resource not found"
                    conn.sendall(server_response)
                    conn.close()
                    break
            # Log the raw request after the connection is handled.
            print(msg)
        except KeyboardInterrupt:
            break
if __name__ == "__main__":
    # Run the blocking server loop when executed as a script.
    run_server()
"""RESP = ("HTTP/1.1 200 OK"
"Content-Type: text/plain"
""
"hello")
def echo(socket, address):
buffsize = 16
while True:
data = socket.recv(buffsize)
if len(data) < buffsize:
socket.sendall(RESP)
else:
socket.close()
break
if __name__ == '__main__':
from gevent.server import StreamServer
from gevent.monkey import patch_all
patch_all()
server = StreamServer(('127.0.0.1', 8000), echo)
print("starting server")
"""
The Content-Length header is now calculated automatically from the response body instead of using a hard-coded placeholder value.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import socket
import email
import os
import sys
import mimetypes
ADDR = ("127.0.0.1", 8000)
_CRLF = b"\r\n"
def setup():
    """Create, bind, and activate the server's listening socket."""
    family, kind, proto = socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP
    sock = socket.socket(family, kind, proto)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # fast restarts
    sock.bind(ADDR)
    sock.listen(4)  # modest backlog for this toy server
    return sock
def response_ok(_body, _type):
    """Return a complete HTTP/1.1 200 response for the given body and type.

    Fixes: Content-Length was computed with sys.getsizeof(), which reports
    the Python object's in-memory size (including object-header overhead),
    not the payload's byte count -- clients would misread the body length.
    Also labels the date line with its `Date:` field name.
    """
    _date = email.Utils.formatdate(usegmt=True)
    _RESPONSE_TEMPLATE = _CRLF.join([
        b"HTTP/1.1 200 OK",
        b"Date: {date}",
        b"Content-Type: {content_type}",
        b"Content-Length: {content_length}",
        b"",
        b"{content_body}",
        b"",
    ])
    return _RESPONSE_TEMPLATE.format(
        date=_date,
        content_type=_type,
        # len() gives the actual number of bytes in the payload.
        content_length=len(_body),
        content_body=_body
    )
def response_error(header, text):
    """Return Header and Body information for three types of errors.

    Fixes the malformed header block: the date line now has a `Date:` field
    name and trailing CRLF, and the mandatory blank line now separates the
    headers from the HTML body.
    """
    _date = email.Utils.formatdate(usegmt=True)
    response_headers = (
        header +
        b'Date: ' + _date + _CRLF +
        b'Content-Type: text/html\r\n'
        b'Content-Length:\r\n'
        b'\r\n')
    response_body = (
        b'<html><body>'
        b'<p>' + text + b'</p>'
        b'</body></html>')
    return response_headers + response_body
def parse_request(request):
    """Validate a raw HTTP request and return the requested URI.

    Raises:
        NotImplementedError: the method is not GET.
        NameError: the request line is malformed or the version is not HTTP/1.1.
        ValueError: the mandatory Host header is missing.

    NOTES:
    Headers are found by scanning each CRLF-separated line for "Host: ";
    the request line is split on spaces into method / URI / version.
    """
    client_req = request.split('\r\n')
    meth = client_req[0].split(' ')
    host = ''
    for item in client_req:
        if "Host: " in item:
            host = item
    if 'GET' != meth[0]:
        raise NotImplementedError('That is not a valid GET request')
    # A complete request line has three parts; previously a truncated line
    # ("GET /uri") crashed with an unhandled IndexError on meth[2].
    elif len(meth) < 3 or 'HTTP/1.1' != meth[2]:
        raise NameError('That is not a valid HTTP/1.1 request')
    elif 'Host: ' not in host:
        raise ValueError('The required Host header is not present')
    else:
        return meth[1]
def resolve_uri(parse):
    """Map a URI onto ./webroot and return (body, content_type).

    Directory URIs yield an HTML index of their entries; file URIs yield the
    raw bytes plus a guessed MIME type. Anything else raises OSError.
    """
    root = os.path.join(os.getcwd(), 'webroot')
    full_path = root + parse
    if os.path.isdir(full_path):
        items = ['<li>' + entry + '</li>' for entry in os.listdir(full_path)]
        listing = '<!DOCTYPE html><html><body><ul>' + ''.join(items) + '</ul></body></html>'
        return (listing, 'text/html')
    if os.path.isfile(full_path):
        with open(full_path, 'rb') as fh:
            data = fh.read()
        guessed_type, encoding = mimetypes.guess_type(parse)
        return (data, guessed_type)
    raise OSError
def run_server():
    """
    Create new instance of server, and begin accepting, logging,
    and returning response.
    """
    server = setup()
    while True:
        try:
            conn, addr = server.accept()
            msg = ''
            while True:
                """
                If response in msg, can use this to return Ok or Error
                """
                msg_recv = conn.recv(4096)
                msg += msg_recv
                # Treat a short read as end-of-request.
                # NOTE(review): fails when the request length is an exact
                # multiple of 4096 -- confirm this is acceptable here.
                if len(msg_recv) < 4096:
                    try:
                        parsed_response = parse_request(msg)
                        body, content_type = resolve_uri(parsed_response)
                        server_response = response_ok(body, content_type)
                    except NotImplementedError:
                        # Raised by parse_request for non-GET methods.
                        server_response = response_error(
                            b"HTTP/1.1 405 Method Not Allowed\r\n",
                            b"GET method required.\r\n"
                        )
                    except NameError:
                        # Raised by parse_request for non-HTTP/1.1 requests.
                        server_response = response_error(
                            b"HTTP/1.1 400 Bad Request\r\n",
                            b"Not a valid HTTP/1.1 request.\r\n"
                        )
                    except ValueError:
                        # Raised by parse_request when Host is missing.
                        server_response = response_error(
                            b"HTTP/1.1 406 Not Acceptable\r\n",
                            b"'Host' header required.\r\n"
                        )
                    except OSError:
                        # NOTE(review): plain text, not a proper HTTP 404
                        # response -- verify intent.
                        server_response = "resource not found"
                    conn.sendall(server_response)
                    conn.close()
                    break
            # Log the raw request once the connection has been served.
            print(msg)
        except KeyboardInterrupt:
            break
if __name__ == "__main__":
    # Start the blocking accept loop when executed directly.
    run_server()
"""RESP = ("HTTP/1.1 200 OK"
"Content-Type: text/plain"
""
"hello")
def echo(socket, address):
buffsize = 16
while True:
data = socket.recv(buffsize)
if len(data) < buffsize:
socket.sendall(RESP)
else:
socket.close()
break
if __name__ == '__main__':
from gevent.server import StreamServer
from gevent.monkey import patch_all
patch_all()
server = StreamServer(('127.0.0.1', 8000), echo)
print("starting server")
"""
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 12:13:10 2015
@author: Dr. Dmitry A. Duev
"""
import cherrypy
from cherrypy.lib import auth_digest
from jinja2 import Environment, FileSystemLoader
import xml.etree.ElementTree as ET
from xml.dom import minidom
from xml.etree.ElementTree import Element
import os
import shutil
import json
from collections import OrderedDict
from dicttoxml import dicttoxml
import astropy
from astropy.table import Table
from astropy.io.votable import parse_single_table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from astropy import units as u
import numpy as np
env = Environment(loader=FileSystemLoader('templates'))
def filter_names(_fin='filters.txt'):
    """Return the list of allowed filter names.

    Reads ``_fin`` and takes the first whitespace-separated token of each
    line. Blank or whitespace-only lines are skipped; previously they crashed
    with an IndexError from ``split()[0]``.

    :param _fin: path to the filter definition file
    :return: list of filter-name strings, in file order
    """
    with open(_fin, 'r') as f:
        f_lines = f.readlines()
    return [l.split()[0] for l in f_lines if l.strip()]
def getModefromMag(mag):
    '''
    VICD camera mode for a given object magnitude.

    Accepts anything float() can parse; brighter objects (smaller magnitude)
    map to lower mode numbers.
    '''
    m = float(mag)
    if m < 8:
        return '6'
    if m < 10:
        return '7'
    if m < 12:
        return '8'
    if m < 13:
        return '9'
    return '10'
def parse_csv(csv):
    """Parse an uploaded CSV file object into an astropy Table.

    The payload is split on carriage returns; the first row supplies the
    column names and empty fields in it are dropped.
    """
    raw = csv.read()
    rows = [line for line in raw.split('\r') if len(line) > 0]
    colnames = [c for c in rows[0].split(',') if len(c) > 0]
    cells = np.array([line.split(',') for line in rows[1:]])
    columns = OrderedDict()
    for idx, colname in enumerate(colnames):
        columns[colname] = cells[:, idx]
    # Table keeps the column order given by `names`.
    return Table(columns, names=colnames)
def prettify(elem):
    """Serialize an ElementTree element as tab-indented, pretty-printed XML."""
    serialized = ET.tostring(elem, 'utf-8')
    return minidom.parseString(serialized).toprettyxml(indent="\t")
class xmlTree(object):
'''
Class for handling xml files for Programs and Targets
'''
    def __init__(self, path):
        # Root directory that holds Programs.xml and the Program_<n>/ folders.
        self.path = path
    def getPrograms(self, programs_xml='Programs.xml'):
        """Load all programs from Programs.xml into self.Programs.

        If the file is missing or unparsable, an empty skeleton is written
        and an empty list returned. Target counts are re-derived from the
        on-disk Target_*.xml files and Programs.xml is rewritten if they
        disagree with its contents.

        :param programs_xml: file name of the programs index inside self.path
        :return: list of Program objects (also stored on self.Programs)
        """
        self.programs_xml = programs_xml
        try:
            # parse Programs.xml:
            tree = ET.parse(os.path.join(self.path, self.programs_xml))
            self.root = tree.getroot()
        except:
            # file does not exist or empty? create 'template' then:
            with open(os.path.join(self.path, self.programs_xml), 'w') as f:
                f.write('<root>\n</root>')
            self.Programs = []
            return self.Programs
        # list for keeping track of programs:
        self.Programs = []
        fix_xml = False
        for program in self.root:
            prog = {}
            for content in program:
                prog[content.tag] = content.text
            # do not trust Programs.xml - count the number of targets!
            number_of_targets = \
                str(self.getProgramNumberOfTargets(prog['number']))
            # build program object:
            p = Program(number=prog['number'],
                        name=prog['name'],
                        person_name=prog['person_name'],
                        scientific_importance=prog['scientific_importance'],
                        number_of_targets=number_of_targets,
                        counter=prog['counter'],
                        total_observation_time=prog['total_observation_time'],
                        total_science_time=prog['total_science_time'])
            # now append:
            self.Programs.append(p)
            # edit Program.xml?
            if number_of_targets != prog['number_of_targets']:
                print 'Fixed wrong number of targets for program {:s}'\
                    .format(prog['name']) + ' in Programs.xml.'
                fix_xml = True
        # edit Program.xml if necessary:
        if fix_xml:
            # build new xml and write it to disk:
            tree = self.buildProgramXMLtree(self.Programs)
            with open(os.path.join(self.path, 'Programs.xml'), 'w') as f:
                # Strip the XML declaration minidom prepends.
                contents = minidom.parseString(ET.tostring(tree.getroot(),
                    'utf-8')).toprettyxml(indent="\t")
                contents = contents.replace('<?xml version="1.0" ?>\n', '')
                f.write(contents)
        return self.Programs
def getProgramNumberOfTargets(self, program_number):
target_xml_path = os.path.join(self.path,
'Program_{:s}'.format(program_number))
pnot = len([f for f in os.listdir(target_xml_path)
if 'Target_' in f and f[0] != '.'])
return pnot
def getProgramNames(self):
return [p.name for p in self.Programs]
def getProgramNumbers(self):
return [p.number for p in self.Programs]
@staticmethod
def buildProgramXMLtree(programs):
"""
:param programs:
:return:
"""
root = ET.Element("root")
for program in programs:
prog = ET.SubElement(root, "Program")
ET.SubElement(prog, "number").text = program.number
ET.SubElement(prog, "name").text = program.name
ET.SubElement(prog, "person_name").text = program.person_name
ET.SubElement(prog, "scientific_importance").text = \
program.scientific_importance
ET.SubElement(prog, "number_of_targets").text = \
program.number_of_targets
ET.SubElement(prog, "counter").text = program.counter
ET.SubElement(prog, "total_observation_time").text = \
program.total_observation_time
ET.SubElement(prog, "total_science_time").text = \
program.total_science_time
tree = ET.ElementTree(root)
return tree
    def dumpProgram(self, program_number, name, number, person_name,
                    scientific_importance, number_of_targets,
                    counter, total_observation_time, total_science_time, new='0'):
        """Add a new program or update an existing one, then rewrite Programs.xml.

        :param program_number: the program's current number ('' for a new one)
        :param name: program name
        :param number: the (possibly new) program number to store
        :param person_name: principal investigator's name
        :param scientific_importance: importance ranking (stored as text)
        :param number_of_targets: target count (stored as text)
        :param counter: observation counter (stored as text)
        :param total_observation_time: accumulated observation time
        :param total_science_time: accumulated science time
        :param new: '1' to create a new program, '0' to edit an existing one
        :return: False when a new program's number already exists, True otherwise
        """
        try:
            self.Programs
        except:
            self.getPrograms()
        programNumbers = self.getProgramNumbers()
        # trying to add a program with an already existing number? atata!!
        if number in programNumbers and new=='1':
            return False
        if (program_number not in programNumbers or program_number=='') and \
            new == '1':
            # add new program
            self.Programs.append(Program(number, name, person_name,
                scientific_importance, number_of_targets, counter,
                total_observation_time, total_science_time))
        else:
            # change existing
            ind = [i for i, p in enumerate(self.Programs)
                   if p.number == program_number][0]
            self.Programs[ind] = Program(number, name, person_name,
                scientific_importance, number_of_targets, counter,
                total_observation_time, total_science_time)
        # rename folder containing target xml files if program_number changed
        if program_number not in ('', number):
            os.rename(os.path.join(self.path,
                      'Program_{:s}'.format(program_number)),
                      os.path.join(self.path, 'Program_{:s}'.format(number)))
            # update all target program_numbers
            # NOTE(review): `ind` is only bound in the "change existing" branch;
            # reaching here after appending a NEW program with a non-empty,
            # changed program_number would raise NameError -- confirm that the
            # callers can never hit that combination.
            self.batchEditTargets(self.Programs[ind], program_number=number)
        # build new xml and write it to disk:
        tree = self.buildProgramXMLtree(self.Programs)
        # make it pretty:
        with open(os.path.join(self.path, 'Programs.xml'), 'w') as f:
            contents = minidom.parseString(ET.tostring(tree.getroot(),
                'utf-8')).toprettyxml(indent="\t")
            contents = contents.replace('<?xml version="1.0" ?>\n', '')
            f.write(contents)
        # create a directory for storing target xml-files
        target_xml_dir = os.path.join(self.path, 'Program_{:s}'.format(number))
        if not os.path.exists(target_xml_dir):
            os.makedirs(target_xml_dir)
        # update self.Programs:
        self.Programs = self.getPrograms()
        return True
    def removeProgram(self, name=None):
        """ Remove a program
        Deletes the program from self.Programs, rewrites Programs.xml, and
        removes the program's Program_<number>/ directory of target files.
        Unknown or empty names are ignored.
        :param name: name of the program to remove
        :return: empty dict (also on no-op)
        """
        if name is None or name == '':
            return {}
        try:
            self.Programs
        except:
            self.getPrograms()
        # remove from self.Programs and Programs.xml
        try:
            program = [p for p in self.Programs if p.name == name][0]
            self.Programs.remove(program)
        except:
            # no program with that name -- nothing to do
            return {}
        # build new xml and write it to disk:
        tree = self.buildProgramXMLtree(self.Programs)
        with open(os.path.join(self.path, 'Programs.xml'), 'w') as f:
            contents = minidom.parseString(ET.tostring(tree.getroot(),
                'utf-8')).toprettyxml(indent="\t")
            contents = contents.replace('<?xml version="1.0" ?>\n', '')
            f.write(contents)
        # f.write(minidom.parseString(ET.tostring(tree.getroot(),
        # 'utf-8')).toprettyxml(indent="\t"))
        # remove dir Program_number with target xml files
        target_xml_dir = os.path.join(self.path,
                                      'Program_{:s}'.format(program.number))
        if os.path.exists(target_xml_dir):
            shutil.rmtree(target_xml_dir)
        # update self.Programs (just to make sure)
        self.Programs = self.getPrograms()
        return {}
    def removeTarget(self, program_number=None, target_number=None):
        """ Remove a target from a program
        Deletes Target_<target_number>.xml and renumbers the remaining
        Target_*.xml files so the sequence stays contiguous (Target_1..N).
        :param program_number: number of the owning program
        :param target_number: number of the target to delete
        :return: empty dict (also on no-op)
        """
        if program_number is None or program_number == '' or\
            target_number is None or target_number == '':
            return {}
        try:
            self.Programs
        except:
            self.Programs = self.getPrograms()
        program = [p for p in self.Programs if p.number==program_number][0]
        target_xml = 'Target_{:s}.xml'.format(target_number)
        target_xml_path = os.path.join(self.path,
                          'Program_{:s}'.format(program_number), target_xml)
        if os.path.exists(target_xml_path):
            os.remove(target_xml_path)
        # rename remaining xml files:
        # shift every higher-numbered target down by one to close the gap
        for i in range(int(target_number)+1, int(program.number_of_targets)+1):
            target_xml_old = 'Target_{:d}.xml'.format(i)
            target_xml_old_path = os.path.join(self.path,
                'Program_{:s}'.format(program_number), target_xml_old)
            target_xml_new = 'Target_{:d}.xml'.format(i-1)
            target_xml_new_path = os.path.join(self.path,
                'Program_{:s}'.format(program_number), target_xml_new)
            os.rename(target_xml_old_path, target_xml_new_path)
        # update self.Programs
        self.Programs = self.getPrograms()
        return {}
    def getTargets(self, program, target_list_xml=None):
        '''
        Get a list of targets (each being a dict) for a given program

        Parses Target_*.xml files from the program's directory. Each target
        dict carries the top-level tags plus a list under 'Object'; each
        object dict in turn carries its tags plus a list under 'Observation'.
        Results are cached on self.Targets keyed by program name.

        :param program: Program object whose targets to load
        :param target_list_xml: explicit list of file names; by default
            Target_1.xml .. Target_<number_of_targets>.xml
        '''
        if target_list_xml is None:
            target_list_xml = ['Target_{:d}.xml'.format(i+1)
                               for i in range(int(program.number_of_targets))]
        # list for keeping track of targets for each program:
        try:
            self.Targets
        except:
            self.Targets = {}
        self.Targets[program.name] = []
        for target_xml in target_list_xml:
            tree = ET.parse(os.path.join(self.path,
                            'Program_{:s}'.format(program.number), target_xml))
            root = tree.getroot()
            targ = {}
            targ['Object'] = []
            for content in root:
                if content.tag != 'Object':
                    targ[content.tag] = content.text
                else:
                    # nested structure: Object -> Observation entries
                    obj = {}
                    obj['Observation'] = []
                    for data_obj in content:
                        if data_obj.tag != 'Observation':
                            obj[data_obj.tag] = data_obj.text
                        else:
                            obs = {}
                            for data_obs in data_obj:
                                obs[data_obs.tag] = data_obs.text
                            obj['Observation'].append(obs)
                    targ['Object'].append(obj)
            self.Targets[program.name].append(targ)
        # print 'N_targ = {:d}'.format(len(self.Targets[program.name]))
        return self.Targets[program.name]
def getTargetNames(self, program):
    """ Get target names for program

    Each target must have a unique name!

    :param program: Program object
    :return: list of the program's target names
    """
    try:
        self.Targets[program.name]
    except (AttributeError, KeyError):
        # no cache yet at all, or this program not loaded - load it now
        self.getTargets(program)
    targetNames = [t['name'] for t in self.Targets[program.name]]
    return targetNames
def batchEditTargets(self, program, time_critical_flag="",
                     visited_times_for_completion="", seeing_limit="",
                     cadence="", obj_solar_system="", obj_epoch="",
                     obj_sun_altitude_limit="",
                     obj_moon_phase_window="",
                     obj_airmass_limit="",
                     obj_sun_distance_limit="",
                     obj_moon_distance_limit="",
                     obj_sky_brightness_limit="",
                     obj_hour_angle_limit="",
                     obs_exposure_time="", obs_ao_flag="",
                     obs_filter_code="", obs_camera_mode="",
                     obs_repeat_times="",
                     program_number=""):
    """ Batch-edit every Target_*.xml file of a program.

    Parameters left as "" are skipped; every non-empty value is written
    into all targets of the program (the tag is created first if it is
    missing). obs_filter_code is validated against filter_names();
    unknown codes fall back to 'FILTER_SLOAN_I'.

    :param program: Program object whose targets are edited
    """
    def _set_tag(parent, tag_name, value):
        # create the tag if it does not exist yet, then set its text
        if parent.find(tag_name) is None:
            parent.append(Element(tag_name))
        parent.find(tag_name).text = value

    target_list_xml = ['Target_{:d}.xml'.format(i+1)
                       for i in range(int(program.number_of_targets))]
    for target_xml in target_list_xml:
        target_xml_path = os.path.join(self.path,
                          'Program_{:s}'.format(program.number), target_xml)
        tree = ET.parse(target_xml_path)
        root = tree.getroot()
        # target-level tags (processed in this order so that newly created
        # tags are appended in a deterministic order):
        for tag_name, value in (
                ('program_number', program_number),
                ('time_critical_flag', time_critical_flag),
                ('visited_times_for_completion', visited_times_for_completion),
                ('seeing_limit', seeing_limit),
                ('cadence', cadence)):
            if value != "":
                _set_tag(root, tag_name, value)
        # iterate over Objects:
        for obj in root.findall('Object'):
            for tag_name, value in (
                    ('solar_system', obj_solar_system),
                    ('epoch', obj_epoch),
                    ('sun_altitude_limit', obj_sun_altitude_limit),
                    ('moon_phase_window', obj_moon_phase_window),
                    ('airmass_limit', obj_airmass_limit),
                    ('sun_distance_limit', obj_sun_distance_limit),
                    ('moon_distance_limit', obj_moon_distance_limit),
                    ('sky_brightness_limit', obj_sky_brightness_limit),
                    ('hour_angle_limit', obj_hour_angle_limit)):
                if value != "":
                    _set_tag(obj, tag_name, value)
            # iterate over Observations:
            for obs in obj.findall('Observation'):
                for tag_name, value in (('exposure_time', obs_exposure_time),
                                        ('ao_flag', obs_ao_flag)):
                    if value != "":
                        _set_tag(obs, tag_name, value)
                if obs_filter_code != "":
                    # only known filter codes are allowed:
                    if obs_filter_code in filter_names():
                        _set_tag(obs, 'filter_code', obs_filter_code)
                    else:
                        _set_tag(obs, 'filter_code', 'FILTER_SLOAN_I')
                for tag_name, value in (('camera_mode', obs_camera_mode),
                                        ('repeat_times', obs_repeat_times)):
                    if value != "":
                        _set_tag(obs, tag_name, value)
        # save updated file: pretty-print, then strip the auto-generated
        # <item> tags, whitespace-only lines, self-closing (empty) elements
        # and the xml declaration:
        target_xml = minidom.parseString(ET.tostring(tree.getroot(),
                         'utf-8')).toprettyxml()
        target_xml = target_xml.replace('<?xml version="1.0" ?>', '')
        target_xml = target_xml.split('\n')
        target_xml = [t for t in target_xml if 'item>' not in t and
                      not t.isspace() and '/>' not in t]
        with open(target_xml_path, 'w') as f:
            for line in target_xml[1:-1]:
                f.write('{:s}\n'.format(line))
            f.write('{:s}'.format(target_xml[-1]))
def dumpTarget(self, program, target_number, target):
    """ Edit or create target xml file

    :param program: Program object the target belongs to
    :param target_number: target number as a string; '' means "create new"
    :param target: (nested, ordered) dict describing the target
    :return:
    """
    if target_number == '':
        # adding a brand-new target
        targets_added = 1
        # program not empty?
        if int(program.number_of_targets) != 0:
            # get targets
            targets = self.getTargets(program)
            # get max target number. when adding new targets, start from max_number+1
            max_number = max([int(t['number']) for t in targets])
        else:
            max_number = 0
        target_number = max_number+1
        target['number'] = str(target_number)
    else:
        # editing an existing target
        targets_added = 0
        target_number = int(target_number)
    # file Target_*.xml number need not be = target_number from the xml file
    xml_file_number = target_number
    # build an xml-file:
    target_xml = dicttoxml(target, custom_root='Target', attr_type=False)
    # this is good enough, but adds unnecessary <item> tags. remove em:
    dom = minidom.parseString(target_xml)
    target_xml = dom.toprettyxml()
    # <item>'s left extra \t's after them - remove them:
    target_xml = target_xml.replace('\t\t\t', '\t\t')
    target_xml = target_xml.replace('\t\t\t\t', '\t\t\t')
    target_xml = target_xml.replace('<?xml version="1.0" ?>', '')
    target_xml = target_xml.split('\n')
    target_xml = [t for t in target_xml if 'item>' not in t]
    # deal with missing <Object>s and <Observation>s: after the <item>
    # removal, consecutive <Observation> blocks are collapsed into one;
    # re-insert a closing/opening tag pair before each extra <number>
    ind_obs_start = [i for i, v in enumerate(target_xml) if '<Observation>' in v]
    ind_obs_stop = [i for i, v in enumerate(target_xml) if '</Observation>' in v]
    for (start, stop) in zip(ind_obs_start, ind_obs_stop):
        ind_num_obs = [i+start for i, v in enumerate(target_xml[start:stop])
                       if '<number>' in v]
        if len(ind_num_obs) > 1:
            # walk backwards so earlier indices stay valid after insert()
            for ind in ind_num_obs[:0:-1]:
                target_xml.insert(ind, '\t\t</Observation>\n\t\t<Observation>')
    # same trick for collapsed <Object> blocks (their <number> is at depth 2)
    ind_obj = [i for i, v in enumerate(target_xml) if v[:10] == '\t\t<number>']
    for ind in ind_obj[:0:-1]:
        target_xml.insert(ind, '\t</Object>\n\t<Object>')
    target_xml_path = os.path.join(self.path,
                                   'Program_{:s}'.format(program.number),
                                   'Target_{:d}.xml'.format(xml_file_number))
    # drop self-closing (empty) elements:
    target_xml = [t for t in target_xml if '/>' not in t]
    with open(target_xml_path, 'w') as f:
        for line in target_xml[1:-1]:
            f.write('{:s}\n'.format(line))
        f.write('{:s}'.format(target_xml[-1]))
    # update program number of targets!!
    self.dumpProgram(program.number,
                     program.name, program.number, program.person_name,
                     program.scientific_importance,
                     str(int(program.number_of_targets)+targets_added),
                     program.counter, program.total_observation_time,
                     program.total_science_time)
    cherrypy.log('Target_{:d}.xml edited/created in Program_{:s}'.
                 format(xml_file_number, program.number))
def dumpTargetList(self, program, data):
    """ List from an external file

    Append targets parsed from an imported table to the program,
    skipping entries whose names already exist in the program.

    :param program: Program object to add targets to
    :param data: astropy (VO)table with the imported target list
    :return:
    """
    # program not empty?
    if int(program.number_of_targets) != 0:
        # get targets
        targets = self.getTargets(program)
        # get max target number. when adding new targets, start from max_number+1
        max_number = max([int(t['number']) for t in targets])
    else:
        max_number = 0
    # get existing target names:
    targetNames = self.getTargetNames(program)
    targets_added = 0
    # do some guessing about the table
    if type(data) == astropy.io.votable.tree.Table:
        # VOtable? convert to normal table
        table = data.to_table()
    else:
        table = data
    # target names: first column whose name or description mentions 'name'
    target_name_field = [table[f].name for f in table.colnames
                         if 'name' in table[f].name or
                         (table[f].description is not None and
                          'name' in table[f].description)][0]
    if type(table[target_name_field].data) is not np.ndarray:
        # masked column - unwrap the underlying array
        target_names_list = table[target_name_field].data.data
    else:
        target_names_list = table[target_name_field].data
    # RA/Dec:
    if '_RAJ2000' in table.colnames and '_DEJ2000' in table.colnames:
        # sexagesimal, space-separated (Vizier style) -> colon-separated
        ra = [ra.replace(' ', ':') for ra in table['_RAJ2000']]
        dec = [dec.replace(' ', ':') for dec in table['_DEJ2000']]
    else:
        # ra/dec in degrees?
        if 'RAJ2000' not in table.colnames or 'DEJ2000' not in table.colnames:
            raise Exception('Could not find coordinates in the imported file')
        else:
            # get units:
            if table['RAJ2000'].description is not None and \
                    not 'degree' in table['RAJ2000'].description:
                raise Exception('RAJ2000 must be in degrees!')
            else:
                ra_deg = table['RAJ2000']
                dec_deg = table['DEJ2000']
            crd = SkyCoord(ra=ra_deg, dec=dec_deg,
                           unit=(u.deg, u.deg), frame='icrs').to_string('hmsdms')
            crd_str = np.array([c.split() for c in crd])
            ra = [ra.replace('h', ':').replace('m', ':').replace('s', '')
                  for ra in crd_str[:, 0]]
            dec = [dec.replace('d', ':').replace('m', ':').replace('s', '')
                   for dec in crd_str[:, 1]]
    # no comments?
    if 'comment' in table.colnames:
        if type(table['comment'].data) is not np.ndarray:
            comment = table['comment'].data.data
        else:
            comment = table['comment'].data
        if max([len(cm) for cm in comment]) == 0:
            comment = ['None']*len(target_names_list)
    else:
        comment = ['None']*len(target_names_list)
    # Vmag/Vemag/mag
    if 'Vmag' in table.colnames:
        mag = table['Vmag'].data.tolist()
    elif 'Vemag' in table.colnames:
        mag = table['Vemag'].data.tolist()
    elif 'mag' in table.colnames:
        mag = table['mag'].data.tolist()
    else:
        # fix: previously 'mag' was silently left unbound here, which
        # caused a NameError further down; fail explicitly instead
        raise Exception('Could not find magnitude column in the imported file')
    # epoch should be J2000?
    if 'epoch' in table.colnames:
        epoch = table['epoch'].data.tolist()
    else:
        epoch = ['2000.0']*len(target_names_list)
    # (multiple) exposures/filters
    exp_cols = [i for i, v in enumerate(table.colnames) if 'exposure' in v]
    filter_cols = [i for i, v in enumerate(table.colnames) if 'filter' in v]
    if (len(exp_cols) != 0 and len(filter_cols) != 0) \
            and len(exp_cols) != len(filter_cols):
        raise Exception('num of exposures must be equal to num of filters')
    obs_num = max(len(exp_cols), len(filter_cols))
    if obs_num == 0:
        # no columns at all - use defaults:
        exp = [['90']]*len(target_names_list)
        filt = [['FILTER_SLOAN_I']]*len(target_names_list)
    else:
        if len(exp_cols) == 0:
            exp = [['90']*obs_num]*len(target_names_list)
        else:
            # get all exposure columns:
            exp = \
                list(np.array(table[list(np.array(table.colnames)[exp_cols])]))
        if len(filter_cols) == 0:
            filt = [['FILTER_SLOAN_I']*obs_num]*len(target_names_list)
        else:
            # get all filter columns:
            filt = \
                list(np.array(table[list(np.array(table.colnames)[filter_cols])]))
    # iterate over entries in the parsed table
    for ei, _ in enumerate(table):
        # target name must be unique! check it and skip entry if necessary:
        if target_names_list[ei] in targetNames:
            continue
        else:
            # count added targets:
            targets_added += 1
        # Ordnung muss sein!
        target = OrderedDict([('program_number', program.number),
                              ('number', str(max_number+targets_added)),
                              ('name', str(target_names_list[ei])),
                              ('time_critical_flag', '0'),
                              ('visited_times_for_completion', '1'),
                              ('seeing_limit', ''), ('visited_times', '0'),
                              ('done', '0'),
                              ('cadence', '0'), ('comment', str(comment[ei])),
                              ('Object', [])])
        target['Object'].append(OrderedDict([('number', '1'),
                                ('RA', ra[ei]), ('dec', dec[ei]),
                                ('epoch', epoch[ei]), ('magnitude', mag[ei]),
                                ('sun_altitude_limit', ''),
                                ('moon_phase_window', ''),
                                ('airmass_limit', ''),
                                ('sun_distance_limit', ''),
                                ('moon_distance_limit', ''),
                                ('sky_brightness_limit', ''),
                                ('hour_angle_limit', ''), ('done', '0'),
                                ('Observation', [])]))
        for oi, obj in enumerate(target['Object']):
            for obsi in range(max(obs_num, 1)):
                # only known filter codes are allowed; fall back to default:
                if str(filt[ei][obsi]) in filter_names():
                    filter_code = str(filt[ei][obsi])
                else:
                    filter_code = 'FILTER_SLOAN_I'
                target['Object'][oi]['Observation'].append(
                    OrderedDict([('number', '{:d}'.format(obsi+1)),
                                 ('exposure_time', exp[ei][obsi]),
                                 ('ao_flag', '1'),
                                 ('filter_code', filter_code),
                                 ('camera_mode', getModefromMag(mag[ei])),
                                 ('repeat_times', '1'),
                                 ('repeated', '0'), ('done', '0')]))
        # build an xml-file:
        target_xml = dicttoxml(target, custom_root='Target', attr_type=False)
        # this is good enough, but adds unnecessary <item> tags. remove em:
        dom = minidom.parseString(target_xml)
        target_xml = dom.toprettyxml()
        # <item>'s left extra \t's after them - remove them:
        target_xml = target_xml.replace('\t\t\t', '\t\t')
        target_xml = target_xml.replace('\t\t\t\t', '\t\t\t')
        target_xml = target_xml.replace('<?xml version="1.0" ?>', '')
        target_xml = target_xml.split('\n')
        target_xml = [t for t in target_xml if 'item>' not in t
                      and '/>' not in t]
        # consecutive <Observation> blocks collapse after the <item>
        # removal - re-insert the separating tag pairs:
        ind_obs_start = [i for i, v in enumerate(target_xml)
                         if '<Observation>' in v]
        ind_obs_stop = [i for i, v in enumerate(target_xml)
                        if '</Observation>' in v]
        for (start, stop) in zip(ind_obs_start, ind_obs_stop):
            ind_num_obs = [i+start for i, v in enumerate(target_xml[start:stop])
                           if '<number>' in v]
            if len(ind_num_obs) > 1:
                # walk backwards so earlier indices stay valid after insert()
                for ind in ind_num_obs[:0:-1]:
                    target_xml.insert(ind,
                                      '\t\t</Observation>\n\t\t<Observation>')
        target_xml_path = os.path.join(self.path,
                                       'Program_{:s}'.format(program.number),
                                       'Target_{:d}.xml'.format(
                                           int(program.number_of_targets) +
                                           targets_added))
        with open(target_xml_path, 'w') as f:
            for line in target_xml[1:-1]:
                f.write('{:s}\n'.format(line))
            f.write('{:s}'.format(target_xml[-1]))
    # update program number of targets!!
    self.dumpProgram(program.number,
                     program.name, program.number, program.person_name,
                     program.scientific_importance,
                     str(int(program.number_of_targets)+targets_added),
                     program.counter, program.total_observation_time,
                     program.total_science_time)
#@cherrypy.popargs('name')
class Root(object):
    """ CherryPy application root.

    Serves the HTML pages (program list, target list) and the JSON/form
    endpoints used by the front end to edit the observing queue on disk
    through an xmlTree instance.
    """

    def __init__(self, path_to_queue):
        # directory that holds Programs.xml and the Program_<N> subdirs
        self.path_to_queue = path_to_queue

    # URL dispatcher
    def _cp_dispatch(self, vpath):
        # map a single path segment (/<prog_number>) onto index()
        if len(vpath) == 1:
            cherrypy.request.params['prog_number'] = vpath.pop()
            return self
        # if len(vpath) == 2:
        #     cherrypy.request.params['targets'] = vpath.pop(0) # /band name/
        #     return self
        return vpath

    @cherrypy.expose
    def index(self, prog_number=None):
        """ Render the program list, raw Programs.xml, or a program's targets. """
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        if prog_number is None:
            # render programs:
            tmpl = env.get_template('index.html')
            return tmpl.render(programs=programs)
        # display Programs.xml:
        elif prog_number == 'Programs.xml':
            # render Programs.xml:
            with open(os.path.join(xmlT.path, prog_number), 'r') as f:
                page = ''.join('{:s}'.format(line) for line in f)
            page = '<pre>'+page+'</pre>'
            return page
        # # display Target_n.xml:
        # elif 'Program' in prog_number and 'Target' in prog_number \
        #         and 'xml' in prog_number:
        #     pass
        #     page = ''
        #     return page
        else:
            # render targets
            # get program:
            try:
                number = prog_number.split('_')[1]
                program = [p for p in programs if p.number == number][0]
            # NOTE(review): bare except; also, if split() itself failed,
            # 'number' is unbound below and a NameError is raised instead
            except:
                raise Exception('Program_{:s} not found.'.format(number))
            targets = xmlT.getTargets(program)
            # populate template
            tmpl = env.get_template('targets.html')
            return tmpl.render(targets=targets, programName=program.name,
                               programNumber=program.number)

    # request and receive Program params in json format
    @cherrypy.expose
    def prog_param(self, program_name=None):
        """ Return a program's parameters as a JSON string; {} if not found. """
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        if program_name is not None:
            # construct json object:
            prog = [p for p in programs if p.name == program_name]
            # found?
            if len(prog) > 0:
                json_obj = prog[0].makeJSON()
                return json_obj
            else:
                return {}
        else:
            return {}

    # request and receive Target params in json format
    @cherrypy.expose
    def targ_param(self, program_number=None, target_number=None):
        """ Return one target's parameters as a JSON string; {} if not found. """
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        if program_number is not None and \
                (target_number is not None and target_number != ""):
            # construct json object:
            prog = [p for p in programs if p.number == program_number]
            # found?
            if len(prog) > 0:
                target_xml = ['Target_{:d}.xml'.format(int(target_number))]
                target_dict = xmlT.getTargets(program=prog[0],
                                              target_list_xml=target_xml)[0]
                json_obj = json.dumps(target_dict)
                return json_obj
            else:
                return {}
        else:
            return {}

    # save new/edited Program
    @cherrypy.expose
    def save(self, program_number=None,
             name=None, number=None, person_name=None,
             scientific_importance=None, number_of_targets=None,
             counter=None, total_observation_time=None,
             total_science_time=None, new=None):
        """ Create or update a program entry in Programs.xml. """
        # bad input:
        if None in (program_number, name, number, person_name,
                    scientific_importance, number_of_targets,
                    counter, total_observation_time, total_science_time, new) or\
                (name == '' or number == '' or person_name == '' or
                 scientific_importance == ''):
            return {}
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # read in Programs:
        xmlT.getPrograms(programs_xml='Programs.xml')
        # save program:
        status = xmlT.dumpProgram(program_number, name, number, person_name,
                                  scientific_importance, number_of_targets,
                                  counter, total_observation_time,
                                  total_science_time, new)
        # NOTE(review): '== False' works here but 'is False' (or 'not status')
        # would be the idiomatic check
        if status == False:
            cherrypy.log('Failed to create program. Program_{:s} already exists'.
                         format(number))
        else:
            # NOTE(review): 'Succesfully' is a typo in a runtime log string,
            # kept as-is
            cherrypy.log('Succesfully processed Program_{:s}'.format(number))
        return {}

    # remove new/edited Program
    @cherrypy.expose
    def remove(self, program_name=None):
        """ Remove a program by name via xmlTree.removeProgram. """
        # bad input:
        if program_name is None:
            return {}
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # read in Programs:
        xmlT.getPrograms(programs_xml='Programs.xml')
        # get program names:
        xmlT.removeProgram(program_name)
        return {}

    @cherrypy.expose
    def targetBatchUpdate(self, program_number, time_critical_flag="",
                          visited_times_for_completion="", seeing_limit="",
                          cadence="", obj_solar_system="", obj_epoch="",
                          obj_sun_altitude_limit="",
                          obj_moon_phase_window="",
                          obj_airmass_limit="",
                          obj_sun_distance_limit="",
                          obj_moon_distance_limit="",
                          obj_sky_brightness_limit="",
                          obj_hour_angle_limit="",
                          obs_exposure_time="", obs_ao_flag="",
                          obs_filter_code="", obs_camera_mode="",
                          obs_repeat_times=""):
        """ Apply non-empty field values to every target of a program. """
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        program = [p for p in programs if p.number == program_number][0]
        xmlT.batchEditTargets(program, time_critical_flag,
                              visited_times_for_completion, seeing_limit,
                              cadence, obj_solar_system, obj_epoch,
                              obj_sun_altitude_limit,
                              obj_moon_phase_window, obj_airmass_limit,
                              obj_sun_distance_limit, obj_moon_distance_limit,
                              obj_sky_brightness_limit, obj_hour_angle_limit,
                              obs_exposure_time, obs_ao_flag,
                              obs_filter_code, obs_camera_mode,
                              obs_repeat_times)
        return {}

    @cherrypy.expose
    def importTargetList(self, targetList, program_number):
        """ Import an uploaded target list file into a program. """
        # let's build an astropy table following Vizier's
        # column naming convention
        # is it a VOtable?
        if targetList.filename[-3:] == 'vot':
            data = parse_single_table(targetList.file)
        # Becky, is that you?
        elif targetList.filename[-3:] == 'csv':
            data = parse_csv(targetList.file)
        # if not - it must be readable by astropy.io.ascii
        else:
            # first line - header with column names
            # then - data. if multiple filters/exposures, add _N at end
            data = ascii.read(targetList.file, header_start=0, data_start=1)
        # load Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        program = [p for p in programs if p.number == program_number][0]
        xmlT.dumpTargetList(program, data)
        raise cherrypy.HTTPRedirect('/Program_{:s}'.format(program_number))

    @cherrypy.expose
    def targetUpdate(self, **kargs):
        '''
        Update Target xml file

        Form fields arrive flattened as obj_<field>_<i> and
        obs_<field>_<i>_<j>; rebuild the nested target dict and dump it.
        '''
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        program = [p for p in programs if p.number == kargs['program_number']][0]
        # NOTE(review): dict.iteritems() is Python 2 only
        nObj = len([k for k, v in kargs.iteritems() if 'obj_number' in k])
        nObs = [len([k for k, v in kargs.iteritems()
                     if 'obs_number_{:d}'.format(i+1) in k]) for i in range(nObj)]
        # make target dict:
        target = OrderedDict((('program_number', kargs['program_number']),
                              ('number', kargs['number']),
                              ('name', kargs['name']),
                              ('visited_times_for_completion',
                               kargs['visited_times_for_completion']),
                              ('seeing_limit', kargs['seeing_limit']),
                              ('visited_times', kargs['visited_times']),
                              ('done', kargs['done']),
                              ('cadence', kargs['cadence']),
                              ('comment', kargs['comment']),
                              ('time_critical_flag', kargs['time_critical_flag']),
                              ('Object', [])))
        # the digits after the last '_' of a key give the element's index:
        obj_numbers = sorted([s[-s[::-1].index('_'):] for s in kargs
                              if 'obj_number' in s])
        obs_numbers = [sorted([s[-s[::-1].index('_'):] for s in kargs
                               if 'obs_number_{:d}'.format(ii+1) in s])
                       for ii in range(nObj)]
        for nOj in range(nObj):
            # fix number if necessary
            target['Object'].append(OrderedDict((
                ('number', nOj+1),
                ('RA', kargs['obj_RA_{:s}'.format(obj_numbers[nOj])]),
                ('dec', kargs['obj_dec_{:s}'.format(obj_numbers[nOj])]),
                ('ra_rate', kargs['obj_ra_rate_{:s}'.format(obj_numbers[nOj])]),
                ('dec_rate', kargs['obj_dec_rate_{:s}'.format(obj_numbers[nOj])]),
                ('epoch', kargs['obj_epoch_{:s}'.format(obj_numbers[nOj])]),
                ('magnitude', kargs['obj_magnitude_{:s}'.format(obj_numbers[nOj])]),
                ('sun_altitude_limit',
                 kargs['obj_sun_altitude_limit_{:s}'.format(obj_numbers[nOj])]),
                ('moon_phase_window',
                 kargs['obj_moon_phase_window_{:s}'.format(obj_numbers[nOj])]),
                ('airmass_limit',
                 kargs['obj_airmass_limit_{:s}'.format(obj_numbers[nOj])]),
                ('sun_distance_limit',
                 kargs['obj_sun_distance_limit_{:s}'.format(obj_numbers[nOj])]),
                ('moon_distance_limit',
                 kargs['obj_moon_distance_limit_{:s}'.format(obj_numbers[nOj])]),
                ('sky_brightness_limit',
                 kargs['obj_sky_brightness_limit_{:s}'.format(obj_numbers[nOj])]),
                ('hour_angle_limit',
                 kargs['obj_hour_angle_limit_{:s}'.format(obj_numbers[nOj])]),
                ('done', kargs['obj_done_{:s}'.format(obj_numbers[nOj])]),
                ('Observation', [])
            )))
            for nOs in range(nObs[nOj]):
                # fix number if necessary
                target['Object'][nOj]['Observation'].append(OrderedDict((
                    ('number', nOs+1),
                    ('exposure_time', kargs['obs_exposure_time_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('ao_flag', kargs['obs_ao_flag_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('filter_code', kargs['obs_filter_code_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('camera_mode', kargs['obs_camera_mode_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('repeat_times', kargs['obs_repeat_times_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('repeated', kargs['obs_repeated_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('done', kargs['obs_done_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])])
                )))
        xmlT.dumpTarget(program, kargs['target_number'], target)

    # save new/edited Program
    @cherrypy.expose
    def removeTarget(self, program_number=None, target_number=None):
        """ Remove one target xml file from a program. """
        # bad input:
        if program_number is None or program_number == "" \
                or target_number is None or target_number == "":
            return {}
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # read in Programs:
        xmlT.getPrograms(programs_xml='Programs.xml')
        # get program names:
        xmlT.removeTarget(program_number, target_number)
        cherrypy.log('removed Target_{:s}.xml from Program_{:s}'.
                     format(target_number, program_number))
        # NOTE(review): 'ranaimed' is a typo for 'renamed' in a runtime log
        # string, kept as-is
        cherrypy.log('Note that remaining target xml files were ranaimed if ' +
                     'target_number<number_of_targets to keep file numbering order')
        return {}
class Program(object):
    """Plain container for one observing program's metadata (all strings)."""

    # fixed field order used by makeJSON():
    _json_fields = ('number', 'name', 'person_name', 'scientific_importance',
                    'number_of_targets', 'counter', 'total_observation_time',
                    'total_science_time')

    def __init__(self, number, name, person_name, scientific_importance,
                 number_of_targets, counter, total_observation_time,
                 total_science_time):
        # store the attributes exactly as given:
        self.number = number
        self.name = name
        self.person_name = person_name
        self.scientific_importance = scientific_importance
        self.number_of_targets = number_of_targets
        self.counter = counter
        self.total_observation_time = total_observation_time
        self.total_science_time = total_science_time

    def makeJSON(self):
        """Serialize the program to a JSON string, preserving field order."""
        return json.dumps(OrderedDict(
            (field, getattr(self, field)) for field in self._json_fields))
if __name__ == '__main__':
    # cherrypy.quickstart(Root())
    # NOTE(review): credentials hard-coded in source - move to a config file
    # or environment variable before deployment
    USERS = {'admin': 'robo@0'}

    def validate_password(realm, username, password):
        # basic-auth checker: accept only exact matches from USERS
        if username in USERS and USERS[username] == password:
            return True
        return False

    # global server configuration (listen on all interfaces, port 8081):
    cherrypy.config.update({'server.socket_host': '0.0.0.0',
                            'server.socket_port': 8081,
                            'server.thread_pool': 8,
                            'log.access_file': 'server_access.log',
                            'log.error_file': 'server_actions.log'
                            })

    conf = {
        '/': {
            'tools.sessions.on': True,
            'tools.staticdir.root': os.path.abspath(os.getcwd()),
            # 'tools.auth_digest.on': True,
            # 'tools.auth_digest.realm': 'hola!',
            # 'tools.auth_digest.get_ha1': auth_digest.get_ha1_dict_plain(USERS),
            # 'tools.auth_digest.key': 'd8765asdf6c787ag333'
            'tools.auth_basic.on': True,
            'tools.auth_basic.realm': 'localhost',
            'tools.auth_basic.checkpassword': validate_password
        },
        '/static': {
            'tools.staticdir.on': True,
            # 'tools.staticdir.dir': os.path.join(os.path.abspath(os.getcwd()), 'public')
            'tools.staticdir.dir': './public',
            # 'tools.auth_digest.on': True,
            # 'tools.auth_digest.realm': 'hola!',
            # 'tools.auth_digest.get_ha1': auth_digest.get_ha1_dict_plain(USERS),
            # 'tools.auth_digest.key': 'd8765asdf6c787ag333'
            'tools.auth_basic.on': True,
            'tools.auth_basic.realm': 'localhost',
            'tools.auth_basic.checkpassword': validate_password
        }
    }

    # NOTE(review): absolute user-specific path hard-coded here - make this
    # a command-line argument or configuration entry
    # path_to_queue = './'
    # path_to_queue = '/Users/dmitryduev/_caltech/roboao/Queue/'
    path_to_queue = '/Users/dmitryduev/web/qserv/operation/'
    # path_to_queue = '/Users/dmitryduev/web/qserv/operation-current/'

    cherrypy.quickstart(Root(path_to_queue), '/', conf)
Fixed bug: the 'solar_system' tag is now handled correctly when editing/adding a target.
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 12:13:10 2015
@author: Dr. Dmitry A. Duev
"""
import cherrypy
from cherrypy.lib import auth_digest
from jinja2 import Environment, FileSystemLoader
import xml.etree.ElementTree as ET
from xml.dom import minidom
from xml.etree.ElementTree import Element
import os
import shutil
import json
from collections import OrderedDict
from dicttoxml import dicttoxml
import astropy
from astropy.table import Table
from astropy.io.votable import parse_single_table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from astropy import units as u
import numpy as np
# Jinja2 template environment; templates are looked up in ./templates
# relative to the current working directory
env = Environment(loader=FileSystemLoader('templates'))
def filter_names(_fin='filters.txt'):
    """ Only these filter names should be used

    Reads the filter definition file and returns the first
    whitespace-separated token (the filter code) of every line.

    :param _fin: path to the filter definition file
    :return: list of allowed filter-code strings
    """
    with open(_fin, 'r') as f:
        return [line.split()[0] for line in f]
def getModefromMag(mag):
    '''
    VICD mode depending on the object magnitude

    :param mag: object magnitude (anything float() accepts)
    :return: camera mode as a string, '6' (brightest) .. '10' (faintest)
    '''
    m = float(mag)
    # thresholds: <8 -> '6', [8,10) -> '7', [10,12) -> '8',
    #             [12,13) -> '9', >=13 -> '10'
    if m < 8:
        return '6'
    elif m < 10:
        return '7'
    elif m < 12:
        return '8'
    elif m < 13:
        return '9'
    else:
        # fix: the final branch used to be 'elif m >= 13', which left
        # 'mode' unbound (UnboundLocalError) for NaN input
        return '10'
def parse_csv(csv):
    """ Parse a simple comma-separated file object into an astropy Table.

    :param csv: open file-like object with the csv payload
    :return: astropy.table.Table with one column per header entry

    The first non-empty line is the header. The fix over the previous
    version: the content used to be split on '\r' only (classic-Mac line
    endings), which produced garbage for Unix '\n' or Windows '\r\n'
    files; splitlines() handles all three.
    """
    f_lines = [l for l in csv.read().splitlines() if len(l) > 0]
    colnames = [c for c in f_lines[0].split(',') if len(c) > 0]
    data_raw = np.array([l.split(',') for l in f_lines[1:]])
    # preserve the column order from the header:
    data_od = OrderedDict()
    for i, c in enumerate(colnames):
        data_od[c] = data_raw[:, i]
    data = Table(data_od, names=colnames)
    return data
def prettify(elem):
    """
    Return a pretty-printed XML string for the Element.

    :param elem: xml.etree.ElementTree.Element to render
    :return: tab-indented XML string (including the xml declaration)
    """
    serialized = ET.tostring(elem, 'utf-8')
    return minidom.parseString(serialized).toprettyxml(indent="\t")
class xmlTree(object):
'''
Class for handling xml files for Programs and Targets
'''
def __init__(self, path):
    """
    :param path: directory holding Programs.xml and the Program_<N> subdirs
    """
    self.path = path
def getPrograms(self, programs_xml='Programs.xml'):
    """ Parse the program list xml and return a list of Program objects.

    If the file is missing or unparsable, a minimal '<root></root>'
    template is written to disk and an empty list is returned. The
    number_of_targets of every program is recounted from the Target_*.xml
    files on disk; if any count disagrees with the xml, Programs.xml is
    rewritten with the corrected values.

    :param programs_xml: file name of the program list xml
    :return: list of Program objects (also cached as self.Programs)
    """
    self.programs_xml = programs_xml
    try:
        # parse Programs.xml:
        tree = ET.parse(os.path.join(self.path, self.programs_xml))
        self.root = tree.getroot()
    except (IOError, OSError, ET.ParseError):
        # file does not exist or empty? create 'template' then:
        with open(os.path.join(self.path, self.programs_xml), 'w') as f:
            f.write('<root>\n</root>')
        self.Programs = []
        return self.Programs
    # list for keeping track of programs:
    self.Programs = []
    fix_xml = False
    for program in self.root:
        prog = {}
        for content in program:
            prog[content.tag] = content.text
        # do not trust Programs.xml - count the number of targets!
        number_of_targets = \
            str(self.getProgramNumberOfTargets(prog['number']))
        # build program object:
        p = Program(number=prog['number'],
                    name=prog['name'],
                    person_name=prog['person_name'],
                    scientific_importance=prog['scientific_importance'],
                    number_of_targets=number_of_targets,
                    counter=prog['counter'],
                    total_observation_time=prog['total_observation_time'],
                    total_science_time=prog['total_science_time'])
        # now append:
        self.Programs.append(p)
        # edit Programs.xml?
        if number_of_targets != prog['number_of_targets']:
            # single-argument print() works under both Python 2 and 3
            print('Fixed wrong number of targets for program {:s}'
                  .format(prog['name']) + ' in Programs.xml.')
            fix_xml = True
    # edit Programs.xml if necessary:
    if fix_xml:
        # build new xml and write it to disk:
        tree = self.buildProgramXMLtree(self.Programs)
        with open(os.path.join(self.path, 'Programs.xml'), 'w') as f:
            contents = minidom.parseString(ET.tostring(tree.getroot(),
                           'utf-8')).toprettyxml(indent="\t")
            contents = contents.replace('<?xml version="1.0" ?>\n', '')
            f.write(contents)
    return self.Programs
def getProgramNumberOfTargets(self, program_number):
    """ Count the Target_*.xml files inside the Program_<number> directory.

    Hidden files (leading dot) are ignored.

    :param program_number: program number as a string
    :return: number of target xml files found
    """
    program_dir = os.path.join(self.path,
                               'Program_{:s}'.format(program_number))
    visible_targets = [fname for fname in os.listdir(program_dir)
                       if 'Target_' in fname and not fname.startswith('.')]
    return len(visible_targets)
def getProgramNames(self):
return [p.name for p in self.Programs]
def getProgramNumbers(self):
return [p.number for p in self.Programs]
@staticmethod
def buildProgramXMLtree(programs):
"""
:param programs:
:return:
"""
root = ET.Element("root")
for program in programs:
prog = ET.SubElement(root, "Program")
ET.SubElement(prog, "number").text = program.number
ET.SubElement(prog, "name").text = program.name
ET.SubElement(prog, "person_name").text = program.person_name
ET.SubElement(prog, "scientific_importance").text = \
program.scientific_importance
ET.SubElement(prog, "number_of_targets").text = \
program.number_of_targets
ET.SubElement(prog, "counter").text = program.counter
ET.SubElement(prog, "total_observation_time").text = \
program.total_observation_time
ET.SubElement(prog, "total_science_time").text = \
program.total_science_time
tree = ET.ElementTree(root)
return tree
    def dumpProgram(self, program_number, name, number, person_name,
                    scientific_importance, number_of_targets,
                    counter, total_observation_time, total_science_time, new='0'):
        """Create a new program or update an existing one, then rewrite
        Programs.xml on disk.

        :param program_number: current number of the program being edited
                               ('' when creating a new one)
        :param name: program name
        :param number: (possibly new) program number
        :param person_name: PI name
        :param scientific_importance: importance ranking (string)
        :param number_of_targets: target count (string)
        :param counter: visit counter (string)
        :param total_observation_time: total observation time (string)
        :param total_science_time: total science time (string)
        :param new: '1' when creating a new program, '0' when editing
        :return: False when a new program clashes with an existing number,
                 True otherwise
        """
        # make sure the program list is loaded:
        try:
            self.Programs
        except:
            self.getPrograms()
        programNumbers = self.getProgramNumbers()
        # trying to add a program with an already existing number? atata!!
        if number in programNumbers and new=='1':
            return False
        if (program_number not in programNumbers or program_number=='') and \
                new == '1':
            # add new program
            self.Programs.append(Program(number, name, person_name,
                        scientific_importance, number_of_targets, counter,
                        total_observation_time, total_science_time))
        else:
            # change existing
            ind = [i for i, p in enumerate(self.Programs)
                   if p.number == program_number][0]
            self.Programs[ind] = Program(number, name, person_name,
                        scientific_importance, number_of_targets, counter,
                        total_observation_time, total_science_time)
            # rename folder containing target xml files if program_number changed
            # NOTE(review): nesting reconstructed from mangled source; the
            # rename only makes sense for the edit branch where ind is bound
            if program_number not in ('', number):
                os.rename(os.path.join(self.path,
                                       'Program_{:s}'.format(program_number)),
                          os.path.join(self.path, 'Program_{:s}'.format(number)))
                # update all target program_numbers
                self.batchEditTargets(self.Programs[ind], program_number=number)
        # build new xml and write it to disk:
        tree = self.buildProgramXMLtree(self.Programs)
        # make it pretty:
        with open(os.path.join(self.path, 'Programs.xml'), 'w') as f:
            contents = minidom.parseString(ET.tostring(tree.getroot(),
                        'utf-8')).toprettyxml(indent="\t")
            contents = contents.replace('<?xml version="1.0" ?>\n', '')
            f.write(contents)
        # create a directory for storing target xml-files
        target_xml_dir = os.path.join(self.path, 'Program_{:s}'.format(number))
        if not os.path.exists(target_xml_dir):
            os.makedirs(target_xml_dir)
        # update self.Programs:
        self.Programs = self.getPrograms()
        return True
def removeProgram(self, name=None):
""" Remove a program
:param name:
:return:
"""
if name is None or name == '':
return {}
try:
self.Programs
except:
self.getPrograms()
# remove from self.Programs and Programs.xml
try:
program = [p for p in self.Programs if p.name == name][0]
self.Programs.remove(program)
except:
return {}
# build new xml and write it to disk:
tree = self.buildProgramXMLtree(self.Programs)
with open(os.path.join(self.path, 'Programs.xml'), 'w') as f:
contents = minidom.parseString(ET.tostring(tree.getroot(),
'utf-8')).toprettyxml(indent="\t")
contents = contents.replace('<?xml version="1.0" ?>\n', '')
f.write(contents)
# f.write(minidom.parseString(ET.tostring(tree.getroot(),
# 'utf-8')).toprettyxml(indent="\t"))
# remove dir Program_number with target xml files
target_xml_dir = os.path.join(self.path,
'Program_{:s}'.format(program.number))
if os.path.exists(target_xml_dir):
shutil.rmtree(target_xml_dir)
# update self.Programs (just to make sure)
self.Programs = self.getPrograms()
return {}
    def removeTarget(self, program_number=None, target_number=None):
        """ Remove a target from a program

        Deletes Target_<target_number>.xml and renames the remaining
        Target_*.xml files so the numbering stays contiguous.

        :param program_number: program the target belongs to
        :param target_number: number of the target to remove
        :return: {} (JSON-serializable result for the web handler)
        """
        if program_number is None or program_number == '' or\
                target_number is None or target_number == '':
            return {}
        # make sure the program list is loaded:
        try:
            self.Programs
        except:
            self.Programs = self.getPrograms()
        program = [p for p in self.Programs if p.number==program_number][0]
        target_xml = 'Target_{:s}.xml'.format(target_number)
        target_xml_path = os.path.join(self.path,
                          'Program_{:s}'.format(program_number), target_xml)
        if os.path.exists(target_xml_path):
            os.remove(target_xml_path)
        # rename remaining xml files to close the gap left by the removal:
        for i in range(int(target_number)+1, int(program.number_of_targets)+1):
            target_xml_old = 'Target_{:d}.xml'.format(i)
            target_xml_old_path = os.path.join(self.path,
                                  'Program_{:s}'.format(program_number), target_xml_old)
            target_xml_new = 'Target_{:d}.xml'.format(i-1)
            target_xml_new_path = os.path.join(self.path,
                                  'Program_{:s}'.format(program_number), target_xml_new)
            os.rename(target_xml_old_path, target_xml_new_path)
        # update self.Programs (recount picks up the new number of targets)
        self.Programs = self.getPrograms()
        return {}
    def getTargets(self, program, target_list_xml=None):
        '''
        Get a list of targets (each being a dict) for a given program

        Parses Program_<number>/Target_*.xml files into nested dicts:
        target -> 'Object' list -> 'Observation' list.  The result is
        cached in self.Targets keyed by program name (and rebuilt on
        every call).

        :param program: Program object
        :param target_list_xml: explicit list of target xml file names;
                                defaults to all Target_1..N.xml
        :return: list of target dicts
        '''
        if target_list_xml is None:
            target_list_xml = ['Target_{:d}.xml'.format(i+1)
                               for i in range(int(program.number_of_targets))]
        # list for keeping track of targets for each program:
        try:
            self.Targets
        except:
            self.Targets = {}
        self.Targets[program.name] = []
        for target_xml in target_list_xml:
            tree = ET.parse(os.path.join(self.path,
                            'Program_{:s}'.format(program.number), target_xml))
            root = tree.getroot()
            targ = {}
            targ['Object'] = []
            for content in root:
                if content.tag != 'Object':
                    # plain target-level field:
                    targ[content.tag] = content.text
                else:
                    obj = {}
                    obj['Observation'] = []
                    for data_obj in content:
                        if data_obj.tag != 'Observation':
                            # plain object-level field:
                            obj[data_obj.tag] = data_obj.text
                        else:
                            obs = {}
                            for data_obs in data_obj:
                                obs[data_obs.tag] = data_obs.text
                            obj['Observation'].append(obs)
                    targ['Object'].append(obj)
            self.Targets[program.name].append(targ)
        return self.Targets[program.name]
def getTargetNames(self, program):
""" Get target names for program
Each target must have a unique name!
:param program:
:return:
"""
try:
self.Targets[program.name]
except:
self.getTargets(program)
targetNames = [t['name'] for t in self.Targets[program.name]]
return targetNames
def batchEditTargets(self, program, time_critical_flag="",
visited_times_for_completion="", seeing_limit="",
cadence="", obj_solar_system="", obj_epoch="",
obj_sun_altitude_limit="",
obj_moon_phase_window="",
obj_airmass_limit="",
obj_sun_distance_limit="",
obj_moon_distance_limit="",
obj_sky_brightness_limit="",
obj_hour_angle_limit="",
obs_exposure_time="", obs_ao_flag="",
obs_filter_code="", obs_camera_mode="",
obs_repeat_times="",
program_number=""):
target_list_xml = ['Target_{:d}.xml'.format(i+1)
for i in range(int(program.number_of_targets))]
for target_xml in target_list_xml:
target_xml_path = os.path.join(self.path,
'Program_{:s}'.format(program.number), target_xml)
tree = ET.parse(target_xml_path)
root = tree.getroot()
if program_number != "":
# does the tag exist? if not, create
if root.find('program_number') is None:
root.append(Element('program_number'))
tag = root.find('program_number')
tag.text = program_number
if time_critical_flag != "":
# does the tag exist? if not, create
if root.find('time_critical_flag') is None:
root.append(Element('time_critical_flag'))
tag = root.find('time_critical_flag')
tag.text = time_critical_flag
if visited_times_for_completion != "":
# does the tag exist? if not, create
if root.find('visited_times_for_completion') is None:
root.append(Element('visited_times_for_completion'))
tag = root.find('visited_times_for_completion')
tag.text = visited_times_for_completion
if seeing_limit != "":
# does the tag exist? if not, create
if root.find('seeing_limit') is None:
root.append(Element('seeing_limit'))
tag = root.find('seeing_limit')
tag.text = seeing_limit
if cadence != "":
# does the tag exist? if not, create
if root.find('cadence') is None:
root.append(Element('cadence'))
tag = root.find('cadence')
tag.text = cadence
# iterate over Objects:
objs = root.findall('Object')
for obj in objs:
if obj_solar_system != "":
# does the tag exist? if not, create
if obj.find('solar_system') is None:
obj.append(Element('solar_system'))
tag = obj.find('solar_system')
tag.text = obj_solar_system
if obj_epoch != "":
# does the tag exist? if not, create
if obj.find('epoch') is None:
obj.append(Element('epoch'))
tag = obj.find('epoch')
tag.text = obj_epoch
if obj_sun_altitude_limit != "":
# does the tag exist? if not, create
if obj.find('sun_altitude_limit') is None:
obj.append(Element('sun_altitude_limit'))
tag = obj.find('sun_altitude_limit')
tag.text = obj_sun_altitude_limit
if obj_moon_phase_window != "":
# does the tag exist? if not, create
if obj.find('moon_phase_window') is None:
obj.append(Element('moon_phase_window'))
tag = obj.find('moon_phase_window')
tag.text = obj_moon_phase_window
if obj_airmass_limit != "":
# does the tag exist? if not, create
if obj.find('airmass_limit') is None:
obj.append(Element('airmass_limit'))
tag = obj.find('airmass_limit')
tag.text = obj_airmass_limit
if obj_sun_distance_limit != "":
# does the tag exist? if not, create
if obj.find('sun_distance_limit') is None:
obj.append(Element('sun_distance_limit'))
tag = obj.find('sun_distance_limit')
tag.text = obj_sun_distance_limit
if obj_moon_distance_limit != "":
# does the tag exist? if not, create
if obj.find('moon_distance_limit') is None:
obj.append(Element('moon_distance_limit'))
tag = obj.find('moon_distance_limit')
tag.text = obj_moon_distance_limit
if obj_sky_brightness_limit != "":
# does the tag exist? if not, create
if obj.find('sky_brightness_limit') is None:
obj.append(Element('sky_brightness_limit'))
tag = obj.find('sky_brightness_limit')
tag.text = obj_sky_brightness_limit
if obj_hour_angle_limit != "":
# does the tag exist? if not, create
if obj.find('hour_angle_limit') is None:
obj.append(Element('hour_angle_limit'))
tag = obj.find('hour_angle_limit')
tag.text = obj_hour_angle_limit
# iterate over Observations:
obss = obj.findall('Observation')
for obs in obss:
if obs_exposure_time != "":
# does the tag exist? if not, create
if obs.find('exposure_time') is None:
obs.append(Element('exposure_time'))
tag = obs.find('exposure_time')
tag.text = obs_exposure_time
if obs_ao_flag != "":
# does the tag exist? if not, create
if obs.find('ao_flag') is None:
obs.append(Element('ao_flag'))
tag = obs.find('ao_flag')
tag.text = obs_ao_flag
if obs_filter_code != "":
# does the tag exist? if not, create
if obs.find('filter_code') is None:
obs.append(Element('filter_code'))
tag = obs.find('filter_code')
if obs_filter_code in filter_names():
tag.text = obs_filter_code
else:
tag.text = 'FILTER_SLOAN_I'
if obs_camera_mode != "":
# does the tag exist? if not, create
if obs.find('camera_mode') is None:
obs.append(Element('camera_mode'))
tag = obs.find('camera_mode')
tag.text = obs_camera_mode
if obs_repeat_times != "":
# does the tag exist? if not, create
if obs.find('repeat_times') is None:
obs.append(Element('repeat_times'))
tag = obs.find('repeat_times')
tag.text = obs_repeat_times
# save updated file:
# with open(target_xml_path, 'w') as f:
# out = minidom.parseString(ET.tostring(tree.getroot(), \
# 'utf-8').replace(' ', ' ')).toprettyxml(indent='', newl='')
# f.write(out.replace('<?xml version="1.0" ?>', ''))
# build an xml-file:
# this is good enough, but adds unnecessary <item> tags. remove em:
target_xml = minidom.parseString(ET.tostring(tree.getroot(), \
'utf-8')).toprettyxml()
# <item>'s left extra \t's after them - remove them:
# target_xml = target_xml.replace('\t\t\t','\t\t')
# target_xml = target_xml.replace('\t\t\t\t','\t\t\t')
target_xml = target_xml.replace('<?xml version="1.0" ?>', '')
target_xml = target_xml.split('\n')
target_xml = [t for t in target_xml if 'item>' not in t and
not t.isspace() and '/>' not in t]
# print target_xml_path
with open(target_xml_path, 'w') as f:
for line in target_xml[1:-1]:
f.write('{:s}\n'.format(line))
f.write('{:s}'.format(target_xml[-1]))
    def dumpTarget(self, program, target_number, target):
        """ Edit or create target xml file

        :param program: Program object the target belongs to
        :param target_number: existing target number, or '' to create new
        :param target: (ordered) dict describing the target
        :return:
        """
        if target_number == '':
            targets_added = 1
            # program not empty?
            if int(program.number_of_targets) != 0:
                # get targets
                targets = self.getTargets(program)
                # get max target number. when adding new targets, start from max_number+1
                max_number = max([int(t['number']) for t in targets])
            else:
                max_number = 0
            target_number = max_number+1
            target['number'] = str(target_number)
        else:
            targets_added = 0
            target_number = int(target_number)
        # file Target_*.xml number need not be = target_number from the xml file
        xml_file_number = target_number
        # build an xml-file:
        target_xml = dicttoxml(target, custom_root='Target', attr_type=False)
        # this is good enough, but adds unnecessary <item> tags. remove em:
        dom = minidom.parseString(target_xml)
        target_xml = dom.toprettyxml()
        # <item>'s left extra \t's after them - remove them:
        target_xml = target_xml.replace('\t\t\t', '\t\t')
        target_xml = target_xml.replace('\t\t\t\t', '\t\t\t')
        target_xml = target_xml.replace('<?xml version="1.0" ?>', '')
        target_xml = target_xml.split('\n')
        target_xml = [t for t in target_xml if 'item>' not in t]
        # deal with missing <Object>s and <Observation>s: dicttoxml flattens
        # repeated elements, so re-insert </Observation><Observation>
        # separators between consecutive observations:
        ind_obs_start = [i for i, v in enumerate(target_xml) if '<Observation>' in v]
        ind_obs_stop = [i for i, v in enumerate(target_xml) if '</Observation>' in v]
        for (start, stop) in zip(ind_obs_start, ind_obs_stop):
            ind_num_obs = [i+start for i, v in enumerate(target_xml[start:stop]) \
                           if '<number>' in v]
            if len(ind_num_obs) > 1:
                # iterate backwards so earlier indices stay valid:
                for ind in ind_num_obs[:0:-1]:
                    target_xml.insert(ind, '\t\t</Observation>\n\t\t<Observation>')
        # same trick for flattened <Object>s (object-level <number> tags):
        ind_obj= [i for i,v in enumerate(target_xml) if v[:10] == '\t\t<number>']
        for ind in ind_obj[:0:-1]:
            target_xml.insert(ind, '\t</Object>\n\t<Object>')
        target_xml_path = os.path.join(self.path,
                          'Program_{:s}'.format(program.number),
                          'Target_{:d}.xml'.format(xml_file_number))
        # drop self-closing (empty) tags:
        target_xml = [t for t in target_xml if '/>' not in t]
        with open(target_xml_path, 'w') as f:
            for line in target_xml[1:-1]:
                f.write('{:s}\n'.format(line))
            f.write('{:s}'.format(target_xml[-1]))
        # update program number of targets!!
        self.dumpProgram(program.number,
                         program.name, program.number, program.person_name,
                         program.scientific_importance,
                         str(int(program.number_of_targets)+targets_added),
                         program.counter, program.total_observation_time,
                         program.total_science_time)
        cherrypy.log('Target_{:d}.xml edited/created in Program_{:s}'.
                     format(xml_file_number, program.number))
    def dumpTargetList(self, program, data):
        """ List from an external file

        Import targets into a program from a parsed table (VOtable, csv, or
        anything astropy.io.ascii read).  Entries whose name already exists
        in the program are skipped; Programs.xml is updated at the end.

        :param program: Program object to import into
        :param data: parsed table-like object
        :return:
        """
        # program not empty?
        if int(program.number_of_targets)!=0:
            # get targets
            targets = self.getTargets(program)
            # get max target number. when adding new targets, start from max_number+1
            max_number = max([int(t['number']) for t in targets])
        else:
            max_number = 0
        # get existing target names:
        targetNames = self.getTargetNames(program)
        targets_added = 0
        # do some guessing about the table
        if type(data) == astropy.io.votable.tree.Table:
            # VOtable? convert to normal table
            table = data.to_table()
        else:
            table = data
        # target names: first column whose name or description mentions 'name'
        target_name_field = [table[f].name for f in table.colnames
                             if 'name' in table[f].name or
                             (table[f].description is not None and
                              'name' in table[f].description)][0]
        if type(table[target_name_field].data) is not np.ndarray:
            # masked column - unwrap the underlying ndarray:
            target_names_list = table[target_name_field].data.data
        else:
            target_names_list = table[target_name_field].data
        # RA/Dec:
        if '_RAJ2000' in table.colnames and '_DEJ2000' in table.colnames:
            # sexagesimal columns: normalize separators to ':'
            ra = [ra.replace(' ', ':') for ra in table['_RAJ2000']]
            dec = [dec.replace(' ', ':') for dec in table['_DEJ2000']]
        else:
            # ra/dec in degrees?
            if 'RAJ2000' not in table.colnames or 'DEJ2000' not in table.colnames:
                raise Exception('Could not find coordinates in the imported file')
            else:
                # get units:
                if table['RAJ2000'].description is not None and \
                        not 'degree' in table['RAJ2000'].description:
                    raise Exception('RAJ2000 must be in degrees!')
                else:
                    ra_deg = table['RAJ2000']
                    dec_deg = table['DEJ2000']
                    # convert decimal degrees to hms/dms strings:
                    crd = SkyCoord(ra=ra_deg, dec=dec_deg,
                                   unit=(u.deg, u.deg), frame='icrs').to_string('hmsdms')
                    crd_str = np.array([c.split() for c in crd])
                    ra = [ra.replace('h', ':').replace('m', ':').replace('s', '')
                          for ra in crd_str[:, 0]]
                    dec = [dec.replace('d',':').replace('m', ':').replace('s', '')
                           for dec in crd_str[:, 1]]
        # no comments?
        if 'comment' in table.colnames:
            if type(table['comment'].data) is not np.ndarray:
                comment = table['comment'].data.data
            else:
                comment = table['comment'].data
            if max([len(cm) for cm in comment]) == 0:
                comment = ['None']*len(target_names_list)
        else:
            comment = ['None']*len(target_names_list)
        # Vmag/Vemag/mag
        # NOTE(review): 'mag' stays undefined when none of these columns is
        # present - the OrderedDict below would then raise NameError; verify
        # that imported files always carry a magnitude column
        if 'Vmag' in table.colnames:
            mag = table['Vmag'].data.tolist()
        elif 'Vemag' in table.colnames:
            mag = table['Vemag'].data.tolist()
        elif 'mag' in table.colnames:
            mag = table['mag'].data.tolist()
        # epoch should be J2000?
        if 'epoch' in table.colnames:
            epoch = table['epoch'].data.tolist()
        else:
            epoch = ['2000.0']*len(target_names_list)
        # (multiple) exposures/filters
        exp_cols = [i for i, v in enumerate(table.colnames) if 'exposure' in v]
        filter_cols = [i for i, v in enumerate(table.colnames) if 'filter' in v]
        if (len(exp_cols) != 0 and len(filter_cols) != 0) \
                and len(exp_cols) != len(filter_cols):
            raise Exception('num of exposures must be equal to num of filters')
        obs_num = max(len(exp_cols), len(filter_cols))
        if obs_num == 0:
            # no exposure/filter info at all - use defaults:
            exp = [['90']]*len(target_names_list)
            filt = [['FILTER_SLOAN_I']]*len(target_names_list)
        else:
            if len(exp_cols) == 0:
                exp = [['90']*obs_num]*len(target_names_list)
            else:
                # get all exposure columns:
                exp = \
                    list(np.array(table[list(np.array(table.colnames)[exp_cols])]))
            if len(filter_cols) == 0:
                filt = [['FILTER_SLOAN_I']*obs_num]*len(target_names_list)
            else:
                # get all filter columns:
                filt = \
                    list(np.array(table[list(np.array(table.colnames)[filter_cols])]))
        # iterate over entries in the parsed table
        for ei, _ in enumerate(table):
            # target name must be unique! check it and skip entry if necessary:
            if target_names_list[ei] in targetNames:
                continue
            else:
                # count added targets:
                targets_added += 1
            # Ordnung muss sein!
            target = OrderedDict([('program_number', program.number),
                ('number', str(max_number+targets_added)),
                ('name', str(target_names_list[ei])),
                ('time_critical_flag', '0'),
                ('visited_times_for_completion', '1'),
                ('seeing_limit', ''), ('visited_times', '0'), ('done', '0'),
                ('cadence', '0'), ('comment', str(comment[ei])),
                ('Object', [])])
            target['Object'].append(OrderedDict([('number', '1'),
                ('RA', ra[ei]), ('dec', dec[ei]),
                ('epoch', epoch[ei]), ('magnitude', mag[ei]),
                ('sun_altitude_limit', ''), ('moon_phase_window', ''),
                ('airmass_limit', ''), ('sun_distance_limit', ''),
                ('moon_distance_limit', ''), ('sky_brightness_limit', ''),
                ('hour_angle_limit', ''), ('done', '0'),
                ('Observation',[])]))
            for oi, obj in enumerate(target['Object']):
                for obsi in range(max(obs_num, 1)):
                    # unknown filters fall back to FILTER_SLOAN_I:
                    if str(filt[ei][obsi]) in filter_names():
                        target['Object'][oi]['Observation'].append(
                            OrderedDict([('number', '{:d}'.format(obsi+1)),
                                ('exposure_time', exp[ei][obsi]), ('ao_flag', '1'),
                                ('filter_code', str(filt[ei][obsi])),
                                ('camera_mode', getModefromMag(mag[ei])),
                                ('repeat_times', '1'),
                                ('repeated', '0'), ('done', '0')]))
                    else:
                        target['Object'][oi]['Observation'].append(
                            OrderedDict([('number', '{:d}'.format(obsi+1)),
                                ('exposure_time', exp[ei][obsi]), ('ao_flag', '1'),
                                ('filter_code', 'FILTER_SLOAN_I'),
                                ('camera_mode', getModefromMag(mag[ei])),
                                ('repeat_times', '1'),
                                ('repeated', '0'), ('done', '0')]))
            # build an xml-file:
            target_xml = dicttoxml(target, custom_root='Target', attr_type=False)
            # this is good enough, but adds unnecessary <item> tags. remove em:
            dom = minidom.parseString(target_xml)
            target_xml = dom.toprettyxml()
            # <item>'s left extra \t's after them - remove them:
            target_xml = target_xml.replace('\t\t\t','\t\t')
            target_xml = target_xml.replace('\t\t\t\t','\t\t\t')
            target_xml = target_xml.replace('<?xml version="1.0" ?>', '')
            target_xml = target_xml.split('\n')
            target_xml = [t for t in target_xml if 'item>' not in t
                          and '/>' not in t]
            # re-insert </Observation><Observation> separators that
            # dicttoxml flattened away:
            ind_obs_start = [i for i,v in enumerate(target_xml) if '<Observation>' in v]
            ind_obs_stop = [i for i,v in enumerate(target_xml) if '</Observation>' in v]
            for (start, stop) in zip(ind_obs_start, ind_obs_stop):
                ind_num_obs = [i+start for i,v in enumerate(target_xml[start:stop]) \
                               if '<number>' in v]
                if len(ind_num_obs) > 1:
                    for ind in ind_num_obs[:0:-1]:
                        target_xml.insert(ind, '\t\t</Observation>\n\t\t<Observation>')
            target_xml_path = os.path.join(self.path,
                              'Program_{:s}'.format(program.number),
                              'Target_{:d}.xml'.format(
                                  int(program.number_of_targets)+targets_added))
            with open(target_xml_path, 'w') as f:
                for line in target_xml[1:-1]:
                    f.write('{:s}\n'.format(line))
                f.write('{:s}'.format(target_xml[-1]))
        # update program number of targets!!
        self.dumpProgram(program.number,
                         program.name, program.number, program.person_name,
                         program.scientific_importance,
                         str(int(program.number_of_targets)+targets_added),
                         program.counter, program.total_observation_time,
                         program.total_science_time)
#@cherrypy.popargs('name')
class Root(object):
    """CherryPy application root serving the observation-queue editor."""
    def __init__(self, path_to_queue):
        # path_to_queue: directory with Programs.xml and Program_<n>/ folders
        self.path_to_queue = path_to_queue
# URL dispatcher
def _cp_dispatch(self, vpath):
if len(vpath) == 1:
cherrypy.request.params['prog_number'] = vpath.pop()
return self
# if len(vpath) == 2:
# cherrypy.request.params['targets'] = vpath.pop(0) # /band name/
# return self
return vpath
@cherrypy.expose
def index(self, prog_number=None):
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# get entries:
programs = xmlT.getPrograms(programs_xml='Programs.xml')
if prog_number is None:
# render programs:
tmpl = env.get_template('index.html')
return tmpl.render(programs=programs)
# display Programs.xml:
elif prog_number == 'Programs.xml':
# render Programs.xml:
with open(os.path.join(xmlT.path, prog_number), 'r') as f:
page = ''.join('{:s}'.format(line) for line in f)
page = '<pre>'+page+'</pre>'
return page
# # display Target_n.xml:
# elif 'Program' in prog_number and 'Target' in prog_number \
# and 'xml' in prog_number:
# pass
# page = ''
# return page
else:
#render targets
# get program:
try:
number = prog_number.split('_')[1]
program = [p for p in programs if p.number==number][0]
except:
raise Exception('Program_{:s} not found.'.format(number))
# from time import time as _time
# tic = _time()
targets = xmlT.getTargets(program)
# print 'getting targets took {:f} seconds'.format(_time()-tic)
# populate template
# tic = _time()
tmpl = env.get_template('targets.html')
# print 'loading template took {:f} seconds'.format(_time()-tic)
# tic = _time()
# tmpl.render(targets=targets, programName=program.name,
# programNumber=program.number)
# print 'rendering template took {:f} seconds'.format(_time()-tic)
return tmpl.render(targets=targets, programName=program.name,
programNumber=program.number)
# request and receive Program params in json format
@cherrypy.expose
def prog_param(self, program_name=None):
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# get entries:
programs = xmlT.getPrograms(programs_xml='Programs.xml')
if program_name is not None:
# construct json object:
prog = [p for p in programs if p.name == program_name]
# found?
if len(prog)>0:
json_obj = prog[0].makeJSON()
return json_obj
else:
return {}
else:
return {}
# request and receive Target params in json format
@cherrypy.expose
def targ_param(self, program_number=None, target_number=None):
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# get entries:
programs = xmlT.getPrograms(programs_xml='Programs.xml')
if program_number is not None and \
(target_number is not None and target_number!=""):
# construct json object:
prog = [p for p in programs if p.number==program_number]
# found?
if len(prog)>0:
target_xml = ['Target_{:d}.xml'.format(int(target_number))]
target_dict = xmlT.getTargets(program=prog[0],
target_list_xml=target_xml)[0]
json_obj = json.dumps(target_dict)
# print json_obj
return json_obj
else:
return {}
else:
return {}
# save new/edited Program
@cherrypy.expose
def save(self, program_number=None,
name=None, number=None, person_name=None,
scientific_importance=None, number_of_targets=None,
counter=None, total_observation_time=None,
total_science_time=None, new=None):
# bad input:
if None in (program_number, name, number, person_name,
scientific_importance, number_of_targets,
counter, total_observation_time, total_science_time, new) or\
(name == '' or number == '' or person_name == '' or
scientific_importance == ''):
return {}
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# read in Programs:
xmlT.getPrograms(programs_xml='Programs.xml')
# save program:
status = xmlT.dumpProgram(program_number, name, number, person_name,
scientific_importance, number_of_targets,
counter, total_observation_time, total_science_time, new)
if status == False:
cherrypy.log('Failed to create program. Program_{:s} already exists'.
format(number))
else:
cherrypy.log('Succesfully processed Program_{:s}'.format(number))
return {}
# remove new/edited Program
@cherrypy.expose
def remove(self, program_name=None):
# bad input:
if program_name is None:
return {}
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# read in Programs:
xmlT.getPrograms(programs_xml='Programs.xml')
# get program names:
xmlT.removeProgram(program_name)
return {}
@cherrypy.expose
def targetBatchUpdate(self, program_number, time_critical_flag="",
visited_times_for_completion="", seeing_limit="",
cadence="", obj_solar_system="", obj_epoch="",
obj_sun_altitude_limit="",
obj_moon_phase_window="",
obj_airmass_limit="",
obj_sun_distance_limit="",
obj_moon_distance_limit="",
obj_sky_brightness_limit="",
obj_hour_angle_limit="",
obs_exposure_time="", obs_ao_flag="",
obs_filter_code="", obs_camera_mode="",
obs_repeat_times=""):
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# get entries:
programs = xmlT.getPrograms(programs_xml='Programs.xml')
program = [p for p in programs if p.number==program_number][0]
xmlT.batchEditTargets(program, time_critical_flag,
visited_times_for_completion, seeing_limit,
cadence, obj_solar_system, obj_epoch, obj_sun_altitude_limit,
obj_moon_phase_window, obj_airmass_limit,
obj_sun_distance_limit, obj_moon_distance_limit,
obj_sky_brightness_limit, obj_hour_angle_limit,
obs_exposure_time, obs_ao_flag,
obs_filter_code, obs_camera_mode, obs_repeat_times)
return {}
    @cherrypy.expose
    def importTargetList(self, targetList, program_number):
        """Import an uploaded target-list file into a program.

        The format is guessed from the file extension: VOtable ('.vot'),
        csv ('.csv'), otherwise anything astropy.io.ascii can read.
        Redirects back to the program's target page when done.
        """
        # is it a VOtable?
        if targetList.filename[-3:]=='vot':
            data = parse_single_table(targetList.file)
        # Becky, is that you?
        elif targetList.filename[-3:]=='csv':
            data = parse_csv(targetList.file)
        # if not - it must be readable by astropy.io.ascii
        else:
            # first line - header with column names
            # then - data. if multiple filters/exposures, add _N at end
            data = ascii.read(targetList.file, header_start=0, data_start=1)
        # load Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        program = [p for p in programs if p.number==program_number][0]
        xmlT.dumpTargetList(program, data)
        raise cherrypy.HTTPRedirect('/Program_{:s}'.format(program_number))
    @cherrypy.expose
    def targetUpdate(self, **kargs):
        '''
        Update Target xml file

        Rebuilds a full target dict from the flat form fields posted by the
        target editor (keys like obj_*_<objN> and obs_*_<objN>_<obsN>) and
        hands it to xmlTree.dumpTarget.
        '''
        # read in Programs.xml:
        path = self.path_to_queue
        xmlT = xmlTree(path)
        # get entries:
        programs = xmlT.getPrograms(programs_xml='Programs.xml')
        program = [p for p in programs if p.number==kargs['program_number']][0]
        # number of objects and observations per object (py2 iteritems):
        nObj = len([k for k, v in kargs.iteritems() if 'obj_number' in k])
        nObs = [len([k for k, v in kargs.iteritems()
                     if 'obs_number_{:d}'.format(i+1) in k]) for i in range(nObj)]
        # make target dict:
        target = OrderedDict((('program_number', kargs['program_number']),
            ('number', kargs['number']),
            ('name', kargs['name']),
            ('visited_times_for_completion', kargs['visited_times_for_completion']),
            ('seeing_limit', kargs['seeing_limit']),
            ('visited_times', kargs['visited_times']),
            ('done', kargs['done']),
            ('cadence', kargs['cadence']),
            ('comment', kargs['comment']),
            ('time_critical_flag', kargs['time_critical_flag']),
            ('Object', [])))
        # the suffix after the last '_' in a key is the object/observation id:
        obj_numbers = sorted([s[-s[::-1].index('_'):] for s in kargs if 'obj_number' in s])
        obs_numbers = [ sorted([s[-s[::-1].index('_'):] for s in kargs \
                        if 'obs_number_{:d}'.format(ii+1) in s]) for ii in range(nObj)]
        for nOj in range(nObj):
            # fix number if necessary
            target['Object'].append(OrderedDict((
                ('number', nOj+1),
                ('RA', kargs['obj_RA_{:s}'.format(obj_numbers[nOj])]),
                ('dec', kargs['obj_dec_{:s}'.format(obj_numbers[nOj])]),
                ('ra_rate', kargs['obj_ra_rate_{:s}'.format(obj_numbers[nOj])]),
                ('dec_rate', kargs['obj_dec_rate_{:s}'.format(obj_numbers[nOj])]),
                ('epoch', kargs['obj_epoch_{:s}'.format(obj_numbers[nOj])]),
                ('magnitude', kargs['obj_magnitude_{:s}'.format(obj_numbers[nOj])]),
                ('solar_system', kargs['obj_solar_system_{:s}'.format(obj_numbers[nOj])]),
                ('sun_altitude_limit',
                 kargs['obj_sun_altitude_limit_{:s}'.format(obj_numbers[nOj])]),
                ('moon_phase_window',
                 kargs['obj_moon_phase_window_{:s}'.format(obj_numbers[nOj])]),
                ('airmass_limit',
                 kargs['obj_airmass_limit_{:s}'.format(obj_numbers[nOj])]),
                ('sun_distance_limit',
                 kargs['obj_sun_distance_limit_{:s}'.format(obj_numbers[nOj])]),
                ('moon_distance_limit',
                 kargs['obj_moon_distance_limit_{:s}'.format(obj_numbers[nOj])]),
                ('sky_brightness_limit',
                 kargs['obj_sky_brightness_limit_{:s}'.format(obj_numbers[nOj])]),
                ('hour_angle_limit',
                 kargs['obj_hour_angle_limit_{:s}'.format(obj_numbers[nOj])]),
                ('done', kargs['obj_done_{:s}'.format(obj_numbers[nOj])]),
                ('Observation', [])
                )))
            for nOs in range(nObs[nOj]):
                # fix number if necessary
                target['Object'][nOj]['Observation'].append(OrderedDict((
                    ('number', nOs+1),
                    ('exposure_time', kargs['obs_exposure_time_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('ao_flag', kargs['obs_ao_flag_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('filter_code', kargs['obs_filter_code_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('camera_mode', kargs['obs_camera_mode_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('repeat_times', kargs['obs_repeat_times_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('repeated', kargs['obs_repeated_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])]),
                    ('done', kargs['obs_done_{:s}_{:s}'.
                     format(obj_numbers[nOj], obs_numbers[nOj][nOs])])
                    )))
        xmlT.dumpTarget(program, kargs['target_number'], target)
# save new/edited Program
@cherrypy.expose
def removeTarget(self, program_number=None, target_number=None):
# bad input:
if program_number is None or program_number == "" \
or target_number is None or target_number == "":
return {}
# read in Programs.xml:
path = self.path_to_queue
xmlT = xmlTree(path)
# read in Programs:
xmlT.getPrograms(programs_xml='Programs.xml')
# get program names:
xmlT.removeTarget(program_number, target_number)
cherrypy.log('removed Target_{:s}.xml from Program_{:s}'.
format(target_number, program_number))
cherrypy.log('Note that remaining target xml files were ranaimed if ' +
'target_number<number_of_targets to keep file numbering order')
return {}
class Program(object):
    """Plain container describing one observing program.

    Holds the program's identifying and bookkeeping fields and can
    serialize itself to a JSON string with a fixed field order.
    """

    # Attribute names in the exact order they must appear in the JSON output.
    _FIELDS = ('number', 'name', 'person_name', 'scientific_importance',
               'number_of_targets', 'counter', 'total_observation_time',
               'total_science_time')

    def __init__(self, number, name, person_name, scientific_importance,
                 number_of_targets, counter, total_observation_time,
                 total_science_time):
        self.number = number
        self.name = name
        self.person_name = person_name
        self.scientific_importance = scientific_importance
        self.number_of_targets = number_of_targets
        self.counter = counter
        self.total_observation_time = total_observation_time
        self.total_science_time = total_science_time

    def makeJSON(self):
        """Return a JSON string of the program fields in declaration order."""
        ordered = OrderedDict((field, getattr(self, field))
                              for field in self._FIELDS)
        return json.dumps(ordered)
if __name__ == '__main__':
    # cherrypy.quickstart(Root())

    # NOTE(review): credentials stored in plain text in source code;
    # consider loading them from a protected config file instead.
    USERS = {'admin': 'robo@0'}

    def validate_password(realm, username, password):
        # Basic-auth checker: plain string comparison against USERS.
        if username in USERS and USERS[username] == password:
            return True
        return False

    # global server settings: listen on all interfaces, port 8081
    cherrypy.config.update({'server.socket_host': '0.0.0.0',
                            'server.socket_port': 8081,
                            'server.thread_pool': 8,
                            'log.access_file': 'server_access.log',
                            'log.error_file': 'server_actions.log'
                            })

    # per-path config: sessions + HTTP basic auth everywhere,
    # static files served from ./public under /static
    conf = {
        '/': {
            'tools.sessions.on': True,
            'tools.staticdir.root': os.path.abspath(os.getcwd()),
            # 'tools.auth_digest.on': True,
            # 'tools.auth_digest.realm': 'hola!',
            # 'tools.auth_digest.get_ha1': auth_digest.get_ha1_dict_plain(USERS),
            # 'tools.auth_digest.key': 'd8765asdf6c787ag333'
            'tools.auth_basic.on': True,
            'tools.auth_basic.realm': 'localhost',
            'tools.auth_basic.checkpassword': validate_password
        },
        '/static': {
            'tools.staticdir.on': True,
            # 'tools.staticdir.dir': os.path.join(os.path.abspath(os.getcwd()), 'public')
            'tools.staticdir.dir': './public',
            # 'tools.auth_digest.on': True,
            # 'tools.auth_digest.realm': 'hola!',
            # 'tools.auth_digest.get_ha1': auth_digest.get_ha1_dict_plain(USERS),
            # 'tools.auth_digest.key': 'd8765asdf6c787ag333'
            'tools.auth_basic.on': True,
            'tools.auth_basic.realm': 'localhost',
            'tools.auth_basic.checkpassword': validate_password
        }
    }

    # NOTE(review): hard-coded absolute path to the queue directory;
    # only valid on the original developer's machine.
    # path_to_queue = './'
    # path_to_queue = '/Users/dmitryduev/_caltech/roboao/Queue/'
    path_to_queue = '/Users/dmitryduev/web/qserv/operation/'
    # path_to_queue = '/Users/dmitryduev/web/qserv/operation-current/'
    cherrypy.quickstart(Root(path_to_queue), '/', conf)
|
"""HTTP Client for asyncio."""
import asyncio
import http.cookies
import io
import json
import mimetypes
import urllib.parse
import weakref
import warnings
import chardet
import aiohttp
from . import hdrs, helpers, streams
from .log import client_logger
from .streams import EOF_MARKER, FlowControlStreamReader
from .multidict import CIMultiDictProxy, MultiDictProxy, MultiDict
from .multipart import MultipartWriter
__all__ = ('request',)
HTTP_PORT = 80
HTTPS_PORT = 443
@asyncio.coroutine
def request(method, url, *,
            params=None,
            data=None,
            headers=None,
            cookies=None,
            files=None,
            auth=None,
            allow_redirects=True,
            max_redirects=10,
            encoding='utf-8',
            version=aiohttp.HttpVersion11,
            compress=None,
            chunked=None,
            expect100=False,
            connector=None,
            loop=None,
            read_until_eof=True,
            request_class=None,
            response_class=None):
    """Constructs and sends a request. Returns response object.

    :param str method: http method
    :param str url: request url
    :param params: (optional) Dictionary or bytes to be sent in the query
      string of the new request
    :param data: (optional) Dictionary, bytes, or file-like object to
      send in the body of the request
    :param dict headers: (optional) Dictionary of HTTP Headers to send with
      the request
    :param dict cookies: (optional) Dict object to send with the request
    :param auth: (optional) BasicAuth named tuple represent HTTP Basic Auth
    :type auth: aiohttp.helpers.BasicAuth
    :param bool allow_redirects: (optional) If set to False, do not follow
      redirects
    :param version: Request http version.
    :type version: aiohttp.protocol.HttpVersion
    :param bool compress: Set to True if request has to be compressed
       with deflate encoding.
    :param chunked: Set to chunk size for chunked transfer encoding.
    :type chunked: bool or int
    :param bool expect100: Expect 100-continue response from server.
    :param connector: BaseConnector sub-class instance to support
       connection pooling and session cookies.
    :type connector: aiohttp.connector.BaseConnector
    :param bool read_until_eof: Read response until eof if response
       does not have Content-Length header.
    :param request_class: (optional) Custom Request class implementation.
    :param response_class: (optional) Custom Response class implementation.
    :param loop: Optional event loop.

    Usage::

      >>> import aiohttp
      >>> resp = yield from aiohttp.request('GET', 'http://python.org/')
      >>> resp
      <ClientResponse(python.org/) [200]>
      >>> data = yield from resp.read()

    """
    redirects = 0
    method = method.upper()
    if loop is None:
        loop = asyncio.get_event_loop()
    if request_class is None:
        request_class = ClientRequest
    if connector is None:
        # force_close=True: a fresh connection is made for every request
        connector = aiohttp.TCPConnector(force_close=True, loop=loop)

    while True:
        req = request_class(
            method, url, params=params, headers=headers, data=data,
            cookies=cookies, files=files, encoding=encoding,
            auth=auth, version=version, compress=compress, chunked=chunked,
            loop=loop, expect100=expect100, response_class=response_class)

        conn = yield from connector.connect(req)
        try:
            resp = req.send(conn.writer, conn.reader)
            try:
                yield from resp.start(conn, read_until_eof)
            except:
                # release both response and connection before re-raising
                resp.close()
                conn.close()
                raise
        except (aiohttp.HttpProcessingError,
                aiohttp.ServerDisconnectedError) as exc:
            raise aiohttp.ClientResponseError() from exc
        except OSError as exc:
            raise aiohttp.ClientOSError() from exc

        # redirects
        if resp.status in (301, 302, 303, 307) and allow_redirects:
            redirects += 1
            if max_redirects and redirects >= max_redirects:
                # too many hops: give up and return the last response
                resp.close(force=True)
                break

            # For 301 and 302, mimic IE behaviour, now changed in RFC.
            # Details: https://github.com/kennethreitz/requests/pull/269
            if resp.status != 307:
                method = hdrs.METH_GET
                data = None
            cookies = resp.cookies

            r_url = (resp.headers.get(hdrs.LOCATION) or
                     resp.headers.get(hdrs.URI))

            scheme = urllib.parse.urlsplit(r_url)[0]
            if scheme not in ('http', 'https', ''):
                resp.close(force=True)
                raise ValueError('Can redirect only to http or https')
            elif not scheme:
                # relative redirect: resolve against the current url
                r_url = urllib.parse.urljoin(url, r_url)

            # drop any #fragment from the redirect target
            url = urllib.parse.urldefrag(r_url)[0]
            if url:
                # drain the redirect response body before following it
                yield from asyncio.async(resp.release(), loop=loop)
                continue

        break

    return resp
class ClientRequest:
    """Builds and transmits a single outbound HTTP request.

    Construction normalizes url, headers, cookies, auth and body;
    send() writes the request line/headers and schedules write_bytes()
    to stream the body, returning a ClientResponse object.
    """

    GET_METHODS = {hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS}
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
    ALL_METHODS = GET_METHODS.union(POST_METHODS).union(
        {hdrs.METH_DELETE, hdrs.METH_TRACE})

    DEFAULT_HEADERS = {
        hdrs.ACCEPT: '*/*',
        hdrs.ACCEPT_ENCODING: 'gzip, deflate',
    }

    body = b''
    auth = None
    response = None
    response_class = None

    _writer = None  # async task for streaming data
    _continue = None  # waiter future for '100 Continue' response

    # Adding weakref to self for _writer cancelling doesn't make sense:
    # _writer exists until .write_bytes coro is finished,
    # .write_bytes generator has strong reference to self and `del request`
    # doesn't produce request finalization.
    # After .write_bytes is done _writer has set to None and we have nothing
    # to cancel.
    # Maybe we need to add .cancel() method to ClientRequest through for
    # forced closing request sending.

    def __init__(self, method, url, *,
                 params=None, headers=None, data=None, cookies=None,
                 files=None, auth=None, encoding='utf-8',
                 version=aiohttp.HttpVersion11, compress=None,
                 chunked=None, expect100=False,
                 loop=None, response_class=None):
        self.url = url
        self.method = method.upper()
        self.encoding = encoding
        self.chunked = chunked
        self.compress = compress
        self.loop = loop
        self.response_class = response_class or ClientResponse

        # Order matters: host/path must be known before headers/cookies
        # are filled, and the body must be set before transfer encoding
        # and expect-continue are decided.
        self.update_version(version)
        self.update_host(url)
        self.update_path(params)
        self.update_headers(headers)
        self.update_cookies(cookies)
        self.update_content_encoding()
        self.update_auth(auth)

        if files:
            warnings.warn(
                'files parameter is deprecated. use data instead',
                DeprecationWarning)
            if data:
                raise ValueError(
                    'data and files parameters are '
                    'not supported at the same time.')
            data = files

        self.update_body_from_data(data)
        self.update_transfer_encoding()
        self.update_expect_continue(expect100)

    def update_host(self, url):
        """Update destination host, port and connection type (ssl)."""
        scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
        if not netloc:
            raise ValueError('Host could not be detected.')

        # check domain idna encoding
        try:
            netloc = netloc.encode('idna').decode('utf-8')
        except UnicodeError:
            raise ValueError('URL has an invalid label.')

        # basic auth info embedded in the netloc (user:pass@host)
        if '@' in netloc:
            authinfo, netloc = netloc.split('@', 1)
            self.auth = helpers.BasicAuth(*authinfo.split(':', 1))

        # Record entire netloc for usage in host header
        self.netloc = netloc

        # extract host and port
        self.ssl = scheme == 'https'
        if ':' in netloc:
            netloc, port_s = netloc.split(':', 1)
            try:
                self.port = int(port_s)
            except ValueError:
                raise ValueError(
                    'Port number could not be converted.') from None
        else:
            # no explicit port: use the scheme's default
            if self.ssl:
                self.port = HTTPS_PORT
            else:
                self.port = HTTP_PORT

        self.scheme = scheme
        self.host = netloc

    def update_version(self, version):
        """Convert request version to two elements tuple.

        parser http version '1.1' => (1, 1)
        """
        if isinstance(version, str):
            v = [l.strip() for l in version.split('.', 1)]
            try:
                version = int(v[0]), int(v[1])
            except ValueError:
                raise ValueError(
                    'Can not parse http version number: {}'
                    .format(version)) from None
        self.version = version

    def update_path(self, params):
        """Build path."""
        # extract path
        scheme, netloc, path, query, fragment = urllib.parse.urlsplit(self.url)
        if not path:
            path = '/'

        if isinstance(params, dict):
            params = list(params.items())
        elif isinstance(params, (MultiDictProxy, MultiDict)):
            params = list(params.items())

        if params:
            # merge explicit params with any query already in the url
            params = urllib.parse.urlencode(params)
            if query:
                query = '%s&%s' % (query, params)
            else:
                query = params

        self.path = urllib.parse.urlunsplit(
            ('', '', urllib.parse.quote(path, safe='/%:'), query, fragment))

    def update_headers(self, headers):
        """Update request headers."""
        self.headers = MultiDict()
        if headers:
            if isinstance(headers, dict):
                headers = headers.items()
            elif isinstance(headers, (MultiDictProxy, MultiDict)):
                headers = headers.items()

            # header names are normalized to upper case
            for key, value in headers:
                self.headers.add(key.upper(), value)

        for hdr, val in self.DEFAULT_HEADERS.items():
            if hdr not in self.headers:
                self.headers[hdr] = val

        # add host
        if hdrs.HOST not in self.headers:
            self.headers[hdrs.HOST] = self.netloc

    def update_cookies(self, cookies):
        """Update request cookies header."""
        if not cookies:
            return

        c = http.cookies.SimpleCookie()
        if hdrs.COOKIE in self.headers:
            # merge with cookies already present on the request
            c.load(self.headers.get(hdrs.COOKIE, ''))
            del self.headers[hdrs.COOKIE]

        if isinstance(cookies, dict):
            cookies = cookies.items()

        for name, value in cookies:
            if isinstance(value, http.cookies.Morsel):
                # use dict method because SimpleCookie class modifies value
                dict.__setitem__(c, name, value)
            else:
                c[name] = value

        self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()

    def update_content_encoding(self):
        """Set request content encoding."""
        enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
        if enc:
            # explicit Content-Encoding header wins
            self.compress = enc
            self.chunked = True  # enable chunked, no need to deal with length
        elif self.compress:
            if not isinstance(self.compress, str):
                self.compress = 'deflate'
            self.headers[hdrs.CONTENT_ENCODING] = self.compress
            self.chunked = True  # enable chunked, no need to deal with length

    def update_auth(self, auth):
        """Set basic auth."""
        if auth is None:
            # fall back to auth parsed out of the url netloc, if any
            auth = self.auth
        if auth is None:
            return

        if not isinstance(auth, helpers.BasicAuth):
            warnings.warn(
                'BasicAuth() tuple is required instead ', DeprecationWarning)
            auth = helpers.BasicAuth(*auth)

        self.headers[hdrs.AUTHORIZATION] = auth.encode()

    def update_body_from_data(self, data):
        """Store *data* as the request body, dispatching on its type and
        setting Content-Type / Content-Length / chunking accordingly."""
        if not data:
            return

        if isinstance(data, str):
            data = data.encode(self.encoding)

        if isinstance(data, (bytes, bytearray)):
            self.body = data
            if hdrs.CONTENT_TYPE not in self.headers:
                self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
            if hdrs.CONTENT_LENGTH not in self.headers and not self.chunked:
                self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))

        elif isinstance(data, (asyncio.StreamReader, streams.DataQueue)):
            # streamed later by write_bytes()
            self.body = data

        elif asyncio.iscoroutine(data):
            self.body = data
            if (hdrs.CONTENT_LENGTH not in self.headers and
                    self.chunked is None):
                self.chunked = True

        elif isinstance(data, io.IOBase):
            assert not isinstance(data, io.StringIO), \
                'attempt to send text data instead of binary'
            self.body = data
            self.chunked = True
            if hasattr(data, 'mode'):
                if data.mode == 'r':
                    raise ValueError('file {!r} should be open in binary mode'
                                     ''.format(data))
            if (hdrs.CONTENT_TYPE not in self.headers and
                    hasattr(data, 'name')):
                # guess content type from the file name
                mime = mimetypes.guess_type(data.name)[0]
                mime = 'application/octet-stream' if mime is None else mime
                self.headers[hdrs.CONTENT_TYPE] = mime

        elif isinstance(data, MultipartWriter):
            self.body = data.serialize()
            self.headers.update(data.headers)
            self.chunked = self.chunked or 8192

        else:
            # anything else is treated as form data
            if not isinstance(data, helpers.FormData):
                data = helpers.FormData(data)

            self.body = data(self.encoding)

            if hdrs.CONTENT_TYPE not in self.headers:
                self.headers[hdrs.CONTENT_TYPE] = data.content_type

            if data.is_multipart:
                self.chunked = self.chunked or 8192
            else:
                if (hdrs.CONTENT_LENGTH not in self.headers and
                        not self.chunked):
                    self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))

    def update_transfer_encoding(self):
        """Analyze transfer-encoding header."""
        te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()

        if self.chunked:
            # chunked transfer: Content-Length must not be present
            if hdrs.CONTENT_LENGTH in self.headers:
                del self.headers[hdrs.CONTENT_LENGTH]
            if 'chunked' not in te:
                self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'

            # normalize chunked flag to an int chunk size (default 8192)
            self.chunked = self.chunked if type(self.chunked) is int else 8192
        else:
            if 'chunked' in te:
                self.chunked = 8192
            else:
                self.chunked = None
                if hdrs.CONTENT_LENGTH not in self.headers:
                    self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))

    def update_expect_continue(self, expect=False):
        """Arm a waiter future when an Expect: 100-continue handshake
        is requested (explicitly or via an existing header)."""
        if expect:
            self.headers[hdrs.EXPECT] = '100-continue'
        elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':
            expect = True

        if expect:
            self._continue = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def write_bytes(self, request, reader):
        """Support coroutines that yields bytes objects."""
        # 100 response
        if self._continue is not None:
            # wait until the server sends 100 Continue before writing body
            yield from self._continue

        try:
            if asyncio.iscoroutine(self.body):
                # drive the body coroutine manually so bytes results can be
                # written as they are produced
                exc = None
                value = None
                stream = self.body

                while True:
                    try:
                        if exc is not None:
                            result = stream.throw(exc)
                        else:
                            result = stream.send(value)
                    except StopIteration as exc:
                        # a coroutine may *return* a final bytes chunk
                        if isinstance(exc.value, bytes):
                            yield from request.write(exc.value, drain=True)
                        break
                    except:
                        self.response.close(True)
                        raise

                    if isinstance(result, asyncio.Future):
                        exc = None
                        value = None
                        try:
                            value = yield result
                        except Exception as err:
                            exc = err
                    elif isinstance(result, (bytes, bytearray)):
                        yield from request.write(result, drain=True)
                        value = None
                    else:
                        raise ValueError(
                            'Bytes object is expected, got: %s.' %
                            type(result))

            elif isinstance(self.body, asyncio.StreamReader):
                chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
                while chunk:
                    yield from request.write(chunk, drain=True)
                    chunk = yield from self.body.read(streams.DEFAULT_LIMIT)

            elif isinstance(self.body, streams.DataQueue):
                while True:
                    try:
                        chunk = yield from self.body.read()
                        if chunk is EOF_MARKER:
                            break
                        yield from request.write(chunk, drain=True)
                    except streams.EofStream:
                        break

            elif isinstance(self.body, io.IOBase):
                # file-like object: read in self.chunked sized pieces
                chunk = self.body.read(self.chunked)
                while chunk:
                    request.write(chunk)
                    chunk = self.body.read(self.chunked)

            else:
                if isinstance(self.body, (bytes, bytearray)):
                    self.body = (self.body,)

                for chunk in self.body:
                    request.write(chunk)

        except Exception as exc:
            # surface write failures through the response reader
            new_exc = aiohttp.ClientRequestError(
                'Can not write request body for %s' % self.url)
            new_exc.__context__ = exc
            new_exc.__cause__ = exc
            reader.set_exception(new_exc)
        else:
            try:
                ret = request.write_eof()
                # NB: in asyncio 3.4.1+ StreamWriter.drain() is coroutine
                # see bug #170
                if (asyncio.iscoroutine(ret) or
                        isinstance(ret, asyncio.Future)):
                    yield from ret
            except Exception as exc:
                new_exc = aiohttp.ClientRequestError(
                    'Can not write request body for %s' % self.url)
                new_exc.__context__ = exc
                new_exc.__cause__ = exc
                reader.set_exception(new_exc)

        self._writer = None

    def send(self, writer, reader):
        """Write request line and headers, schedule body streaming and
        return the (not yet started) response object."""
        request = aiohttp.Request(writer, self.method, self.path, self.version)

        if self.compress:
            request.add_compression_filter(self.compress)

        if self.chunked is not None:
            request.enable_chunked_encoding()
            request.add_chunking_filter(self.chunked)

        # set default content-type
        if (self.method in self.POST_METHODS and
                hdrs.CONTENT_TYPE not in self.headers):
            self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'

        request.add_headers(
            *((k, v)
              for k, v in ((k, value)
                           for k, value in self.headers.items())))
        request.send_headers()

        # body is streamed in a background task
        self._writer = asyncio.async(
            self.write_bytes(request, reader), loop=self.loop)

        self.response = self.response_class(
            self.method, self.url, self.host,
            writer=self._writer, continue100=self._continue)
        return self.response

    @asyncio.coroutine
    def close(self):
        """Wait for the body-writing task to finish."""
        if self._writer is not None:
            try:
                yield from self._writer
            finally:
                self._writer = None
class ClientResponse:
    """Represents the server's reply to a ClientRequest.

    start() parses the status line / headers off the connection; the
    payload is exposed via the .content stream and the read()/text()/
    json() convenience coroutines. close()/release() return or close
    the underlying connection.
    """

    message = None  # RawResponseMessage object
    # from the Status-Line of the response
    version = None  # HTTP-Version
    status = None   # Status-Code
    reason = None   # Reason-Phrase

    cookies = None  # Response cookies (Set-Cookie)
    content = None  # Payload stream

    connection = None  # current connection
    flow_control_class = FlowControlStreamReader  # reader flow control

    _reader = None  # input stream
    _response_parser = aiohttp.HttpResponseParser()
    _connection_wr = None  # weakref to self for releasing connection on del
    _writer_wr = None  # weakref to self for cancelling writer on del

    def __init__(self, method, url, host='', *, writer=None, continue100=None):
        super().__init__()

        self.method = method
        self.url = url
        self.host = host
        self.headers = None
        self._content = None  # cached payload bytes, filled by read()
        self._writer = writer
        if writer is not None:
            # cancel the request writer task if this response is GC'd
            self._writer_wr = weakref.ref(self, lambda wr: writer.cancel())
        self._continue = continue100

    def __repr__(self):
        out = io.StringIO()
        print('<ClientResponse({}) [{} {}]>'.format(
            self.url, self.status, self.reason), file=out)
        print(self.headers, file=out)
        return out.getvalue()

    __str__ = __repr__

    def waiting_for_continue(self):
        # True while the paired request still waits for '100 Continue'
        return self._continue is not None

    def _setup_connection(self, connection):
        """Bind this response to *connection* and arm a GC warning that
        closes the connection if the response is never closed."""
        self._reader = connection.reader
        self.connection = connection
        self.content = self.flow_control_class(
            connection.reader, loop=connection.loop)

        msg = ('ClientResponse has to be closed explicitly! {}:{}:{}'
               .format(self.method, self.host, self.url))

        def _do_close_connection(wr, connection=connection, msg=msg):
            warnings.warn(msg, ResourceWarning)
            connection.close()

        self._connection_wr = weakref.ref(self, _do_close_connection)

    @asyncio.coroutine
    def start(self, connection, read_until_eof=False):
        """Start response processing."""
        self._setup_connection(connection)

        while True:
            httpstream = self._reader.set_parser(self._response_parser)

            # read response
            self.message = yield from httpstream.read()
            # skip interim 1xx-continue messages
            if self.message.code != 100:
                break

            if self._continue is not None and not self._continue.done():
                # wake up the request writer waiting for '100 Continue'
                self._continue.set_result(True)
                self._continue = None

        # response status
        self.version = self.message.version
        self.status = self.message.code
        self.reason = self.message.reason

        # headers
        self.headers = CIMultiDictProxy(self.message.headers)

        # payload (HEAD responses carry no body)
        response_with_body = self.method.lower() != 'head'
        self._reader.set_parser(
            aiohttp.HttpPayloadParser(self.message,
                                      readall=read_until_eof,
                                      response_with_body=response_with_body),
            self.content)

        # cookies
        self.cookies = http.cookies.SimpleCookie()
        if hdrs.SET_COOKIE in self.headers:
            for hdr in self.headers.getall(hdrs.SET_COOKIE):
                try:
                    self.cookies.load(hdr)
                except http.cookies.CookieError as exc:
                    client_logger.warning(
                        'Can not load response cookies: %s', exc)
        connection.share_cookies(self.cookies)
        return self

    def close(self, force=False):
        """Release (or force-close) the connection and cancel the writer."""
        if self.connection is not None:
            if self.content and not self.content.at_eof():
                # payload not fully read: connection can't be reused
                force = True

            if force:
                self.connection.close()
            else:
                self.connection.release()
                if self._reader is not None:
                    self._reader.unset_parser()
            self.connection = None
            self._connection_wr = None
        if self._writer is not None and not self._writer.done():
            self._writer.cancel()
            self._writer = None
            self._writer_wr = None

    @asyncio.coroutine
    def release(self):
        """Drain the remaining payload, then close the response."""
        try:
            chunk = yield from self.content.readany()
            # EOF_MARKER (from .streams) terminates the drain loop
            while chunk is not EOF_MARKER or chunk:
                chunk = yield from self.content.readany()
        finally:
            self.close()

    @asyncio.coroutine
    def wait_for_close(self):
        """Wait for the request writer to finish, then close."""
        if self._writer is not None:
            try:
                yield from self._writer
            finally:
                self._writer = None
                self._writer_wr = None
        self.close()

    @asyncio.coroutine
    def read(self, decode=False):
        """Read response payload."""
        if self._content is None:
            try:
                self._content = yield from self.content.read()
            except:
                self.close(True)
                raise
            else:
                self.close()

        data = self._content

        if decode:
            warnings.warn(
                '.read(True) is deprecated. use .json() instead',
                DeprecationWarning)
            return (yield from self.json())

        return data

    @asyncio.coroutine
    def read_and_close(self, decode=False):
        """Read response payload and then close response."""
        warnings.warn(
            'read_and_close is deprecated, use .read() instead',
            DeprecationWarning)
        return (yield from self.read(decode))

    def _get_encoding(self, encoding):
        """Resolve text encoding: explicit arg, charset param, else chardet."""
        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        mtype, stype, _, params = helpers.parse_mimetype(ctype)

        if not encoding:
            encoding = params.get('charset')
            if not encoding:
                encoding = chardet.detect(self._content)['encoding']
        return encoding

    @asyncio.coroutine
    def text(self, encoding=None):
        """Read response payload and decode."""
        if self._content is None:
            yield from self.read()

        return self._content.decode(self._get_encoding(encoding))

    @asyncio.coroutine
    def json(self, *, encoding=None, loads=json.loads):
        """Reads and decodes JSON response."""
        if self._content is None:
            yield from self.read()

        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        if 'json' not in ctype:
            client_logger.warning(
                'Attempt to decode JSON with unexpected mimetype: %s', ctype)

        if not self._content.strip():
            return None

        return loads(self._content.decode(self._get_encoding(encoding)))
Work on session object
"""HTTP Client for asyncio."""
import asyncio
import http.cookies
import io
import json
import mimetypes
import urllib.parse
import weakref
import warnings
import chardet
import aiohttp
from . import hdrs, helpers, streams
from .log import client_logger
from .streams import EOF_MARKER, FlowControlStreamReader
from .multidict import CIMultiDictProxy, MultiDictProxy, MultiDict
from .multipart import MultipartWriter
__all__ = ('request',)
HTTP_PORT = 80
HTTPS_PORT = 443
@asyncio.coroutine
def request(method, url, *,
            params=None,
            data=None,
            headers=None,
            cookies=None,
            files=None,
            auth=None,
            allow_redirects=True,
            max_redirects=10,
            encoding='utf-8',
            version=aiohttp.HttpVersion11,
            compress=None,
            chunked=None,
            expect100=False,
            connector=None,
            loop=None,
            read_until_eof=True,
            request_class=None,
            response_class=None):
    """Constructs and sends a request. Returns response object.

    :param str method: http method
    :param str url: request url
    :param params: (optional) Dictionary or bytes to be sent in the query
      string of the new request
    :param data: (optional) Dictionary, bytes, or file-like object to
      send in the body of the request
    :param dict headers: (optional) Dictionary of HTTP Headers to send with
      the request
    :param dict cookies: (optional) Dict object to send with the request
    :param auth: (optional) BasicAuth named tuple represent HTTP Basic Auth
    :type auth: aiohttp.helpers.BasicAuth
    :param bool allow_redirects: (optional) If set to False, do not follow
      redirects
    :param version: Request http version.
    :type version: aiohttp.protocol.HttpVersion
    :param bool compress: Set to True if request has to be compressed
       with deflate encoding.
    :param chunked: Set to chunk size for chunked transfer encoding.
    :type chunked: bool or int
    :param bool expect100: Expect 100-continue response from server.
    :param connector: BaseConnector sub-class instance to support
       connection pooling and session cookies.
    :type connector: aiohttp.connector.BaseConnector
    :param bool read_until_eof: Read response until eof if response
       does not have Content-Length header.
    :param request_class: (optional) Custom Request class implementation.
    :param response_class: (optional) Custom Response class implementation.
    :param loop: Optional event loop.

    Usage::

      >>> import aiohttp
      >>> resp = yield from aiohttp.request('GET', 'http://python.org/')
      >>> resp
      <ClientResponse(python.org/) [200]>
      >>> data = yield from resp.read()

    """
    # NOTE(review): a throwaway Session is built for every call and never
    # explicitly closed; with the default force-close connector each
    # request uses a fresh connection anyway — confirm before reuse.
    session = Session(connector=connector, loop=loop,
                      request_class=request_class,
                      response_class=response_class)
    resp = yield from session.request(method, url,
                                      params=params,
                                      data=data,
                                      headers=headers,
                                      cookies=cookies,
                                      files=files,
                                      auth=auth,
                                      allow_redirects=allow_redirects,
                                      max_redirects=max_redirects,
                                      encoding=encoding,
                                      version=version,
                                      compress=compress,
                                      chunked=chunked,
                                      expect100=expect100,
                                      read_until_eof=read_until_eof)
    return resp
class Session:
    """Groups a connector, an event loop and request/response classes so
    multiple requests can share them; offers per-verb convenience
    coroutines (get/options/head) on top of request()."""

    def __init__(self, *, connector=None, loop=None, request_class=None,
                 response_class=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        if connector is None:
            # force_close=True: a fresh connection for every request
            connector = aiohttp.TCPConnector(force_close=True, loop=loop)
        self._connector = connector
        if request_class is None:
            request_class = ClientRequest
        self._request_class = request_class
        self._response_class = response_class

    @asyncio.coroutine
    def request(self, method, url, *,
                params=None,
                data=None,
                headers=None,
                cookies=None,
                files=None,
                auth=None,
                allow_redirects=True,
                max_redirects=10,
                encoding='utf-8',
                version=aiohttp.HttpVersion11,
                compress=None,
                chunked=None,
                expect100=False,
                read_until_eof=True):
        """Send a request through this session's connector, following
        redirects up to max_redirects; returns the response object."""
        redirects = 0
        method = method.upper()

        while True:
            req = self._request_class(
                method, url, params=params, headers=headers, data=data,
                cookies=cookies, files=files, encoding=encoding,
                auth=auth, version=version, compress=compress, chunked=chunked,
                expect100=expect100,
                loop=self._loop, response_class=self._response_class)

            conn = yield from self._connector.connect(req)
            try:
                resp = req.send(conn.writer, conn.reader)
                try:
                    yield from resp.start(conn, read_until_eof)
                except:
                    # release both response and connection before re-raising
                    resp.close()
                    conn.close()
                    raise
            except (aiohttp.HttpProcessingError,
                    aiohttp.ServerDisconnectedError) as exc:
                raise aiohttp.ClientResponseError() from exc
            except OSError as exc:
                raise aiohttp.ClientOSError() from exc

            # redirects
            if resp.status in (301, 302, 303, 307) and allow_redirects:
                redirects += 1
                if max_redirects and redirects >= max_redirects:
                    resp.close(force=True)
                    break

                # For 301 and 302, mimic IE behaviour, now changed in RFC.
                # Details: https://github.com/kennethreitz/requests/pull/269
                if resp.status != 307:
                    method = hdrs.METH_GET
                    data = None
                cookies = resp.cookies

                r_url = (resp.headers.get(hdrs.LOCATION) or
                         resp.headers.get(hdrs.URI))

                scheme = urllib.parse.urlsplit(r_url)[0]
                if scheme not in ('http', 'https', ''):
                    resp.close(force=True)
                    raise ValueError('Can redirect only to http or https')
                elif not scheme:
                    # relative redirect: resolve against the current url
                    r_url = urllib.parse.urljoin(url, r_url)

                url = urllib.parse.urldefrag(r_url)[0]
                if url:
                    # drain the redirect response before following it
                    yield from asyncio.async(resp.release(), loop=self._loop)
                    continue

            break

        return resp

    @asyncio.coroutine
    def get(self, url, allow_redirects=True, **kwargs):
        """Shortcut for request('GET', ...)."""
        resp = yield from self.request('GET', url,
                                       allow_redirects=allow_redirects,
                                       **kwargs)
        return resp

    @asyncio.coroutine
    def options(self, url, allow_redirects=True, **kwargs):
        """Shortcut for request('OPTIONS', ...)."""
        resp = yield from self.request('OPTIONS', url,
                                       allow_redirects=allow_redirects,
                                       **kwargs)
        return resp

    @asyncio.coroutine
    def head(self, url, allow_redirects=False, **kwargs):
        """Shortcut for request('HEAD', ...); redirects off by default."""
        resp = yield from self.request('HEAD', url,
                                       allow_redirects=allow_redirects,
                                       **kwargs)
        return resp
class ClientRequest:
GET_METHODS = {hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS}
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
ALL_METHODS = GET_METHODS.union(POST_METHODS).union(
{hdrs.METH_DELETE, hdrs.METH_TRACE})
DEFAULT_HEADERS = {
hdrs.ACCEPT: '*/*',
hdrs.ACCEPT_ENCODING: 'gzip, deflate',
}
body = b''
auth = None
response = None
response_class = None
_writer = None # async task for streaming data
_continue = None # waiter future for '100 Continue' response
# Adding weakref to self for _writer cancelling doesn't make sense:
# _writer exists until .write_bytes coro is finished,
# .write_bytes generator has strong reference to self and `del request`
# doesn't produce request finalization.
# After .write_bytes is done _writer has set to None and we have nothing
# to cancel.
# Maybe we need to add .cancel() method to ClientRequest through for
# forced closing request sending.
def __init__(self, method, url, *,
params=None, headers=None, data=None, cookies=None,
files=None, auth=None, encoding='utf-8',
version=aiohttp.HttpVersion11, compress=None,
chunked=None, expect100=False,
loop=None, response_class=None):
self.url = url
self.method = method.upper()
self.encoding = encoding
self.chunked = chunked
self.compress = compress
self.loop = loop
self.response_class = response_class or ClientResponse
self.update_version(version)
self.update_host(url)
self.update_path(params)
self.update_headers(headers)
self.update_cookies(cookies)
self.update_content_encoding()
self.update_auth(auth)
if files:
warnings.warn(
'files parameter is deprecated. use data instead',
DeprecationWarning)
if data:
raise ValueError(
'data and files parameters are '
'not supported at the same time.')
data = files
self.update_body_from_data(data)
self.update_transfer_encoding()
self.update_expect_continue(expect100)
def update_host(self, url):
"""Update destination host, port and connection type (ssl)."""
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
if not netloc:
raise ValueError('Host could not be detected.')
# check domain idna encoding
try:
netloc = netloc.encode('idna').decode('utf-8')
except UnicodeError:
raise ValueError('URL has an invalid label.')
# basic auth info
if '@' in netloc:
authinfo, netloc = netloc.split('@', 1)
self.auth = helpers.BasicAuth(*authinfo.split(':', 1))
# Record entire netloc for usage in host header
self.netloc = netloc
# extract host and port
self.ssl = scheme == 'https'
if ':' in netloc:
netloc, port_s = netloc.split(':', 1)
try:
self.port = int(port_s)
except ValueError:
raise ValueError(
'Port number could not be converted.') from None
else:
if self.ssl:
self.port = HTTPS_PORT
else:
self.port = HTTP_PORT
self.scheme = scheme
self.host = netloc
def update_version(self, version):
"""Convert request version to two elements tuple.
parser http version '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [l.strip() for l in version.split('.', 1)]
try:
version = int(v[0]), int(v[1])
except ValueError:
raise ValueError(
'Can not parse http version number: {}'
.format(version)) from None
self.version = version
def update_path(self, params):
"""Build path."""
# extract path
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(self.url)
if not path:
path = '/'
if isinstance(params, dict):
params = list(params.items())
elif isinstance(params, (MultiDictProxy, MultiDict)):
params = list(params.items())
if params:
params = urllib.parse.urlencode(params)
if query:
query = '%s&%s' % (query, params)
else:
query = params
self.path = urllib.parse.urlunsplit(
('', '', urllib.parse.quote(path, safe='/%:'), query, fragment))
def update_headers(self, headers):
"""Update request headers."""
self.headers = MultiDict()
if headers:
if isinstance(headers, dict):
headers = headers.items()
elif isinstance(headers, (MultiDictProxy, MultiDict)):
headers = headers.items()
for key, value in headers:
self.headers.add(key.upper(), value)
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in self.headers:
self.headers[hdr] = val
# add host
if hdrs.HOST not in self.headers:
self.headers[hdrs.HOST] = self.netloc
def update_cookies(self, cookies):
    """Merge *cookies* into the request's Cookie header."""
    if not cookies:
        return
    jar = http.cookies.SimpleCookie()
    if hdrs.COOKIE in self.headers:
        # Seed the jar with cookies already on the request, then
        # rebuild the header from scratch.
        jar.load(self.headers.get(hdrs.COOKIE, ''))
        del self.headers[hdrs.COOKIE]
    if isinstance(cookies, dict):
        cookies = cookies.items()
    for name, value in cookies:
        if isinstance(value, http.cookies.Morsel):
            # use dict method because SimpleCookie class modifies value
            dict.__setitem__(jar, name, value)
        else:
            jar[name] = value
    self.headers[hdrs.COOKIE] = jar.output(header='', sep=';').strip()
def update_content_encoding(self):
    """Set request content encoding."""
    explicit = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
    if explicit:
        # Caller supplied the header explicitly; adopt it.
        self.compress = explicit
    elif self.compress:
        # Non-string truthy values mean "compress with the default".
        if not isinstance(self.compress, str):
            self.compress = 'deflate'
        self.headers[hdrs.CONTENT_ENCODING] = self.compress
    else:
        return
    # Compressed size is unknown up front, so stream chunked rather
    # than computing a Content-Length.
    self.chunked = True
def update_auth(self, auth):
    """Set the Authorization header from *auth* (a BasicAuth),
    falling back to self.auth parsed from the URL."""
    auth = self.auth if auth is None else auth
    if auth is None:
        return
    if not isinstance(auth, helpers.BasicAuth):
        # Plain tuples are accepted for backwards compatibility only.
        warnings.warn(
            'BasicAuth() tuple is required instead ', DeprecationWarning)
        auth = helpers.BasicAuth(*auth)
    self.headers[hdrs.AUTHORIZATION] = auth.encode()
def update_body_from_data(self, data):
    """Set self.body from *data* and fill in the matching
    Content-Type / Content-Length / chunked settings.

    Handles, in order: str/bytes, stream readers and queues,
    coroutines, binary file objects, MultipartWriter, and finally
    anything FormData can wrap.
    """
    if not data:
        return
    if isinstance(data, str):
        data = data.encode(self.encoding)
    if isinstance(data, (bytes, bytearray)):
        self.body = data
        if hdrs.CONTENT_TYPE not in self.headers:
            self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
        if hdrs.CONTENT_LENGTH not in self.headers and not self.chunked:
            self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
    elif isinstance(data, (asyncio.StreamReader, streams.DataQueue)):
        self.body = data
    elif asyncio.iscoroutine(data):
        self.body = data
        if (hdrs.CONTENT_LENGTH not in self.headers and
                self.chunked is None):
            # Coroutine body of unknown length: stream it chunked.
            self.chunked = True
    elif isinstance(data, io.IOBase):
        assert not isinstance(data, io.StringIO), \
            'attempt to send text data instead of binary'
        self.body = data
        self.chunked = True
        if hasattr(data, 'mode'):
            if data.mode == 'r':
                raise ValueError('file {!r} should be open in binary mode'
                                 ''.format(data))
        if (hdrs.CONTENT_TYPE not in self.headers and
                hasattr(data, 'name')):
            # Guess a content type from the file name.
            mime = mimetypes.guess_type(data.name)[0]
            mime = 'application/octet-stream' if mime is None else mime
            self.headers[hdrs.CONTENT_TYPE] = mime
    elif isinstance(data, MultipartWriter):
        self.body = data.serialize()
        self.headers.update(data.headers)
        # Truthy chunk size (8192 bytes) enables chunked streaming.
        self.chunked = self.chunked or 8192
    else:
        if not isinstance(data, helpers.FormData):
            data = helpers.FormData(data)
        self.body = data(self.encoding)
        if hdrs.CONTENT_TYPE not in self.headers:
            self.headers[hdrs.CONTENT_TYPE] = data.content_type
        if data.is_multipart:
            self.chunked = self.chunked or 8192
        else:
            if (hdrs.CONTENT_LENGTH not in self.headers and
                    not self.chunked):
                self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_transfer_encoding(self):
    """Analyze transfer-encoding header."""
    te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
    if self.chunked:
        # Chunked transfer wins: Content-Length must not accompany it.
        if hdrs.CONTENT_LENGTH in self.headers:
            del self.headers[hdrs.CONTENT_LENGTH]
        if 'chunked' not in te:
            self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
        # Keep an explicit integer chunk size; otherwise default.
        if type(self.chunked) is not int:
            self.chunked = 8192
    elif 'chunked' in te:
        # Header requested chunked without a size: use the default.
        self.chunked = 8192
    else:
        self.chunked = None
        if hdrs.CONTENT_LENGTH not in self.headers:
            self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_expect_continue(self, expect=False):
    """Arm the 100-continue future when an Expect handshake is
    requested explicitly or via an existing Expect header."""
    if expect:
        self.headers[hdrs.EXPECT] = '100-continue'
    else:
        expect = self.headers.get(hdrs.EXPECT, '').lower() == '100-continue'
    if expect:
        # write_bytes() waits on this future before sending the body.
        self._continue = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def write_bytes(self, request, reader):
    """Support coroutines that yields bytes objects."""
    # 100 response
    if self._continue is not None:
        # Wait for the server's 100 Continue (resolved in
        # ClientResponse.start) before sending the body.
        yield from self._continue
    try:
        if asyncio.iscoroutine(self.body):
            # Drive the body coroutine by hand: yielded bytes are
            # written out, yielded futures are awaited, and errors are
            # thrown back into the coroutine.
            exc = None
            value = None
            stream = self.body
            while True:
                try:
                    if exc is not None:
                        result = stream.throw(exc)
                    else:
                        result = stream.send(value)
                except StopIteration as exc:
                    # A bytes return value is the final chunk.
                    if isinstance(exc.value, bytes):
                        yield from request.write(exc.value, drain=True)
                    break
                except:
                    self.response.close(True)
                    raise
                if isinstance(result, asyncio.Future):
                    exc = None
                    value = None
                    try:
                        value = yield result
                    except Exception as err:
                        # Deliver the failure into the body coroutine
                        # on the next iteration via stream.throw().
                        exc = err
                elif isinstance(result, (bytes, bytearray)):
                    yield from request.write(result, drain=True)
                    value = None
                else:
                    raise ValueError(
                        'Bytes object is expected, got: %s.' %
                        type(result))
        elif isinstance(self.body, asyncio.StreamReader):
            chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
            while chunk:
                yield from request.write(chunk, drain=True)
                chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
        elif isinstance(self.body, streams.DataQueue):
            while True:
                try:
                    chunk = yield from self.body.read()
                    if chunk is EOF_MARKER:
                        break
                    yield from request.write(chunk, drain=True)
                except streams.EofStream:
                    break
        elif isinstance(self.body, io.IOBase):
            # self.chunked holds the chunk size here (set by
            # update_transfer_encoding).
            chunk = self.body.read(self.chunked)
            while chunk:
                request.write(chunk)
                chunk = self.body.read(self.chunked)
        else:
            if isinstance(self.body, (bytes, bytearray)):
                self.body = (self.body,)
            for chunk in self.body:
                request.write(chunk)
    except Exception as exc:
        # Surface the write failure through the response reader so the
        # caller waiting on the response sees it.
        new_exc = aiohttp.ClientRequestError(
            'Can not write request body for %s' % self.url)
        new_exc.__context__ = exc
        new_exc.__cause__ = exc
        reader.set_exception(new_exc)
    else:
        try:
            ret = request.write_eof()
            # NB: in asyncio 3.4.1+ StreamWriter.drain() is coroutine
            # see bug #170
            if (asyncio.iscoroutine(ret) or
                    isinstance(ret, asyncio.Future)):
                yield from ret
        except Exception as exc:
            new_exc = aiohttp.ClientRequestError(
                'Can not write request body for %s' % self.url)
            new_exc.__context__ = exc
            new_exc.__cause__ = exc
            reader.set_exception(new_exc)
    self._writer = None
def send(self, writer, reader):
    """Write the request line and headers and start the background
    body-writer task.

    Returns the ClientResponse object that will parse the reply.
    """
    request = aiohttp.Request(writer, self.method, self.path, self.version)
    if self.compress:
        request.add_compression_filter(self.compress)
    if self.chunked is not None:
        request.enable_chunked_encoding()
        request.add_chunking_filter(self.chunked)
    # set default content-type
    if (self.method in self.POST_METHODS and
            hdrs.CONTENT_TYPE not in self.headers):
        self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
    # (the original wrapped .items() in two redundant generators)
    request.add_headers(*self.headers.items())
    request.send_headers()
    # asyncio.async() is a SyntaxError on Python >= 3.7 ('async' is a
    # keyword); ensure_future() is the supported spelling since 3.4.4.
    self._writer = asyncio.ensure_future(
        self.write_bytes(request, reader), loop=self.loop)
    self.response = self.response_class(
        self.method, self.url, self.host,
        writer=self._writer, continue100=self._continue)
    return self.response
@asyncio.coroutine
def close(self):
    """Wait for the body-writer task to finish, then drop it."""
    writer = self._writer
    if writer is None:
        return
    try:
        yield from writer
    finally:
        self._writer = None
class ClientResponse:
    """HTTP response object produced by ClientRequest.send().

    Lifecycle: start() binds a connection and parses the status line,
    headers and cookies; read()/text()/json() consume the payload;
    close()/release() return the connection.  Weakrefs warn about (and
    clean up after) responses that are never closed explicitly.
    """

    message = None  # RawResponseMessage object
    # from the Status-Line of the response
    version = None  # HTTP-Version
    status = None  # Status-Code
    reason = None  # Reason-Phrase

    cookies = None  # Response cookies (Set-Cookie)
    content = None  # Payload stream
    connection = None  # current connection
    flow_control_class = FlowControlStreamReader  # reader flow control

    _reader = None  # input stream
    _response_parser = aiohttp.HttpResponseParser()
    _connection_wr = None  # weakref to self for releasing connection on del
    _writer_wr = None  # weakref to self for cancelling writer on del

    def __init__(self, method, url, host='', *, writer=None, continue100=None):
        super().__init__()

        self.method = method
        self.url = url
        self.host = host
        self.headers = None
        self._content = None  # cached raw payload, filled by read()
        self._writer = writer
        if writer is not None:
            # Cancel the request body writer if this response is
            # garbage-collected before the writer finishes.
            self._writer_wr = weakref.ref(self, lambda wr: writer.cancel())
        self._continue = continue100

    def __repr__(self):
        out = io.StringIO()
        print('<ClientResponse({}) [{} {}]>'.format(
            self.url, self.status, self.reason), file=out)
        print(self.headers, file=out)
        return out.getvalue()

    __str__ = __repr__

    def waiting_for_continue(self):
        """Return True while a 100-continue future is still attached."""
        return self._continue is not None

    def _setup_connection(self, connection):
        """Bind *connection*, build the payload stream and arm a
        ResourceWarning for responses never closed explicitly."""
        self._reader = connection.reader
        self.connection = connection
        self.content = self.flow_control_class(
            connection.reader, loop=connection.loop)

        msg = ('ClientResponse has to be closed explicitly! {}:{}:{}'
               .format(self.method, self.host, self.url))

        def _do_close_connection(wr, connection=connection, msg=msg):
            warnings.warn(msg, ResourceWarning)
            connection.close()

        self._connection_wr = weakref.ref(self, _do_close_connection)

    @asyncio.coroutine
    def start(self, connection, read_until_eof=False):
        """Start response processing."""
        self._setup_connection(connection)

        while True:
            httpstream = self._reader.set_parser(self._response_parser)

            # read response
            self.message = yield from httpstream.read()
            if self.message.code != 100:
                break

            # Interim 100 Continue: release the waiting body writer
            # and keep reading for the real response.
            if self._continue is not None and not self._continue.done():
                self._continue.set_result(True)
                self._continue = None

        # response status
        self.version = self.message.version
        self.status = self.message.code
        self.reason = self.message.reason

        # headers
        self.headers = CIMultiDictProxy(self.message.headers)

        # payload
        response_with_body = self.method.lower() != 'head'
        self._reader.set_parser(
            aiohttp.HttpPayloadParser(self.message,
                                      readall=read_until_eof,
                                      response_with_body=response_with_body),
            self.content)

        # cookies
        self.cookies = http.cookies.SimpleCookie()
        if hdrs.SET_COOKIE in self.headers:
            for hdr in self.headers.getall(hdrs.SET_COOKIE):
                try:
                    self.cookies.load(hdr)
                except http.cookies.CookieError as exc:
                    client_logger.warning(
                        'Can not load response cookies: %s', exc)
            connection.share_cookies(self.cookies)
        return self

    def close(self, force=False):
        """Release the connection (or force-close it) and cancel a
        still-running body writer."""
        if self.connection is not None:
            if self.content and not self.content.at_eof():
                # Unread payload remains: the connection cannot be
                # safely reused.
                force = True

            if force:
                self.connection.close()
            else:
                self.connection.release()
            if self._reader is not None:
                self._reader.unset_parser()
            self.connection = None
            self._connection_wr = None
        if self._writer is not None and not self._writer.done():
            self._writer.cancel()
            self._writer = None
            self._writer_wr = None

    @asyncio.coroutine
    def release(self):
        """Drain the remaining payload, then close the response."""
        try:
            chunk = yield from self.content.readany()
            # NOTE(review): this condition is always true for any
            # non-EOF_MARKER or truthy chunk -- presumably readany()
            # eventually returns EOF_MARKER; confirm against streams.
            while chunk is not EOF_MARKER or chunk:
                chunk = yield from self.content.readany()
        finally:
            self.close()

    @asyncio.coroutine
    def wait_for_close(self):
        """Wait for the body writer to finish, then close."""
        if self._writer is not None:
            try:
                yield from self._writer
            finally:
                self._writer = None
                self._writer_wr = None
        self.close()

    @asyncio.coroutine
    def read(self, decode=False):
        """Read response payload."""
        if self._content is None:
            try:
                self._content = yield from self.content.read()
            except:
                self.close(True)
                raise
            else:
                self.close()

        data = self._content

        if decode:
            warnings.warn(
                '.read(True) is deprecated. use .json() instead',
                DeprecationWarning)
            return (yield from self.json())

        return data

    @asyncio.coroutine
    def read_and_close(self, decode=False):
        """Read response payload and then close response."""
        warnings.warn(
            'read_and_close is deprecated, use .read() instead',
            DeprecationWarning)
        return (yield from self.read(decode))

    def _get_encoding(self, encoding):
        """Resolve payload encoding: explicit argument, then the
        charset parameter of Content-Type, then chardet sniffing."""
        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        mtype, stype, _, params = helpers.parse_mimetype(ctype)

        if not encoding:
            encoding = params.get('charset')
        if not encoding:
            encoding = chardet.detect(self._content)['encoding']
        return encoding

    @asyncio.coroutine
    def text(self, encoding=None):
        """Read response payload and decode."""
        if self._content is None:
            yield from self.read()

        return self._content.decode(self._get_encoding(encoding))

    @asyncio.coroutine
    def json(self, *, encoding=None, loads=json.loads):
        """Reads and decodes JSON response."""
        if self._content is None:
            yield from self.read()

        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        if 'json' not in ctype:
            client_logger.warning(
                'Attempt to decode JSON with unexpected mimetype: %s', ctype)

        if not self._content.strip():
            return None

        return loads(self._content.decode(self._get_encoding(encoding)))
|
# coding: utf-8
""" A script for making figures for our streams paper 2 """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import cPickle as pickle
import inspect
from collections import OrderedDict
# Third-party
import astropy.units as u
from astropy.constants import G
from astropy.io.misc import fnpickle, fnunpickle
import h5py
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib import rc_context, rcParams, cm
from matplotlib.patches import Rectangle, Ellipse, Circle
import scipy.optimize as so
from scipy.stats import norm
import triangle
from streams import usys
from streams.util import streamspath, _unit_transform, _label_map
from streams.coordinates.frame import galactocentric
from streams.io.sgr import SgrSimulation
from streams.io import read_hdf5, read_config
from streams.inference import StreamModel
from streams.integrate import LeapfrogIntegrator
from streams.potential.lm10 import LawMajewski2010
# Global matplotlib styling shared by every figure in this script.
matplotlib.rc('xtick', labelsize=18)
matplotlib.rc('ytick', labelsize=18)
matplotlib.rc('axes', edgecolor='#444444', labelsize=24,
              labelweight=400, linewidth=1.5)
matplotlib.rc('lines', markeredgewidth=0)
matplotlib.rc('font', family='Source Sans Pro')

# Particle selection expression -- presumably tub is the unbinding
# time of a particle (tub==0 means still bound); verify against
# SgrSimulation.particles().
# expr = "(tub!=0)"
expr = "(tub!=0) & (tub>1800) & (tub<5500)"

# Simulation data location; the mass exponent is substituted later.
sgr_path = 'sgr_nfw/M2.5e+0{}'
snapfile = 'SNAP113'
# sgr_path = 'sgr_plummer/2.5e{}'
# snapfile = 'SNAP'

plot_path = os.path.join(streamspath, "plots/paper2/")
if not os.path.exists(plot_path):
    # NOTE(review): os.mkdir fails if the intermediate "plots/"
    # directory is missing -- os.makedirs would be safer; confirm the
    # parent always exists.
    os.mkdir(plot_path)

ext = 'png'  # output image format for most figures
grid_figsize = (14,7.5)
def simulated_streams():
    """Plot X-Y (top row) and X-Z (bottom row) particle positions for
    the four simulated progenitor masses 2.5e6..2.5e9 Msun, marking
    the observed particle subset on the 2.5e8 panel."""
    filename = os.path.join(plot_path, "simulated_streams.{}".format(ext))

    fig,axes = plt.subplots(2,4,figsize=grid_figsize,
                            sharex=True, sharey=True)

    ticks = [-100,-50,0,50]
    # One marker alpha per mass panel (heavier -> more opaque).
    alphas = [0.2, 0.27, 0.34, 0.4]
    rcparams = {'lines.linestyle' : 'none',
                'lines.marker' : ','}
    with rc_context(rc=rcparams):
        # _m is the base-10 mass exponent: 6, 7, 8, 9.
        for ii,_m in enumerate(range(6,9+1)):
            alpha = alphas[ii]
            mass = "2.5e{}".format(_m)
            print(mass)
            m = float(mass)

            data_filename = os.path.join(streamspath, "data", "observed_particles",
                                         "2.5e{}.hdf5".format(_m))
            cfg_filename = os.path.join(streamspath, "config", "exp1_{}.yml".format(_m))
            data = read_hdf5(data_filename)
            true_particles = data["true_particles"].to_frame(galactocentric)
            config = read_config(cfg_filename)
            idx = config['particle_idx']

            sgr = SgrSimulation(sgr_path.format(_m),snapfile)
            p = sgr.particles()
            p_bound = sgr.particles(expr="tub==0")  # NOTE(review): unused

            # Panel title: the progenitor mass.
            axes[0,ii].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
                            horizontalalignment='center',
                            fontsize=24,
                            transform=axes[0,ii].transAxes)

            axes[0,ii].plot(p["x"].value, p["y"].value,
                            alpha=alpha, rasterized=True, color='#555555')
            axes[1,ii].plot(p["x"].value, p["z"].value,
                            alpha=alpha, rasterized=True, color='#555555')

            if _m == 8:
                # Overplot the particles used in the inference.
                axes[0,ii].plot(true_particles["x"].value[idx],
                                true_particles["y"].value[idx],
                                marker='+', markeredgewidth=1.5,
                                markersize=8, alpha=0.9, color='k')
                axes[1,ii].plot(true_particles["x"].value[idx],
                                true_particles["z"].value[idx],
                                marker='+', markeredgewidth=1.5,
                                markersize=8, alpha=0.9, color='k')
            axes[1,ii].set_xticks(ticks)
            axes[1,ii].set_xlabel("$X$ [kpc]")

    axes[0,0].set_ylabel("$Y$ [kpc]")
    axes[1,0].set_ylabel("$Z$ [kpc]")
    axes[0,0].set_yticks(ticks)
    axes[1,0].set_yticks(ticks)
    axes[-1,-1].set_xlim(-110,75)
    axes[-1,-1].set_ylim(-110,75)

    fig.tight_layout()
    fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
    fig.savefig(filename, dpi=200)
def potentials():
    """Contour the LM10 potential value in the X-Y plane (top row) and
    X-Z plane (bottom row) for four parameter variants."""
    filename = os.path.join(plot_path, "potentials.{}".format(ext))
    fig,axes = plt.subplots(2,4,figsize=grid_figsize)

    # Spherical baseline; each variant below tweaks one or two knobs.
    base_params = dict(q1=1., qz=1., q2=1., phi=0.)
    potentials = []
    potentials.append(LawMajewski2010(**base_params))

    pp = base_params.copy()
    pp['q1'] = 1.5
    potentials.append(LawMajewski2010(**pp))
    axes[0,1].text(0.5, 1.05, r"$q_1=1.5$",
                   horizontalalignment='center',
                   fontsize=20,
                   transform=axes[0,1].transAxes)

    pp = base_params.copy()
    pp['qz'] = 1.5
    potentials.append(LawMajewski2010(**pp))
    axes[0,2].text(0.5, 1.05, r"$q_z=1.5$",
                   horizontalalignment='center',
                   fontsize=20,
                   transform=axes[0,2].transAxes)

    pp = base_params.copy()
    pp['phi'] = 45*u.degree
    pp['q1'] = 1.5
    potentials.append(LawMajewski2010(**pp))
    axes[0,3].text(0.5, 1.05, r"$q_1=1.5$, $\phi=45^\circ$",
                   horizontalalignment='center',
                   fontsize=20,
                   transform=axes[0,3].transAxes)

    grid = np.linspace(-75, 75, 250)
    X1, X2 = np.meshgrid(grid,grid)

    # top row: evaluate on the Z=0 plane (rows are x, y, z).
    r = np.array([np.zeros_like(X1.ravel()).tolist() \
                  for xx in range(3)])
    r[0] = X1.ravel()
    r[1] = X2.ravel()

    levels = None
    for ii,potential in enumerate(potentials):
        axes[0,ii].set_xticks([-50,0,50])
        axes[0,ii].set_yticks([-50,0,50])
        Z = potential._value_at(r.T).reshape(X1.shape)
        # Reuse the first panel's contour levels so colors compare
        # directly across panels.
        if levels is None:
            cs = axes[0,ii].contourf(X1, X2, Z, cmap=cm.Blues_r)
            levels = cs.levels
        else:
            cs = axes[0,ii].contourf(X1, X2, Z, cmap=cm.Blues_r, levels=levels)
        if ii > 0:
            axes[0,ii].set_yticklabels([])
        axes[0,ii].set_xticklabels([])
        axes[0,ii].set_aspect('equal', 'box')

    # bottom row: evaluate on the Y=0 plane.
    r = np.array([np.zeros_like(X1.ravel()).tolist() \
                  for xx in range(3)])
    r[0] = X1.ravel()
    r[2] = X2.ravel()
    for ii,potential in enumerate(potentials):
        axes[1,ii].set_xticks([-50,0,50])
        axes[1,ii].set_yticks([-50,0,50])
        Z = potential._value_at(r.T).reshape(X1.shape)
        if levels is None:
            cs = axes[1,ii].contourf(X1, X2, Z, cmap=cm.Blues_r)
            levels = cs.levels
        else:
            cs = axes[1,ii].contourf(X1, X2, Z, cmap=cm.Blues_r, levels=levels)
        if ii > 0:
            axes[1,ii].set_yticklabels([])
        axes[1,ii].set_aspect('equal', 'box')
        axes[1,ii].set_xlabel("$X$ [kpc]")

    axes[0,0].set_ylabel("$Y$ [kpc]")
    axes[1,0].set_ylabel("$Z$ [kpc]")

    fig.tight_layout(pad=1.5, h_pad=0.)
    fig.savefig(filename)
def Lpts():
    """For each progenitor mass, plot escaping-particle trajectories
    around unbinding: positions scaled by the tidal radius (fig 1) and
    velocities scaled by the velocity dispersion (fig 2), projected
    into an instantaneous orbital basis (x1 radial, x2/x3 completing
    an orthonormal frame)."""
    potential = LawMajewski2010()
    filename = os.path.join(plot_path, "Lpts_r.{}".format(ext))
    filename2 = os.path.join(plot_path, "Lpts_v.{}".format(ext))

    fig,axes = plt.subplots(2,4,figsize=grid_figsize,
                            sharex=True, sharey=True)
    fig2,axes2 = plt.subplots(2,4,figsize=grid_figsize,
                              sharex=True, sharey=True)

    bins = np.linspace(-3,3,50)  # NOTE(review): unused
    nparticles = 2000
    for k,_m in enumerate(range(6,9+1)):
        mass = "2.5e{}".format(_m)
        m = float(mass)
        print(mass)

        sgr = SgrSimulation(sgr_path.format(_m),snapfile)
        p = sgr.particles(n=nparticles, expr=expr)
        s = sgr.satellite()

        # Stack satellite (column 0) and particle phase-space coords.
        X = np.vstack((s._X[...,:3], p._X[...,:3].copy()))
        V = np.vstack((s._X[...,3:], p._X[...,3:].copy()))
        integrator = LeapfrogIntegrator(sgr.potential._acceleration_at,
                                        np.array(X), np.array(V),
                                        args=(X.shape[0], np.zeros_like(X)))
        # Integrate backwards in time (dt < 0).
        ts, rs, vs = integrator.run(t1=sgr.t1, t2=sgr.t2, dt=-1.)

        s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
        p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
        # Time index nearest each particle's unbinding time tub.
        t_idx = np.array([np.argmin(np.fabs(ts - t)) for t in p.tub])

        # Linear satellite mass history and the resulting tidal radius
        # and velocity-dispersion scale along the orbit.
        m_t = (-s.mdot*ts + s.m0)[:,np.newaxis]
        s_R = np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))
        s_V = np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))
        r_tide = sgr.potential._tidal_radius(m_t, s_orbit[...,:3])
        v_disp = s_V * r_tide / s_R

        # cartesian basis to project into
        x1_hat = s_orbit[...,:3] / np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))[...,np.newaxis]
        _x2_hat = s_orbit[...,3:] / np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))[...,np.newaxis]
        _x3_hat = np.cross(x1_hat, _x2_hat)
        # Re-orthogonalize x2 against x1 and x3, then normalize.
        _x2_hat = -np.cross(x1_hat, _x3_hat)
        x2_hat = _x2_hat / np.linalg.norm(_x2_hat, axis=-1)[...,np.newaxis]
        x3_hat = _x3_hat / np.linalg.norm(_x3_hat, axis=-1)[...,np.newaxis]

        # translate to satellite position
        rel_orbits = p_orbits - s_orbit
        rel_pos = rel_orbits[...,:3]
        rel_vel = rel_orbits[...,3:]

        # project onto each
        x1 = np.sum(rel_pos * x1_hat, axis=-1)
        x2 = np.sum(rel_pos * x2_hat, axis=-1)
        x3 = np.sum(rel_pos * x3_hat, axis=-1)
        vx1 = np.sum(rel_vel * x1_hat, axis=-1)
        vx2 = np.sum(rel_vel * x2_hat, axis=-1)
        vx3 = np.sum(rel_vel * x3_hat, axis=-1)

        # Crossing-time scale used to pick a plotting window in steps.
        _tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
        for ii,jj in enumerate(t_idx):
            #tcross = r_tide[jj,0] / _v[jj,ii]
            tcross = _tcross[jj]
            bnd = int(tcross / 2)

            ix1,ix2 = jj-bnd, jj+bnd
            if ix1 < 0: ix1 = 0
            # NOTE(review): this compares an array *index* against a
            # *time* value, and the clamped ix1/ix2 are never used in
            # the slices below (cf. total_rv, which uses them) --
            # confirm intent.
            if ix2 > max(sgr.t1,sgr.t2): ix2 = -1

            axes[0,k].plot(x1[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           x2[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
            axes[1,k].plot(x1[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           x3[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)

        # Unit circle marks the tidal radius in scaled coordinates.
        circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
                      edgecolor='k', linestyle='solid')
        axes[0,k].add_patch(circ)
        circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
                      edgecolor='k', linestyle='solid')
        axes[1,k].add_patch(circ)

        axes[0,k].axhline(0., color='k', alpha=0.75)
        axes[1,k].axhline(0., color='k', alpha=0.75)

        axes[0,k].set_xlim(-5,5)
        axes[0,k].set_ylim(axes[0,k].get_xlim())

        axes[1,k].set_xlabel(r"$x_1/r_{\rm tide}$")

        if k == 0:
            axes[0,k].set_ylabel(r"$x_2/r_{\rm tide}$")
            axes[1,k].set_ylabel(r"$x_3/r_{\rm tide}$")

        # Same loop again for the velocity-space figure.
        _tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
        for ii,jj in enumerate(t_idx):
            #tcross = r_tide[jj,0] / _v[jj,ii]
            tcross = _tcross[jj]
            bnd = int(tcross / 2)

            ix1,ix2 = jj-bnd, jj+bnd
            if ix1 < 0: ix1 = 0
            if ix2 > max(sgr.t1,sgr.t2): ix2 = -1

            axes2[0,k].plot(vx1[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                            vx2[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                            linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
            axes2[1,k].plot(vx1[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                            vx3[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                            linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)

        circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
                      edgecolor='k', linestyle='solid')
        axes2[0,k].add_patch(circ)
        circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
                      edgecolor='k', linestyle='solid')
        axes2[1,k].add_patch(circ)

        axes2[0,k].axhline(0., color='k', alpha=0.75)
        axes2[1,k].axhline(0., color='k', alpha=0.75)

        axes2[1,k].set_xlim(-5,5)
        axes2[1,k].set_ylim(axes2[1,k].get_xlim())

        axes2[1,k].set_xlabel(r"$v_{x_1}/\sigma_v$")

        if k == 0:
            axes2[0,k].set_ylabel(r"$v_{x_2}/\sigma_v$")
            axes2[1,k].set_ylabel(r"$v_{x_3}/\sigma_v$")

        axes[0,k].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
                       horizontalalignment='center',
                       fontsize=24,
                       transform=axes[0,k].transAxes)
        axes2[0,k].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
                        horizontalalignment='center',
                        fontsize=24,
                        transform=axes2[0,k].transAxes)

    fig.tight_layout()
    fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
    fig.savefig(filename)

    fig2.tight_layout()
    fig2.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
    fig2.savefig(filename2)
def phasespace():
    """Plot scaled position vs. velocity phase-space projections
    (X-VX, Y-VY, Z-VZ rows) around unbinding for each progenitor
    mass.  Same setup as Lpts() but pairing each coordinate with its
    conjugate velocity."""
    filename = os.path.join(plot_path, "Lpts_rv.png")

    fig,axes = plt.subplots(3,4,figsize=(14,12),
                            sharex=True, sharey=True)

    bins = np.linspace(-3,3,50)  # NOTE(review): unused
    nparticles = 2000
    for k,_m in enumerate(range(6,9+1)):
        mass = "2.5e{}".format(_m)
        m = float(mass)
        print(mass)

        sgr = SgrSimulation(sgr_path.format(_m),snapfile)
        p = sgr.particles(n=nparticles, expr=expr)
        s = sgr.satellite()

        # Stack satellite (column 0) and particle phase-space coords.
        X = np.vstack((s._X[...,:3], p._X[...,:3].copy()))
        V = np.vstack((s._X[...,3:], p._X[...,3:].copy()))
        integrator = LeapfrogIntegrator(sgr.potential._acceleration_at,
                                        np.array(X), np.array(V),
                                        args=(X.shape[0], np.zeros_like(X)))
        ts, rs, vs = integrator.run(t1=sgr.t1, t2=sgr.t2, dt=-1.)

        s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
        p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
        t_idx = np.array([np.argmin(np.fabs(ts - t)) for t in p.tub])

        # Satellite mass history, tidal radius, velocity scale.
        m_t = (-s.mdot*ts + s.m0)[:,np.newaxis]
        s_R = np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))
        s_V = np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))
        r_tide = sgr.potential._tidal_radius(m_t, s_orbit[...,:3])
        v_disp = s_V * r_tide / s_R

        # cartesian basis to project into
        x_hat = s_orbit[...,:3] / np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))[...,np.newaxis]
        _y_hat = s_orbit[...,3:] / np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))[...,np.newaxis]
        z_hat = np.cross(x_hat, _y_hat)
        y_hat = -np.cross(x_hat, z_hat)

        # translate to satellite position
        rel_orbits = p_orbits - s_orbit
        rel_pos = rel_orbits[...,:3]
        rel_vel = rel_orbits[...,3:]

        # project onto each
        X = np.sum(rel_pos * x_hat, axis=-1)
        Y = np.sum(rel_pos * y_hat, axis=-1)
        Z = np.sum(rel_pos * z_hat, axis=-1)
        VX = np.sum(rel_vel * x_hat, axis=-1)
        VY = np.sum(rel_vel * y_hat, axis=-1)
        VZ = np.sum(rel_vel * z_hat, axis=-1)
        VV = np.sqrt(VX**2+VY**2+VZ**2)  # NOTE(review): unused here

        _tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
        for ii,jj in enumerate(t_idx):
            #tcross = r_tide[jj,0] / _v[jj,ii]
            tcross = _tcross[jj]
            bnd = int(tcross / 2)

            ix1,ix2 = jj-bnd, jj+bnd
            if ix1 < 0: ix1 = 0
            # NOTE(review): index compared against a time value, and
            # the clamped ix1/ix2 are unused in the slices below --
            # same pattern as Lpts(); confirm intent.
            if ix2 > max(sgr.t1,sgr.t2): ix2 = -1

            axes[0,k].plot(X[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           VX[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                           linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
            axes[1,k].plot(Y[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           VY[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                           linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
            axes[2,k].plot(Z[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
                           VZ[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
                           linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)

        axes[0,k].set_xlim(-5,5)
        axes[0,k].set_ylim(axes[0,k].get_xlim())

        # axes[1,k].set_xlabel(r"$x_1$")

        # if k == 0:
        #     axes[0,k].set_ylabel(r"$x_2$")
        #     axes[1,k].set_ylabel(r"$x_3$")

        axes[0,k].set_title(r"$2.5\times10^{}M_\odot$".format(_m))

    fig.tight_layout()
    fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
    fig.savefig(filename)
def total_rv():
    """Plot each escaping particle's total relative distance (figr)
    and relative speed in km/s (figv) versus time, one panel per
    progenitor mass, with the 2*r_tide curve overplotted."""
    filenamer = os.path.join(plot_path, "rel_r.png")
    filenamev = os.path.join(plot_path, "rel_v.png")

    figr,axesr = plt.subplots(4,1,figsize=(10,14),
                              sharex=True)
    figv,axesv = plt.subplots(4,1,figsize=(10,14),
                              sharex=True)

    nparticles = 2000
    for k,_m in enumerate(range(6,9+1)):
        mass = "2.5e{}".format(_m)
        m = float(mass)
        print(mass)

        sgr = SgrSimulation(sgr_path.format(_m),snapfile)
        p = sgr.particles(n=nparticles, expr=expr)
        s = sgr.satellite()

        # Stack satellite (column 0) and particle phase-space coords.
        X = np.vstack((s._X[...,:3], p._X[...,:3].copy()))
        V = np.vstack((s._X[...,3:], p._X[...,3:].copy()))
        integrator = LeapfrogIntegrator(sgr.potential._acceleration_at,
                                        np.array(X), np.array(V),
                                        args=(X.shape[0], np.zeros_like(X)))
        ts, rs, vs = integrator.run(t1=sgr.t1, t2=sgr.t2, dt=-1.)

        s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
        p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
        t_idx = np.array([np.argmin(np.fabs(ts - t)) for t in p.tub])

        # Satellite mass history, tidal radius, velocity scale.
        m_t = (-s.mdot*ts + s.m0)[:,np.newaxis]
        s_R = np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))
        s_V = np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))
        r_tide = sgr.potential._tidal_radius(m_t, s_orbit[...,:3])
        v_disp = s_V * r_tide / s_R

        # cartesian basis to project into
        x_hat = s_orbit[...,:3] / np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))[...,np.newaxis]
        _y_hat = s_orbit[...,3:] / np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))[...,np.newaxis]
        z_hat = np.cross(x_hat, _y_hat)
        y_hat = -np.cross(x_hat, z_hat)

        # translate to satellite position
        rel_orbits = p_orbits - s_orbit
        rel_pos = rel_orbits[...,:3]
        rel_vel = rel_orbits[...,3:]

        # project onto each
        X = np.sum(rel_pos * x_hat, axis=-1)
        Y = np.sum(rel_pos * y_hat, axis=-1)
        Z = np.sum(rel_pos * z_hat, axis=-1)
        RR = np.sqrt(X**2 + Y**2 + Z**2)

        VX = np.sum(rel_vel * x_hat, axis=-1)
        VY = np.sum(rel_vel * y_hat, axis=-1)
        VZ = np.sum(rel_vel * z_hat, axis=-1)
        # Convert relative speed and dispersion to km/s for plotting.
        VV = (np.sqrt(VX**2 + VY**2 + VZ**2)*u.kpc/u.Myr).to(u.km/u.s).value
        v_disp = (v_disp*u.kpc/u.Myr).to(u.km/u.s).value

        _tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
        for ii,jj in enumerate(t_idx):
            #tcross = r_tide[jj,0] / _v[jj,ii]
            tcross = _tcross[jj]
            bnd = int(tcross / 2)

            ix1,ix2 = jj-bnd, jj+bnd
            if ix1 < 0: ix1 = 0
            # NOTE(review): compares an index to a time value (here
            # ix1/ix2 ARE used below) -- confirm the clamp is right.
            if ix2 > max(sgr.t1,sgr.t2): ix2 = -1

            axesr[k].plot(ts[ix1:ix2],
                          RR[ix1:ix2,ii],
                          linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)

            axesv[k].plot(ts[ix1:ix2],
                          VV[ix1:ix2,ii],
                          linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)

        # Reference curve at twice the tidal radius.
        axesr[k].plot(ts, r_tide*2., marker=None)

        axesr[k].set_xlim(ts.min(), ts.max())
        axesv[k].set_xlim(ts.min(), ts.max())

        axesr[k].set_ylim(0,max(r_tide)*7)
        axesv[k].set_ylim(0,max(v_disp)*7)

        # axes[1,k].set_xlabel(r"$x_1$")

        # if k == 0:
        #     axes[0,k].set_ylabel(r"$x_2$")
        #     axes[1,k].set_ylabel(r"$x_3$")

        axesr[k].text(3000, max(r_tide)*5, r"$2.5\times10^{}M_\odot$".format(_m))
        axesv[k].text(3000, max(v_disp)*5, r"$2.5\times10^{}M_\odot$".format(_m))

    axesr[-1].set_xlabel("time [Myr]")
    axesv[-1].set_xlabel("time [Myr]")

    figr.suptitle("Relative distance", fontsize=26)
    figr.tight_layout()
    figr.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
    figr.savefig(filenamer)

    figv.suptitle("Relative velocity", fontsize=26)
    figv.tight_layout()
    figv.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
    figv.savefig(filenamev)
def trace_plots():
    """Plot MCMC walker traces (one row per model parameter) from the
    exp1_8 inference run, annotated with autocorrelation times."""
    cfg_filename = os.path.join(streamspath, "config", "exp1_8.yml")
    config = read_config(cfg_filename)
    model = StreamModel.from_config(config)

    hdf5_filename = os.path.join(streamspath, "plots", "hotfoot", "exper1_8", "inference.hdf5")
    with h5py.File(hdf5_filename, "r") as f:
        chain = f["chain"].value
        flatchain = f["flatchain"].value  # NOTE(review): unused
        acceptance_fraction = f["acceptance_fraction"].value  # NOTE(review): unused
        p0 = f["p0"].value  # NOTE(review): unused
        acor = f["acor"].value

    # Display labels / axis bounds / tick positions, one per
    # parameter, in the same order the nested loop below visits them.
    labels = ["$q_1$", "$q_z$", r"$\phi$", "$v_h$", r"$\alpha$"]
    bounds = [(1.3,1.5),(1.25,1.45),(85,115),(118,132),(0.5,2.5)]
    ticks = [(1.35,1.4,1.45),(1.3,1.35,1.4),(90,100,110),(120,125,130),(1.,1.5,2.)]

    # plot individual walkers
    fig,axes = plt.subplots(5,1,figsize=(8.5,11),sharex=True)

    k = 0
    for gname,group in model.parameters.items():
        for pname,p in group.items():
            # Convert raw chain samples to display units.
            thischain = _unit_transform[pname](chain[...,k])

            for ii in range(config['walkers']):
                # Discard the first 5000 steps as burn-in.
                axes.flat[k].plot(thischain[ii,5000:],
                                  alpha=0.1, marker=None,
                                  drawstyle='steps', color='k')

            #axes.flat[k].set_ylabel(labels[k], rotation='horizontal')
            axes[k].text(-0.02, 0.5, labels[k],
                         horizontalalignment='right',
                         fontsize=22,
                         transform=axes[k].transAxes)

            # Unit annotations for the dimensional parameters.
            if pname == "phi":
                axes[k].text(1.07, 0.475, "deg",
                             horizontalalignment='left',
                             fontsize=18,
                             transform=axes[k].transAxes)

            elif pname == "v_halo":
                axes[k].text(1.07, 0.475, "km/s",
                             horizontalalignment='left',
                             fontsize=18,
                             transform=axes[k].transAxes)

            axes[k].text(0.25, 0.1, r"$t_{\rm acor}$=" + "{}".format(int(acor[k])),
                         horizontalalignment='right',
                         fontsize=18,
                         transform=axes[k].transAxes)

            axes.flat[k].set_yticks(ticks[k])
            axes.flat[k].set_ylim(bounds[k])
            axes.flat[k].yaxis.tick_right()
            #axes.flat[k].yaxis.set_label_position("right")
            k += 1

    axes.flat[-1].set_xlabel("Step number")

    fig.tight_layout()
    fig.subplots_adjust(hspace=0.04, left=0.14, right=0.86)
    fig.savefig(os.path.join(plot_path, "mcmc_trace.{}".format(ext)))
def exp1_posterior():
    """Corner plot of the exp1_8 posterior over the potential and
    satellite parameters, thinned by the median autocorrelation
    time and converted to display units."""
    cfg_filename = os.path.join(streamspath, "config", "exp1_8.yml")
    config = read_config(cfg_filename)
    model = StreamModel.from_config(config)

    hdf5_filename = os.path.join(streamspath, "plots", "hotfoot", "exper1_8", "inference.hdf5")
    with h5py.File(hdf5_filename, "r") as f:
        chain = f["chain"].value
        #flatchain = f["flatchain"].value
        acceptance_fraction = f["acceptance_fraction"].value  # NOTE(review): unused
        p0 = f["p0"].value  # NOTE(review): unused
        acor = f["acor"].value

    # Discard 2500 burn-in steps, thin by the median acor, and
    # flatten walkers into one sample set.
    _flatchain = np.vstack(chain[:,2500::int(np.median(acor))])
    flatchain = np.zeros_like(_flatchain)

    # NOTE(review): dict.items() + dict.items() concatenation only
    # works on Python 2 (consistent with the cPickle import above);
    # under Python 3 this needs itertools.chain or list() casts.
    params = OrderedDict(model.parameters['potential'].items() + \
                         model.parameters['satellite'].items())

    labels = ["$q_1$", "$q_z$", r"$\phi$ [deg]", "$v_h$ [km/s]", r"$\alpha$"]
    truths = []
    bounds = []
    for ii,p in enumerate(params.values()):
        if p.name == 'alpha':
            # alpha has no simulation truth value; fix its plot range.
            truths.append(np.nan)
            bounds.append((1., 2.0))
            flatchain[:,ii] = _unit_transform[p.name](_flatchain[:,ii])
            continue

        truth = _unit_transform[p.name](p.truth)
        truths.append(truth)
        # Plot range: +/-5% around the true value.
        bounds.append((0.95*truth, 1.05*truth))
        flatchain[:,ii] = _unit_transform[p.name](_flatchain[:,ii])

    fig = triangle.corner(flatchain, plot_datapoints=False,
                          truths=truths, extents=bounds, labels=labels)
    fig.savefig(os.path.join(plot_path, "exp1_posterior.png"))
def exp_posteriors(exp_num, slicey=-5000):
cfg_filename = os.path.join(streamspath, "config", "exp{}.yml".format(exp_num))
config = read_config(cfg_filename)
model = StreamModel.from_config(config)
hdf5_filename = os.path.join(streamspath, "plots", "hotfoot",
"exper{}".format(exp_num), "inference.hdf5")
# hdf5_filename = os.path.join(streamspath, "plots", "infer_potential",
# "exper{}".format(exp_num), "inference.hdf5")
with h5py.File(hdf5_filename, "r") as f:
chain = f["chain"].value
acceptance_fraction = f["acceptance_fraction"].value
_p0 = f["p0"].value
acor = f["acor"].value
print("median acor: {}".format(int(np.median(acor))))
if slicey is None:
slicey = slice(-5000,None,int(np.median(acor)))
else:
slicey = slice(slicey,None,int(np.median(acor)))
_flatchain = np.vstack(chain[acceptance_fraction>0.03,slicey])
d = model.label_flatchain(_flatchain)
p0 = model.label_flatchain(_p0)
# Potential
this_flatchain = np.zeros((_flatchain.shape[0], len(d["potential"])))
this_p0 = np.zeros((_p0.shape[0], this_flatchain.shape[1]))
truths = []
bounds = []
labels = []
for ii,pname in enumerate(d["potential"].keys()):
this_flatchain[:,ii] = _unit_transform[pname](np.squeeze(d["potential"][pname]))
this_p0[:,ii] = _unit_transform[pname](np.squeeze(p0["potential"][pname]))
p = model.parameters["potential"][pname]
truth = _unit_transform[pname](p.truth)
truths.append(truth)
bounds.append((0.75*truth, 1.25*truth))
labels.append(_label_map[pname])
q16,q50,q84 = np.array(np.percentile(this_flatchain, [16, 50, 84], axis=0))
q_m, q_p = q50-q16, q84-q50
for ii,pname in enumerate(d["potential"].keys()):
print("{} \n\t truth={:.2f}\n\t measured={:.2f}+{:.2f}-{:.2f}"\
.format(pname,truths[ii],q50[ii],q_p[ii],q_m[ii]))
fig = triangle.corner(this_flatchain, plot_datapoints=False,
truths=truths, extents=bounds, labels=labels)
fig.savefig(os.path.join(plot_path, "exp{}_potential.{}".format(exp_num, ext)))
# Particle
p_idx = 5
this_flatchain = np.zeros((_flatchain.shape[0], len(d["particles"])))
this_p0 = np.zeros((_p0.shape[0], this_flatchain.shape[1]))
truths = []
bounds = []
labels = []
for ii,pname in enumerate(d["particles"].keys()):
this_flatchain[:,ii] = _unit_transform[pname](d["particles"][pname][:,p_idx])
this_p0[:,ii] = _unit_transform[pname](p0["particles"][pname][:,p_idx])
p = model.parameters["particles"][pname]
truth = _unit_transform[pname](p.truth[p_idx])
truths.append(truth)
if pname == "tub":
bounds.append((model.lnpargs[1], model.lnpargs[0]))
else:
sig = model.particles.errors[pname].value[p_idx]
mu = model.particles[pname].value[p_idx]
bounds.append((mu-3*sig, mu+3*sig))
labels.append(_label_map[pname])
# HACK
bounds = [(32.5,41), (1.75,2.4), (-1.45,-1.0), (-85,-40), bounds[-1]]
fig = triangle.corner(this_flatchain, plot_datapoints=False,
truths=truths, labels=labels, extents=bounds)
fig.savefig(os.path.join(plot_path, "exp{}_particle.{}".format(exp_num, ext)))
# Satellite
this_flatchain = np.zeros((_flatchain.shape[0], len(d["satellite"])))
this_p0 = np.zeros((_p0.shape[0], this_flatchain.shape[1]))
truths = []
bounds = []
labels = []
#for ii,pname in enumerate(keys):
for ii,pname in enumerate(d["satellite"].keys()):
this_flatchain[:,ii] = _unit_transform[pname](d["satellite"][pname][:,0])
this_p0[:,ii] = _unit_transform[pname](p0["satellite"][pname][:,0])
p = model.parameters["satellite"][pname]
truth = _unit_transform[pname](p.truth)
if pname == "alpha":
bounds.append((1., 2.5))
truths.append(np.nan)
else:
truths.append(truth)
sig = model.satellite.errors[pname].value[0]
mu = model.satellite[pname].value[0]
bounds.append((mu-3*sig, mu+3*sig))
labels.append(_label_map[pname])
# HACK
bounds = [(28,33), (-2.1,-1.6), (1.3,1.8), (125,185), bounds[-1]]
if len(d["satellite"]) > len(bounds):
bounds = [(0,10), (-20,5)] + bounds
#bounds = None
fig = triangle.corner(this_flatchain, plot_datapoints=False,
truths=truths, labels=labels, extents=bounds)
fig.savefig(os.path.join(plot_path, "exp{}_satellite.{}".format(exp_num, ext)))
def exp2_posteriors():
exp_posteriors(2, slicey=-10000)
def exp3_posteriors():
exp_posteriors(3, slicey=-15000)
def exp4_posteriors():
exp_posteriors(4, slicey=-15000)
if __name__ == '__main__':
from argparse import ArgumentParser
import logging
# Create logger
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
formatter = logging.Formatter("%(name)s / %(levelname)s / %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Be chatty! (default = False)")
parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
default=False, help="Be quiet! (default = False)")
parser.add_argument("-l", "--list", action="store_true", dest="list",
default=False, help="List all functions")
parser.add_argument("-o", "--overwrite", action="store_true",
dest="overwrite", default=False,
help="Overwrite existing files.")
parser.add_argument("-f", "--function", dest="function", type=str,
help="The name of the function to execute.")
args = parser.parse_args()
# Set logger level based on verbose flags
if args.verbose:
logger.setLevel(logging.DEBUG)
elif args.quiet:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
def _print_funcs():
fs = inspect.getmembers(sys.modules[__name__],
lambda member: inspect.isfunction(member) and member.__module__ == __name__ and not member.__name__.startswith("_"))
print("\n".join([f[0] for f in fs]))
if args.list:
print("="*79)
_print_funcs()
print("="*79)
sys.exit(0)
if args.function is None:
print ("You must specify a function name! Use -l to get the list "
"of functions.")
sys.exit(1)
func = getattr(sys.modules[__name__], args.__dict__.get("function"))
func()
change test
# coding: utf-8
""" A script for making figures for our streams paper 2 """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import cPickle as pickle
import inspect
from collections import OrderedDict
# Third-party
import astropy.units as u
from astropy.constants import G
from astropy.io.misc import fnpickle, fnunpickle
import h5py
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib import rc_context, rcParams, cm
from matplotlib.patches import Rectangle, Ellipse, Circle
import scipy.optimize as so
from scipy.stats import norm
import triangle
from streams import usys
from streams.util import streamspath, _unit_transform, _label_map
from streams.coordinates.frame import galactocentric
from streams.io.sgr import SgrSimulation
from streams.io import read_hdf5, read_config
from streams.inference import StreamModel, particles_x1x2x3
from streams.integrate import LeapfrogIntegrator
from streams.potential.lm10 import LawMajewski2010
matplotlib.rc('xtick', labelsize=18)
matplotlib.rc('ytick', labelsize=18)
matplotlib.rc('axes', edgecolor='#444444', labelsize=24,
labelweight=400, linewidth=1.5)
matplotlib.rc('lines', markeredgewidth=0)
matplotlib.rc('font', family='Source Sans Pro')
# expr = "(tub!=0)"
expr = "(tub!=0) & (tub>1800) & (tub<5500)"
sgr_path = 'sgr_nfw/M2.5e+0{}'
snapfile = 'SNAP113'
# sgr_path = 'sgr_plummer/2.5e{}'
# snapfile = 'SNAP'
plot_path = os.path.join(streamspath, "plots/paper2/")
if not os.path.exists(plot_path):
os.mkdir(plot_path)
ext = 'png'
grid_figsize = (14,7.5)
def simulated_streams():
filename = os.path.join(plot_path, "simulated_streams.{}".format(ext))
fig,axes = plt.subplots(2,4,figsize=grid_figsize,
sharex=True, sharey=True)
ticks = [-100,-50,0,50]
alphas = [0.2, 0.27, 0.34, 0.4]
rcparams = {'lines.linestyle' : 'none',
'lines.marker' : ','}
with rc_context(rc=rcparams):
for ii,_m in enumerate(range(6,9+1)):
alpha = alphas[ii]
mass = "2.5e{}".format(_m)
print(mass)
m = float(mass)
data_filename = os.path.join(streamspath, "data", "observed_particles",
"2.5e{}.hdf5".format(_m))
cfg_filename = os.path.join(streamspath, "config", "exp1_{}.yml".format(_m))
data = read_hdf5(data_filename)
true_particles = data["true_particles"].to_frame(galactocentric)
config = read_config(cfg_filename)
idx = config['particle_idx']
sgr = SgrSimulation(sgr_path.format(_m),snapfile)
p = sgr.particles()
p_bound = sgr.particles(expr="tub==0")
axes[0,ii].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
horizontalalignment='center',
fontsize=24,
transform=axes[0,ii].transAxes)
axes[0,ii].plot(p["x"].value, p["y"].value,
alpha=alpha, rasterized=True, color='#555555')
axes[1,ii].plot(p["x"].value, p["z"].value,
alpha=alpha, rasterized=True, color='#555555')
if _m == 8:
axes[0,ii].plot(true_particles["x"].value[idx],
true_particles["y"].value[idx],
marker='+', markeredgewidth=1.5,
markersize=8, alpha=0.9, color='k')
axes[1,ii].plot(true_particles["x"].value[idx],
true_particles["z"].value[idx],
marker='+', markeredgewidth=1.5,
markersize=8, alpha=0.9, color='k')
axes[1,ii].set_xticks(ticks)
axes[1,ii].set_xlabel("$X$ [kpc]")
axes[0,0].set_ylabel("$Y$ [kpc]")
axes[1,0].set_ylabel("$Z$ [kpc]")
axes[0,0].set_yticks(ticks)
axes[1,0].set_yticks(ticks)
axes[-1,-1].set_xlim(-110,75)
axes[-1,-1].set_ylim(-110,75)
fig.tight_layout()
fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
fig.savefig(filename, dpi=200)
def potentials():
filename = os.path.join(plot_path, "potentials.{}".format(ext))
fig,axes = plt.subplots(2,4,figsize=grid_figsize)
base_params = dict(q1=1., qz=1., q2=1., phi=0.)
potentials = []
potentials.append(LawMajewski2010(**base_params))
pp = base_params.copy()
pp['q1'] = 1.5
potentials.append(LawMajewski2010(**pp))
axes[0,1].text(0.5, 1.05, r"$q_1=1.5$",
horizontalalignment='center',
fontsize=20,
transform=axes[0,1].transAxes)
pp = base_params.copy()
pp['qz'] = 1.5
potentials.append(LawMajewski2010(**pp))
axes[0,2].text(0.5, 1.05, r"$q_z=1.5$",
horizontalalignment='center',
fontsize=20,
transform=axes[0,2].transAxes)
pp = base_params.copy()
pp['phi'] = 45*u.degree
pp['q1'] = 1.5
potentials.append(LawMajewski2010(**pp))
axes[0,3].text(0.5, 1.05, r"$q_1=1.5$, $\phi=45^\circ$",
horizontalalignment='center',
fontsize=20,
transform=axes[0,3].transAxes)
grid = np.linspace(-75, 75, 250)
X1, X2 = np.meshgrid(grid,grid)
# top row:
r = np.array([np.zeros_like(X1.ravel()).tolist() \
for xx in range(3)])
r[0] = X1.ravel()
r[1] = X2.ravel()
levels = None
for ii,potential in enumerate(potentials):
axes[0,ii].set_xticks([-50,0,50])
axes[0,ii].set_yticks([-50,0,50])
Z = potential._value_at(r.T).reshape(X1.shape)
if levels is None:
cs = axes[0,ii].contourf(X1, X2, Z, cmap=cm.Blues_r)
levels = cs.levels
else:
cs = axes[0,ii].contourf(X1, X2, Z, cmap=cm.Blues_r, levels=levels)
if ii > 0:
axes[0,ii].set_yticklabels([])
axes[0,ii].set_xticklabels([])
axes[0,ii].set_aspect('equal', 'box')
# bottom row:
r = np.array([np.zeros_like(X1.ravel()).tolist() \
for xx in range(3)])
r[0] = X1.ravel()
r[2] = X2.ravel()
for ii,potential in enumerate(potentials):
axes[1,ii].set_xticks([-50,0,50])
axes[1,ii].set_yticks([-50,0,50])
Z = potential._value_at(r.T).reshape(X1.shape)
if levels is None:
cs = axes[1,ii].contourf(X1, X2, Z, cmap=cm.Blues_r)
levels = cs.levels
else:
cs = axes[1,ii].contourf(X1, X2, Z, cmap=cm.Blues_r, levels=levels)
if ii > 0:
axes[1,ii].set_yticklabels([])
axes[1,ii].set_aspect('equal', 'box')
axes[1,ii].set_xlabel("$X$ [kpc]")
axes[0,0].set_ylabel("$Y$ [kpc]")
axes[1,0].set_ylabel("$Z$ [kpc]")
fig.tight_layout(pad=1.5, h_pad=0.)
fig.savefig(filename)
def Lpts():
potential = LawMajewski2010()
filename = os.path.join(plot_path, "Lpts_r.{}".format(ext))
filename2 = os.path.join(plot_path, "Lpts_v.{}".format(ext))
fig,axes = plt.subplots(2,4,figsize=grid_figsize,
sharex=True, sharey=True)
fig2,axes2 = plt.subplots(2,4,figsize=grid_figsize,
sharex=True, sharey=True)
bins = np.linspace(-3,3,50)
nparticles = 2000
for k,_m in enumerate(range(6,9+1)):
mass = "2.5e{}".format(_m)
m = float(mass)
print(mass)
sgr = SgrSimulation(sgr_path.format(_m),snapfile)
p = sgr.particles(n=nparticles, expr=expr)
s = sgr.satellite()
dt = -1.
coord, r_tide, v_disp = particles_x1x2x3(p, s,
sgr.potential,
sgr.t1, sgr.t2, dt,
at_tub=False)
(x1,x2,x3,vx1,vx2,vx3) = coord
ts = np.arange(sgr.t1,sgr.t2+dt,dt)
t_idx = np.array([np.argmin(np.fabs(ts - t)) for t in p.tub])
_tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
for ii,jj in enumerate(t_idx):
#tcross = r_tide[jj,0] / _v[jj,ii]
tcross = _tcross[jj]
bnd = int(tcross / 2)
ix1,ix2 = jj-bnd, jj+bnd
if ix1 < 0: ix1 = 0
if ix2 > max(sgr.t1,sgr.t2): ix2 = -1
axes[0,k].plot(x1[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
x2[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axes[1,k].plot(x1[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
x3[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes[0,k].add_patch(circ)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes[1,k].add_patch(circ)
axes[0,k].axhline(0., color='k', alpha=0.75)
axes[1,k].axhline(0., color='k', alpha=0.75)
axes[0,k].set_xlim(-5,5)
axes[0,k].set_ylim(axes[0,k].get_xlim())
axes[1,k].set_xlabel(r"$x_1/r_{\rm tide}$")
if k == 0:
axes[0,k].set_ylabel(r"$x_2/r_{\rm tide}$")
axes[1,k].set_ylabel(r"$x_3/r_{\rm tide}$")
_tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
for ii,jj in enumerate(t_idx):
#tcross = r_tide[jj,0] / _v[jj,ii]
tcross = _tcross[jj]
bnd = int(tcross / 2)
ix1,ix2 = jj-bnd, jj+bnd
if ix1 < 0: ix1 = 0
if ix2 > max(sgr.t1,sgr.t2): ix2 = -1
axes2[0,k].plot(vx1[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
vx2[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axes2[1,k].plot(vx1[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
vx3[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes2[0,k].add_patch(circ)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes2[1,k].add_patch(circ)
axes2[0,k].axhline(0., color='k', alpha=0.75)
axes2[1,k].axhline(0., color='k', alpha=0.75)
axes2[1,k].set_xlim(-5,5)
axes2[1,k].set_ylim(axes2[1,k].get_xlim())
axes2[1,k].set_xlabel(r"$v_{x_1}/\sigma_v$")
if k == 0:
axes2[0,k].set_ylabel(r"$v_{x_2}/\sigma_v$")
axes2[1,k].set_ylabel(r"$v_{x_3}/\sigma_v$")
axes[0,k].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
horizontalalignment='center',
fontsize=24,
transform=axes[0,k].transAxes)
axes2[0,k].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
horizontalalignment='center',
fontsize=24,
transform=axes2[0,k].transAxes)
fig.tight_layout()
fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
fig.savefig(filename)
fig2.tight_layout()
fig2.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
fig2.savefig(filename2)
def Lpts_tub():
potential = LawMajewski2010()
filename = os.path.join(plot_path, "Lpts_r_tub.{}".format(ext))
filename2 = os.path.join(plot_path, "Lpts_v_tub.{}".format(ext))
fig,axes = plt.subplots(2,4,figsize=grid_figsize,
sharex=True, sharey=True)
fig2,axes2 = plt.subplots(2,4,figsize=grid_figsize,
sharex=True, sharey=True)
bins = np.linspace(-3,3,50)
nparticles = 2000
for k,_m in enumerate(range(6,9+1)):
mass = "2.5e{}".format(_m)
m = float(mass)
print(mass)
sgr = SgrSimulation(sgr_path.format(_m),snapfile)
p = sgr.particles(n=nparticles, expr=expr)
s = sgr.satellite()
coord, r_tide, v_disp = particles_x1x2x3(p, s,
sgr.potential,
sgr.t1, sgr.t2, -1.,
at_tub=True)
(x1,x2,x3,vx1,vx2,vx3) = coord
axes[0,k].plot(x1/r_tide,
x2/r_tide,
linestyle='none', alpha=0.1, marker='.',
color='#555555', zorder=-1)
axes[1,k].plot(x1/r_tide,
x3/r_tide,
linestyle='none', alpha=0.1, marker='.',
color='#555555', zorder=-1)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes[0,k].add_patch(circ)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes[1,k].add_patch(circ)
axes[0,k].axhline(0., color='k', alpha=0.75)
axes[1,k].axhline(0., color='k', alpha=0.75)
axes[0,k].set_xlim(-5,5)
axes[0,k].set_ylim(axes[0,k].get_xlim())
axes[1,k].set_xlabel(r"$x_1/r_{\rm tide}$")
if k == 0:
axes[0,k].set_ylabel(r"$x_2/r_{\rm tide}$")
axes[1,k].set_ylabel(r"$x_3/r_{\rm tide}$")
axes2[0,k].plot(vx1/v_disp,
vx2/v_disp,
linestyle='none', alpha=0.1, marker='.',
color='#555555', zorder=-1)
axes2[1,k].plot(vx1/v_disp,
vx3/v_disp,
linestyle='none', alpha=0.1, marker='.',
color='#555555', zorder=-1)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes2[0,k].add_patch(circ)
circ = Circle((0,0), radius=1., fill=False, alpha=0.75,
edgecolor='k', linestyle='solid')
axes2[1,k].add_patch(circ)
axes2[0,k].axhline(0., color='k', alpha=0.75)
axes2[1,k].axhline(0., color='k', alpha=0.75)
axes2[1,k].set_xlim(-5,5)
axes2[1,k].set_ylim(axes2[1,k].get_xlim())
axes2[1,k].set_xlabel(r"$v_{x_1}/\sigma_v$")
if k == 0:
axes2[0,k].set_ylabel(r"$v_{x_2}/\sigma_v$")
axes2[1,k].set_ylabel(r"$v_{x_3}/\sigma_v$")
axes[0,k].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
horizontalalignment='center',
fontsize=24,
transform=axes[0,k].transAxes)
axes2[0,k].text(0.5, 1.05, r"$2.5\times10^{}M_\odot$".format(_m),
horizontalalignment='center',
fontsize=24,
transform=axes2[0,k].transAxes)
fig.tight_layout()
fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
fig.savefig(filename)
fig2.tight_layout()
fig2.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
fig2.savefig(filename2)
def phasespace():
filename = os.path.join(plot_path, "Lpts_rv.png")
fig,axes = plt.subplots(3,4,figsize=(14,12),
sharex=True, sharey=True)
bins = np.linspace(-3,3,50)
nparticles = 2000
for k,_m in enumerate(range(6,9+1)):
mass = "2.5e{}".format(_m)
m = float(mass)
print(mass)
sgr = SgrSimulation(sgr_path.format(_m),snapfile)
p = sgr.particles(n=nparticles, expr=expr)
s = sgr.satellite()
X = np.vstack((s._X[...,:3], p._X[...,:3].copy()))
V = np.vstack((s._X[...,3:], p._X[...,3:].copy()))
integrator = LeapfrogIntegrator(sgr.potential._acceleration_at,
np.array(X), np.array(V),
args=(X.shape[0], np.zeros_like(X)))
ts, rs, vs = integrator.run(t1=sgr.t1, t2=sgr.t2, dt=-1.)
s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
t_idx = np.array([np.argmin(np.fabs(ts - t)) for t in p.tub])
m_t = (-s.mdot*ts + s.m0)[:,np.newaxis]
s_R = np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))
s_V = np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))
r_tide = sgr.potential._tidal_radius(m_t, s_orbit[...,:3])
v_disp = s_V * r_tide / s_R
# cartesian basis to project into
x_hat = s_orbit[...,:3] / np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))[...,np.newaxis]
_y_hat = s_orbit[...,3:] / np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))[...,np.newaxis]
z_hat = np.cross(x_hat, _y_hat)
y_hat = -np.cross(x_hat, z_hat)
# translate to satellite position
rel_orbits = p_orbits - s_orbit
rel_pos = rel_orbits[...,:3]
rel_vel = rel_orbits[...,3:]
# project onto each
X = np.sum(rel_pos * x_hat, axis=-1)
Y = np.sum(rel_pos * y_hat, axis=-1)
Z = np.sum(rel_pos * z_hat, axis=-1)
VX = np.sum(rel_vel * x_hat, axis=-1)
VY = np.sum(rel_vel * y_hat, axis=-1)
VZ = np.sum(rel_vel * z_hat, axis=-1)
VV = np.sqrt(VX**2+VY**2+VZ**2)
_tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
for ii,jj in enumerate(t_idx):
#tcross = r_tide[jj,0] / _v[jj,ii]
tcross = _tcross[jj]
bnd = int(tcross / 2)
ix1,ix2 = jj-bnd, jj+bnd
if ix1 < 0: ix1 = 0
if ix2 > max(sgr.t1,sgr.t2): ix2 = -1
axes[0,k].plot(X[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
VX[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axes[1,k].plot(Y[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
VY[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axes[2,k].plot(Z[jj-bnd:jj+bnd,ii]/r_tide[jj-bnd:jj+bnd,0],
VZ[jj-bnd:jj+bnd,ii]/v_disp[jj-bnd:jj+bnd,0],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axes[0,k].set_xlim(-5,5)
axes[0,k].set_ylim(axes[0,k].get_xlim())
# axes[1,k].set_xlabel(r"$x_1$")
# if k == 0:
# axes[0,k].set_ylabel(r"$x_2$")
# axes[1,k].set_ylabel(r"$x_3$")
axes[0,k].set_title(r"$2.5\times10^{}M_\odot$".format(_m))
fig.tight_layout()
fig.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
fig.savefig(filename)
def total_rv():
filenamer = os.path.join(plot_path, "rel_r.png")
filenamev = os.path.join(plot_path, "rel_v.png")
figr,axesr = plt.subplots(4,1,figsize=(10,14),
sharex=True)
figv,axesv = plt.subplots(4,1,figsize=(10,14),
sharex=True)
nparticles = 2000
for k,_m in enumerate(range(6,9+1)):
mass = "2.5e{}".format(_m)
m = float(mass)
print(mass)
sgr = SgrSimulation(sgr_path.format(_m),snapfile)
p = sgr.particles(n=nparticles, expr=expr)
s = sgr.satellite()
X = np.vstack((s._X[...,:3], p._X[...,:3].copy()))
V = np.vstack((s._X[...,3:], p._X[...,3:].copy()))
integrator = LeapfrogIntegrator(sgr.potential._acceleration_at,
np.array(X), np.array(V),
args=(X.shape[0], np.zeros_like(X)))
ts, rs, vs = integrator.run(t1=sgr.t1, t2=sgr.t2, dt=-1.)
s_orbit = np.vstack((rs[:,0][:,np.newaxis].T, vs[:,0][:,np.newaxis].T)).T
p_orbits = np.vstack((rs[:,1:].T, vs[:,1:].T)).T
t_idx = np.array([np.argmin(np.fabs(ts - t)) for t in p.tub])
m_t = (-s.mdot*ts + s.m0)[:,np.newaxis]
s_R = np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))
s_V = np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))
r_tide = sgr.potential._tidal_radius(m_t, s_orbit[...,:3])
v_disp = s_V * r_tide / s_R
# cartesian basis to project into
x_hat = s_orbit[...,:3] / np.sqrt(np.sum(s_orbit[...,:3]**2, axis=-1))[...,np.newaxis]
_y_hat = s_orbit[...,3:] / np.sqrt(np.sum(s_orbit[...,3:]**2, axis=-1))[...,np.newaxis]
z_hat = np.cross(x_hat, _y_hat)
y_hat = -np.cross(x_hat, z_hat)
# translate to satellite position
rel_orbits = p_orbits - s_orbit
rel_pos = rel_orbits[...,:3]
rel_vel = rel_orbits[...,3:]
# project onto each
X = np.sum(rel_pos * x_hat, axis=-1)
Y = np.sum(rel_pos * y_hat, axis=-1)
Z = np.sum(rel_pos * z_hat, axis=-1)
RR = np.sqrt(X**2 + Y**2 + Z**2)
VX = np.sum(rel_vel * x_hat, axis=-1)
VY = np.sum(rel_vel * y_hat, axis=-1)
VZ = np.sum(rel_vel * z_hat, axis=-1)
VV = (np.sqrt(VX**2 + VY**2 + VZ**2)*u.kpc/u.Myr).to(u.km/u.s).value
v_disp = (v_disp*u.kpc/u.Myr).to(u.km/u.s).value
_tcross = r_tide / np.sqrt(G.decompose(usys).value*m/r_tide)
for ii,jj in enumerate(t_idx):
#tcross = r_tide[jj,0] / _v[jj,ii]
tcross = _tcross[jj]
bnd = int(tcross / 2)
ix1,ix2 = jj-bnd, jj+bnd
if ix1 < 0: ix1 = 0
if ix2 > max(sgr.t1,sgr.t2): ix2 = -1
axesr[k].plot(ts[ix1:ix2],
RR[ix1:ix2,ii],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axesv[k].plot(ts[ix1:ix2],
VV[ix1:ix2,ii],
linestyle='-', alpha=0.1, marker=None, color='#555555', zorder=-1)
axesr[k].plot(ts, r_tide*2., marker=None)
axesr[k].set_xlim(ts.min(), ts.max())
axesv[k].set_xlim(ts.min(), ts.max())
axesr[k].set_ylim(0,max(r_tide)*7)
axesv[k].set_ylim(0,max(v_disp)*7)
# axes[1,k].set_xlabel(r"$x_1$")
# if k == 0:
# axes[0,k].set_ylabel(r"$x_2$")
# axes[1,k].set_ylabel(r"$x_3$")
axesr[k].text(3000, max(r_tide)*5, r"$2.5\times10^{}M_\odot$".format(_m))
axesv[k].text(3000, max(v_disp)*5, r"$2.5\times10^{}M_\odot$".format(_m))
axesr[-1].set_xlabel("time [Myr]")
axesv[-1].set_xlabel("time [Myr]")
figr.suptitle("Relative distance", fontsize=26)
figr.tight_layout()
figr.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
figr.savefig(filenamer)
figv.suptitle("Relative velocity", fontsize=26)
figv.tight_layout()
figv.subplots_adjust(top=0.92, hspace=0.025, wspace=0.1)
figv.savefig(filenamev)
def trace_plots():
cfg_filename = os.path.join(streamspath, "config", "exp1_8.yml")
config = read_config(cfg_filename)
model = StreamModel.from_config(config)
hdf5_filename = os.path.join(streamspath, "plots", "hotfoot", "exper1_8", "inference.hdf5")
with h5py.File(hdf5_filename, "r") as f:
chain = f["chain"].value
flatchain = f["flatchain"].value
acceptance_fraction = f["acceptance_fraction"].value
p0 = f["p0"].value
acor = f["acor"].value
labels = ["$q_1$", "$q_z$", r"$\phi$", "$v_h$", r"$\alpha$"]
bounds = [(1.3,1.5),(1.25,1.45),(85,115),(118,132),(0.5,2.5)]
ticks = [(1.35,1.4,1.45),(1.3,1.35,1.4),(90,100,110),(120,125,130),(1.,1.5,2.)]
# plot individual walkers
fig,axes = plt.subplots(5,1,figsize=(8.5,11),sharex=True)
k = 0
for gname,group in model.parameters.items():
for pname,p in group.items():
thischain = _unit_transform[pname](chain[...,k])
for ii in range(config['walkers']):
axes.flat[k].plot(thischain[ii,5000:],
alpha=0.1, marker=None,
drawstyle='steps', color='k')
#axes.flat[k].set_ylabel(labels[k], rotation='horizontal')
axes[k].text(-0.02, 0.5, labels[k],
horizontalalignment='right',
fontsize=22,
transform=axes[k].transAxes)
if pname == "phi":
axes[k].text(1.07, 0.475, "deg",
horizontalalignment='left',
fontsize=18,
transform=axes[k].transAxes)
elif pname == "v_halo":
axes[k].text(1.07, 0.475, "km/s",
horizontalalignment='left',
fontsize=18,
transform=axes[k].transAxes)
axes[k].text(0.25, 0.1, r"$t_{\rm acor}$=" + "{}".format(int(acor[k])),
horizontalalignment='right',
fontsize=18,
transform=axes[k].transAxes)
axes.flat[k].set_yticks(ticks[k])
axes.flat[k].set_ylim(bounds[k])
axes.flat[k].yaxis.tick_right()
#axes.flat[k].yaxis.set_label_position("right")
k += 1
axes.flat[-1].set_xlabel("Step number")
fig.tight_layout()
fig.subplots_adjust(hspace=0.04, left=0.14, right=0.86)
fig.savefig(os.path.join(plot_path, "mcmc_trace.{}".format(ext)))
def exp1_posterior():
cfg_filename = os.path.join(streamspath, "config", "exp1_8.yml")
config = read_config(cfg_filename)
model = StreamModel.from_config(config)
hdf5_filename = os.path.join(streamspath, "plots", "hotfoot", "exper1_8", "inference.hdf5")
with h5py.File(hdf5_filename, "r") as f:
chain = f["chain"].value
#flatchain = f["flatchain"].value
acceptance_fraction = f["acceptance_fraction"].value
p0 = f["p0"].value
acor = f["acor"].value
_flatchain = np.vstack(chain[:,2500::int(np.median(acor))])
flatchain = np.zeros_like(_flatchain)
params = OrderedDict(model.parameters['potential'].items() + \
model.parameters['satellite'].items())
labels = ["$q_1$", "$q_z$", r"$\phi$ [deg]", "$v_h$ [km/s]", r"$\alpha$"]
truths = []
bounds = []
for ii,p in enumerate(params.values()):
if p.name == 'alpha':
truths.append(np.nan)
bounds.append((1., 2.0))
flatchain[:,ii] = _unit_transform[p.name](_flatchain[:,ii])
continue
truth = _unit_transform[p.name](p.truth)
truths.append(truth)
bounds.append((0.95*truth, 1.05*truth))
flatchain[:,ii] = _unit_transform[p.name](_flatchain[:,ii])
fig = triangle.corner(flatchain, plot_datapoints=False,
truths=truths, extents=bounds, labels=labels)
fig.savefig(os.path.join(plot_path, "exp1_posterior.png"))
def exp_posteriors(exp_num, slicey=-5000):
cfg_filename = os.path.join(streamspath, "config", "exp{}.yml".format(exp_num))
config = read_config(cfg_filename)
model = StreamModel.from_config(config)
hdf5_filename = os.path.join(streamspath, "plots", "hotfoot",
"exper{}".format(exp_num), "inference.hdf5")
# hdf5_filename = os.path.join(streamspath, "plots", "infer_potential",
# "exper{}".format(exp_num), "inference.hdf5")
with h5py.File(hdf5_filename, "r") as f:
chain = f["chain"].value
acceptance_fraction = f["acceptance_fraction"].value
_p0 = f["p0"].value
acor = f["acor"].value
print("median acor: {}".format(int(np.median(acor))))
if slicey is None:
slicey = slice(-5000,None,int(np.median(acor)))
else:
slicey = slice(slicey,None,int(np.median(acor)))
_flatchain = np.vstack(chain[acceptance_fraction>0.03,slicey])
d = model.label_flatchain(_flatchain)
p0 = model.label_flatchain(_p0)
# Potential
this_flatchain = np.zeros((_flatchain.shape[0], len(d["potential"])))
this_p0 = np.zeros((_p0.shape[0], this_flatchain.shape[1]))
truths = []
bounds = []
labels = []
for ii,pname in enumerate(d["potential"].keys()):
this_flatchain[:,ii] = _unit_transform[pname](np.squeeze(d["potential"][pname]))
this_p0[:,ii] = _unit_transform[pname](np.squeeze(p0["potential"][pname]))
p = model.parameters["potential"][pname]
truth = _unit_transform[pname](p.truth)
truths.append(truth)
bounds.append((0.75*truth, 1.25*truth))
labels.append(_label_map[pname])
q16,q50,q84 = np.array(np.percentile(this_flatchain, [16, 50, 84], axis=0))
q_m, q_p = q50-q16, q84-q50
for ii,pname in enumerate(d["potential"].keys()):
print("{} \n\t truth={:.2f}\n\t measured={:.2f}+{:.2f}-{:.2f}"\
.format(pname,truths[ii],q50[ii],q_p[ii],q_m[ii]))
fig = triangle.corner(this_flatchain, plot_datapoints=False,
truths=truths, extents=bounds, labels=labels)
fig.savefig(os.path.join(plot_path, "exp{}_potential.{}".format(exp_num, ext)))
# Particle
p_idx = 5
this_flatchain = np.zeros((_flatchain.shape[0], len(d["particles"])))
this_p0 = np.zeros((_p0.shape[0], this_flatchain.shape[1]))
truths = []
bounds = []
labels = []
for ii,pname in enumerate(d["particles"].keys()):
this_flatchain[:,ii] = _unit_transform[pname](d["particles"][pname][:,p_idx])
this_p0[:,ii] = _unit_transform[pname](p0["particles"][pname][:,p_idx])
p = model.parameters["particles"][pname]
truth = _unit_transform[pname](p.truth[p_idx])
truths.append(truth)
if pname == "tub":
bounds.append((model.lnpargs[1], model.lnpargs[0]))
else:
sig = model.particles.errors[pname].value[p_idx]
mu = model.particles[pname].value[p_idx]
bounds.append((mu-3*sig, mu+3*sig))
labels.append(_label_map[pname])
# HACK
bounds = [(32.5,41), (1.75,2.4), (-1.45,-1.0), (-85,-40), bounds[-1]]
fig = triangle.corner(this_flatchain, plot_datapoints=False,
truths=truths, labels=labels, extents=bounds)
fig.savefig(os.path.join(plot_path, "exp{}_particle.{}".format(exp_num, ext)))
# Satellite
this_flatchain = np.zeros((_flatchain.shape[0], len(d["satellite"])))
this_p0 = np.zeros((_p0.shape[0], this_flatchain.shape[1]))
truths = []
bounds = []
labels = []
#for ii,pname in enumerate(keys):
for ii,pname in enumerate(d["satellite"].keys()):
this_flatchain[:,ii] = _unit_transform[pname](d["satellite"][pname][:,0])
this_p0[:,ii] = _unit_transform[pname](p0["satellite"][pname][:,0])
p = model.parameters["satellite"][pname]
truth = _unit_transform[pname](p.truth)
if pname == "alpha":
bounds.append((1., 2.5))
truths.append(np.nan)
else:
truths.append(truth)
sig = model.satellite.errors[pname].value[0]
mu = model.satellite[pname].value[0]
bounds.append((mu-3*sig, mu+3*sig))
labels.append(_label_map[pname])
# HACK
bounds = [(28,33), (-2.1,-1.6), (1.3,1.8), (125,185), bounds[-1]]
if len(d["satellite"]) > len(bounds):
bounds = [(0,10), (-20,5)] + bounds
#bounds = None
fig = triangle.corner(this_flatchain, plot_datapoints=False,
truths=truths, labels=labels, extents=bounds)
fig.savefig(os.path.join(plot_path, "exp{}_satellite.{}".format(exp_num, ext)))
def exp2_posteriors():
exp_posteriors(2, slicey=-10000)
def exp3_posteriors():
exp_posteriors(3, slicey=-15000)
def exp4_posteriors():
exp_posteriors(4, slicey=-15000)
if __name__ == '__main__':
    from argparse import ArgumentParser
    import logging

    # Attach a stream handler so log lines show origin and severity.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(name)s / %(levelname)s / %(message)s"))
    logger.addHandler(handler)

    # Command-line interface: choose one module-level function and run it.
    parser = ArgumentParser(description="")
    parser.add_argument("-v", "--verbose", action="store_true",
                        dest="verbose", default=False,
                        help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("-l", "--list", action="store_true", dest="list",
                        default=False, help="List all functions")
    parser.add_argument("-o", "--overwrite", action="store_true",
                        dest="overwrite", default=False,
                        help="Overwrite existing files.")
    parser.add_argument("-f", "--function", dest="function", type=str,
                        help="The name of the function to execute.")
    args = parser.parse_args()

    # Verbosity flags map to log levels; --verbose wins over --quiet.
    if args.verbose:
        level = logging.DEBUG
    elif args.quiet:
        level = logging.ERROR
    else:
        level = logging.INFO
    logger.setLevel(level)

    def _print_funcs():
        # List every public function defined in this module.
        members = inspect.getmembers(
            sys.modules[__name__],
            lambda member: inspect.isfunction(member)
            and member.__module__ == __name__
            and not member.__name__.startswith("_"))
        print("\n".join(name for name, _ in members))

    if args.list:
        print("=" * 79)
        _print_funcs()
        print("=" * 79)
        sys.exit(0)

    if args.function is None:
        print("You must specify a function name! Use -l to get the list "
              "of functions.")
        sys.exit(1)

    # Look up the requested function by name and invoke it.
    func = getattr(sys.modules[__name__], args.function)
    func()
|
#!/usr/bin/env python
"""
@package ion.services.sa.observatory.management.instrument_site_impl
@author Ian Katz
"""
from pyon.core.exception import BadRequest, Inconsistent
from pyon.public import PRED, RT
from ion.services.sa.observatory.site_impl import SiteImpl
class InstrumentSiteImpl(SiteImpl):
    """
    @brief resource management for InstrumentSite resources
    """

    def _primary_object_name(self):
        # Resource type this impl manages.
        return RT.InstrumentSite

    def _primary_object_label(self):
        return "instrument_site"

    def link_deployment(self, instrument_site_id='', deployment_id=''):
        """Associate the site with a single Deployment."""
        return self._link_resources_single_object(instrument_site_id, PRED.hasDeployment, deployment_id)

    def unlink_deployment(self, instrument_site_id='', deployment_id=''):
        return self._unlink_resources(instrument_site_id, PRED.hasDeployment, deployment_id)

    def link_device(self, instrument_site_id='', instrument_device_id=''):
        """Attach a device to this site, enforcing the site/device/deployment rules."""
        # a device may not be linked to any other site
        if 0 < len(self._find_having(PRED.hasDevice, instrument_device_id)):
            raise BadRequest("Instrument device is already associated with a site")

        # make sure that only 1 site-device-deployment triangle exists at one time
        deployments_site = self.find_stemming_deployment(instrument_site_id)
        if 1 < len(deployments_site):
            raise Inconsistent("The site is associated to multiple deployments!")

        deployments_inst = self._find_stemming(instrument_device_id, PRED.hasDeployment, RT.Deployment)
        if 1 < len(deployments_inst):
            raise Inconsistent("The instrument device is associated to multiple deployments!")

        if 1 == len(deployments_inst):
            # if both site and device carry a deployment it must be the same one
            if 1 == len(deployments_site):
                if deployments_site[0]._id != deployments_inst[0]._id:
                    raise BadRequest("The deployments of the device and site do not agree")
            # no already-deployed device may occupy the site alongside this one
            for dev in self.find_stemming_device(instrument_site_id):
                if 0 < len(self._find_stemming(dev, PRED.hasDeployment, RT.Deployment)):
                    raise BadRequest("Device has deployment, and site already has a device with deployment")

        return self._link_resources(instrument_site_id, PRED.hasDevice, instrument_device_id)

    def unlink_device(self, instrument_site_id='', instrument_device_id=''):
        return self._unlink_resources(instrument_site_id, PRED.hasDevice, instrument_device_id)

    def link_model(self, instrument_site_id='', instrument_model_id=''):
        return self._link_resources_single_object(instrument_site_id, PRED.hasModel, instrument_model_id)

    def unlink_model(self, instrument_site_id='', instrument_model_id=''):
        return self._unlink_resources(instrument_site_id, PRED.hasModel, instrument_model_id)

    def link_output_product(self, site_id, data_product_id):
        # output product can't be linked to any other site, this site can't be linked to any other output product
        if 0 < len(self.find_stemming_output_product(site_id)):
            raise BadRequest("Site already has an output data product assigned")
        return self._link_resources_single_subject(site_id, PRED.hasOutputProduct, data_product_id)

    def unlink_output_product(self, site_id, data_product_id):
        return self._unlink_resources(site_id, PRED.hasOutputProduct, data_product_id)

    def find_having_deployment(self, deployment_id):
        return self._find_having(PRED.hasDeployment, deployment_id)

    def find_stemming_deployment(self, instrument_site_id):
        return self._find_stemming(instrument_site_id, PRED.hasDeployment, RT.Deployment)

    def find_having_device(self, instrument_device_id):
        return self._find_having(PRED.hasDevice, instrument_device_id)

    def find_stemming_device(self, instrument_site_id):
        return self._find_stemming(instrument_site_id, PRED.hasDevice, RT.InstrumentDevice)

    def find_having_model(self, instrument_model_id):
        return self._find_having(PRED.hasModel, instrument_model_id)

    def find_stemming_model(self, instrument_site_id):
        return self._find_stemming(instrument_site_id, PRED.hasModel, RT.InstrumentModel)

    def find_having_output_product(self, data_product_id):
        return self._find_having(PRED.hasOutputProduct, data_product_id)

    def find_stemming_output_product(self, site_id):
        # BUG FIX: predicate was PRED.hasMode; output products are linked via
        # hasOutputProduct (see link_output_product / find_having_output_product).
        return self._find_stemming(site_id, PRED.hasOutputProduct, RT.DataProduct)

    def on_pre_delete(self, obj_id, obj):
        # TODO: unlink parent/children sites, agents, models, devices?
        return
Fixed typo: find_stemming_output_product used the wrong predicate.
#!/usr/bin/env python
"""
@package ion.services.sa.observatory.management.instrument_site_impl
@author Ian Katz
"""
from pyon.core.exception import BadRequest, Inconsistent
from pyon.public import PRED, RT
from ion.services.sa.observatory.site_impl import SiteImpl
class InstrumentSiteImpl(SiteImpl):
    """
    @brief resource management for InstrumentSite resources
    """

    def _primary_object_name(self):
        # Resource type this impl manages.
        return RT.InstrumentSite

    def _primary_object_label(self):
        return "instrument_site"

    def link_deployment(self, instrument_site_id='', deployment_id=''):
        """Associate the site with a single Deployment."""
        return self._link_resources_single_object(instrument_site_id, PRED.hasDeployment, deployment_id)

    def unlink_deployment(self, instrument_site_id='', deployment_id=''):
        return self._unlink_resources(instrument_site_id, PRED.hasDeployment, deployment_id)

    def link_device(self, instrument_site_id='', instrument_device_id=''):
        """Attach a device to this site, enforcing the site/device/deployment rules."""
        # a device may not be linked to any other site
        if 0 < len(self._find_having(PRED.hasDevice, instrument_device_id)):
            raise BadRequest("Instrument device is already associated with a site")

        # make sure that only 1 site-device-deployment triangle exists at one time
        deployments_site = self.find_stemming_deployment(instrument_site_id)
        if 1 < len(deployments_site):
            raise Inconsistent("The site is associated to multiple deployments!")

        deployments_inst = self._find_stemming(instrument_device_id, PRED.hasDeployment, RT.Deployment)
        if 1 < len(deployments_inst):
            raise Inconsistent("The instrument device is associated to multiple deployments!")

        if 1 == len(deployments_inst):
            # when both the site and the device have a deployment, they must match
            if 1 == len(deployments_site):
                if deployments_site[0]._id != deployments_inst[0]._id:
                    raise BadRequest("The deployments of the device and site do not agree")
            # the site must not already hold another deployed device
            for dev in self.find_stemming_device(instrument_site_id):
                if 0 < len(self._find_stemming(dev, PRED.hasDeployment, RT.Deployment)):
                    raise BadRequest("Device has deployment, and site already has a device with deployment")

        return self._link_resources(instrument_site_id, PRED.hasDevice, instrument_device_id)

    def unlink_device(self, instrument_site_id='', instrument_device_id=''):
        return self._unlink_resources(instrument_site_id, PRED.hasDevice, instrument_device_id)

    def link_model(self, instrument_site_id='', instrument_model_id=''):
        return self._link_resources_single_object(instrument_site_id, PRED.hasModel, instrument_model_id)

    def unlink_model(self, instrument_site_id='', instrument_model_id=''):
        return self._unlink_resources(instrument_site_id, PRED.hasModel, instrument_model_id)

    def link_output_product(self, site_id, data_product_id):
        # output product can't be linked to any other site, this site can't be linked to any other output product
        if 0 < len(self.find_stemming_output_product(site_id)):
            raise BadRequest("Site already has an output data product assigned")
        return self._link_resources_single_subject(site_id, PRED.hasOutputProduct, data_product_id)

    def unlink_output_product(self, site_id, data_product_id):
        return self._unlink_resources(site_id, PRED.hasOutputProduct, data_product_id)

    def find_having_deployment(self, deployment_id):
        return self._find_having(PRED.hasDeployment, deployment_id)

    def find_stemming_deployment(self, instrument_site_id):
        return self._find_stemming(instrument_site_id, PRED.hasDeployment, RT.Deployment)

    def find_having_device(self, instrument_device_id):
        return self._find_having(PRED.hasDevice, instrument_device_id)

    def find_stemming_device(self, instrument_site_id):
        return self._find_stemming(instrument_site_id, PRED.hasDevice, RT.InstrumentDevice)

    def find_having_model(self, instrument_model_id):
        return self._find_having(PRED.hasModel, instrument_model_id)

    def find_stemming_model(self, instrument_site_id):
        return self._find_stemming(instrument_site_id, PRED.hasModel, RT.InstrumentModel)

    def find_having_output_product(self, data_product_id):
        return self._find_having(PRED.hasOutputProduct, data_product_id)

    def find_stemming_output_product(self, site_id):
        # BUG FIX: predicate was PRED.hasModel, which would return the site's
        # models, not its output product — link_output_product relies on this
        # method to detect an existing hasOutputProduct association.
        return self._find_stemming(site_id, PRED.hasOutputProduct, RT.DataProduct)

    def on_pre_delete(self, obj_id, obj):
        # TODO: unlink parent/children sites, agents, models, devices?
        return
|
from __future__ import unicode_literals
from django import forms
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _, string_concat
from django.contrib.auth import authenticate
from django.contrib.auth.password_validation import (
validate_password, password_validators_help_text_html
)
from django.contrib.auth.models import User
from decisions.subscriptions.models import Subscription
class RegisterForm(forms.Form):
    """Sign-up form: email, optional display name, and a double-entered password."""

    email = forms.EmailField(
        label=_("Email address"),
        widget=forms.TextInput(
            attrs={
                "placeholder": _("user@example.com"),
            }
        ),
    )
    # Optional; when blank, get_username() derives one from the email address.
    username = forms.CharField(
        label=_("Display name"),
        required=False,
        widget=forms.TextInput(),
    )
    password = forms.CharField(
        widget=forms.PasswordInput(),
        label=_("Password")
    )
    password_again = forms.CharField(
        widget=forms.PasswordInput(),
        label=_("Password again"),
        help_text=string_concat(
            _("Your password must meet these requirements:"),
            password_validators_help_text_html())
    )

    def clean(self):
        """Cross-field validation: matching passwords, unique username and
        email, and password strength."""
        # Compare passwords only when both fields passed field-level validation.
        if ("password" in self.cleaned_data
                and "password_again" in self.cleaned_data):
            if self.cleaned_data["password"] != self.cleaned_data["password_again"]:
                self.add_error("password",
                               forms.ValidationError(_("Passwords don't match")))
                self.add_error("password_again",
                               forms.ValidationError(_("Passwords don't match")))

        # Resolve the effective username (possibly derived from the email)
        # before checking for uniqueness.
        if "email" in self.cleaned_data:
            self.cleaned_data["username"] = self.get_username()

        if "username" in self.cleaned_data:
            username_exists = User.objects.filter(
                username=self.cleaned_data["username"]).count()
            if username_exists:
                self.add_error("username", forms.ValidationError(
                    _("Please choose another display name")))

        if "email" in self.cleaned_data:
            email_exists = User.objects.filter(
                email=self.cleaned_data["email"]).count()
            if email_exists:
                self.add_error("email", forms.ValidationError(
                    _("Please choose another email address")))

        # Raises ValidationError if the configured password validators reject it.
        if "password" in self.cleaned_data:
            validate_password(self.cleaned_data["password"])
        return self.cleaned_data

    def get_username(self):
        """Return the chosen display name, falling back to the email's local part."""
        if self.cleaned_data.get("username"):
            return self.cleaned_data["username"]
        return self.cleaned_data["email"].split("@", 1)[0]
class LoginForm(forms.Form):
    """Email/password login form; clean() authenticates and stores the user."""

    # Set by clean() on successful authentication.
    user = None

    email = forms.EmailField(
        label=_("Email address"),
        widget=forms.TextInput(
            attrs={
                "placeholder": _("user@example.com"),
            }
        ),
    )
    password = forms.CharField(
        widget=forms.PasswordInput(),
        label=_("Password")
    )
    # Optional redirect target carried through the login round trip.
    next = forms.CharField(widget=forms.HiddenInput, required=False)

    def clean(self):
        # Passes all cleaned fields (email, password, next) through to
        # authenticate() as keyword credentials.
        self.user = authenticate(**self.cleaned_data)
        if not self.user:
            raise forms.ValidationError(_("Email or password did not match. Please try again."))
        return self.cleaned_data
class BSRadioChoiceInput(widgets.RadioChoiceInput):
    """Radio input rendered as a Bootstrap toggle-button <label>."""

    def render(self, name=None, value=None, attrs=None, choices=()):
        from django.utils.html import format_html

        # Associate the label with the input when an id is available.
        if self.id_for_label:
            label_for = format_html(' for="{}"', self.id_for_label)
        else:
            label_for = ''
        # Merge per-render attrs over the widget's own attrs.
        attrs = dict(self.attrs, **attrs) if attrs else self.attrs
        # BUG FIX: removed leftover Python-2 debug statement `print self.attrs`
        # (a SyntaxError on Python 3, and debug noise on Python 2).
        active = "active" if self.is_checked() else ""
        return format_html(
            '<label{} class="btn {}">{} {}</label>', label_for, active, self.tag(attrs), self.choice_label
        )
class BSRadioFieldRenderer(widgets.ChoiceFieldRenderer):
    """Renders a radio group as a Bootstrap button group (data-toggle=buttons)."""
    choice_input_class = BSRadioChoiceInput
    outer_html = '<div{id_attr} class="btn-group" data-toggle="buttons">{content}</div>'
    inner_html = '{choice_value}{sub_widgets}'
class BSRadioSelect(forms.RadioSelect):
    """RadioSelect styled as Bootstrap toggle buttons via BSRadioFieldRenderer."""
    renderer = BSRadioFieldRenderer
class SubscriptionForm(forms.Form):
    """Create a subscription: a search term plus an email-notification toggle."""

    search_term = forms.CharField(
        label=_('Search term'),
        widget=forms.TextInput(
            attrs={
            })
    )
    # Rendered as a two-state Bootstrap button group rather than a checkbox.
    send_mail = forms.BooleanField(
        label=_('Sends email'),
        help_text=_('If checked, notifications about new search results are also sent by email. Otherwise they will just show up in your feed.'),
        required=False,
        widget=BSRadioSelect(choices=[
            (True, _("Sends email")),
            (False, _("No"))
        ])
    )
class SubscriptionEditForm(SubscriptionForm):
    """SubscriptionForm plus an on/off switch for an existing subscription."""

    active = forms.BooleanField(
        label=_('Active'),
        help_text=_('If you do not wish to receive any more notifications from this subscriptions, you can disable it. Old notifications will not disappear from your feed.'),
        required=False,
        widget=BSRadioSelect(choices=[
            (True, _("Active")),
            (False, _("No"))
        ])
    )
Closes #45. Adds autocomplete annotations to the register/login forms.
from __future__ import unicode_literals
from django import forms
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _, string_concat
from django.contrib.auth import authenticate
from django.contrib.auth.password_validation import (
validate_password, password_validators_help_text_html
)
from django.contrib.auth.models import User
from decisions.subscriptions.models import Subscription
class RegisterForm(forms.Form):
    """Sign-up form: email, optional display name, and a double-entered password.

    Widget attrs carry autocomplete hints so browsers/password managers can
    fill the fields correctly.
    """

    email = forms.EmailField(
        label=_("Email address"),
        widget=forms.TextInput(
            attrs={
                "placeholder": _("user@example.com"),
                "autocomplete": "username",
            }
        ),
    )
    # Optional; when blank, get_username() derives one from the email address.
    username = forms.CharField(
        label=_("Display name"),
        required=False,
        widget=forms.TextInput(attrs={
            "autocomplete": "nickname"
        }),
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={
            "autocomplete": "new-password"
        }),
        label=_("Password")
    )
    password_again = forms.CharField(
        widget=forms.PasswordInput(),
        label=_("Password again"),
        help_text=string_concat(
            _("Your password must meet these requirements:"),
            password_validators_help_text_html())
    )

    def clean(self):
        """Cross-field validation: matching passwords, unique username and
        email, and password strength."""
        # Compare passwords only when both fields passed field-level validation.
        if ("password" in self.cleaned_data
                and "password_again" in self.cleaned_data):
            if self.cleaned_data["password"] != self.cleaned_data["password_again"]:
                self.add_error("password",
                               forms.ValidationError(_("Passwords don't match")))
                self.add_error("password_again",
                               forms.ValidationError(_("Passwords don't match")))

        # Resolve the effective username (possibly derived from the email)
        # before checking for uniqueness.
        if "email" in self.cleaned_data:
            self.cleaned_data["username"] = self.get_username()

        if "username" in self.cleaned_data:
            username_exists = User.objects.filter(
                username=self.cleaned_data["username"]).count()
            if username_exists:
                self.add_error("username", forms.ValidationError(
                    _("Please choose another display name")))

        if "email" in self.cleaned_data:
            email_exists = User.objects.filter(
                email=self.cleaned_data["email"]).count()
            if email_exists:
                self.add_error("email", forms.ValidationError(
                    _("Please choose another email address")))

        # Raises ValidationError if the configured password validators reject it.
        if "password" in self.cleaned_data:
            validate_password(self.cleaned_data["password"])
        return self.cleaned_data

    def get_username(self):
        """Return the chosen display name, falling back to the email's local part."""
        if self.cleaned_data.get("username"):
            return self.cleaned_data["username"]
        return self.cleaned_data["email"].split("@", 1)[0]
class LoginForm(forms.Form):
    """Email/password login form; clean() authenticates and stores the user."""

    # Set by clean() on successful authentication.
    user = None

    email = forms.EmailField(
        label=_("Email address"),
        widget=forms.TextInput(
            attrs={
                "placeholder": _("user@example.com"),
                "autocomplete": "username",
            }
        ),
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={
            "autocomplete": "current-password"
        }),
        label=_("Password")
    )
    # Optional redirect target carried through the login round trip.
    next = forms.CharField(widget=forms.HiddenInput, required=False)

    def clean(self):
        # Passes all cleaned fields (email, password, next) through to
        # authenticate() as keyword credentials.
        self.user = authenticate(**self.cleaned_data)
        if not self.user:
            raise forms.ValidationError(_("Email or password did not match. Please try again."))
        return self.cleaned_data
class BSRadioChoiceInput(widgets.RadioChoiceInput):
    """Radio input rendered as a Bootstrap toggle-button <label>."""

    def render(self, name=None, value=None, attrs=None, choices=()):
        from django.utils.html import format_html

        # Associate the label with the input when an id is available.
        if self.id_for_label:
            label_for = format_html(' for="{}"', self.id_for_label)
        else:
            label_for = ''
        # Merge per-render attrs over the widget's own attrs.
        attrs = dict(self.attrs, **attrs) if attrs else self.attrs
        # BUG FIX: removed leftover Python-2 debug statement `print self.attrs`
        # (a SyntaxError on Python 3, and debug noise on Python 2).
        active = "active" if self.is_checked() else ""
        return format_html(
            '<label{} class="btn {}">{} {}</label>', label_for, active, self.tag(attrs), self.choice_label
        )
class BSRadioFieldRenderer(widgets.ChoiceFieldRenderer):
    """Renders a radio group as a Bootstrap button group (data-toggle=buttons)."""
    choice_input_class = BSRadioChoiceInput
    outer_html = '<div{id_attr} class="btn-group" data-toggle="buttons">{content}</div>'
    inner_html = '{choice_value}{sub_widgets}'
class BSRadioSelect(forms.RadioSelect):
    """RadioSelect styled as Bootstrap toggle buttons via BSRadioFieldRenderer."""
    renderer = BSRadioFieldRenderer
class SubscriptionForm(forms.Form):
    """Create a subscription: a search term plus an email-notification toggle."""

    search_term = forms.CharField(
        label=_('Search term'),
        widget=forms.TextInput(
            attrs={
            })
    )
    # Rendered as a two-state Bootstrap button group rather than a checkbox.
    send_mail = forms.BooleanField(
        label=_('Sends email'),
        help_text=_('If checked, notifications about new search results are also sent by email. Otherwise they will just show up in your feed.'),
        required=False,
        widget=BSRadioSelect(choices=[
            (True, _("Sends email")),
            (False, _("No"))
        ])
    )
class SubscriptionEditForm(SubscriptionForm):
    """SubscriptionForm plus an on/off switch for an existing subscription."""

    active = forms.BooleanField(
        label=_('Active'),
        help_text=_('If you do not wish to receive any more notifications from this subscriptions, you can disable it. Old notifications will not disappear from your feed.'),
        required=False,
        widget=BSRadioSelect(choices=[
            (True, _("Active")),
            (False, _("No"))
        ])
    )
|
# Copyright 2017 John Reese
# Licensed under the MIT license
"""
Core implementation of aiosqlite proxies
"""
import asyncio
import logging
import sqlite3
from functools import partial
from queue import Queue, Empty
from threading import Thread
from typing import Any, Callable, Iterable, Optional, Tuple
from .context import contextmanager
__all__ = ["connect", "Connection", "Cursor"]
LOG = logging.getLogger("aiosqlite")
class Cursor:
    """Async proxy around a sqlite3.Cursor; all work runs on the connection's
    worker thread via Connection._execute."""

    def __init__(self, conn: "Connection", cursor: sqlite3.Cursor) -> None:
        self._conn = conn
        self._cursor = cursor

    def __aiter__(self) -> "Cursor":
        """The cursor proxy is also an async iterator."""
        return self

    async def __anext__(self) -> sqlite3.Row:
        """Yield rows one at a time via fetchone()."""
        row = await self.fetchone()
        if row is not None:
            return row
        raise StopAsyncIteration

    async def _execute(self, fn, *args, **kwargs):
        """Run `fn` on the shared connection's worker thread."""
        return await self._conn._execute(fn, *args, **kwargs)

    async def execute(self, sql: str, parameters: Iterable[Any] = None) -> None:
        """Execute a single SQL statement with optional bound parameters."""
        await self._execute(self._cursor.execute, sql,
                            [] if parameters is None else parameters)

    async def executemany(self, sql: str, parameters: Iterable[Iterable[Any]]) -> None:
        """Execute the statement once per parameter sequence."""
        await self._execute(self._cursor.executemany, sql, parameters)

    async def executescript(self, sql_script: str) -> None:
        """Execute a multi-statement SQL script."""
        await self._execute(self._cursor.executescript, sql_script)

    async def fetchone(self) -> Optional[sqlite3.Row]:
        """Return the next row, or None when the result set is exhausted."""
        return await self._execute(self._cursor.fetchone)

    async def fetchmany(self, size: int = None) -> Iterable[sqlite3.Row]:
        """Return up to `size` rows (default: the cursor's arraysize)."""
        extra = () if size is None else (size,)  # type: Tuple[int, ...]
        return await self._execute(self._cursor.fetchmany, *extra)

    async def fetchall(self) -> Iterable[sqlite3.Row]:
        """Return all remaining rows."""
        return await self._execute(self._cursor.fetchall)

    async def close(self) -> None:
        """Close the underlying cursor."""
        await self._execute(self._cursor.close)

    @property
    def rowcount(self) -> int:
        return self._cursor.rowcount

    @property
    def lastrowid(self) -> int:
        return self._cursor.lastrowid

    @property
    def arraysize(self) -> int:
        return self._cursor.arraysize

    @arraysize.setter
    def arraysize(self, value: int) -> None:
        self._cursor.arraysize = value

    @property
    def description(self) -> Tuple[Tuple]:
        return self._cursor.description

    @property
    def connection(self) -> sqlite3.Connection:
        return self._cursor.connection

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
class Connection(Thread):
    """Async proxy for a sqlite3 connection; every sqlite call is shipped to
    this worker thread, since sqlite3 objects must stay on one thread."""

    def __init__(
        self,
        connector: Callable[[], sqlite3.Connection],
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        super().__init__()
        self._running = True
        self._connection = None  # type: Optional[sqlite3.Connection]
        self._connector = connector
        self._loop = loop
        # Serializes _execute() callers: exactly one request/response pair
        # is in flight at a time.
        self._lock = asyncio.Lock(loop=loop)
        self._tx = Queue()  # type: Queue  # jobs for the worker thread
        self._rx = Queue()  # type: Queue  # results back from the worker

    @property
    def _conn(self) -> sqlite3.Connection:
        """The live sqlite3 connection; raises if not yet connected."""
        if self._connection is None:
            raise ValueError("no active connection")
        return self._connection

    def run(self) -> None:
        """Execute function calls on a separate thread."""
        while self._running:
            try:
                # The timeout lets the loop re-check _running so close()
                # can stop the thread.
                fn = self._tx.get(timeout=0.1)
            except Empty:
                continue
            try:
                LOG.debug("executing %s", fn)
                result = fn()
                LOG.debug("returning %s", result)
                self._rx.put(result)
            except BaseException as e:
                # Ship the exception through the result queue so the
                # awaiting caller can re-raise it.
                LOG.exception("returning exception %s", e)
                self._rx.put(e)

    async def _execute(self, fn, *args, **kwargs):
        """Queue a function with the given arguments for execution."""
        await self._lock.acquire()
        try:
            pt = partial(fn, *args, **kwargs)
            self._tx.put_nowait(pt)
            while True:
                try:
                    result = self._rx.get_nowait()
                    break
                except Empty:
                    # Short async poll keeps latency low without blocking the
                    # loop; interval reduced from 5ms to 1ms, see
                    # https://github.com/jreese/aiosqlite/issues/8
                    await asyncio.sleep(0.001)
        finally:
            # BUG FIX: release in `finally` so cancellation while polling
            # cannot leave the lock held and deadlock every later caller.
            self._lock.release()
        if isinstance(result, Exception):
            raise result
        return result

    async def _connect(self):
        """Connect to the actual sqlite database."""
        if self._connection is None:
            self._connection = await self._execute(self._connector)

    async def __aenter__(self) -> "Connection":
        # Start the worker thread first, then open the database on it.
        self.start()
        await self._connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self.close()
        self._connection = None

    @contextmanager
    async def cursor(self) -> Cursor:
        """Create an aiosqlite cursor wrapping a sqlite3 cursor object."""
        return Cursor(self, await self._execute(self._conn.cursor))

    async def commit(self) -> None:
        """Commit the current transaction."""
        await self._execute(self._conn.commit)

    async def rollback(self) -> None:
        """Roll back the current transaction."""
        await self._execute(self._conn.rollback)

    async def close(self) -> None:
        """Complete queued queries/cursors and close the connection."""
        await self._execute(self._conn.close)
        # The worker loop exits on its next timeout tick.
        self._running = False

    @contextmanager
    async def execute(self, sql: str, parameters: Iterable[Any] = None) -> Cursor:
        """Helper to create a cursor and execute the given query."""
        if parameters is None:
            parameters = []
        cursor = await self._execute(self._conn.execute, sql, parameters)
        return Cursor(self, cursor)

    @contextmanager
    async def executemany(
        self, sql: str, parameters: Iterable[Iterable[Any]]
    ) -> Cursor:
        """Helper to create a cursor and execute the given multiquery."""
        cursor = await self._execute(self._conn.executemany, sql, parameters)
        return Cursor(self, cursor)

    @contextmanager
    async def executescript(self, sql_script: str) -> Cursor:
        """Helper to create a cursor and execute a user script."""
        cursor = await self._execute(self._conn.executescript, sql_script)
        return Cursor(self, cursor)

    async def interrupt(self) -> None:
        """Interrupt pending queries."""
        # Runs on the calling thread rather than being queued —
        # sqlite3.Connection.interrupt is documented as callable from
        # another thread.
        return self._conn.interrupt()

    @property
    def isolation_level(self) -> str:
        return self._conn.isolation_level

    @isolation_level.setter
    def isolation_level(self, value: str) -> None:
        self._conn.isolation_level = value

    @property
    def in_transaction(self) -> bool:
        return self._conn.in_transaction
def connect(
    database: str, *, loop: asyncio.AbstractEventLoop = None, **kwargs: Any
) -> Connection:
    """Create and return a connection proxy to the sqlite database."""
    event_loop = loop if loop is not None else asyncio.get_event_loop()

    # Deferred factory: the actual sqlite3.connect runs on the worker thread.
    def connector() -> sqlite3.Connection:
        return sqlite3.connect(database, **kwargs)

    return Connection(connector, event_loop)
Decreased the polling sleep interval in Connection._execute from 5 ms to 1 ms.
Per https://github.com/jreese/aiosqlite/issues/8
# Copyright 2017 John Reese
# Licensed under the MIT license
"""
Core implementation of aiosqlite proxies
"""
import asyncio
import logging
import sqlite3
from functools import partial
from queue import Queue, Empty
from threading import Thread
from typing import Any, Callable, Iterable, Optional, Tuple
from .context import contextmanager
__all__ = ["connect", "Connection", "Cursor"]
LOG = logging.getLogger("aiosqlite")
class Cursor:
    """Async wrapper over a sqlite3.Cursor; operations are delegated to the
    owning connection's worker thread."""

    def __init__(self, conn: "Connection", cursor: sqlite3.Cursor) -> None:
        self._conn = conn
        self._cursor = cursor

    def __aiter__(self) -> "Cursor":
        """Support `async for row in cursor`."""
        return self

    async def __anext__(self) -> sqlite3.Row:
        """Produce the next row, stopping when fetchone() returns None."""
        row = await self.fetchone()
        if row is None:
            raise StopAsyncIteration
        return row

    async def _execute(self, fn, *args, **kwargs):
        """Forward `fn` to the owning connection's worker thread."""
        return await self._conn._execute(fn, *args, **kwargs)

    async def execute(self, sql: str, parameters: Iterable[Any] = None) -> None:
        """Run one SQL statement with optional bound parameters."""
        params = [] if parameters is None else parameters
        await self._execute(self._cursor.execute, sql, params)

    async def executemany(self, sql: str, parameters: Iterable[Iterable[Any]]) -> None:
        """Run the statement for every parameter sequence."""
        await self._execute(self._cursor.executemany, sql, parameters)

    async def executescript(self, sql_script: str) -> None:
        """Run a script containing multiple SQL statements."""
        await self._execute(self._cursor.executescript, sql_script)

    async def fetchone(self) -> Optional[sqlite3.Row]:
        """Fetch the next row, or None if the result set is exhausted."""
        return await self._execute(self._cursor.fetchone)

    async def fetchmany(self, size: int = None) -> Iterable[sqlite3.Row]:
        """Fetch up to `size` rows (defaults to the cursor's arraysize)."""
        if size is None:
            return await self._execute(self._cursor.fetchmany)
        return await self._execute(self._cursor.fetchmany, size)

    async def fetchall(self) -> Iterable[sqlite3.Row]:
        """Fetch every remaining row."""
        return await self._execute(self._cursor.fetchall)

    async def close(self) -> None:
        """Close the wrapped cursor."""
        await self._execute(self._cursor.close)

    @property
    def rowcount(self) -> int:
        return self._cursor.rowcount

    @property
    def lastrowid(self) -> int:
        return self._cursor.lastrowid

    @property
    def arraysize(self) -> int:
        return self._cursor.arraysize

    @arraysize.setter
    def arraysize(self, value: int) -> None:
        self._cursor.arraysize = value

    @property
    def description(self) -> Tuple[Tuple]:
        return self._cursor.description

    @property
    def connection(self) -> sqlite3.Connection:
        return self._cursor.connection

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
class Connection(Thread):
    """Async proxy for a sqlite3 connection; every sqlite call is shipped to
    this worker thread, since sqlite3 objects must stay on one thread."""

    def __init__(
        self,
        connector: Callable[[], sqlite3.Connection],
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        super().__init__()
        self._running = True
        self._connection = None  # type: Optional[sqlite3.Connection]
        self._connector = connector
        self._loop = loop
        # Serializes _execute() callers: exactly one request/response pair
        # is in flight at a time.
        self._lock = asyncio.Lock(loop=loop)
        self._tx = Queue()  # type: Queue  # jobs for the worker thread
        self._rx = Queue()  # type: Queue  # results back from the worker

    @property
    def _conn(self) -> sqlite3.Connection:
        # Guard against use before __aenter__/_connect opened the database.
        if self._connection is None:
            raise ValueError("no active connection")
        return self._connection

    def run(self) -> None:
        """Execute function calls on a separate thread."""
        while self._running:
            try:
                # Timeout lets the loop re-check _running so close() can
                # stop the thread.
                fn = self._tx.get(timeout=0.1)
            except Empty:
                continue
            try:
                LOG.debug("executing %s", fn)
                result = fn()
                LOG.debug("returning %s", result)
                self._rx.put(result)
            except BaseException as e:
                # Ship the exception through the result queue so the
                # awaiting caller re-raises it.
                LOG.exception("returning exception %s", e)
                self._rx.put(e)

    async def _execute(self, fn, *args, **kwargs):
        """Queue a function with the given arguments for execution."""
        # Hold the lock for the whole round trip so each queued job stays
        # paired with the single result the worker sends back.
        await self._lock.acquire()
        pt = partial(fn, *args, **kwargs)
        self._tx.put_nowait(pt)
        while True:
            try:
                result = self._rx.get_nowait()
                break
            except Empty:
                # Short async poll keeps latency low without blocking the loop.
                await asyncio.sleep(0.001)
                continue
        self._lock.release()
        if isinstance(result, Exception):
            raise result
        return result

    async def _connect(self):
        """Connect to the actual sqlite database."""
        if self._connection is None:
            self._connection = await self._execute(self._connector)

    async def __aenter__(self) -> "Connection":
        # Start the worker thread first, then open the database on it.
        self.start()
        await self._connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self.close()
        self._connection = None

    @contextmanager
    async def cursor(self) -> Cursor:
        """Create an aiosqlite cursor wrapping a sqlite3 cursor object."""
        return Cursor(self, await self._execute(self._conn.cursor))

    async def commit(self) -> None:
        """Commit the current transaction."""
        await self._execute(self._conn.commit)

    async def rollback(self) -> None:
        """Roll back the current transaction."""
        await self._execute(self._conn.rollback)

    async def close(self) -> None:
        """Complete queued queries/cursors and close the connection."""
        await self._execute(self._conn.close)
        # Worker loop exits on its next timeout tick.
        self._running = False

    @contextmanager
    async def execute(self, sql: str, parameters: Iterable[Any] = None) -> Cursor:
        """Helper to create a cursor and execute the given query."""
        if parameters is None:
            parameters = []
        cursor = await self._execute(self._conn.execute, sql, parameters)
        return Cursor(self, cursor)

    @contextmanager
    async def executemany(
        self, sql: str, parameters: Iterable[Iterable[Any]]
    ) -> Cursor:
        """Helper to create a cursor and execute the given multiquery."""
        cursor = await self._execute(self._conn.executemany, sql, parameters)
        return Cursor(self, cursor)

    @contextmanager
    async def executescript(self, sql_script: str) -> Cursor:
        """Helper to create a cursor and execute a user script."""
        cursor = await self._execute(self._conn.executescript, sql_script)
        return Cursor(self, cursor)

    async def interrupt(self) -> None:
        """Interrupt pending queries."""
        # NOTE(review): runs on the calling thread instead of being queued —
        # presumably relying on sqlite3's interrupt() being callable from
        # another thread; confirm against the sqlite3 docs.
        return self._conn.interrupt()

    @property
    def isolation_level(self) -> str:
        return self._conn.isolation_level

    @isolation_level.setter
    def isolation_level(self, value: str) -> None:
        self._conn.isolation_level = value

    @property
    def in_transaction(self) -> bool:
        return self._conn.in_transaction
def connect(
    database: str, *, loop: asyncio.AbstractEventLoop = None, **kwargs: Any
) -> Connection:
    """Open `database` and return an async Connection proxy for it."""
    if loop is None:
        loop = asyncio.get_event_loop()

    # Deferred factory: the real sqlite3.connect runs on the worker thread.
    def _factory() -> sqlite3.Connection:
        return sqlite3.connect(database, **kwargs)

    return Connection(_factory, loop)
|
# update h5 files created by old versions of pyannote-speaker-embedding
# estimate mu/sigma and save it back to the file
# usage: update_data_h5.py /path/to/file.h5
import sys
import h5py
import numpy as np
from tqdm import tqdm
data_h5 = sys.argv[1]

# Open the file once in read/write mode (it was previously opened twice,
# once 'r' and once 'r+'); compute per-sequence statistics, combine them,
# and store the result back as attributes on the dataset.
with h5py.File(data_h5, mode='r+') as fp:
    X = fp['X']

    # For each sequence: its length (weight), mean, and mean of squares.
    weights, means, squared_means = zip(*(
        (len(x), np.mean(x, axis=0), np.mean(x**2, axis=0))
        for x in tqdm(X)))

    # Length-weighted averages give the global mean and mean-of-squares;
    # sigma follows from Var[x] = E[x^2] - E[x]^2.
    mu = np.average(means, weights=weights, axis=0)
    squared_mean = np.average(squared_means, weights=weights, axis=0)
    sigma = np.sqrt(squared_mean - mu ** 2)

    X.attrs['mu'] = mu
    X.attrs['sigma'] = sigma
chore: remove now useless conversion script
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
from builtins import str
from builtins import object, bytes
import copy
from collections import namedtuple
from datetime import datetime, timedelta
import dill
import functools
import getpass
import imp
import importlib
import itertools
import inspect
import zipfile
import jinja2
import json
import logging
import os
import pickle
import re
import signal
import socket
import sys
import textwrap
import traceback
import warnings
import hashlib
from urllib.parse import urlparse
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index, Float)
from sqlalchemy import func, or_, and_
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.orm import reconstructor, relationship, synonym
from croniter import croniter
import six
from airflow import settings, utils
from airflow.executors import GetDefaultExecutor, LocalExecutor
from airflow import configuration
from airflow.exceptions import AirflowException, AirflowSkipException, AirflowTaskTimeout
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
from airflow.utils.helpers import (
as_tuple, is_container, is_in, validate_key, pprinttable)
from airflow.utils.logging import LoggingMixin
from airflow.utils.operator_resources import Resources
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.trigger_rule import TriggerRule
# Declarative base shared by every SQLAlchemy model in this module.
Base = declarative_base()

# Maximum length of identifier columns (dag_id, task_id, conn_id, ...).
ID_LEN = 250

# XCom key under which a task's return value is stored.
XCOM_RETURN_KEY = 'return_value'

# Metrics client configured in airflow.settings.
Stats = settings.Stats
def get_fernet():
    """
    Deferred load of Fernet key.

    This function could fail either because Cryptography is not installed
    or because the Fernet key is invalid.
    """
    # Import lazily so the module works without the cryptography package
    # as long as encryption is never requested.
    from cryptography.fernet import Fernet

    fernet_key = configuration.get('core', 'FERNET_KEY')
    return Fernet(fernet_key.encode('utf-8'))
# Pick a backend-appropriate large-text column type: MySQL gets its native
# LONGTEXT, every other backend uses the generic Text type.
if 'mysql' in settings.SQL_ALCHEMY_CONN:
    LongText = LONGTEXT
else:
    LongText = Text

# used by DAG context_managers
_CONTEXT_MANAGER_DAG = None
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None):
    """
    Clears a set of task instances, but makes sure the running ones
    get killed.

    :param tis: task instances to clear
    :param session: active SQLAlchemy session; changes are merged into it
    :param activate_dag_runs: if True, flip the matching DagRuns back to
        RUNNING so the scheduler picks the cleared tasks up again
    :param dag: the DAG the instances belong to; used to read each task's
        ``retries`` so ``max_tries`` can be extended on clear
    """
    job_ids = []
    for ti in tis:
        if ti.state == State.RUNNING:
            # A running instance cannot simply be reset; request shutdown of
            # the job executing it instead (handled below).
            if ti.job_id:
                ti.state = State.SHUTDOWN
                job_ids.append(ti.job_id)
        else:
            task_id = ti.task_id
            if dag and dag.has_task(task_id):
                task = dag.get_task(task_id)
                task_retries = task.retries
                # allow a fresh round of retries on top of the tries used
                ti.max_tries = ti.try_number + task_retries
            else:
                # Ignore errors when updating max_tries if dag is None or
                # task not found in dag since database records could be
                # outdated. We make max_tries the maximum value of its
                # original max_tries or the current task try number.
                ti.max_tries = max(ti.max_tries, ti.try_number)
            ti.state = State.NONE
            session.merge(ti)

    if job_ids:
        # imported here to avoid a circular import at module load time
        from airflow.jobs import BaseJob as BJ
        for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
            job.state = State.SHUTDOWN

    if activate_dag_runs and tis:
        # Re-activate every dag run touched by the cleared instances so the
        # scheduler reconsiders them.
        drs = session.query(DagRun).filter(
            DagRun.dag_id.in_({ti.dag_id for ti in tis}),
            DagRun.execution_date.in_({ti.execution_date for ti in tis}),
        ).all()
        for dr in drs:
            dr.state = State.RUNNING
            dr.start_date = datetime.now()
class DagBag(BaseDagBag, LoggingMixin):
    """
    A dagbag is a collection of dags, parsed out of a folder tree and has high
    level configuration settings, like what database to use as a backend and
    what executor to use to fire off tasks. This makes it easier to run
    distinct environments for say production and development, tests, or for
    different teams or security profiles. What would have been system level
    settings are now dagbag level so that one system can run multiple,
    independent settings sets.

    :param dag_folder: the folder to scan to find DAGs
    :type dag_folder: unicode
    :param executor: the executor to use when executing task instances
        in this DagBag
    :param include_examples: whether to include the examples that ship
        with airflow or not
    :type include_examples: bool
    """

    def __init__(
            self,
            dag_folder=None,
            executor=None,
            include_examples=configuration.getboolean('core', 'LOAD_EXAMPLES')):
        # do not use default arg in signature, to fix import cycle on plugin load
        if executor is None:
            executor = GetDefaultExecutor()
        dag_folder = dag_folder or settings.DAGS_FOLDER
        self.logger.info("Filling up the DagBag from {}".format(dag_folder))
        self.dag_folder = dag_folder
        self.dags = {}
        # the file's last modified timestamp when we last read it
        self.file_last_changed = {}
        self.executor = executor
        self.import_errors = {}
        if include_examples:
            example_dag_folder = os.path.join(
                os.path.dirname(__file__),
                'example_dags')
            self.collect_dags(example_dag_folder)
        self.collect_dags(dag_folder)

    def size(self):
        """
        :return: the amount of dags contained in this dagbag
        """
        return len(self.dags)

    def get_dag(self, dag_id):
        """
        Gets the DAG out of the dictionary, and refreshes it if expired
        """
        # If asking for a known subdag, we want to refresh the parent
        root_dag_id = dag_id
        if dag_id in self.dags:
            dag = self.dags[dag_id]
            if dag.is_subdag:
                root_dag_id = dag.parent_dag.dag_id

        # If the dag corresponding to root_dag_id is absent or expired
        orm_dag = DagModel.get_current(root_dag_id)
        if orm_dag and (
                root_dag_id not in self.dags or
                (
                    orm_dag.last_expired and
                    dag.last_loaded < orm_dag.last_expired
                )
        ):
            # Reprocess source file
            found_dags = self.process_file(
                filepath=orm_dag.fileloc, only_if_updated=False)

            # If the source file no longer exports `dag_id`, delete it from self.dags
            if found_dags and dag_id in [found.dag_id for found in found_dags]:
                return self.dags[dag_id]
            elif dag_id in self.dags:
                del self.dags[dag_id]
        return self.dags.get(dag_id)

    def process_file(self, filepath, only_if_updated=True, safe_mode=True):
        """
        Given a path to a python module or zip file, this method imports
        the module and look for dag objects within it.

        :param filepath: path to a .py file or a zip archive of modules
        :param only_if_updated: skip files whose mtime has not changed since
            the last successful read
        :param safe_mode: heuristically skip files that do not mention both
            b'DAG' and b'airflow' before importing them
        :return: list of DAG objects found (including subdags)
        """
        found_dags = []

        # todo: raise exception?
        if not os.path.isfile(filepath):
            return found_dags

        try:
            # This failed before in what may have been a git sync
            # race condition
            file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
            if only_if_updated \
                    and filepath in self.file_last_changed \
                    and file_last_changed_on_disk == self.file_last_changed[filepath]:
                return found_dags
        except Exception as e:
            logging.exception(e)
            return found_dags

        mods = []
        if not zipfile.is_zipfile(filepath):
            if safe_mode and os.path.isfile(filepath):
                with open(filepath, 'rb') as f:
                    content = f.read()
                    if not all([s in content for s in (b'DAG', b'airflow')]):
                        self.file_last_changed[filepath] = file_last_changed_on_disk
                        return found_dags

            self.logger.debug("Importing {}".format(filepath))
            org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
            # unique, path-derived module name so reloads don't collide
            mod_name = ('unusual_prefix_' +
                        hashlib.sha1(filepath.encode('utf-8')).hexdigest() +
                        '_' + org_mod_name)

            if mod_name in sys.modules:
                del sys.modules[mod_name]

            with timeout(configuration.getint('core', "DAGBAG_IMPORT_TIMEOUT")):
                try:
                    m = imp.load_source(mod_name, filepath)
                    mods.append(m)
                except Exception as e:
                    self.logger.exception("Failed to import: " + filepath)
                    self.import_errors[filepath] = str(e)
                    self.file_last_changed[filepath] = file_last_changed_on_disk
        else:
            zip_file = zipfile.ZipFile(filepath)
            for mod in zip_file.infolist():
                head, _ = os.path.split(mod.filename)
                mod_name, ext = os.path.splitext(mod.filename)
                if not head and (ext == '.py' or ext == '.pyc'):
                    if mod_name == '__init__':
                        self.logger.warning("Found __init__.{0} at root of {1}".
                                            format(ext, filepath))
                    if safe_mode:
                        with zip_file.open(mod.filename) as zf:
                            self.logger.debug("Reading {} from {}".
                                              format(mod.filename, filepath))
                            content = zf.read()
                            if not all([s in content for s in (b'DAG', b'airflow')]):
                                self.file_last_changed[filepath] = (
                                    file_last_changed_on_disk)
                                # todo: create ignore list
                                return found_dags

                    if mod_name in sys.modules:
                        del sys.modules[mod_name]

                    try:
                        sys.path.insert(0, filepath)
                        m = importlib.import_module(mod_name)
                        mods.append(m)
                    except Exception as e:
                        self.logger.exception("Failed to import: " + filepath)
                        self.import_errors[filepath] = str(e)
                        self.file_last_changed[filepath] = file_last_changed_on_disk

        for m in mods:
            for dag in list(m.__dict__.values()):
                if isinstance(dag, DAG):
                    if not dag.full_filepath:
                        dag.full_filepath = filepath
                    dag.is_subdag = False
                    self.bag_dag(dag, parent_dag=dag, root_dag=dag)
                    found_dags.append(dag)
                    found_dags += dag.subdags

        self.file_last_changed[filepath] = file_last_changed_on_disk
        return found_dags

    @provide_session
    def kill_zombies(self, session=None):
        """
        Fails tasks that haven't had a heartbeat in too long
        """
        from airflow.jobs import LocalTaskJob as LJ
        self.logger.info("Finding 'running' jobs without a recent heartbeat")
        TI = TaskInstance
        secs = (
            configuration.getint('scheduler', 'scheduler_zombie_task_threshold'))
        limit_dttm = datetime.now() - timedelta(seconds=secs)
        self.logger.info(
            "Failing jobs without heartbeat after {}".format(limit_dttm))

        tis = (
            session.query(TI)
            .join(LJ, TI.job_id == LJ.id)
            .filter(TI.state == State.RUNNING)
            .filter(
                or_(
                    LJ.state != State.RUNNING,
                    LJ.latest_heartbeat < limit_dttm,
                ))
            .all()
        )

        for ti in tis:
            if ti and ti.dag_id in self.dags:
                dag = self.dags[ti.dag_id]
                if ti.task_id in dag.task_ids:
                    task = dag.get_task(ti.task_id)
                    ti.task = task
                    ti.handle_failure("{} killed as zombie".format(ti))
                    self.logger.info(
                        'Marked zombie job {} as failed'.format(ti))
                    Stats.incr('zombies_killed')
        session.commit()

    def bag_dag(self, dag, parent_dag, root_dag):
        """
        Adds the DAG into the bag, recurses into sub dags.
        """
        self.dags[dag.dag_id] = dag
        dag.resolve_template_files()
        dag.last_loaded = datetime.now()

        for task in dag.tasks:
            settings.policy(task)

        for subdag in dag.subdags:
            subdag.full_filepath = dag.full_filepath
            subdag.parent_dag = dag
            subdag.is_subdag = True
            self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
        self.logger.debug('Loaded DAG {dag}'.format(**locals()))

    def collect_dags(
            self,
            dag_folder=None,
            only_if_updated=True):
        """
        Given a file path or a folder, this method looks for python modules,
        imports them and adds them to the dagbag collection.

        Note that if a .airflowignore file is found while processing,
        the directory, it will behaves much like a .gitignore does,
        ignoring files that match any of the regex patterns specified
        in the file.
        """
        start_dttm = datetime.now()
        dag_folder = dag_folder or self.dag_folder

        # Used to store stats around DagBag processing
        stats = []
        FileLoadStat = namedtuple(
            'FileLoadStat', "file duration dag_num task_num dags")
        if os.path.isfile(dag_folder):
            self.process_file(dag_folder, only_if_updated=only_if_updated)
        elif os.path.isdir(dag_folder):
            patterns = []
            for root, dirs, files in os.walk(dag_folder, followlinks=True):
                ignore_file = [f for f in files if f == '.airflowignore']
                if ignore_file:
                    # use a context manager so the handle is closed even if
                    # reading raises (previously leaked on exception)
                    with open(os.path.join(root, ignore_file[0]), 'r') as f:
                        patterns += [p for p in f.read().split('\n') if p]
                for f in files:
                    try:
                        filepath = os.path.join(root, f)
                        if not os.path.isfile(filepath):
                            continue
                        mod_name, file_ext = os.path.splitext(
                            os.path.split(filepath)[-1])
                        if file_ext != '.py' and not zipfile.is_zipfile(filepath):
                            continue
                        if not any(
                                [re.findall(p, filepath) for p in patterns]):
                            ts = datetime.now()
                            found_dags = self.process_file(
                                filepath, only_if_updated=only_if_updated)

                            # total_seconds() already includes the microsecond
                            # component; the old code added it a second time.
                            td = (datetime.now() - ts).total_seconds()
                            stats.append(FileLoadStat(
                                filepath.replace(dag_folder, ''),
                                td,
                                len(found_dags),
                                sum([len(dag.tasks) for dag in found_dags]),
                                str([dag.dag_id for dag in found_dags]),
                            ))
                    except Exception as e:
                        logging.warning(e)
        Stats.gauge(
            'collect_dags', (datetime.now() - start_dttm).total_seconds(), 1)
        Stats.gauge(
            'dagbag_size', len(self.dags), 1)
        Stats.gauge(
            'dagbag_import_errors', len(self.import_errors), 1)
        self.dagbag_stats = sorted(
            stats, key=lambda x: x.duration, reverse=True)

    def dagbag_report(self):
        """Prints a report around DagBag loading stats"""
        report = textwrap.dedent("""\n
        -------------------------------------------------------------------
        DagBag loading stats for {dag_folder}
        -------------------------------------------------------------------
        Number of DAGs: {dag_num}
        Total task number: {task_num}
        DagBag parsing time: {duration}
        {table}
        """)
        stats = self.dagbag_stats
        return report.format(
            dag_folder=self.dag_folder,
            duration=sum([o.duration for o in stats]),
            dag_num=sum([o.dag_num for o in stats]),
            # bug fix: this previously summed dag_num, misreporting task count
            task_num=sum([o.task_num for o in stats]),
            table=pprinttable(stats),
        )

    def deactivate_inactive_dags(self):
        """Marks DAGs no longer present in this bag as inactive in the DB."""
        active_dag_ids = [dag.dag_id for dag in list(self.dags.values())]
        session = settings.Session()
        for dag in session.query(
                DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
            dag.is_active = False
            session.merge(dag)
        session.commit()
        session.close()

    def paused_dags(self):
        """Returns the dag_ids of all DAGs currently marked paused."""
        session = settings.Session()
        dag_ids = [dp.dag_id for dp in session.query(DagModel).filter(
            DagModel.is_paused.__eq__(True))]
        session.commit()
        session.close()
        return dag_ids
class User(Base):
    """Webserver user record (``users`` table)."""
    __tablename__ = "users"

    id = Column(Integer, primary_key=True)
    username = Column(String(ID_LEN), unique=True)
    email = Column(String(500))
    # plain class attribute, not a column: users are never superusers here
    superuser = False

    def __repr__(self):
        return self.username

    def get_id(self):
        # flask-login expects a string id
        return str(self.id)

    def is_superuser(self):
        return self.superuser
class Connection(Base):
    """
    Placeholder to store information about different database instances
    connection information. The idea here is that scripts use references to
    database instances (conn_id) instead of hard coding hostname, logins and
    passwords when using operators or hooks.
    """
    __tablename__ = "connection"

    id = Column(Integer(), primary_key=True)
    conn_id = Column(String(ID_LEN))
    conn_type = Column(String(500))
    host = Column(String(500))
    schema = Column(String(500))
    login = Column(String(500))
    # stored Fernet-encrypted when is_encrypted is True
    _password = Column('password', String(5000))
    port = Column(Integer())
    is_encrypted = Column(Boolean, unique=False, default=False)
    is_extra_encrypted = Column(Boolean, unique=False, default=False)
    _extra = Column('extra', String(5000))

    # (conn_type value, human readable label) pairs shown in the UI
    _types = [
        ('fs', 'File (path)'),
        ('ftp', 'FTP',),
        ('google_cloud_platform', 'Google Cloud Platform'),
        ('hdfs', 'HDFS',),
        ('http', 'HTTP',),
        ('hive_cli', 'Hive Client Wrapper',),
        ('hive_metastore', 'Hive Metastore Thrift',),
        ('hiveserver2', 'Hive Server 2 Thrift',),
        ('jdbc', 'Jdbc Connection',),
        ('mysql', 'MySQL',),
        ('postgres', 'Postgres',),
        ('oracle', 'Oracle',),
        ('vertica', 'Vertica',),
        ('presto', 'Presto',),
        ('s3', 'S3',),
        ('samba', 'Samba',),
        ('sqlite', 'Sqlite',),
        ('ssh', 'SSH',),
        ('cloudant', 'IBM Cloudant',),
        ('mssql', 'Microsoft SQL Server'),
        ('mesos_framework-id', 'Mesos Framework ID'),
        ('jira', 'JIRA',),
        ('redis', 'Redis',),
        ('wasb', 'Azure Blob Storage'),
        ('databricks', 'Databricks',),
    ]

    def __init__(
            self, conn_id=None, conn_type=None,
            host=None, login=None, password=None,
            schema=None, port=None, extra=None,
            uri=None):
        self.conn_id = conn_id
        if uri:
            # a URI, when given, wins over the individual fields
            self.parse_from_uri(uri)
        else:
            self.conn_type = conn_type
            self.host = host
            self.login = login
            self.password = password
            self.schema = schema
            self.port = port
            self.extra = extra

    def parse_from_uri(self, uri):
        """Populate the connection fields from a URI string."""
        temp_uri = urlparse(uri)
        hostname = temp_uri.hostname or ''
        # percent-encoded '/' may be upper or lower case; the old check only
        # looked for the lower-case form and missed '%2F'
        if '%2f' in hostname.lower():
            hostname = hostname.replace('%2f', '/').replace('%2F', '/')
        conn_type = temp_uri.scheme
        if conn_type == 'postgresql':
            conn_type = 'postgres'
        self.conn_type = conn_type
        self.host = hostname
        self.schema = temp_uri.path[1:]
        self.login = temp_uri.username
        self.password = temp_uri.password
        self.port = temp_uri.port

    def get_password(self):
        """Return the decrypted password (decrypting on the fly if needed)."""
        if self._password and self.is_encrypted:
            try:
                fernet = get_fernet()
            except Exception:
                # narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed
                raise AirflowException(
                    "Can't decrypt encrypted password for login={}, "
                    "FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
        else:
            return self._password

    def set_password(self, value):
        """Store *value*, encrypted when a Fernet key is available."""
        if value:
            try:
                fernet = get_fernet()
                self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_encrypted = True
            except (ImportError, NameError):
                # get_fernet raises ImportError when cryptography is not
                # installed; the old `except NameError` never matched, so the
                # intended plaintext fallback was dead code
                self._password = value
                self.is_encrypted = False

    @declared_attr
    def password(cls):
        return synonym('_password',
                       descriptor=property(cls.get_password, cls.set_password))

    def get_extra(self):
        """Return the decrypted `extra` field (decrypting on the fly)."""
        if self._extra and self.is_extra_encrypted:
            try:
                fernet = get_fernet()
            except Exception:
                raise AirflowException(
                    "Can't decrypt `extra` params for login={}, "
                    "FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
        else:
            return self._extra

    def set_extra(self, value):
        """Store the `extra` field, encrypted when a Fernet key is available."""
        if value:
            try:
                fernet = get_fernet()
                self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_extra_encrypted = True
            except (ImportError, NameError):
                # see set_password: plaintext fallback when crypto is missing
                self._extra = value
                self.is_extra_encrypted = False

    @declared_attr
    def extra(cls):
        return synonym('_extra',
                       descriptor=property(cls.get_extra, cls.set_extra))

    def get_hook(self):
        """
        Return the hook matching conn_type, or None for unknown types or
        when the hook's optional dependency fails to import (best-effort).
        """
        try:
            if self.conn_type == 'mysql':
                from airflow.hooks.mysql_hook import MySqlHook
                return MySqlHook(mysql_conn_id=self.conn_id)
            elif self.conn_type == 'google_cloud_platform':
                from airflow.contrib.hooks.bigquery_hook import BigQueryHook
                return BigQueryHook(bigquery_conn_id=self.conn_id)
            elif self.conn_type == 'postgres':
                from airflow.hooks.postgres_hook import PostgresHook
                return PostgresHook(postgres_conn_id=self.conn_id)
            elif self.conn_type == 'hive_cli':
                from airflow.hooks.hive_hooks import HiveCliHook
                return HiveCliHook(hive_cli_conn_id=self.conn_id)
            elif self.conn_type == 'presto':
                from airflow.hooks.presto_hook import PrestoHook
                return PrestoHook(presto_conn_id=self.conn_id)
            elif self.conn_type == 'hiveserver2':
                from airflow.hooks.hive_hooks import HiveServer2Hook
                return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
            elif self.conn_type == 'sqlite':
                from airflow.hooks.sqlite_hook import SqliteHook
                return SqliteHook(sqlite_conn_id=self.conn_id)
            elif self.conn_type == 'jdbc':
                from airflow.hooks.jdbc_hook import JdbcHook
                return JdbcHook(jdbc_conn_id=self.conn_id)
            elif self.conn_type == 'mssql':
                from airflow.hooks.mssql_hook import MsSqlHook
                return MsSqlHook(mssql_conn_id=self.conn_id)
            elif self.conn_type == 'oracle':
                from airflow.hooks.oracle_hook import OracleHook
                return OracleHook(oracle_conn_id=self.conn_id)
            elif self.conn_type == 'vertica':
                from airflow.contrib.hooks.vertica_hook import VerticaHook
                return VerticaHook(vertica_conn_id=self.conn_id)
            elif self.conn_type == 'cloudant':
                from airflow.contrib.hooks.cloudant_hook import CloudantHook
                return CloudantHook(cloudant_conn_id=self.conn_id)
            elif self.conn_type == 'jira':
                from airflow.contrib.hooks.jira_hook import JiraHook
                return JiraHook(jira_conn_id=self.conn_id)
            elif self.conn_type == 'redis':
                from airflow.contrib.hooks.redis_hook import RedisHook
                return RedisHook(redis_conn_id=self.conn_id)
            elif self.conn_type == 'wasb':
                from airflow.contrib.hooks.wasb_hook import WasbHook
                return WasbHook(wasb_conn_id=self.conn_id)
        except Exception:
            # deliberate best-effort: any failure yields None, as before,
            # but no longer hides SystemExit/KeyboardInterrupt
            pass

    def __repr__(self):
        return self.conn_id

    @property
    def extra_dejson(self):
        """Returns the extra property by deserializing json."""
        obj = {}
        if self.extra:
            try:
                obj = json.loads(self.extra)
            except Exception as e:
                logging.exception(e)
                logging.error("Failed parsing the json for conn_id %s", self.conn_id)
        return obj
class DagPickle(Base):
    """
    Dags can originate from different places (user repos, master repo, ...)
    and also get executed in different places (different executors). This
    object represents a version of a DAG and becomes a source of truth for
    a BackfillJob execution. A pickle is a native python serialized object,
    and in this case gets stored in the database for the duration of the job.

    The executors pick up the DagPickle id and read the dag definition from
    the database.
    """
    id = Column(Integer, primary_key=True)
    # the DAG object itself, serialized with dill (handles more than pickle)
    pickle = Column(PickleType(pickler=dill))
    created_dttm = Column(DateTime, default=func.now())
    pickle_hash = Column(Text)

    __tablename__ = "dag_pickle"

    def __init__(self, dag):
        self.dag_id = dag.dag_id
        # drop the jinja environment before pickling; it is not serializable
        if hasattr(dag, 'template_env'):
            dag.template_env = None
        self.pickle_hash = hash(dag)
        self.pickle = dag
class TaskInstance(Base):
    """
    Task instances store the state of a task instance. This table is the
    authority and single source of truth around what tasks have run and the
    state they are in.

    The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or
    dag model deliberately to have more control over transactions.

    Database transactions on this table should insure double triggers and
    any confusion around what task instances are or aren't ready to run
    even while multiple schedulers may be firing task instances.
    """

    __tablename__ = "task_instance"

    # (dag_id, task_id, execution_date) is the composite primary key
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(DateTime, primary_key=True)
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    duration = Column(Float)
    state = Column(String(20))
    try_number = Column(Integer, default=0)
    # upper bound on try_number; extended when instances are cleared
    max_tries = Column(Integer)
    hostname = Column(String(1000))
    unixname = Column(String(1000))
    # id of the job (e.g. LocalTaskJob) currently executing this instance
    job_id = Column(Integer)
    pool = Column(String(50))
    queue = Column(String(50))
    priority_weight = Column(Integer)
    operator = Column(String(1000))
    queued_dttm = Column(DateTime)
    pid = Column(Integer)

    __table_args__ = (
        Index('ti_dag_state', dag_id, state),
        Index('ti_state', state),
        Index('ti_state_lkp', dag_id, task_id, execution_date, state),
        Index('ti_pool', pool, state, priority_weight),
    )
def __init__(self, task, execution_date, state=None):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.execution_date = execution_date
self.task = task
self.queue = task.queue
self.pool = task.pool
self.priority_weight = task.priority_weight_total
self.try_number = 0
self.max_tries = self.task.retries
self.unixname = getpass.getuser()
self.run_as_user = task.run_as_user
if state:
self.state = state
self.hostname = ''
self.init_on_load()
    @reconstructor
    def init_on_load(self):
        """ Initialize the attributes that aren't stored in the DB. """
        # runs both from __init__ and (via @reconstructor) when SQLAlchemy
        # loads an instance from the DB, so this attribute always exists
        self.test_mode = False  # can be changed when calling 'run'
def command(
self,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere where airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
return " ".join(self.command_as_list(
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path))
def command_as_list(
self,
mark_success=False,
ignore_all_deps=False,
ignore_task_deps=False,
ignore_depends_on_past=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere where airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
dag = self.task.dag
should_pass_filepath = not pickle_id and dag
if should_pass_filepath and dag.full_filepath != dag.filepath:
path = "DAGS_FOLDER/{}".format(dag.filepath)
elif should_pass_filepath and dag.full_filepath:
path = dag.full_filepath
else:
path = None
return TaskInstance.generate_command(
self.dag_id,
self.task_id,
self.execution_date,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
file_path=path,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path)
@staticmethod
def generate_command(dag_id,
task_id,
execution_date,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
file_path=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None
):
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: unicode
:param task_id: Task ID
:type task_id: unicode
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: bool
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: boolean
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: boolean
:param local: Whether to run the task locally
:type local: bool
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: unicode
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:type pool: unicode
:return: shell command that can be used to run the task instance
"""
iso = execution_date.isoformat()
cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
cmd.extend(["--mark_success"]) if mark_success else None
cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None
cmd.extend(["--job_id", str(job_id)]) if job_id else None
cmd.extend(["-A "]) if ignore_all_deps else None
cmd.extend(["-i"]) if ignore_task_deps else None
cmd.extend(["-I"]) if ignore_depends_on_past else None
cmd.extend(["--force"]) if ignore_ti_state else None
cmd.extend(["--local"]) if local else None
cmd.extend(["--pool", pool]) if pool else None
cmd.extend(["--raw"]) if raw else None
cmd.extend(["-sd", file_path]) if file_path else None
cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None
return cmd
@property
def log_filepath(self):
iso = self.execution_date.isoformat()
log = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
return (
"{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals()))
@property
def log_url(self):
iso = self.execution_date.isoformat()
BASE_URL = configuration.get('webserver', 'BASE_URL')
return BASE_URL + (
"/admin/airflow/log"
"?dag_id={self.dag_id}"
"&task_id={self.task_id}"
"&execution_date={iso}"
).format(**locals())
@property
def mark_success_url(self):
iso = self.execution_date.isoformat()
BASE_URL = configuration.get('webserver', 'BASE_URL')
return BASE_URL + (
"/admin/airflow/action"
"?action=success"
"&task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
@provide_session
def current_state(self, session=None):
"""
Get the very latest state from the database, if a session is passed,
we use and looking up the state becomes part of the session, otherwise
a new session is used.
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
return state
    @provide_session
    def error(self, session=None):
        """
        Forces the task instance's state to FAILED in the database.
        """
        logging.error("Recording the task instance as FAILED")
        self.state = State.FAILED
        # persist the new state immediately
        session.merge(self)
        session.commit()
@provide_session
def refresh_from_db(self, session=None, lock_for_update=False):
"""
Refreshes the task instance from the database based on the primary key
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
if ti:
self.state = ti.state
self.start_date = ti.start_date
self.end_date = ti.end_date
self.try_number = ti.try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.pid = ti.pid
else:
self.state = None
@provide_session
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit()
@property
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date
    def set_state(self, state, session):
        """Set and immediately persist *state*, stamping start/end dates."""
        self.state = state
        # two separate now() calls, so start and end may differ slightly
        self.start_date = datetime.now()
        self.end_date = datetime.now()
        session.merge(self)
        session.commit()
@property
def is_premature(self):
"""
Returns whether a task is in UP_FOR_RETRY state and its retry interval
has elapsed.
"""
# is the task still in the retry waiting period?
return self.state == State.UP_FOR_RETRY and not self.ready_for_retry()
@provide_session
def are_dependents_done(self, session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids)
    @property
    @provide_session
    def previous_ti(self, session=None):
        """ The task instance for the task that ran before this task instance """
        dag = self.task.dag
        if dag:
            dr = self.get_dagrun(session=session)

            # LEGACY: most likely running from unit tests
            if not dr:
                # Means that this TI is NOT being run from a DR, but from a catchup
                previous_scheduled_date = dag.previous_schedule(self.execution_date)
                if not previous_scheduled_date:
                    return None
                # synthesize an in-memory TI for the previous schedule
                return TaskInstance(task=self.task,
                                    execution_date=previous_scheduled_date)

            dr.dag = dag
            # with catchup, only consider *scheduled* previous runs;
            # otherwise any previous run counts
            if dag.catchup:
                last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
            else:
                last_dagrun = dr.get_previous_dagrun(session=session)

            if last_dagrun:
                return last_dagrun.get_task_instance(self.task_id, session=session)
        return None
@provide_session
def are_dependencies_met(
self,
dep_context=None,
session=None,
verbose=False):
"""
Returns whether or not all the conditions are met for this task instance to be run
given the context for the dependencies (e.g. a task instance being force run from
the UI will ignore some dependencies).
:param dep_context: The execution context that determines the dependencies that
should be evaluated.
:type dep_context: DepContext
:param session: database session
:type session: Session
:param verbose: whether or not to print details on failed dependencies
:type verbose: boolean
"""
dep_context = dep_context or DepContext()
failed = False
for dep_status in self.get_failed_dep_statuses(
dep_context=dep_context,
session=session):
failed = True
if verbose:
logging.info("Dependencies not met for {}, dependency '{}' FAILED: {}"
.format(self, dep_status.dep_name, dep_status.reason))
if failed:
return False
if verbose:
logging.info("Dependencies all met for {}".format(self))
return True
    @provide_session
    def get_failed_dep_statuses(
            self,
            dep_context=None,
            session=None):
        """
        Yields the dependency statuses that did NOT pass for this task
        instance, evaluating both the context's deps and the task's own deps.
        """
        dep_context = dep_context or DepContext()
        # union of context-level and task-level dependency checks
        for dep in dep_context.deps | self.task.deps:
            for dep_status in dep.get_dep_statuses(
                    self,
                    session,
                    dep_context):
                logging.debug("{} dependency '{}' PASSED: {}, {}"
                              .format(self,
                                      dep_status.dep_name,
                                      dep_status.passed,
                                      dep_status.reason))
                if not dep_status.passed:
                    yield dep_status
def __repr__(self):
return (
"<TaskInstance: {ti.dag_id}.{ti.task_id} "
"{ti.execution_date} [{ti.state}]>"
).format(ti=self)
def next_retry_datetime(self):
    """
    Get datetime of the next retry if the task instance fails. For exponential
    backoff, retry_delay is used as base and will be converted to seconds.
    :return: datetime after which the task may be retried
    """
    delay = self.task.retry_delay
    if self.task.retry_exponential_backoff:
        # Clamp to at least 1 second: with a sub-second retry_delay or an
        # early try_number, int() truncation can yield 0, and the modulo
        # below would then raise ZeroDivisionError.
        min_backoff = max(1, int(delay.total_seconds() * (2 ** (self.try_number - 2))))
        # deterministic per task instance: the same TI/try always waits
        # the same amount of time (avoid shadowing builtin `hash`)
        hash_value = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id, self.task_id,
            self.execution_date, self.try_number).encode('utf-8')).hexdigest(), 16)
        # between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
        modded_hash = min_backoff + hash_value % min_backoff
        # timedelta has a maximum representable value. The exponentiation
        # here means this value can be exceeded after a certain number
        # of tries (around 50 if the initial delay is 1s, even fewer if
        # the delay is larger). Cap the value here before creating a
        # timedelta object so the operation doesn't fail.
        delay_backoff_in_seconds = min(
            modded_hash,
            timedelta.max.total_seconds() - 1
        )
        delay = timedelta(seconds=delay_backoff_in_seconds)
        if self.task.max_retry_delay:
            delay = min(self.task.max_retry_delay, delay)
    return self.end_date + delay
def ready_for_retry(self):
    """
    Checks on whether the task instance is in the right state and timeframe
    to be retried.
    """
    if self.state != State.UP_FOR_RETRY:
        return False
    return self.next_retry_datetime() < datetime.now()
@provide_session
def pool_full(self, session):
    """
    Returns a boolean as to whether the slot pool has room for this
    task to run
    """
    pool_name = self.task.pool
    if not pool_name:
        # Task is not assigned to a pool, so it can never be pool-blocked.
        return False
    pool = (session
            .query(Pool)
            .filter(Pool.pool == pool_name)
            .first())
    if not pool:
        return False
    return pool.open_slots(session=session) <= 0
@provide_session
def get_dagrun(self, session):
    """
    Returns the DagRun for this TaskInstance
    :param session: database session
    :return: the matching DagRun, or None if none exists
    """
    query = session.query(DagRun).filter(
        DagRun.dag_id == self.dag_id,
        DagRun.execution_date == self.execution_date)
    return query.first()
@provide_session
def run(
        self,
        verbose=True,
        ignore_all_deps=False,
        ignore_depends_on_past=False,
        ignore_task_deps=False,
        ignore_ti_state=False,
        mark_success=False,
        test_mode=False,
        job_id=None,
        pool=None,
        session=None):
    """
    Runs the task instance.
    :param verbose: whether to turn on more verbose logging
    :type verbose: boolean
    :param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
    :type ignore_all_deps: boolean
    :param ignore_depends_on_past: Ignore depends_on_past DAG attribute
    :type ignore_depends_on_past: boolean
    :param ignore_task_deps: Don't check the dependencies of this TI's task
    :type ignore_task_deps: boolean
    :param ignore_ti_state: Disregards previous task instance state
    :type ignore_ti_state: boolean
    :param mark_success: Don't run the task, mark its state as success
    :type mark_success: boolean
    :param test_mode: Doesn't record success or failure in the DB
    :type test_mode: boolean
    :param job_id: id of the job running the task instance, recorded on this TI
    :param pool: specifies the pool to use to run the task instance
    :type pool: str
    :param session: database session
    :type session: Session
    """
    task = self.task
    self.pool = pool or task.pool
    self.test_mode = test_mode
    # Lock the TI row so another worker cannot transition it concurrently.
    self.refresh_from_db(session=session, lock_for_update=True)
    self.job_id = job_id
    self.hostname = socket.getfqdn()
    self.operator = task.__class__.__name__
    if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS:
        Stats.incr('previously_succeeded', 1, 1)
    # First gate: queueing-time dependencies. Commit (releasing the row
    # lock) and bail out if they are not met.
    queue_dep_context = DepContext(
        deps=QUEUE_DEPS,
        ignore_all_deps=ignore_all_deps,
        ignore_ti_state=ignore_ti_state,
        ignore_depends_on_past=ignore_depends_on_past,
        ignore_task_deps=ignore_task_deps)
    if not self.are_dependencies_met(
            dep_context=queue_dep_context,
            session=session,
            verbose=True):
        session.commit()
        return
    hr = "\n" + ("-" * 80) + "\n" # Line break
    # For reporting purposes, we report based on 1-indexed,
    # not 0-indexed lists (i.e. Attempt 1 instead of
    # Attempt 0 for the first attempt).
    msg = "Starting attempt {attempt} of {total}".format(
        attempt=self.try_number + 1,
        total=self.max_tries + 1)
    self.start_date = datetime.now()
    # Second gate: the remaining run-time dependencies (e.g. concurrency).
    dep_context = DepContext(
        deps=RUN_DEPS - QUEUE_DEPS,
        ignore_all_deps=ignore_all_deps,
        ignore_depends_on_past=ignore_depends_on_past,
        ignore_task_deps=ignore_task_deps,
        ignore_ti_state=ignore_ti_state)
    runnable = self.are_dependencies_met(
        dep_context=dep_context,
        session=session,
        verbose=True)
    if not runnable and not mark_success:
        # FIXME: we might have hit concurrency limits, which means we probably
        # have been running prematurely. This should be handled in the
        # scheduling mechanism.
        self.state = State.NONE
        msg = ("FIXME: Rescheduling due to concurrency limits reached at task "
               "runtime. Attempt {attempt} of {total}. State set to NONE.").format(
            attempt=self.try_number + 1,
            total=self.max_tries + 1)
        logging.warning(hr + msg + hr)
        self.queued_dttm = datetime.now()
        msg = "Queuing into pool {}".format(self.pool)
        logging.info(msg)
        session.merge(self)
        session.commit()
        return
    # Another worker might have started running this task instance while
    # the current worker process was blocked on refresh_from_db
    if self.state == State.RUNNING:
        msg = "Task Instance already running {}".format(self)
        logging.warning(msg)
        session.commit()
        return
    # print status message
    logging.info(hr + msg + hr)
    self.try_number += 1
    if not test_mode:
        session.add(Log(State.RUNNING, self))
    self.state = State.RUNNING
    self.pid = os.getpid()
    self.end_date = None
    if not test_mode:
        session.merge(self)
    session.commit()
    # Closing all pooled connections to prevent
    # "max number of connections reached"
    settings.engine.dispose()
    if verbose:
        if mark_success:
            msg = "Marking success for "
        else:
            msg = "Executing "
        msg += "{self.task} on {self.execution_date}"
    context = {}
    try:
        logging.info(msg.format(self=self))
        if not mark_success:
            context = self.get_template_context()
            # Work on a shallow copy so per-run template rendering does not
            # mutate the shared task definition.
            task_copy = copy.copy(task)
            self.task = task_copy
            def signal_handler(signum, frame):
                '''Setting kill signal handler'''
                logging.error("Killing subprocess")
                task_copy.on_kill()
                raise AirflowException("Task received SIGTERM signal")
            signal.signal(signal.SIGTERM, signal_handler)
            # Don't clear Xcom until the task is certain to execute
            self.clear_xcom_data()
            self.render_templates()
            task_copy.pre_execute(context=context)
            # If a timeout is specified for the task, make it fail
            # if it goes beyond
            result = None
            if task_copy.execution_timeout:
                try:
                    with timeout(int(
                            task_copy.execution_timeout.total_seconds())):
                        result = task_copy.execute(context=context)
                except AirflowTaskTimeout:
                    task_copy.on_kill()
                    raise
            else:
                result = task_copy.execute(context=context)
            # If the task returns a result, push an XCom containing it
            if result is not None:
                self.xcom_push(key=XCOM_RETURN_KEY, value=result)
            # TODO remove deprecated behavior in Airflow 2.0
            try:
                task_copy.post_execute(context=context, result=result)
            except TypeError as e:
                if 'unexpected keyword argument' in str(e):
                    warnings.warn(
                        'BaseOperator.post_execute() now takes two '
                        'arguments, `context` and `result`, but "{}" only '
                        'expected one. This behavior is deprecated and '
                        'will be removed in a future version of '
                        'Airflow.'.format(self.task_id),
                        category=DeprecationWarning)
                    task_copy.post_execute(context=context)
                else:
                    raise
            Stats.incr('operator_successes_{}'.format(
                self.task.__class__.__name__), 1, 1)
        self.state = State.SUCCESS
    except AirflowSkipException:
        self.state = State.SKIPPED
    except (Exception, KeyboardInterrupt) as e:
        self.handle_failure(e, test_mode, context)
        raise
    # Recording SUCCESS
    self.end_date = datetime.now()
    self.set_duration()
    if not test_mode:
        session.add(Log(self.state, self))
        session.merge(self)
    session.commit()
    # Success callback
    try:
        if task.on_success_callback:
            task.on_success_callback(context)
    except Exception as e3:
        # Callback failures are logged but never fail a successful task.
        logging.error("Failed when executing success callback")
        logging.exception(e3)
    session.commit()
def dry_run(self):
    """Render this task's templates and invoke its dry_run without executing."""
    rendered_task = copy.copy(self.task)
    self.task = rendered_task
    self.render_templates()
    rendered_task.dry_run()
def handle_failure(self, error, test_mode=False, context=None):
    """
    Record a task failure: set end date/duration, emit stats/logs, decide
    between UP_FOR_RETRY and FAILED, send alert emails and run callbacks.
    :param error: the exception that caused the failure
    :param test_mode: when True, nothing is persisted to the database
    :param context: template context passed to retry/failure callbacks
    """
    logging.exception(error)
    task = self.task
    session = settings.Session()
    self.end_date = datetime.now()
    self.set_duration()
    Stats.incr('operator_failures_{}'.format(task.__class__.__name__), 1, 1)
    if not test_mode:
        session.add(Log(State.FAILED, self))
        # Log failure duration
        session.add(TaskFail(task, self.execution_date, self.start_date, self.end_date))
    # Let's go deeper
    try:
        # try_number is incremented by 1 during task instance run. So the
        # current task instance try_number is the try_number for the next
        # task instance run. We only mark task instance as FAILED if the
        # next task instance try_number exceeds the max_tries.
        if task.retries and self.try_number <= self.max_tries:
            self.state = State.UP_FOR_RETRY
            logging.info('Marking task as UP_FOR_RETRY')
            if task.email_on_retry and task.email:
                self.email_alert(error, is_retry=True)
        else:
            self.state = State.FAILED
            if task.retries:
                logging.info('All retries failed; marking task as FAILED')
            else:
                logging.info('Marking task as FAILED.')
            if task.email_on_failure and task.email:
                self.email_alert(error, is_retry=False)
    except Exception as e2:
        # Email alerting is best-effort; never let it mask the task error.
        logging.error(
            'Failed to send email to: ' + str(task.email))
        logging.exception(e2)
    # Handling callbacks pessimistically
    try:
        if self.state == State.UP_FOR_RETRY and task.on_retry_callback:
            task.on_retry_callback(context)
        if self.state == State.FAILED and task.on_failure_callback:
            task.on_failure_callback(context)
    except Exception as e3:
        logging.error("Failed at executing callback")
        logging.exception(e3)
    if not test_mode:
        session.merge(self)
    session.commit()
    logging.error(str(error))
@provide_session
def get_template_context(self, session=None):
    """
    Build the dict of variables exposed to jinja templates rendered for
    this task instance (ds, execution dates, params, macros, var, ...).
    :param session: database session, used to look up the owning DagRun
    """
    task = self.task
    from airflow import macros
    tables = None
    if 'tables' in task.params:
        tables = task.params['tables']
    # date/timestamp strings derived from the execution date
    ds = self.execution_date.isoformat()[:10]
    ts = self.execution_date.isoformat()
    yesterday_ds = (self.execution_date - timedelta(1)).isoformat()[:10]
    tomorrow_ds = (self.execution_date + timedelta(1)).isoformat()[:10]
    prev_execution_date = task.dag.previous_schedule(self.execution_date)
    next_execution_date = task.dag.following_schedule(self.execution_date)
    ds_nodash = ds.replace('-', '')
    ts_nodash = ts.replace('-', '').replace(':', '')
    yesterday_ds_nodash = yesterday_ds.replace('-', '')
    tomorrow_ds_nodash = tomorrow_ds.replace('-', '')
    # NOTE: format(**locals()) depends on the local names `task` and
    # `ds_nodash` above -- do not rename them.
    ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}"
    ti_key_str = ti_key_str.format(**locals())
    params = {}
    run_id = ''
    dag_run = None
    if hasattr(task, 'dag'):
        if task.dag.params:
            params.update(task.dag.params)
        dag_run = (
            session.query(DagRun)
            .filter_by(
                dag_id=task.dag.dag_id,
                execution_date=self.execution_date)
            .first()
        )
        run_id = dag_run.run_id if dag_run else None
        session.expunge_all()
        session.commit()
    # task-level params override dag-level ones
    if task.params:
        params.update(task.params)
    class VariableAccessor:
        """
        Wrapper around Variable. This way you can get variables in templates by using
        {var.variable_name}.
        """
        def __init__(self):
            self.var = None
        def __getattr__(self, item):
            self.var = Variable.get(item)
            return self.var
        def __repr__(self):
            return str(self.var)
    class VariableJsonAccessor:
        """Like VariableAccessor, but deserializes the value as JSON."""
        def __init__(self):
            self.var = None
        def __getattr__(self, item):
            self.var = Variable.get(item, deserialize_json=True)
            return self.var
        def __repr__(self):
            return str(self.var)
    return {
        'dag': task.dag,
        'ds': ds,
        'ds_nodash': ds_nodash,
        'ts': ts,
        'ts_nodash': ts_nodash,
        'yesterday_ds': yesterday_ds,
        'yesterday_ds_nodash': yesterday_ds_nodash,
        'tomorrow_ds': tomorrow_ds,
        'tomorrow_ds_nodash': tomorrow_ds_nodash,
        'END_DATE': ds,
        'end_date': ds,
        'dag_run': dag_run,
        'run_id': run_id,
        'execution_date': self.execution_date,
        'prev_execution_date': prev_execution_date,
        'next_execution_date': next_execution_date,
        'latest_date': ds,
        'macros': macros,
        'params': params,
        'tables': tables,
        'task': task,
        'task_instance': self,
        'ti': self,
        'task_instance_key_str': ti_key_str,
        'conf': configuration,
        'test_mode': self.test_mode,
        'var': {
            'value': VariableAccessor(),
            'json': VariableJsonAccessor()
        }
    }
def render_templates(self):
    """Render every templated field of this TI's task in place."""
    task = self.task
    jinja_context = self.get_template_context()
    if hasattr(self, 'task') and hasattr(self.task, 'dag'):
        # user-defined macros from the DAG are exposed to every template
        if self.task.dag.user_defined_macros:
            jinja_context.update(self.task.dag.user_defined_macros)
    render = self.task.render_template  # shortcut to method
    for field_name in task.__class__.template_fields:
        raw_value = getattr(task, field_name)
        if raw_value:
            setattr(task, field_name,
                    render(field_name, raw_value, jinja_context))
def email_alert(self, exception, is_retry=False):
    """Send a retry/failure alert email for this task instance."""
    task = self.task
    title = "Airflow alert: {self}".format(self=self)
    exception = str(exception).replace('\n', '<br>')
    # For reporting purposes, we report based on 1-indexed,
    # not 0-indexed lists (i.e. Try 1 instead of
    # Try 0 for the first attempt).
    body = (
        "Try {try_number} out of {max_tries}<br>"
        "Exception:<br>{exception}<br>"
        "Log: <a href='{self.log_url}'>Link</a><br>"
        "Host: {self.hostname}<br>"
        "Log file: {self.log_filepath}<br>"
        "Mark success: <a href='{self.mark_success_url}'>Link</a><br>"
    ).format(try_number=self.try_number + 1, max_tries=self.max_tries + 1,
             exception=exception, self=self)
    send_email(task.email, title, body)
def set_duration(self):
    """Set self.duration (seconds) from start/end dates, or None if unset."""
    if not (self.end_date and self.start_date):
        self.duration = None
    else:
        self.duration = (self.end_date - self.start_date).total_seconds()
def xcom_push(
        self,
        key,
        value,
        execution_date=None):
    """
    Make an XCom available for tasks to pull.
    :param key: A key for the XCom
    :type key: string
    :param value: A value for the XCom. The value is pickled and stored
        in the database.
    :type value: any pickleable object
    :param execution_date: if provided, the XCom will not be visible until
        this date. This can be used, for example, to send a message to a
        task on a future date without it being immediately visible.
    :type execution_date: datetime
    """
    # Reject dates before this TI's own execution date.
    if execution_date and execution_date < self.execution_date:
        raise ValueError(
            'execution_date can not be in the past (current '
            'execution_date is {}; received {})'.format(
                self.execution_date, execution_date))
    effective_date = execution_date or self.execution_date
    XCom.set(
        key=key,
        value=value,
        task_id=self.task_id,
        dag_id=self.dag_id,
        execution_date=effective_date)
def xcom_pull(
        self,
        task_ids,
        dag_id=None,
        key=XCOM_RETURN_KEY,
        include_prior_dates=False):
    """
    Pull XComs that optionally meet certain criteria.
    The default value for `key` limits the search to XComs
    that were returned by other tasks (as opposed to those that were pushed
    manually). To remove this filter, pass key=None (or any desired value).
    If a single task_id string is provided, the result is the value of the
    most recent matching XCom from that task_id. If multiple task_ids are
    provided, a tuple of matching values is returned. None is returned
    whenever no matches are found.
    :param key: A key for the XCom. If provided, only XComs with matching
        keys will be returned. The default key is 'return_value', also
        available as a constant XCOM_RETURN_KEY. This key is automatically
        given to XComs returned by tasks (as opposed to being pushed
        manually). To remove the filter, pass key=None.
    :type key: string
    :param task_ids: Only XComs from tasks with matching ids will be
        pulled. Can pass None to remove the filter.
    :type task_ids: string or iterable of strings (representing task_ids)
    :param dag_id: If provided, only pulls XComs from this DAG.
        If None (default), the DAG of the calling task is used.
    :type dag_id: string
    :param include_prior_dates: If False, only XComs from the current
        execution_date are returned. If True, XComs from previous dates
        are returned as well.
    :type include_prior_dates: bool
    """
    if dag_id is None:
        dag_id = self.dag_id
    def _pull_one(task_id):
        # shared lookup with all the filters fixed except the task id
        return XCom.get_one(
            execution_date=self.execution_date,
            key=key,
            dag_id=dag_id,
            include_prior_dates=include_prior_dates,
            task_id=task_id)
    if is_container(task_ids):
        return tuple(_pull_one(t) for t in task_ids)
    return _pull_one(task_ids)
class TaskFail(Base):
    """
    TaskFail tracks the failed run durations of each task instance.
    """
    __tablename__ = "task_fail"
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(DateTime, primary_key=True)
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    duration = Column(Float)
    def __init__(self, task, execution_date, start_date, end_date):
        # Identity comes from the owning task; timing from the failed run.
        self.task_id = task.task_id
        self.dag_id = task.dag_id
        self.execution_date = execution_date
        self.start_date = start_date
        self.end_date = end_date
        self.duration = (end_date - start_date).total_seconds()
class Log(Base):
    """
    Used to actively log events to the database
    """
    __tablename__ = "log"
    id = Column(Integer, primary_key=True)
    dttm = Column(DateTime)
    dag_id = Column(String(ID_LEN))
    task_id = Column(String(ID_LEN))
    event = Column(String(30))
    execution_date = Column(DateTime)
    owner = Column(String(500))
    extra = Column(Text)
    def __init__(self, event, task_instance, owner=None, extra=None, **kwargs):
        self.dttm = datetime.now()
        self.event = event
        self.extra = extra
        task_owner = None
        if task_instance:
            self.dag_id = task_instance.dag_id
            self.task_id = task_instance.task_id
            self.execution_date = task_instance.execution_date
            task_owner = task_instance.task.owner
        # Explicit keyword values take precedence over the task_instance.
        for field in ('task_id', 'dag_id'):
            if field in kwargs:
                setattr(self, field, kwargs[field])
        # Only a truthy execution_date override is applied.
        if kwargs.get('execution_date'):
            self.execution_date = kwargs['execution_date']
        self.owner = owner or task_owner
class SkipMixin(object):
    def skip(self, dag_run, execution_date, tasks):
        """
        Sets tasks instances to skipped from the same dag run.
        :param dag_run: the DagRun for which to set the tasks to skipped
        :param execution_date: execution_date
        :param tasks: tasks to skip (not task_ids)
        """
        if not tasks:
            return
        skip_ids = [task.task_id for task in tasks]
        skip_time = datetime.now()
        session = settings.Session()
        if dag_run:
            # One bulk UPDATE covers every task instance of this run.
            session.query(TaskInstance).filter(
                TaskInstance.dag_id == dag_run.dag_id,
                TaskInstance.execution_date == dag_run.execution_date,
                TaskInstance.task_id.in_(skip_ids)
            ).update({TaskInstance.state: State.SKIPPED,
                      TaskInstance.start_date: skip_time,
                      TaskInstance.end_date: skip_time},
                     synchronize_session=False)
            session.commit()
        else:
            assert execution_date is not None, "Execution date is None and no dag run"
            logging.warning("No DAG RUN present this should not happen")
            # this is defensive against dag runs that are not complete
            for task in tasks:
                ti = TaskInstance(task, execution_date=execution_date)
                ti.state = State.SKIPPED
                ti.start_date = skip_time
                ti.end_date = skip_time
                session.merge(ti)
            session.commit()
        session.close()
@functools.total_ordering
class BaseOperator(object):
"""
Abstract base class for all operators. Since operators create objects that
become node in the dag, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
    operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:type task_id: string
:param owner: the owner of the task, using the unix username is recommended
:type owner: string
:param retries: the number of retries that should be performed before
failing the task
:type retries: int
:param retry_delay: delay between retries
:type retry_delay: timedelta
:param retry_exponential_backoff: allow progressive longer waits between
retries by using exponential backoff algorithm on retry delay (delay
will be converted into seconds)
:type retry_exponential_backoff: bool
:param max_retry_delay: maximum delay interval between retries
:type max_retry_delay: timedelta
:param start_date: The ``start_date`` for the task, determines
the ``execution_date`` for the first task instance. The best practice
is to have the start_date rounded
to your DAG's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``execution_date`` and adds the ``schedule_interval`` to determine
the next ``execution_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_date are offset in a way that their execution_date don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:type start_date: datetime
:param end_date: if specified, the scheduler won't go beyond this date
:type end_date: datetime
:param depends_on_past: when set to true, task instances will run
sequentially while relying on the previous task's schedule to
succeed. The task instance for the start_date is allowed to run.
:type depends_on_past: bool
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used.
:type wait_for_downstream: bool
:param queue: which queue to target when running this job. Not
all executors implement queue management, the CeleryExecutor
does support targeting specific queues.
:type queue: str
:param dag: a reference to the dag the task is attached to (if any)
:type dag: DAG
:param priority_weight: priority weight of this task against other task.
This allows the executor to trigger higher priority tasks before
others when things get backed up.
:type priority_weight: int
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:type pool: str
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
        example if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
The scheduler pays special attention for jobs with an SLA and
sends alert
emails for sla misses. SLA misses are also recorded in the database
for future reference. All tasks that share the same SLA time
get bundled in a single email, sent soon after that time. SLA
notification are sent once and only once for each task instance.
:type sla: datetime.timedelta
:param execution_timeout: max time allowed for the execution of
this task instance, if it goes beyond it will raise and fail.
:type execution_timeout: datetime.timedelta
:param on_failure_callback: a function to be called when a task instance
of this task fails. a context dictionary is passed as a single
parameter to this function. Context contains references to related
objects to the task instance and is documented under the macros
section of the API.
:type on_failure_callback: callable
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:type on_success_callback: callable
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | one_success |
one_failed | dummy}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:type trigger_rule: str
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:type resources: dict
:param run_as_user: unix username to impersonate while running the task
:type run_as_user: str
"""
# For derived classes to define which fields will get jinjaified
template_fields = []
# Defines which files extensions to look for in the templated fields
template_ext = []
# Defines the color in the UI
ui_color = '#fff'
ui_fgcolor = '#000'
    @apply_defaults
    def __init__(
            self,
            task_id,
            owner=configuration.get('operators', 'DEFAULT_OWNER'),
            email=None,
            email_on_retry=True,
            email_on_failure=True,
            retries=0,
            retry_delay=timedelta(seconds=300),
            retry_exponential_backoff=False,
            max_retry_delay=None,
            start_date=None,
            end_date=None,
            schedule_interval=None, # not hooked as of now
            depends_on_past=False,
            wait_for_downstream=False,
            dag=None,
            params=None,
            default_args=None,  # consumed by @apply_defaults, presumably -- not read here; confirm
            adhoc=False,
            priority_weight=1,
            queue=configuration.get('celery', 'default_queue'),
            pool=None,
            sla=None,
            execution_timeout=None,
            on_failure_callback=None,
            on_success_callback=None,
            on_retry_callback=None,
            trigger_rule=TriggerRule.ALL_SUCCESS,
            resources=None,
            run_as_user=None,
            *args,
            **kwargs):
        """See the class docstring for the meaning of each parameter."""
        if args or kwargs:
            # TODO remove *args and **kwargs in Airflow 2.0
            warnings.warn(
                'Invalid arguments were passed to {c}. Support for '
                'passing such arguments will be dropped in Airflow 2.0. '
                'Invalid arguments were:'
                '\n*args: {a}\n**kwargs: {k}'.format(
                    c=self.__class__.__name__, a=args, k=kwargs),
                category=PendingDeprecationWarning
            )
        validate_key(task_id)
        self.task_id = task_id
        self.owner = owner
        self.email = email
        self.email_on_retry = email_on_retry
        self.email_on_failure = email_on_failure
        self.start_date = start_date
        if start_date and not isinstance(start_date, datetime):
            logging.warning(
                "start_date for {} isn't datetime.datetime".format(self))
        self.end_date = end_date
        if not TriggerRule.is_valid(trigger_rule):
            # NOTE(review): if dag is None here, dag.dag_id below raises
            # AttributeError and masks the intended error -- confirm.
            raise AirflowException(
                "The trigger_rule must be one of {all_triggers},"
                "'{d}.{t}'; received '{tr}'."
                .format(all_triggers=TriggerRule.all_triggers,
                        d=dag.dag_id, t=task_id, tr=trigger_rule))
        self.trigger_rule = trigger_rule
        self.depends_on_past = depends_on_past
        self.wait_for_downstream = wait_for_downstream
        if wait_for_downstream:
            # wait_for_downstream implies sequential runs
            self.depends_on_past = True
        if schedule_interval:
            logging.warning(
                "schedule_interval is used for {}, though it has "
                "been deprecated as a task parameter, you need to "
                "specify it as a DAG parameter instead".format(self))
        self._schedule_interval = schedule_interval
        self.retries = retries
        self.queue = queue
        self.pool = pool
        self.sla = sla
        self.execution_timeout = execution_timeout
        self.on_failure_callback = on_failure_callback
        self.on_success_callback = on_success_callback
        self.on_retry_callback = on_retry_callback
        if isinstance(retry_delay, timedelta):
            self.retry_delay = retry_delay
        else:
            logging.debug("retry_delay isn't timedelta object, assuming secs")
            self.retry_delay = timedelta(seconds=retry_delay)
        self.retry_exponential_backoff = retry_exponential_backoff
        self.max_retry_delay = max_retry_delay
        self.params = params or {} # Available in templates!
        self.adhoc = adhoc
        self.priority_weight = priority_weight
        self.resources = Resources(**(resources or {}))
        self.run_as_user = run_as_user
        # Private attributes
        self._upstream_task_ids = []
        self._downstream_task_ids = []
        if not dag and _CONTEXT_MANAGER_DAG:
            # pick up the DAG from an enclosing `with DAG(...)` block
            dag = _CONTEXT_MANAGER_DAG
        if dag:
            self.dag = dag
        # Fields compared by __eq__/__hash__.
        self._comps = {
            'task_id',
            'dag_id',
            'owner',
            'email',
            'email_on_retry',
            'retry_delay',
            'retry_exponential_backoff',
            'max_retry_delay',
            'start_date',
            'schedule_interval',
            'depends_on_past',
            'wait_for_downstream',
            'adhoc',
            'priority_weight',
            'sla',
            'execution_timeout',
            'on_failure_callback',
            'on_success_callback',
            'on_retry_callback',
        }
def __eq__(self, other):
return (
type(self) == type(other) and
all(self.__dict__.get(c, None) == other.__dict__.get(c, None)
for c in self._comps))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.task_id < other.task_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# Composing Operators -----------------------------------------------
def __rshift__(self, other):
"""
Implements Self >> Other == self.set_downstream(other)
If "Other" is a DAG, the DAG is assigned to the Operator.
"""
if isinstance(other, DAG):
# if this dag is already assigned, do nothing
# otherwise, do normal dag assignment
if not (self.has_dag() and self.dag is other):
self.dag = other
else:
self.set_downstream(other)
return other
def __lshift__(self, other):
"""
Implements Self << Other == self.set_upstream(other)
If "Other" is a DAG, the DAG is assigned to the Operator.
"""
if isinstance(other, DAG):
# if this dag is already assigned, do nothing
# otherwise, do normal dag assignment
if not (self.has_dag() and self.dag is other):
self.dag = other
else:
self.set_upstream(other)
return other
def __rrshift__(self, other):
"""
Called for [DAG] >> [Operator] because DAGs don't have
__rshift__ operators.
"""
self.__lshift__(other)
return self
def __rlshift__(self, other):
"""
Called for [DAG] << [Operator] because DAGs don't have
__lshift__ operators.
"""
self.__rshift__(other)
return self
# /Composing Operators ---------------------------------------------
@property
def dag(self):
"""
Returns the Operator's DAG if set, otherwise raises an error
"""
if self.has_dag():
return self._dag
else:
raise AirflowException(
'Operator {} has not been assigned to a DAG yet'.format(self))
@dag.setter
def dag(self, dag):
"""
Operators can be assigned to one DAG, one time. Repeat assignments to
that same DAG are ok.
"""
if not isinstance(dag, DAG):
raise TypeError(
'Expected DAG; received {}'.format(dag.__class__.__name__))
elif self.has_dag() and self.dag is not dag:
raise AirflowException(
"The DAG assigned to {} can not be changed.".format(self))
elif self.task_id not in dag.task_dict:
dag.add_task(self)
self._dag = dag
def has_dag(self):
"""
Returns True if the Operator has been assigned to a DAG.
"""
return getattr(self, '_dag', None) is not None
@property
def dag_id(self):
if self.has_dag():
return self.dag.dag_id
else:
return 'adhoc_' + self.owner
@property
def deps(self):
"""
Returns the list of dependencies for the operator. These differ from execution
context dependencies in that they are specific to tasks and can be
extended/overridden by subclasses.
"""
return {
NotInRetryPeriodDep(),
PrevDagrunDep(),
TriggerRuleDep(),
}
@property
def schedule_interval(self):
"""
The schedule interval of the DAG always wins over individual tasks so
that tasks within a DAG always line up. The task still needs a
schedule_interval as it may not be attached to a DAG.
"""
if self.has_dag():
return self.dag._schedule_interval
else:
return self._schedule_interval
@property
def priority_weight_total(self):
return sum([
t.priority_weight
for t in self.get_flat_relatives(upstream=False)
]) + self.priority_weight
    def pre_execute(self, context):
        """
        This hook is triggered right before self.execute() is called.
        :param context: the same template context dict passed to execute()
        """
        pass
    def execute(self, context):
        """
        This is the main method to derive when creating an operator.
        Context is the same dictionary used as when rendering jinja templates.
        Refer to get_template_context for more context.
        :raises NotImplementedError: always, in this abstract base class
        """
        raise NotImplementedError()
    def post_execute(self, context, result=None):
        """
        This hook is triggered right after self.execute() is called.
        It is passed the execution context and any results returned by the
        operator.
        :param context: the template context dict used for the run
        :param result: the value returned by execute(), if any
        """
        pass
    def on_kill(self):
        """
        Override this method to cleanup subprocesses when a task instance
        gets killed. Any use of the threading, subprocess or multiprocessing
        module within an operator needs to be cleaned up or it will leave
        ghost processes behind.
        """
        pass
    def __deepcopy__(self, memo):
        """
        Hack sorting double chained task lists by task_id to avoid hitting
        max_depth on deepcopy operations.
        """
        # Long up/downstream chains make the default deepcopy recurse deeply;
        # raise the interpreter limit instead of rewriting the traversal.
        sys.setrecursionlimit(5000)  # TODO fix this in a better way
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in list(self.__dict__.items()):
            if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
                setattr(result, k, copy.deepcopy(v, memo))
        # shared by reference on purpose: these may hold arbitrary
        # callables/objects that are not safely deep-copyable
        result.params = self.params
        if hasattr(self, 'user_defined_macros'):
            result.user_defined_macros = self.user_defined_macros
        if hasattr(self, 'user_defined_filters'):
            result.user_defined_filters = self.user_defined_filters
        return result
def render_template_from_field(self, attr, content, context, jinja_env):
"""
Renders a template from a field. If the field is a string, it will
simply render the string and return the result. If it is a collection or
nested set of collections, it will traverse the structure and render
all strings in it.
"""
rt = self.render_template
if isinstance(content, six.string_types):
result = jinja_env.from_string(content).render(**context)
elif isinstance(content, (list, tuple)):
result = [rt(attr, e, context) for e in content]
elif isinstance(content, dict):
result = {
k: rt("{}[{}]".format(attr, k), v, context)
for k, v in list(content.items())}
else:
param_type = type(content)
msg = (
"Type '{param_type}' used for parameter '{attr}' is "
"not supported for templating").format(**locals())
raise AirflowException(msg)
return result
def render_template(self, attr, content, context):
"""
Renders a template either from a file or directly in a field, and returns
the rendered result.
"""
jinja_env = self.dag.get_template_env() \
if hasattr(self, 'dag') \
else jinja2.Environment(cache_size=0)
exts = self.__class__.template_ext
if (
isinstance(content, six.string_types) and
any([content.endswith(ext) for ext in exts])):
return jinja_env.get_template(content).render(**context)
else:
return self.render_template_from_field(attr, content, context, jinja_env)
    def prepare_template(self):
        """
        Hook that is triggered after the templated fields get replaced
        by their content. If you need your operator to alter the
        content of the file before the template is rendered,
        it should override this method to do so.
        """
        pass
    def resolve_template_files(self):
        """
        Replace template-file fields with the content of the files.

        For every templated field whose value is a string ending in one of
        ``template_ext``, the file is loaded through the DAG's jinja loader
        and its source stored in place of the file name; prepare_template()
        is then called once.
        """
        # Getting the content of files for template_field / template_ext
        for attr in self.template_fields:
            content = getattr(self, attr)
            if content is not None and \
                isinstance(content, six.string_types) and \
                any([content.endswith(ext) for ext in self.template_ext]):
                env = self.dag.get_template_env()
                try:
                    # get_source() returns (source, filename, uptodate);
                    # keep only the template source text
                    setattr(self, attr, env.loader.get_source(env, content)[0])
                except Exception as e:
                    # best effort: log and keep the original field value
                    logging.exception(e)
        self.prepare_template()
@property
def upstream_list(self):
"""@property: list of tasks directly upstream"""
return [self.dag.get_task(tid) for tid in self._upstream_task_ids]
@property
def upstream_task_ids(self):
return self._upstream_task_ids
@property
def downstream_list(self):
"""@property: list of tasks directly downstream"""
return [self.dag.get_task(tid) for tid in self._downstream_task_ids]
@property
def downstream_task_ids(self):
return self._downstream_task_ids
    def clear(self, start_date=None, end_date=None, upstream=False, downstream=False):
        """
        Clears the state of task instances associated with the task, following
        the parameters specified.

        :param start_date: only clear instances on/after this execution date
        :param end_date: only clear instances on/before this execution date
        :param upstream: also clear all (transitively) upstream tasks
        :param downstream: also clear all (transitively) downstream tasks
        :return: the number of task instances cleared
        """
        session = settings.Session()
        TI = TaskInstance
        qry = session.query(TI).filter(TI.dag_id == self.dag_id)
        if start_date:
            qry = qry.filter(TI.execution_date >= start_date)
        if end_date:
            qry = qry.filter(TI.execution_date <= end_date)
        tasks = [self.task_id]
        if upstream:
            tasks += [
                t.task_id for t in self.get_flat_relatives(upstream=True)]
        if downstream:
            tasks += [
                t.task_id for t in self.get_flat_relatives(upstream=False)]
        qry = qry.filter(TI.task_id.in_(tasks))
        # count before clearing; the same query is consumed via .all() below
        count = qry.count()
        clear_task_instances(qry.all(), session, dag=self.dag)
        session.commit()
        session.close()
        return count
    def get_task_instances(self, session, start_date=None, end_date=None):
        """
        Get a set of task instance related to this task for a specific date
        range.

        NOTE(review): when start_date is None the ``>= None`` filter compares
        against NULL in SQL and matches no rows -- confirm callers always
        pass a start_date.
        """
        TI = TaskInstance
        end_date = end_date or datetime.now()
        return session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.task_id == self.task_id,
            TI.execution_date >= start_date,
            TI.execution_date <= end_date,
        ).order_by(TI.execution_date).all()
    def get_flat_relatives(self, upstream=False, l=None):
        """
        Get a flat list of relatives, either upstream or downstream.

        :param upstream: walk upstream dependencies when True, else downstream
        :param l: accumulator used by the recursion; also the return value
        """
        if not l:
            l = []
        for t in self.get_direct_relatives(upstream):
            # is_in guards against collecting the same task twice
            if not is_in(t, l):
                l.append(t)
                t.get_flat_relatives(upstream, l)
        return l
    def detect_downstream_cycle(self, task=None):
        """
        When invoked, this routine will raise an exception if a cycle is
        detected downstream from self. It is invoked when tasks are added to
        the DAG to detect cycles.

        :param task: the task the search originally started from (defaults
            to self on the initial call)
        :return: False when no cycle was found (raises otherwise)
        """
        if not task:
            task = self
        # NOTE(review): plain recursive descent without a visited set; nodes
        # reachable via multiple paths are revisited -- confirm this is
        # acceptable for typical DAG sizes.
        for t in self.get_direct_relatives():
            if task is t:
                msg = "Cycle detected in DAG. Faulty task: {0}".format(task)
                raise AirflowException(msg)
            else:
                t.detect_downstream_cycle(task=task)
        return False
    def run(
            self,
            start_date=None,
            end_date=None,
            ignore_first_depends_on_past=False,
            ignore_ti_state=False,
            mark_success=False):
        """
        Run a set of task instances for a date range.

        :param start_date: first execution date (defaults to the task's own)
        :param end_date: last execution date (defaults to the task's own,
            or now when the task has none)
        :param ignore_first_depends_on_past: skip the depends_on_past check
            for the very first date only
        :param ignore_ti_state: run regardless of existing instance state
        :param mark_success: mark the instances successful without running
        """
        start_date = start_date or self.start_date
        end_date = end_date or self.end_date or datetime.now()
        # one TaskInstance per schedule date in the range
        for dt in self.dag.date_range(start_date, end_date=end_date):
            TaskInstance(self, dt).run(
                mark_success=mark_success,
                ignore_depends_on_past=(
                    dt == start_date and ignore_first_depends_on_past),
                ignore_ti_state=ignore_ti_state)
def dry_run(self):
logging.info('Dry run')
for attr in self.template_fields:
content = getattr(self, attr)
if content and isinstance(content, six.string_types):
logging.info('Rendering template for {0}'.format(attr))
logging.info(content)
def get_direct_relatives(self, upstream=False):
"""
Get the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def __repr__(self):
return "<Task({self.__class__.__name__}): {self.task_id}>".format(
self=self)
@property
def task_type(self):
return self.__class__.__name__
def append_only_new(self, l, item):
if any([item is t for t in l]):
raise AirflowException(
'Dependency {self}, {item} already registered'
''.format(**locals()))
else:
l.append(item)
    def _set_relatives(self, task_or_task_list, upstream=False):
        """
        Wire this task to one or more other tasks in the given direction.

        :param task_or_task_list: a task or an iterable of tasks
        :param upstream: when True the given tasks become upstream of self
        :raises AirflowException: on non-operator arguments, tasks spanning
            multiple DAGs, no DAG at all, duplicate edges, or cycles
        """
        try:
            task_list = list(task_or_task_list)
        except TypeError:
            # a single task was passed in
            task_list = [task_or_task_list]
        for t in task_list:
            if not isinstance(t, BaseOperator):
                raise AirflowException(
                    "Relationships can only be set between "
                    "Operators; received {}".format(t.__class__.__name__))
        # relationships can only be set if the tasks share a single DAG. Tasks
        # without a DAG are assigned to that DAG.
        dags = set(t.dag for t in [self] + task_list if t.has_dag())
        if len(dags) > 1:
            raise AirflowException(
                'Tried to set relationships between tasks in '
                'more than one DAG: {}'.format(dags))
        elif len(dags) == 1:
            dag = list(dags)[0]
        else:
            raise AirflowException(
                "Tried to create relationships between tasks that don't have "
                "DAGs yet. Set the DAG for at least one "
                "task and try again: {}".format([self] + task_list))
        if dag and not self.has_dag():
            self.dag = dag
        for task in task_list:
            if dag and not task.has_dag():
                task.dag = dag
            # register the edge on both endpoints, refusing duplicates
            if upstream:
                task.append_only_new(task._downstream_task_ids, self.task_id)
                self.append_only_new(self._upstream_task_ids, task.task_id)
            else:
                self.append_only_new(self._downstream_task_ids, task.task_id)
                task.append_only_new(task._upstream_task_ids, self.task_id)
        self.detect_downstream_cycle()
def set_downstream(self, task_or_task_list):
"""
Set a task, or a task task to be directly downstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=False)
def set_upstream(self, task_or_task_list):
"""
Set a task, or a task task to be directly upstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=True)
def xcom_push(
self,
context,
key,
value,
execution_date=None):
"""
See TaskInstance.xcom_push()
"""
context['ti'].xcom_push(
key=key,
value=value,
execution_date=execution_date)
def xcom_pull(
self,
context,
task_ids,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=None):
"""
See TaskInstance.xcom_pull()
"""
return context['ti'].xcom_pull(
key=key,
task_ids=task_ids,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
class DagModel(Base):
    # ORM model backing the `dag` table; one row per known DAG.
    __tablename__ = "dag"
    """
    These items are stored in the database for state related information
    """
    # (the string above is not a real class docstring since it does not come
    # first in the class body; kept as-is)
    dag_id = Column(String(ID_LEN), primary_key=True)
    # A DAG can be paused from the UI / DB
    # Set this default value of is_paused based on a configuration value!
    # NOTE(review): read once at import time; later config changes do not
    # affect the column default.
    is_paused_at_creation = configuration.getboolean('core',
                                                     'dags_are_paused_at_creation')
    is_paused = Column(Boolean, default=is_paused_at_creation)
    # Whether the DAG is a subdag
    is_subdag = Column(Boolean, default=False)
    # Whether that DAG was seen on the last DagBag load
    is_active = Column(Boolean, default=False)
    # Last time the scheduler started
    last_scheduler_run = Column(DateTime)
    # Last time this DAG was pickled
    last_pickled = Column(DateTime)
    # Time when the DAG last received a refresh signal
    # (e.g. the DAG's "refresh" button was clicked in the web UI)
    last_expired = Column(DateTime)
    # Whether (one of) the scheduler is scheduling this DAG at the moment
    scheduler_lock = Column(Boolean)
    # Foreign key to the latest pickle_id
    pickle_id = Column(Integer)
    # The location of the file containing the DAG object
    fileloc = Column(String(2000))
    # String representing the owners
    owners = Column(String(2000))
    def __repr__(self):
        return "<DAG: {self.dag_id}>".format(self=self)
    @classmethod
    def get_current(cls, dag_id):
        """Fetch the DagModel row for ``dag_id`` (or None) from the DB."""
        session = settings.Session()
        obj = session.query(cls).filter(cls.dag_id == dag_id).first()
        # detach the row so it stays usable after the session is closed
        session.expunge_all()
        session.commit()
        session.close()
        return obj
@functools.total_ordering
class DAG(BaseDag, LoggingMixin):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
    dependencies. A dag also has a schedule, a start date and an end date
(optional). For each schedule, (say daily or hourly), the DAG needs to run
each individual tasks as their dependencies are met. Certain tasks have
the property of depending on their own past, meaning that they can't run
until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
:param dag_id: The id of the DAG
:type dag_id: string
:param description: The description for the DAG to e.g. be shown on the webserver
:type description: string
:param schedule_interval: Defines how often that DAG runs, this
timedelta object gets added to your latest task instance's
execution_date to figure out the next schedule
:type schedule_interval: datetime.timedelta or
dateutil.relativedelta.relativedelta or str that acts as a cron
expression
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
:param end_date: A date beyond which your DAG won't run, leave to None
for open ended scheduling
:type end_date: datetime.datetime
:param template_searchpath: This list of folders (non relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
    :type template_searchpath: string or list of strings
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:type user_defined_macros: dict
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:type user_defined_filters: dict
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:type default_args: dict
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:type params: dict
:param concurrency: the number of task instances allowed to run
concurrently
:type concurrency: int
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:type max_active_runs: int
:param dagrun_timeout: specify how long a DagRun should be up before
timing out / failing, so that new DagRuns can be created
:type dagrun_timeout: datetime.timedelta
:param sla_miss_callback: specify a function to call when reporting SLA
timeouts.
:type sla_miss_callback: types.FunctionType
:param default_view: Specify DAG default view (tree, graph, duration, gantt, landing_times)
:type default_view: string
:param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT)
:type orientation: string
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
:type catchup: bool
"""
    def __init__(
            self, dag_id,
            description='',
            schedule_interval=timedelta(days=1),
            start_date=None, end_date=None,
            full_filepath=None,
            template_searchpath=None,
            user_defined_macros=None,
            user_defined_filters=None,
            default_args=None,
            concurrency=configuration.getint('core', 'dag_concurrency'),
            max_active_runs=configuration.getint(
                'core', 'max_active_runs_per_dag'),
            dagrun_timeout=None,
            sla_miss_callback=None,
            default_view=configuration.get('webserver', 'dag_default_view').lower(),
            orientation=configuration.get('webserver', 'dag_orientation'),
            catchup=configuration.getboolean('scheduler', 'catchup_by_default'),
            params=None):
        # NOTE(review): the configuration-derived defaults above are
        # evaluated once at import time, not per DAG instantiation.
        self.user_defined_macros = user_defined_macros
        self.user_defined_filters = user_defined_filters
        self.default_args = default_args or {}
        self.params = params or {}
        # merging potentially conflicting default_args['params'] into params
        if 'params' in self.default_args:
            self.params.update(self.default_args['params'])
            del self.default_args['params']
        validate_key(dag_id)
        # Properties from BaseDag
        self._dag_id = dag_id
        self._full_filepath = full_filepath if full_filepath else ''
        self._concurrency = concurrency
        self._pickle_id = None
        self._description = description
        # set file location to caller source path
        self.fileloc = inspect.getsourcefile(inspect.stack()[1][0])
        self.task_dict = dict()
        self.start_date = start_date
        self.end_date = end_date
        self.schedule_interval = schedule_interval
        # normalize the interval: presets ('@daily', ...) become cron
        # strings; '@once' means "never repeat" and is stored as None
        if schedule_interval in cron_presets:
            self._schedule_interval = cron_presets.get(schedule_interval)
        elif schedule_interval == '@once':
            self._schedule_interval = None
        else:
            self._schedule_interval = schedule_interval
        if isinstance(template_searchpath, six.string_types):
            template_searchpath = [template_searchpath]
        self.template_searchpath = template_searchpath
        self.parent_dag = None  # Gets set when DAGs are loaded
        self.last_loaded = datetime.now()
        self.safe_dag_id = dag_id.replace('.', '__dot__')
        self.max_active_runs = max_active_runs
        self.dagrun_timeout = dagrun_timeout
        self.sla_miss_callback = sla_miss_callback
        self.default_view = default_view
        self.orientation = orientation
        self.catchup = catchup
        self.is_subdag = False  # DagBag.bag_dag() will set this to True if appropriate
        self.partial = False
        # the set of attributes compared by __eq__ and folded into __hash__
        self._comps = {
            'dag_id',
            'task_ids',
            'parent_dag',
            'start_date',
            'schedule_interval',
            'full_filepath',
            'template_searchpath',
            'last_loaded',
        }
def __repr__(self):
return "<DAG: {self.dag_id}>".format(self=self)
def __eq__(self, other):
return (
type(self) == type(other) and
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
all(getattr(self, c, None) == getattr(other, c, None)
for c in self._comps))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.dag_id < other.dag_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
# task_ids returns a list and lists can't be hashed
if c == 'task_ids':
val = tuple(self.task_dict.keys())
else:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# Context Manager -----------------------------------------------
    def __enter__(self):
        """
        Enter a ``with dag:`` block: remember the previously active
        context-manager DAG and install this one, so operators created
        inside the block attach themselves to it.
        """
        global _CONTEXT_MANAGER_DAG
        self._old_context_manager_dag = _CONTEXT_MANAGER_DAG
        _CONTEXT_MANAGER_DAG = self
        return self
    def __exit__(self, _type, _value, _tb):
        """Restore the previously active context-manager DAG."""
        global _CONTEXT_MANAGER_DAG
        _CONTEXT_MANAGER_DAG = self._old_context_manager_dag
# /Context Manager ----------------------------------------------
def date_range(self, start_date, num=None, end_date=datetime.now()):
if num:
end_date = None
return utils_date_range(
start_date=start_date, end_date=end_date,
num=num, delta=self._schedule_interval)
    def following_schedule(self, dttm):
        """
        Return the next schedule time after ``dttm``: the next cron hit for
        string intervals, or ``dttm + interval`` for timedeltas.

        NOTE(review): implicitly returns None when _schedule_interval is
        neither a string nor a timedelta (e.g. '@once' stored as None) --
        confirm callers handle the None case.
        """
        if isinstance(self._schedule_interval, six.string_types):
            cron = croniter(self._schedule_interval, dttm)
            return cron.get_next(datetime)
        elif isinstance(self._schedule_interval, timedelta):
            return dttm - self._schedule_interval if False else dttm + self._schedule_interval
    def previous_schedule(self, dttm):
        """
        Return the schedule time immediately before ``dttm``; None for
        non-string, non-timedelta intervals (see following_schedule).
        """
        if isinstance(self._schedule_interval, six.string_types):
            cron = croniter(self._schedule_interval, dttm)
            return cron.get_prev(datetime)
        elif isinstance(self._schedule_interval, timedelta):
            return dttm - self._schedule_interval
def normalize_schedule(self, dttm):
"""
Returns dttm + interval unless dttm is first interval then it returns dttm
"""
following = self.following_schedule(dttm)
# in case of @once
if not following:
return dttm
if self.previous_schedule(following) != dttm:
return following
return dttm
    @provide_session
    def get_last_dagrun(self, session=None, include_externally_triggered=False):
        """
        Returns the last dag run for this dag, None if there was none.
        Last dag run can be any type of run eg. scheduled or backfilled.
        Overridden DagRuns are ignored

        :param session: database session (injected by provide_session)
        :param include_externally_triggered: also consider runs created by
            external triggers (excluded by default)
        """
        DR = DagRun
        qry = session.query(DR).filter(
            DR.dag_id == self.dag_id,
        )
        if not include_externally_triggered:
            qry = qry.filter(DR.external_trigger.__eq__(False))
        # newest execution_date first, so the first row is the latest run
        qry = qry.order_by(DR.execution_date.desc())
        last = qry.first()
        return last
    # Thin BaseDag accessors over the private backing fields set in __init__.
    @property
    def dag_id(self):
        # unique identifier of this DAG
        return self._dag_id
    @dag_id.setter
    def dag_id(self, value):
        self._dag_id = value
    @property
    def full_filepath(self):
        # path of the file the DAG was defined in
        return self._full_filepath
    @full_filepath.setter
    def full_filepath(self, value):
        self._full_filepath = value
    @property
    def concurrency(self):
        # max number of concurrently running task instances for this DAG
        return self._concurrency
    @concurrency.setter
    def concurrency(self, value):
        self._concurrency = value
    @property
    def description(self):
        # free-text description (read-only)
        return self._description
    @property
    def pickle_id(self):
        # id of the latest DagPickle row, when the DAG has been pickled
        return self._pickle_id
    @pickle_id.setter
    def pickle_id(self, value):
        self._pickle_id = value
    @property
    def tasks(self):
        # Returns a *fresh* list built from task_dict; mutating the returned
        # list does not modify the DAG (tasks must go through add_task()).
        return list(self.task_dict.values())
    @tasks.setter
    def tasks(self, val):
        # deliberately read-only: direct assignment would bypass add_task()
        raise AttributeError(
            'DAG.tasks can not be modified. Use dag.add_task() instead.')
@property
def task_ids(self):
return list(self.task_dict.keys())
@property
def active_task_ids(self):
return list(k for k, v in self.task_dict.items() if not v.adhoc)
@property
def active_tasks(self):
return [t for t in self.tasks if not t.adhoc]
    @property
    def filepath(self):
        """
        File location of where the dag object is instantiated
        """
        # strip the DAGS_FOLDER prefix and, failing that, this module's own
        # directory, to get a path relative to the dag folder
        fn = self.full_filepath.replace(settings.DAGS_FOLDER + '/', '')
        fn = fn.replace(os.path.dirname(__file__) + '/', '')
        return fn
@property
def folder(self):
"""
Folder location of where the dag object is instantiated
"""
return os.path.dirname(self.full_filepath)
@property
def owner(self):
return ", ".join(list(set([t.owner for t in self.tasks])))
    @property
    @provide_session
    def concurrency_reached(self, session=None):
        """
        Returns a boolean indicating whether the concurrency limit for this DAG
        has been reached
        """
        # counts RUNNING task instances belonging to this DAG's tasks
        TI = TaskInstance
        qry = session.query(func.count(TI.task_id)).filter(
            TI.dag_id == self.dag_id,
            TI.task_id.in_(self.task_ids),
            TI.state == State.RUNNING,
        )
        return qry.scalar() >= self.concurrency
    @property
    @provide_session
    def is_paused(self, session=None):
        """
        Returns a boolean indicating whether this DAG is paused
        """
        # the pause flag lives on the DagModel row, not on this object
        qry = session.query(DagModel).filter(
            DagModel.dag_id == self.dag_id)
        return qry.value('is_paused')
@provide_session
def get_active_runs(self, session=None):
"""
Returns a list of "running" tasks
:param session:
:return: List of execution dates
"""
runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)
active_dates = []
for run in runs:
active_dates.append(run.execution_date)
return active_dates
    @provide_session
    def get_dagrun(self, execution_date, session=None):
        """
        Returns the dag run for a given execution date if it exists, otherwise
        none.
        :param execution_date: The execution date of the DagRun to find.
        :param session:
        :return: The DagRun if found, otherwise None.
        """
        dagrun = (
            session.query(DagRun)
            .filter(
                DagRun.dag_id == self.dag_id,
                DagRun.execution_date == execution_date)
            .first())
        return dagrun
    @property
    def latest_execution_date(self):
        """
        Returns the latest date for which at least one dag run exists
        """
        session = settings.Session()
        # scalar() yields None when the DAG has no runs at all
        execution_date = session.query(func.max(DagRun.execution_date)).filter(
            DagRun.dag_id == self.dag_id
        ).scalar()
        session.commit()
        session.close()
        return execution_date
    def descendants(self, dagbag, task_ids=None, include_downstream=False,
                    include_upstream=False, recursive=False):
        """
        Find (partial copies of) DAGs in ``dagbag`` that depend on this DAG
        through ExternalTaskSensor tasks watching the given task ids.

        :param dagbag: the DagBag to scan for dependent DAGs
        :param task_ids: restrict to sensors watching these ids (default: all)
        :param include_downstream: include each sensor's downstream tasks
        :param include_upstream: include each sensor's upstream tasks
        :param recursive: also collect descendants of the dependent DAGs
        :return: list of sub-DAG copies depending on this DAG
        """
        from airflow.operators.sensors import ExternalTaskSensor
        if not task_ids:
            task_ids = self.task_ids
        descendants = []
        for _, dag in dagbag.dags.items():
            # sensors in `dag` that wait on one of this DAG's tasks
            tasks = [task for task in dag.tasks if
                     isinstance(task, ExternalTaskSensor) and
                     task.external_dag_id == self.dag_id and
                     task.external_task_id in task_ids]
            if not tasks:
                continue
            # anchor each task_id so the regex matches whole ids only
            task_regex = "|".join(map(
                lambda x: "^{0}$".format(x.task_id), tasks))
            dependent_dag = dag.sub_dag(
                task_regex=r"{0}".format(task_regex),
                include_downstream=include_downstream,
                include_upstream=include_upstream)
            descendants.append(dependent_dag)
            if recursive:
                descendants.extend(dependent_dag.descendants(
                    dagbag,
                    include_downstream=include_downstream,
                    include_upstream=include_upstream,
                    recursive=recursive))
        return descendants
    @property
    def subdags(self):
        """
        Returns a list of the subdag objects associated to this DAG
        """
        # Check SubDag for class but don't check class directly, see
        # https://github.com/airbnb/airflow/issues/1168
        from airflow.operators.subdag_operator import SubDagOperator
        l = []
        for task in self.tasks:
            if (isinstance(task, SubDagOperator) or
                    # TODO remove in Airflow 2.0
                    type(task).__name__ == 'SubDagOperator'):
                l.append(task.subdag)
                # recurse into nested subdags as well
                l += task.subdag.subdags
        return l
def resolve_template_files(self):
for t in self.tasks:
t.resolve_template_files()
    def get_template_env(self):
        """
        Returns a jinja2 Environment while taking into account the DAGs
        template_searchpath, user_defined_macros and user_defined_filters
        """
        # the DAG file's own folder is always on the template search path
        searchpath = [self.folder]
        if self.template_searchpath:
            searchpath += self.template_searchpath
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath),
            extensions=["jinja2.ext.do"],
            cache_size=0)
        # expose user macros as globals and user filters as jinja filters
        if self.user_defined_macros:
            env.globals.update(self.user_defined_macros)
        if self.user_defined_filters:
            env.filters.update(self.user_defined_filters)
        return env
def set_dependency(self, upstream_task_id, downstream_task_id):
"""
Simple utility method to set dependency between two tasks that
already have been added to the DAG using add_task()
"""
self.get_task(upstream_task_id).set_downstream(
self.get_task(downstream_task_id))
    def get_task_instances(
            self, session, start_date=None, end_date=None, state=None):
        """
        Return task instances for this DAG's tasks in the given window,
        ordered by execution date.

        :param session: database session
        :param start_date: defaults to 30 days ago
        :param end_date: defaults to now
        :param state: optionally restrict to a single TaskInstance state
        """
        TI = TaskInstance
        if not start_date:
            start_date = (datetime.today() - timedelta(30)).date()
        # NOTE(review): combine() keeps only the date part, so a datetime
        # passed by the caller is truncated to midnight -- confirm intended.
        start_date = datetime.combine(start_date, datetime.min.time())
        end_date = end_date or datetime.now()
        tis = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date >= start_date,
            TI.execution_date <= end_date,
            TI.task_id.in_([t.task_id for t in self.tasks]),
        )
        if state:
            tis = tis.filter(TI.state == state)
        tis = tis.order_by(TI.execution_date).all()
        return tis
@property
def roots(self):
return [t for t in self.tasks if not t.downstream_list]
def topological_sort(self):
"""
Sorts tasks in topographical order, such that a task comes after any of its
upstream dependencies.
Heavily inspired by:
http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/
:return: list of tasks in topological order
"""
# copy the the tasks so we leave it unmodified
graph_unsorted = self.tasks[:]
graph_sorted = []
# special case
if len(self.tasks) == 0:
return tuple(graph_sorted)
# Run until the unsorted graph is empty.
while graph_unsorted:
# Go through each of the node/edges pairs in the unsorted
# graph. If a set of edges doesn't contain any nodes that
# haven't been resolved, that is, that are still in the
# unsorted graph, remove the pair from the unsorted graph,
# and append it to the sorted graph. Note here that by using
# using the items() method for iterating, a copy of the
# unsorted graph is used, allowing us to modify the unsorted
# graph as we move through it. We also keep a flag for
# checking that that graph is acyclic, which is true if any
# nodes are resolved during each pass through the graph. If
# not, we need to bail out as the graph therefore can't be
# sorted.
acyclic = False
for node in list(graph_unsorted):
for edge in node.upstream_list:
if edge in graph_unsorted:
break
# no edges in upstream tasks
else:
acyclic = True
graph_unsorted.remove(node)
graph_sorted.append(node)
if not acyclic:
raise AirflowException("A cyclic dependency occurred in dag: {}"
.format(self.dag_id))
return tuple(graph_sorted)
@provide_session
def set_dag_runs_state(
self, state=State.RUNNING, session=None):
drs = session.query(DagModel).filter_by(dag_id=self.dag_id).all()
dirty_ids = []
for dr in drs:
dr.state = state
dirty_ids.append(dr.dag_id)
DagStat.update(dirty_ids, session=session)
def clear(
self, start_date=None, end_date=None,
only_failed=False,
only_running=False,
confirm_prompt=False,
include_subdags=True,
reset_dag_runs=True,
dry_run=False):
"""
Clears a set of task instances associated with the current dag for
a specified date range.
"""
session = settings.Session()
TI = TaskInstance
tis = session.query(TI)
if include_subdags:
# Crafting the right filter for dag_id and task_ids combo
conditions = []
for dag in self.subdags + [self]:
if dag.task_ids:
conditions.append(
TI.dag_id.like(dag.dag_id) &
TI.task_id.in_(dag.task_ids)
)
tis = tis.filter(or_(*conditions))
else:
tis = session.query(TI).filter(TI.dag_id == self.dag_id)
tis = tis.filter(TI.task_id.in_(self.task_ids))
if start_date:
tis = tis.filter(TI.execution_date >= start_date)
if end_date:
tis = tis.filter(TI.execution_date <= end_date)
if only_failed:
tis = tis.filter(TI.state == State.FAILED)
if only_running:
tis = tis.filter(TI.state == State.RUNNING)
if dry_run:
tis = tis.all()
session.expunge_all()
return tis
count = tis.count()
do_it = True
if count == 0:
print("Nothing to clear.")
return 0
if confirm_prompt:
ti_list = "\n".join([str(t) for t in tis])
question = (
"You are about to delete these {count} tasks:\n"
"{ti_list}\n\n"
"Are you sure? (yes/no): ").format(**locals())
do_it = utils.helpers.ask_yesno(question)
if do_it:
clear_task_instances(tis.all(), session, dag=self)
if reset_dag_runs:
self.set_dag_runs_state(session=session)
else:
count = 0
print("Bail. Nothing was cleared.")
session.commit()
session.close()
return count
    def __deepcopy__(self, memo):
        """Deep copy the DAG, sharing macros/filters/params by reference."""
        # Switcharoo to go around deepcopying objects coming through the
        # backdoor
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in list(self.__dict__.items()):
            if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
                setattr(result, k, copy.deepcopy(v, memo))
        # shared on purpose: these may hold arbitrary callables/objects
        # that are not safely deep-copyable
        result.user_defined_macros = self.user_defined_macros
        result.user_defined_filters = self.user_defined_filters
        result.params = self.params
        return result
    def sub_dag(self, task_regex, include_downstream=False,
                include_upstream=True):
        """
        Returns a subset of the current dag as a deep copy of the current dag
        based on a regex that should match one or many tasks, and includes
        upstream and downstream neighbours based on the flag passed.
        """
        dag = copy.deepcopy(self)
        regex_match = [
            t for t in dag.tasks if re.findall(task_regex, t.task_id)]
        also_include = []
        for t in regex_match:
            if include_downstream:
                also_include += t.get_flat_relatives(upstream=False)
            if include_upstream:
                also_include += t.get_flat_relatives(upstream=True)
        # Compiling the unique list of tasks that made the cut
        dag.task_dict = {t.task_id: t for t in regex_match + also_include}
        for t in dag.tasks:
            # Removing upstream/downstream references to tasks that did not
            # make the cut
            t._upstream_task_ids = [
                tid for tid in t._upstream_task_ids if tid in dag.task_ids]
            t._downstream_task_ids = [
                tid for tid in t._downstream_task_ids if tid in dag.task_ids]
        if len(dag.tasks) < len(self.tasks):
            # flag that this is a partial view of the original DAG
            dag.partial = True
        return dag
def has_task(self, task_id):
return task_id in (t.task_id for t in self.tasks)
def get_task(self, task_id):
if task_id in self.task_dict:
return self.task_dict[task_id]
raise AirflowException("Task {task_id} not found".format(**locals()))
    @provide_session
    def pickle_info(self, session=None):
        """
        Report whether this DAG can be pickled, with size and timing info.

        :return: dict with ``is_picklable`` and either ``pickle_len`` /
            ``pickling_duration`` or a ``stacktrace`` on failure
        """
        d = {}
        d['is_picklable'] = True
        try:
            dttm = datetime.now()
            pickled = pickle.dumps(self)
            d['pickle_len'] = len(pickled)
            d['pickling_duration'] = "{}".format(datetime.now() - dttm)
        except Exception as e:
            # best effort: record the failure instead of raising
            logging.debug(e)
            d['is_picklable'] = False
            d['stacktrace'] = traceback.format_exc()
        return d
    @provide_session
    def pickle(self, session=None):
        """
        Persist a pickled copy of this DAG, reusing the existing DagPickle
        row when it still matches, and return the DagPickle object.
        """
        dag = session.query(
            DagModel).filter(DagModel.dag_id == self.dag_id).first()
        dp = None
        if dag and dag.pickle_id:
            dp = session.query(DagPickle).filter(
                DagPickle.id == dag.pickle_id).first()
        if not dp or dp.pickle != self:
            # no pickle yet, or the stored one is stale: write a new row
            dp = DagPickle(dag=self)
            session.add(dp)
            self.last_pickled = datetime.now()
            session.commit()
            self.pickle_id = dp.id
        return dp
    def tree_view(self):
        """
        Shows an ascii tree representation of the DAG
        """
        # NOTE(review): traversal starts at self.roots (tasks with no
        # downstream) and then follows upstream_list despite the helper's
        # name, so the tree is printed from sinks toward sources -- confirm
        # this is the intended orientation.
        def get_downstream(task, level=0):
            print((" " * level * 4) + str(task))
            level += 1
            for t in task.upstream_list:
                get_downstream(t, level)
        for t in self.roots:
            get_downstream(t)
def add_task(self, task):
"""
Add a task to the DAG
:param task: the task you want to add
:type task: task
"""
if not self.start_date and not task.start_date:
raise AirflowException("Task is missing the start_date parameter")
# if the task has no start date, assign it the same as the DAG
elif not task.start_date:
task.start_date = self.start_date
# otherwise, the task will start on the later of its own start date and
# the DAG's start date
elif self.start_date:
task.start_date = max(task.start_date, self.start_date)
# if the task has no end date, assign it the same as the dag
if not task.end_date:
task.end_date = self.end_date
# otherwise, the task will end on the earlier of its own end date and
# the DAG's end date
elif task.end_date and self.end_date:
task.end_date = min(task.end_date, self.end_date)
if task.task_id in self.task_dict:
# TODO: raise an error in Airflow 2.0
warnings.warn(
'The requested task could not be added to the DAG because a '
'task with task_id {} is already in the DAG. Starting in '
'Airflow 2.0, trying to overwrite a task will raise an '
'exception.'.format(task.task_id),
category=PendingDeprecationWarning)
else:
self.tasks.append(task)
self.task_dict[task.task_id] = task
task.dag = self
self.task_count = len(self.tasks)
def add_tasks(self, tasks):
"""
Add a list of tasks to the DAG
:param tasks: a lit of tasks you want to add
:type tasks: list of tasks
"""
for task in tasks:
self.add_task(task)
def db_merge(self):
BO = BaseOperator
session = settings.Session()
tasks = session.query(BO).filter(BO.dag_id == self.dag_id).all()
for t in tasks:
session.delete(t)
session.commit()
session.merge(self)
session.commit()
    def run(
            self,
            start_date=None,
            end_date=None,
            mark_success=False,
            include_adhoc=False,
            local=False,
            executor=None,
            donot_pickle=configuration.getboolean('core', 'donot_pickle'),
            ignore_task_deps=False,
            ignore_first_depends_on_past=False,
            pool=None):
        """
        Runs the DAG.

        Builds a BackfillJob with the given arguments and runs it
        synchronously via ``job.run()``.

        :param local: if True and no executor is given, use a LocalExecutor
        :param executor: executor to run the job with; when None, falls back
            to LocalExecutor (if ``local``) or the configured default executor
        :param donot_pickle: NOTE the default is read from configuration at
            import time, when this method is defined, not per call
        All remaining parameters are passed through to BackfillJob unchanged.
        """
        # imported here, matching the file's pattern of deferring
        # airflow.jobs imports (see e.g. DagRun.find)
        from airflow.jobs import BackfillJob
        if not executor and local:
            executor = LocalExecutor()
        elif not executor:
            executor = GetDefaultExecutor()
        job = BackfillJob(
            self,
            start_date=start_date,
            end_date=end_date,
            mark_success=mark_success,
            include_adhoc=include_adhoc,
            executor=executor,
            donot_pickle=donot_pickle,
            ignore_task_deps=ignore_task_deps,
            ignore_first_depends_on_past=ignore_first_depends_on_past,
            pool=pool)
        job.run()
def cli(self):
"""
Exposes a CLI specific to this DAG
"""
from airflow.bin import cli
parser = cli.CLIFactory.get_parser(dag_parser=True)
args = parser.parse_args()
args.func(args, self)
    @provide_session
    def create_dagrun(self,
                      run_id,
                      state,
                      execution_date=None,
                      start_date=None,
                      external_trigger=False,
                      conf=None,
                      session=None):
        """
        Creates a dag run from this dag including the tasks associated with this dag.
        Returns the dag run.

        :param run_id: defines the run id for this dag run
        :type run_id: string
        :param execution_date: the execution date of this dag run
        :type execution_date: datetime
        :param state: the state of the dag run
        :type state: State
        :param start_date: the date this dag run should be evaluated
        :type start_date: datetime
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param conf: payload stored on the run's pickled ``conf`` column
        :param session: database session
        :type session: Session
        """
        run = DagRun(
            dag_id=self.dag_id,
            run_id=run_id,
            execution_date=execution_date,
            start_date=start_date,
            external_trigger=external_trigger,
            conf=conf,
            state=state
        )
        session.add(run)
        # mark the per-dag stats stale so they are recomputed on next update
        DagStat.set_dirty(dag_id=self.dag_id, session=session)
        session.commit()
        run.dag = self
        # create the associated task instances
        # state is None at the moment of creation
        run.verify_integrity(session=session)
        run.refresh_from_db()
        return run
    @staticmethod
    @provide_session
    def sync_to_db(dag, owner, sync_time, session=None):
        """
        Save attributes about this DAG to the DB. Note that this method
        can be called for both DAGs and SubDAGs. A SubDag is actually a
        SubDagOperator.

        :param dag: the DAG object to save to the DB
        :type dag: DAG
        :param owner: value written to ``DagModel.owners``
        :param sync_time: The time that the DAG should be marked as sync'ed
        :type sync_time: datetime
        :return: None
        """
        orm_dag = session.query(
            DagModel).filter(DagModel.dag_id == dag.dag_id).first()
        # create the ORM row on first sight of this dag_id
        if not orm_dag:
            orm_dag = DagModel(dag_id=dag.dag_id)
            logging.info("Creating ORM DAG for %s",
                         dag.dag_id)
        orm_dag.fileloc = dag.fileloc
        orm_dag.is_subdag = dag.is_subdag
        orm_dag.owners = owner
        orm_dag.is_active = True
        orm_dag.last_scheduler_run = sync_time
        session.merge(orm_dag)
        session.commit()
        # recurse so every subdag row is kept in sync as well
        for subdag in dag.subdags:
            DAG.sync_to_db(subdag, owner, sync_time, session=session)
    @staticmethod
    @provide_session
    def deactivate_unknown_dags(active_dag_ids, session=None):
        """
        Given a list of known DAGs, deactivate any other DAGs that are
        marked as active in the ORM

        :param active_dag_ids: list of DAG IDs that are active
        :type active_dag_ids: list[unicode]
        :return: None
        """
        # NOTE(review): with an empty list this is a no-op, i.e. nothing is
        # deactivated when no DAGs are known -- presumably to avoid an empty
        # NOT IN clause; confirm this is the intended behavior for callers.
        if len(active_dag_ids) == 0:
            return
        for dag in session.query(
                DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
            dag.is_active = False
            session.merge(dag)
    @staticmethod
    @provide_session
    def deactivate_stale_dags(expiration_date, session=None):
        """
        Deactivate any DAGs that were last touched by the scheduler before
        the expiration date. These DAGs were likely deleted.

        :param expiration_date: set inactive DAGs that were touched before this
            time
        :type expiration_date: datetime
        :return: None
        """
        # only currently-active rows are considered; already-inactive DAGs
        # are left alone
        for dag in session.query(
                DagModel).filter(DagModel.last_scheduler_run < expiration_date,
                                 DagModel.is_active).all():
            logging.info("Deactivating DAG ID %s since it was last touched "
                         "by the scheduler at %s",
                         dag.dag_id,
                         dag.last_scheduler_run.isoformat())
            dag.is_active = False
            session.merge(dag)
            session.commit()
@staticmethod
@provide_session
def get_num_task_instances(dag_id, task_ids, states=None, session=None):
"""
Returns the number of task instances in the given DAG.
:param session: ORM session
:param dag_id: ID of the DAG to get the task concurrency of
:type dag_id: unicode
:param task_ids: A list of valid task IDs for the given DAG
:type task_ids: list[unicode]
:param states: A list of states to filter by if supplied
:type states: list[state]
:return: The number of running tasks
:rtype: int
"""
qry = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id.in_(task_ids))
if states is not None:
if None in states:
qry = qry.filter(or_(
TaskInstance.state.in_(states),
TaskInstance.state.is_(None)))
else:
qry = qry.filter(TaskInstance.state.in_(states))
return qry.scalar()
class Chart(Base):
    """ORM model for a user-defined chart: a SQL query plus its display
    settings (type, layout, scale, height, ...)."""
    __tablename__ = "chart"
    id = Column(Integer, primary_key=True)
    label = Column(String(200))
    conn_id = Column(String(ID_LEN), nullable=False)
    user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)
    chart_type = Column(String(100), default="line")
    sql_layout = Column(String(50), default="series")
    sql = Column(Text, default="SELECT series, x, y FROM table")
    y_log_scale = Column(Boolean)
    show_datatable = Column(Boolean)
    show_sql = Column(Boolean, default=True)
    height = Column(Integer, default=600)
    default_params = Column(String(5000), default="{}")
    owner = relationship(
        "User", cascade=False, cascade_backrefs=False, backref='charts')
    x_is_date = Column(Boolean, default=True)
    iteration_no = Column(Integer, default=0)
    last_modified = Column(DateTime, default=func.now())
    def __repr__(self):
        return self.label
class KnownEventType(Base):
    """ORM model naming a category of KnownEvent."""
    __tablename__ = "known_event_type"
    id = Column(Integer, primary_key=True)
    # NOTE(review): "know_event_type" looks like a typo for
    # "known_event_type"; renaming would require a DB migration
    know_event_type = Column(String(200))
    def __repr__(self):
        return self.know_event_type
class KnownEvent(Base):
    """ORM model for a dated, user-reported event with a type and free-text
    description."""
    __tablename__ = "known_event"
    id = Column(Integer, primary_key=True)
    label = Column(String(200))
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    user_id = Column(Integer(), ForeignKey('users.id'),)
    known_event_type_id = Column(Integer(), ForeignKey('known_event_type.id'),)
    reported_by = relationship(
        "User", cascade=False, cascade_backrefs=False, backref='known_events')
    event_type = relationship(
        "KnownEventType",
        cascade=False,
        cascade_backrefs=False, backref='known_events')
    description = Column(Text)
    def __repr__(self):
        return self.label
class Variable(Base):
    """
    A generic key/value store kept in the metadata database.

    The value lives in the ``val`` column; ``set_val`` transparently
    encrypts it with the Fernet key when the ``cryptography`` package is
    available, and ``is_encrypted`` records which form was stored.
    """
    __tablename__ = "variable"
    id = Column(Integer, primary_key=True)
    key = Column(String(ID_LEN), unique=True)
    _val = Column('val', Text)
    is_encrypted = Column(Boolean, unique=False, default=False)
    def __repr__(self):
        # Hiding the value
        return '{} : {}'.format(self.key, self._val)
    def get_val(self):
        """Return the stored value, decrypting it if it was encrypted."""
        if self._val and self.is_encrypted:
            try:
                fernet = get_fernet()
            # narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit
            except Exception:
                raise AirflowException(
                    "Can't decrypt _val for key={}, FERNET_KEY configuration \
                    missing".format(self.key))
            return fernet.decrypt(bytes(self._val, 'utf-8')).decode()
        else:
            return self._val
    def set_val(self, value):
        """Store ``value``, encrypted when a Fernet key is available.

        Falsy values (None, '') are silently not stored.
        """
        if value:
            try:
                fernet = get_fernet()
                self._val = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_encrypted = True
            # NOTE(review): get_fernet raises ImportError when cryptography
            # is missing, so this NameError handler looks vestigial -- kept
            # as-is to avoid changing the fallback behavior; confirm.
            except NameError:
                self._val = value
                self.is_encrypted = False
    @declared_attr
    def val(cls):
        return synonym('_val',
                       descriptor=property(cls.get_val, cls.set_val))
    @classmethod
    def setdefault(cls, key, default, deserialize_json=False):
        """
        Like a Python builtin dict object, setdefault returns the current value
        for a key, and if it isn't there, stores the default value and returns it.

        :param key: Dict key for this Variable
        :type key: String
        :param default: Default value to set and return if the variable
            isn't already in the DB
        :type default: Mixed
        :param deserialize_json: Store this as a JSON encoded value in the DB
            and un-encode it when retrieving a value
        :return: Mixed
        """
        default_sentinel = object()
        # ``get`` returns the *value* (not the ORM row), so pass
        # deserialize_json through and return the result directly.  The
        # previous code called ``json.loads(obj.val)`` / ``obj.val`` on that
        # value, which raised AttributeError whenever the key already existed.
        obj = Variable.get(key, default_var=default_sentinel,
                           deserialize_json=deserialize_json)
        if obj is default_sentinel:
            if default is not None:
                Variable.set(key, default, serialize_json=deserialize_json)
                return default
            else:
                raise ValueError('Default Value must be set')
        else:
            return obj
    @classmethod
    @provide_session
    def get(cls, key, default_var=None, deserialize_json=False, session=None):
        """Return the value stored under ``key``.

        :param default_var: returned when the key is absent; when left as
            None an absent key raises KeyError instead
        :raises KeyError: if the key does not exist and no default was given
        """
        obj = session.query(cls).filter(cls.key == key).first()
        if obj is None:
            if default_var is not None:
                return default_var
            else:
                raise KeyError('Variable {} does not exist'.format(key))
        else:
            if deserialize_json:
                return json.loads(obj.val)
            else:
                return obj.val
    @classmethod
    @provide_session
    def set(cls, key, value, serialize_json=False, session=None):
        """Store ``value`` under ``key``, replacing any existing row."""
        if serialize_json:
            stored_value = json.dumps(value)
        else:
            stored_value = value
        # delete-then-insert since there is no portable upsert
        session.query(cls).filter(cls.key == key).delete()
        session.add(Variable(key=key, val=stored_value))
        session.flush()
class XCom(Base):
    """
    Base class for XCom objects.

    An XCom row is a pickled value (serialized with dill) keyed by
    (key, task_id, dag_id, execution_date).
    """
    __tablename__ = "xcom"
    id = Column(Integer, primary_key=True)
    key = Column(String(512))
    value = Column(PickleType(pickler=dill))
    timestamp = Column(
        DateTime, default=func.now(), nullable=False)
    execution_date = Column(DateTime, nullable=False)
    # source information
    task_id = Column(String(ID_LEN), nullable=False)
    dag_id = Column(String(ID_LEN), nullable=False)
    __table_args__ = (
        Index('idx_xcom_dag_task_date', dag_id, task_id, execution_date, unique=False),
    )
    def __repr__(self):
        return '<XCom "{key}" ({task_id} @ {execution_date})>'.format(
            key=self.key,
            task_id=self.task_id,
            execution_date=self.execution_date)
    @classmethod
    @provide_session
    def set(
            cls,
            key,
            value,
            execution_date,
            task_id,
            dag_id,
            session=None):
        """
        Store an XCom value.

        Existing rows with the same (key, execution_date, task_id, dag_id)
        are deleted first, so the call acts as a replace.
        """
        session.expunge_all()
        # remove any duplicate XComs
        session.query(cls).filter(
            cls.key == key,
            cls.execution_date == execution_date,
            cls.task_id == task_id,
            cls.dag_id == dag_id).delete()
        session.commit()
        # insert new XCom
        session.add(XCom(
            key=key,
            value=value,
            execution_date=execution_date,
            task_id=task_id,
            dag_id=dag_id))
        session.commit()
    @classmethod
    @provide_session
    def get_one(
            cls,
            execution_date,
            key=None,
            task_id=None,
            dag_id=None,
            include_prior_dates=False,
            session=None):
        """
        Retrieve an XCom value, optionally meeting certain criteria

        Returns the most recent matching value (latest execution_date, then
        latest timestamp), or None implicitly when nothing matches.
        """
        filters = []
        if key:
            filters.append(cls.key == key)
        if task_id:
            filters.append(cls.task_id == task_id)
        if dag_id:
            filters.append(cls.dag_id == dag_id)
        if include_prior_dates:
            filters.append(cls.execution_date <= execution_date)
        else:
            filters.append(cls.execution_date == execution_date)
        query = (
            session.query(cls.value)
            .filter(and_(*filters))
            .order_by(cls.execution_date.desc(), cls.timestamp.desc())
            .limit(1))
        result = query.first()
        if result:
            return result.value
    @classmethod
    @provide_session
    def get_many(
            cls,
            execution_date,
            key=None,
            task_ids=None,
            dag_ids=None,
            include_prior_dates=False,
            limit=100,
            session=None):
        """
        Retrieve an XCom value, optionally meeting certain criteria

        Like get_one, but returns up to ``limit`` whole XCom rows, newest
        first.
        """
        filters = []
        if key:
            filters.append(cls.key == key)
        if task_ids:
            filters.append(cls.task_id.in_(as_tuple(task_ids)))
        if dag_ids:
            filters.append(cls.dag_id.in_(as_tuple(dag_ids)))
        if include_prior_dates:
            filters.append(cls.execution_date <= execution_date)
        else:
            filters.append(cls.execution_date == execution_date)
        query = (
            session.query(cls)
            .filter(and_(*filters))
            .order_by(cls.execution_date.desc(), cls.timestamp.desc())
            .limit(limit))
        return query.all()
    @classmethod
    @provide_session
    def delete(cls, xcoms, session=None):
        """Delete the given XCom row(s); accepts a single XCom or a list."""
        if isinstance(xcoms, XCom):
            xcoms = [xcoms]
        for xcom in xcoms:
            if not isinstance(xcom, XCom):
                raise TypeError(
                    'Expected XCom; received {}'.format(xcom.__class__.__name__)
                )
            session.delete(xcom)
        session.commit()
class DagStat(Base):
    """
    Per-(dag_id, state) counters of DagRuns, recomputed lazily: writers
    mark rows dirty and ``update`` refreshes the counts.
    """
    __tablename__ = "dag_stats"
    dag_id = Column(String(ID_LEN), primary_key=True)
    state = Column(String(50), primary_key=True)
    count = Column(Integer, default=0)
    dirty = Column(Boolean, default=False)
    def __init__(self, dag_id, state, count=0, dirty=False):
        self.dag_id = dag_id
        self.state = state
        self.count = count
        self.dirty = dirty
    @staticmethod
    @provide_session
    def set_dirty(dag_id, session=None):
        """
        :param dag_id: the dag_id to mark dirty
        :param session: database session
        :return:
        """
        # make sure a row exists for every dag state before locking them
        DagStat.create(dag_id=dag_id, session=session)
        try:
            stats = session.query(DagStat).filter(
                DagStat.dag_id == dag_id
            ).with_for_update().all()
            for stat in stats:
                stat.dirty = True
            session.commit()
        # best effort: failures are logged and rolled back, never raised
        except Exception as e:
            session.rollback()
            logging.warning("Could not update dag stats for {}".format(dag_id))
            logging.exception(e)
    @staticmethod
    @provide_session
    def update(dag_ids=None, dirty_only=True, session=None):
        """
        Updates the stats for dirty/out-of-sync dags

        :param dag_ids: dag_ids to be updated
        :type dag_ids: list
        :param dirty_only: only updated for marked dirty, defaults to True
        :type dirty_only: bool
        :param session: db session to use
        :type session: Session
        """
        try:
            qry = session.query(DagStat)
            if dag_ids:
                qry = qry.filter(DagStat.dag_id.in_(set(dag_ids)))
            if dirty_only:
                qry = qry.filter(DagStat.dirty == True)
            # lock the rows being refreshed for the duration of the update
            qry = qry.with_for_update().all()
            ids = set([dag_stat.dag_id for dag_stat in qry])
            # avoid querying with an empty IN clause
            if len(ids) == 0:
                session.commit()
                return
            dagstat_states = set(itertools.product(ids, State.dag_states))
            qry = (
                session.query(DagRun.dag_id, DagRun.state, func.count('*'))
                .filter(DagRun.dag_id.in_(ids))
                .group_by(DagRun.dag_id, DagRun.state)
            )
            counts = {(dag_id, state): count for dag_id, state, count in qry}
            # write a fresh, clean row for every (dag_id, state) pair,
            # including zero counts for states with no runs
            for dag_id, state in dagstat_states:
                count = 0
                if (dag_id, state) in counts:
                    count = counts[(dag_id, state)]
                session.merge(
                    DagStat(dag_id=dag_id, state=state, count=count, dirty=False)
                )
            session.commit()
        except Exception as e:
            session.rollback()
            logging.warning("Could not update dag stat table")
            logging.exception(e)
    @staticmethod
    @provide_session
    def create(dag_id, session=None):
        """
        Creates the missing states the stats table for the dag specified

        :param dag_id: dag id of the dag to create stats for
        :param session: database session
        :return:
        """
        # unfortunately sqlalchemy does not know upsert
        qry = session.query(DagStat).filter(DagStat.dag_id == dag_id).all()
        states = [dag_stat.state for dag_stat in qry]
        for state in State.dag_states:
            if state not in states:
                try:
                    session.merge(DagStat(dag_id=dag_id, state=state))
                    session.commit()
                # a concurrent insert can race this merge; log and continue
                except Exception as e:
                    session.rollback()
                    logging.warning("Could not create stat record")
                    logging.exception(e)
class DagRun(Base):
    """
    DagRun describes an instance of a Dag. It can be created
    by the scheduler (for regular runs) or by an external trigger
    """
    __tablename__ = "dag_run"
    ID_PREFIX = 'scheduled__'
    ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
    # dependency context used for deadlock detection in update_state;
    # ignores the retry-period dep since a task up for retry is not deadlocked
    DEADLOCK_CHECK_DEP_CONTEXT = DepContext(ignore_in_retry_period=True)
    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN))
    execution_date = Column(DateTime, default=func.now())
    start_date = Column(DateTime, default=func.now())
    end_date = Column(DateTime)
    _state = Column('state', String(50), default=State.RUNNING)
    run_id = Column(String(ID_LEN))
    external_trigger = Column(Boolean, default=True)
    conf = Column(PickleType)
    # transient reference to the DAG object; not persisted, set by callers
    dag = None
    __table_args__ = (
        Index('dr_run_id', dag_id, run_id, unique=True),
    )
    def __repr__(self):
        return (
            '<DagRun {dag_id} @ {execution_date}: {run_id}, '
            'externally triggered: {external_trigger}>'
        ).format(
            dag_id=self.dag_id,
            execution_date=self.execution_date,
            run_id=self.run_id,
            external_trigger=self.external_trigger)
    def get_state(self):
        return self._state
    def set_state(self, state):
        # only a real transition marks the dag's stats dirty
        if self._state != state:
            self._state = state
            if self.dag_id is not None:
                # something really weird goes on here: if you try to close the session
                # dag runs will end up detached
                session = settings.Session()
                DagStat.set_dirty(self.dag_id, session=session)
    @declared_attr
    def state(self):
        # expose _state as a property-backed "state" attribute so every
        # assignment goes through set_state (and thus DagStat.set_dirty)
        return synonym('_state',
                       descriptor=property(self.get_state, self.set_state))
    @classmethod
    def id_for_date(cls, date, prefix=ID_FORMAT_PREFIX):
        # e.g. 'scheduled__2017-01-01T00:00:00' (seconds precision)
        return prefix.format(date.isoformat()[:19])
    @provide_session
    def refresh_from_db(self, session=None):
        """
        Reloads the current dagrun from the database

        :param session: database session
        """
        DR = DagRun
        exec_date = func.cast(self.execution_date, DateTime)
        dr = session.query(DR).filter(
            DR.dag_id == self.dag_id,
            func.cast(DR.execution_date, DateTime) == exec_date,
            DR.run_id == self.run_id
        ).one()
        self.id = dr.id
        self.state = dr.state
    @staticmethod
    @provide_session
    def find(dag_id=None, run_id=None, execution_date=None,
             state=None, external_trigger=None, no_backfills=False,
             session=None):
        """
        Returns a set of dag runs for the given search criteria.

        :param dag_id: the dag_id to find dag runs for
        :type dag_id: integer, list
        :param run_id: defines the run id for this dag run
        :type run_id: string
        :param execution_date: the execution date
        :type execution_date: datetime
        :param state: the state of the dag run
        :type state: State
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param no_backfills: return no backfills (True), return all (False).
            Defaults to False
        :type no_backfills: bool
        :param session: database session
        :type session: Session
        """
        DR = DagRun
        qry = session.query(DR)
        if dag_id:
            qry = qry.filter(DR.dag_id == dag_id)
        if run_id:
            qry = qry.filter(DR.run_id == run_id)
        if execution_date:
            # a list of dates becomes an IN clause
            if isinstance(execution_date, list):
                qry = qry.filter(DR.execution_date.in_(execution_date))
            else:
                qry = qry.filter(DR.execution_date == execution_date)
        if state:
            qry = qry.filter(DR.state == state)
        if external_trigger is not None:
            qry = qry.filter(DR.external_trigger == external_trigger)
        if no_backfills:
            # in order to prevent a circular dependency
            from airflow.jobs import BackfillJob
            qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
        dr = qry.order_by(DR.execution_date).all()
        return dr
    @provide_session
    def get_task_instances(self, state=None, session=None):
        """
        Returns the task instances for this dag run

        :param state: a single state or a list of states to filter by;
            a None entry in the list also matches NULL state columns
        """
        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
        )
        if state:
            if isinstance(state, six.string_types):
                tis = tis.filter(TI.state == state)
            else:
                # this is required to deal with NULL values
                if None in state:
                    tis = tis.filter(
                        or_(TI.state.in_(state),
                            TI.state.is_(None))
                    )
                else:
                    tis = tis.filter(TI.state.in_(state))
        # a partial dag only owns a subset of the task ids
        if self.dag and self.dag.partial:
            tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
        return tis.all()
    @provide_session
    def get_task_instance(self, task_id, session=None):
        """
        Returns the task instance specified by task_id for this dag run

        :param task_id: the task id
        """
        TI = TaskInstance
        ti = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
            TI.task_id == task_id
        ).first()
        return ti
    def get_dag(self):
        """
        Returns the Dag associated with this DagRun.

        :return: DAG
        :raises AirflowException: if the transient ``dag`` attribute was
            never assigned by the caller
        """
        if not self.dag:
            raise AirflowException("The DAG (.dag) for {} needs to be set"
                                   .format(self))
        return self.dag
    @provide_session
    def get_previous_dagrun(self, session=None):
        """The previous DagRun, if there is one"""
        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date < self.execution_date
        ).order_by(
            DagRun.execution_date.desc()
        ).first()
    @provide_session
    def get_previous_scheduled_dagrun(self, session=None):
        """The previous, SCHEDULED DagRun, if there is one"""
        dag = self.get_dag()
        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date == dag.previous_schedule(self.execution_date)
        ).first()
    @provide_session
    def update_state(self, session=None):
        """
        Determines the overall state of the DagRun based on the state
        of its TaskInstances.

        :return: State
        """
        dag = self.get_dag()
        tis = self.get_task_instances(session=session)
        logging.info("Updating state for {} considering {} task(s)"
                     .format(self, len(tis)))
        for ti in list(tis):
            # skip in db?
            if ti.state == State.REMOVED:
                tis.remove(ti)
            else:
                ti.task = dag.get_task(ti.task_id)
        # pre-calculate
        # db is faster
        start_dttm = datetime.now()
        unfinished_tasks = self.get_task_instances(
            state=State.unfinished(),
            session=session
        )
        none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
        # small speed up
        if unfinished_tasks and none_depends_on_past:
            # todo: this can actually get pretty slow: one task costs between 0.01-015s
            no_dependencies_met = all(
                # Use a special dependency context that ignores task's up for retry
                # dependency, since a task that is up for retry is not necessarily
                # deadlocked.
                not t.are_dependencies_met(dep_context=self.DEADLOCK_CHECK_DEP_CONTEXT,
                                           session=session)
                for t in unfinished_tasks)
        duration = (datetime.now() - start_dttm).total_seconds() * 1000
        Stats.timing("dagrun.dependency-check.{}.{}".
                     format(self.dag_id, self.execution_date), duration)
        # future: remove the check on adhoc tasks (=active_tasks)
        if len(tis) == len(dag.active_tasks):
            root_ids = [t.task_id for t in dag.roots]
            roots = [t for t in tis if t.task_id in root_ids]
            # if all roots finished and at least on failed, the run failed
            if (not unfinished_tasks and
                    any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
                logging.info('Marking run {} failed'.format(self))
                self.state = State.FAILED
            # if all roots succeeded and no unfinished tasks, the run succeeded
            elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
                                              for r in roots):
                logging.info('Marking run {} successful'.format(self))
                self.state = State.SUCCESS
            # if *all tasks* are deadlocked, the run failed
            elif unfinished_tasks and none_depends_on_past and no_dependencies_met:
                logging.info(
                    'Deadlock; marking run {} failed'.format(self))
                self.state = State.FAILED
            # finally, if the roots aren't done, the dag is still running
            else:
                self.state = State.RUNNING
        # todo: determine we want to use with_for_update to make sure to lock the run
        session.merge(self)
        session.commit()
        return self.state
    @provide_session
    def verify_integrity(self, session=None):
        """
        Verifies the DagRun by checking for removed tasks or tasks that are not in the
        database yet. It will set state to removed or add the task if required.
        """
        dag = self.get_dag()
        tis = self.get_task_instances(session=session)
        # check for removed tasks
        task_ids = []
        for ti in tis:
            task_ids.append(ti.task_id)
            try:
                dag.get_task(ti.task_id)
            except AirflowException:
                # the task instance refers to a task no longer in the dag
                if self.state is not State.RUNNING and not dag.partial:
                    ti.state = State.REMOVED
        # check for missing tasks
        for task in dag.tasks:
            if task.adhoc:
                continue
            if task.task_id not in task_ids:
                ti = TaskInstance(task, self.execution_date)
                session.add(ti)
        session.commit()
    @staticmethod
    def get_run(session, dag_id, execution_date):
        """
        :param dag_id: DAG ID
        :type dag_id: unicode
        :param execution_date: execution date
        :type execution_date: datetime
        :return: DagRun corresponding to the given dag_id and execution date
            if one exists. None otherwise.
        :rtype: DagRun
        """
        # only scheduler-created runs are considered (external_trigger False)
        qry = session.query(DagRun).filter(
            DagRun.dag_id == dag_id,
            DagRun.external_trigger == False,
            DagRun.execution_date == execution_date,
        )
        return qry.first()
    @property
    def is_backfill(self):
        from airflow.jobs import BackfillJob
        return self.run_id.startswith(BackfillJob.ID_PREFIX)
    @classmethod
    @provide_session
    def get_latest_runs(cls, session):
        """Returns the latest running DagRun for each DAG. """
        subquery = (
            session
            .query(
                cls.dag_id,
                func.max(cls.execution_date).label('execution_date'))
            .filter(cls.state == State.RUNNING)
            .group_by(cls.dag_id)
            .subquery()
        )
        dagruns = (
            session
            .query(cls)
            .join(subquery,
                  and_(cls.dag_id == subquery.c.dag_id,
                       cls.execution_date == subquery.c.execution_date))
            .all()
        )
        return dagruns
class Pool(Base):
    """
    ORM model for a named pool of task slots used to cap concurrency.
    """
    __tablename__ = "slot_pool"
    id = Column(Integer, primary_key=True)
    pool = Column(String(50), unique=True)
    slots = Column(Integer, default=0)
    description = Column(Text)
    def __repr__(self):
        return self.pool
    def to_json(self):
        """Return a plain-dict representation of this pool."""
        return {
            'id': self.id,
            'pool': self.pool,
            'slots': self.slots,
            'description': self.description,
        }
    @provide_session
    def used_slots(self, session):
        """
        Returns the number of slots used at the moment
        (task instances in this pool in the RUNNING state)
        """
        running = (
            session
            .query(TaskInstance)
            .filter(TaskInstance.pool == self.pool)
            .filter(TaskInstance.state == State.RUNNING)
            .count()
        )
        return running
    @provide_session
    def queued_slots(self, session):
        """
        Returns the number of slots used at the moment
        (task instances in this pool in the QUEUED state)
        """
        return (
            session
            .query(TaskInstance)
            .filter(TaskInstance.pool == self.pool)
            .filter(TaskInstance.state == State.QUEUED)
            .count()
        )
    @provide_session
    def open_slots(self, session):
        """
        Returns the number of slots open at the moment
        (total slots minus running minus queued; can go negative
        when the pool is overcommitted)
        """
        used_slots = self.used_slots(session=session)
        queued_slots = self.queued_slots(session=session)
        return self.slots - used_slots - queued_slots
class SlaMiss(Base):
    """
    Model that stores a history of the SLA that have been missed.
    It is used to keep track of SLA failures over time and to avoid double
    triggering alert emails.
    """
    __tablename__ = "sla_miss"
    # a miss is uniquely identified by (task_id, dag_id, execution_date)
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(DateTime, primary_key=True)
    email_sent = Column(Boolean, default=False)
    timestamp = Column(DateTime)
    description = Column(Text)
    notification_sent = Column(Boolean, default=False)
    def __repr__(self):
        return str((
            self.dag_id, self.task_id, self.execution_date.isoformat()))
class ImportError(Base):
    """
    ORM model recording a failure to import a DAG file
    (filename plus the captured stacktrace).

    NOTE: this class shadows the builtin ``ImportError`` within this module.
    """
    __tablename__ = "import_error"
    id = Column(Integer, primary_key=True)
    timestamp = Column(DateTime)
    filename = Column(String(1024))
    stacktrace = Column(Text)
Add hostname to the "stacktrace" in ImportErrors
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
from builtins import str
from builtins import object, bytes
import copy
from collections import namedtuple
from datetime import datetime, timedelta
import dill
import functools
import getpass
import imp
import importlib
import itertools
import inspect
import zipfile
import jinja2
import json
import logging
import os
import pickle
import re
import signal
import socket
import sys
import textwrap
import traceback
import warnings
import hashlib
from urllib.parse import urlparse
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index, Float)
from sqlalchemy import func, or_, and_
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.orm import reconstructor, relationship, synonym
from croniter import croniter
import six
from airflow import settings, utils
from airflow.executors import GetDefaultExecutor, LocalExecutor
from airflow import configuration
from airflow.exceptions import AirflowException, AirflowSkipException, AirflowTaskTimeout
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
from airflow.utils.helpers import (
as_tuple, is_container, is_in, validate_key, pprinttable)
from airflow.utils.logging import LoggingMixin
from airflow.utils.operator_resources import Resources
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.trigger_rule import TriggerRule
# declarative base shared by every ORM model in this module
Base = declarative_base()
# maximum length of identifier columns (dag_id, task_id, ...)
ID_LEN = 250
# XCom key under which a task's returned value is stored
XCOM_RETURN_KEY = 'return_value'
Stats = settings.Stats
def get_fernet():
    """
    Deferred load of Fernet key.

    This function could fail either because Cryptography is not installed
    or because the Fernet key is invalid.
    """
    from cryptography.fernet import Fernet
    fernet_key = configuration.get('core', 'FERNET_KEY').encode('utf-8')
    return Fernet(fernet_key)
# MySQL's plain TEXT type is size-limited, so use LONGTEXT there; every
# other backend keeps the generic Text type.
if 'mysql' in settings.SQL_ALCHEMY_CONN:
    LongText = LONGTEXT
else:
    LongText = Text
# used by DAG context_managers
_CONTEXT_MANAGER_DAG = None
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None):
    """
    Clears a set of task instances, but makes sure the running ones
    get killed.

    :param tis: task instances to clear
    :param session: database session the changes are merged into
    :param activate_dag_runs: when True, the dag runs covering the cleared
        task instances are set back to RUNNING with a fresh start_date
    :param dag: the DAG the instances belong to, used to look up per-task
        retry counts when recomputing max_tries
    """
    job_ids = []
    for ti in tis:
        # running instances are shut down (their jobs too, below) rather
        # than reset directly
        if ti.state == State.RUNNING:
            if ti.job_id:
                ti.state = State.SHUTDOWN
                job_ids.append(ti.job_id)
        else:
            task_id = ti.task_id
            if dag and dag.has_task(task_id):
                task = dag.get_task(task_id)
                task_retries = task.retries
                # allow a full fresh set of retries from the current attempt
                ti.max_tries = ti.try_number + task_retries
            else:
                # Ignore errors when updating max_tries if dag is None or
                # task not found in dag since database records could be
                # outdated. We make max_tries the maximum value of its
                # original max_tries or the current task try number.
                ti.max_tries = max(ti.max_tries, ti.try_number)
            ti.state = State.NONE
            session.merge(ti)
    if job_ids:
        from airflow.jobs import BaseJob as BJ
        for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
            job.state = State.SHUTDOWN
    if activate_dag_runs and tis:
        drs = session.query(DagRun).filter(
            DagRun.dag_id.in_({ti.dag_id for ti in tis}),
            DagRun.execution_date.in_({ti.execution_date for ti in tis}),
        ).all()
        for dr in drs:
            dr.state = State.RUNNING
            dr.start_date = datetime.now()
class DagBag(BaseDagBag, LoggingMixin):
"""
A dagbag is a collection of dags, parsed out of a folder tree and has high
level configuration settings, like what database to use as a backend and
what executor to use to fire off tasks. This makes it easier to run
distinct environments for say production and development, tests, or for
different teams or security profiles. What would have been system level
settings are now dagbag level so that one system can run multiple,
independent settings sets.
:param dag_folder: the folder to scan to find DAGs
:type dag_folder: unicode
:param executor: the executor to use when executing task instances
in this DagBag
:param include_examples: whether to include the examples that ship
with airflow or not
:type include_examples: bool
:param sync_to_db: whether to sync the properties of the DAGs to
the metadata DB while finding them, typically should be done
by the scheduler job only
:type sync_to_db: bool
"""
    def __init__(
            self,
            dag_folder=None,
            executor=None,
            include_examples=configuration.getboolean('core', 'LOAD_EXAMPLES')):
        # do not use default arg in signature, to fix import cycle on plugin load
        if executor is None:
            executor = GetDefaultExecutor()
        dag_folder = dag_folder or settings.DAGS_FOLDER
        self.logger.info("Filling up the DagBag from {}".format(dag_folder))
        self.dag_folder = dag_folder
        # dag_id -> DAG
        self.dags = {}
        # the file's last modified timestamp when we last read it
        self.file_last_changed = {}
        self.executor = executor
        # filepath -> error message for files that failed to import
        self.import_errors = {}
        # example dags ship inside the package next to this module
        if include_examples:
            example_dag_folder = os.path.join(
                os.path.dirname(__file__),
                'example_dags')
            self.collect_dags(example_dag_folder)
        self.collect_dags(dag_folder)
def size(self):
"""
:return: the amount of dags contained in this dagbag
"""
return len(self.dags)
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
"""
# If asking for a known subdag, we want to refresh the parent
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
):
# Reprocess source file
found_dags = self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [dag.dag_id for dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
Given a path to a python module or zip file, this method imports
the module and look for dag objects within it.
"""
found_dags = []
# todo: raise exception?
if not os.path.isfile(filepath):
return found_dags
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if only_if_updated \
and filepath in self.file_last_changed \
and file_last_changed_on_disk == self.file_last_changed[filepath]:
return found_dags
except Exception as e:
logging.exception(e)
return found_dags
mods = []
if not zipfile.is_zipfile(filepath):
if safe_mode and os.path.isfile(filepath):
with open(filepath, 'rb') as f:
content = f.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
self.logger.debug("Importing {}".format(filepath))
org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
mod_name = ('unusual_prefix_' +
hashlib.sha1(filepath.encode('utf-8')).hexdigest() +
'_' + org_mod_name)
if mod_name in sys.modules:
del sys.modules[mod_name]
with timeout(configuration.getint('core', "DAGBAG_IMPORT_TIMEOUT")):
try:
m = imp.load_source(mod_name, filepath)
mods.append(m)
except Exception as e:
import socket
host = socket.gethostname()
self.logger.exception("Failed to import: " + filepath)
self.import_errors[filepath] = "{}: {}".format(host, str(e))
self.file_last_changed[filepath] = file_last_changed_on_disk
else:
zip_file = zipfile.ZipFile(filepath)
for mod in zip_file.infolist():
head, _ = os.path.split(mod.filename)
mod_name, ext = os.path.splitext(mod.filename)
if not head and (ext == '.py' or ext == '.pyc'):
if mod_name == '__init__':
self.logger.warning("Found __init__.{0} at root of {1}".
format(ext, filepath))
if safe_mode:
with zip_file.open(mod.filename) as zf:
self.logger.debug("Reading {} from {}".
format(mod.filename, filepath))
content = zf.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = (
file_last_changed_on_disk)
# todo: create ignore list
return found_dags
if mod_name in sys.modules:
del sys.modules[mod_name]
try:
sys.path.insert(0, filepath)
m = importlib.import_module(mod_name)
mods.append(m)
except Exception as e:
self.logger.exception("Failed to import: " + filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
for m in mods:
for dag in list(m.__dict__.values()):
if isinstance(dag, DAG):
if not dag.full_filepath:
dag.full_filepath = filepath
dag.is_subdag = False
self.bag_dag(dag, parent_dag=dag, root_dag=dag)
found_dags.append(dag)
found_dags += dag.subdags
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
@provide_session
def kill_zombies(self, session=None):
"""
Fails tasks that haven't had a heartbeat in too long
"""
from airflow.jobs import LocalTaskJob as LJ
self.logger.info("Finding 'running' jobs without a recent heartbeat")
TI = TaskInstance
secs = (
configuration.getint('scheduler', 'scheduler_zombie_task_threshold'))
limit_dttm = datetime.now() - timedelta(seconds=secs)
self.logger.info(
"Failing jobs without heartbeat after {}".format(limit_dttm))
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
))
.all()
)
for ti in tis:
if ti and ti.dag_id in self.dags:
dag = self.dags[ti.dag_id]
if ti.task_id in dag.task_ids:
task = dag.get_task(ti.task_id)
ti.task = task
ti.handle_failure("{} killed as zombie".format(ti))
self.logger.info(
'Marked zombie job {} as failed'.format(ti))
Stats.incr('zombies_killed')
session.commit()
def bag_dag(self, dag, parent_dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
"""
self.dags[dag.dag_id] = dag
dag.resolve_template_files()
dag.last_loaded = datetime.now()
for task in dag.tasks:
settings.policy(task)
for subdag in dag.subdags:
subdag.full_filepath = dag.full_filepath
subdag.parent_dag = dag
subdag.is_subdag = True
self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
self.logger.debug('Loaded DAG {dag}'.format(**locals()))
def collect_dags(
self,
dag_folder=None,
only_if_updated=True):
"""
Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a .airflowignore file is found while processing,
the directory, it will behaves much like a .gitignore does,
ignoring files that match any of the regex patterns specified
in the file.
"""
start_dttm = datetime.now()
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
FileLoadStat = namedtuple(
'FileLoadStat', "file duration dag_num task_num dags")
if os.path.isfile(dag_folder):
self.process_file(dag_folder, only_if_updated=only_if_updated)
elif os.path.isdir(dag_folder):
patterns = []
for root, dirs, files in os.walk(dag_folder, followlinks=True):
ignore_file = [f for f in files if f == '.airflowignore']
if ignore_file:
f = open(os.path.join(root, ignore_file[0]), 'r')
patterns += [p for p in f.read().split('\n') if p]
f.close()
for f in files:
try:
filepath = os.path.join(root, f)
if not os.path.isfile(filepath):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(filepath)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(filepath):
continue
if not any(
[re.findall(p, filepath) for p in patterns]):
ts = datetime.now()
found_dags = self.process_file(
filepath, only_if_updated=only_if_updated)
td = datetime.now() - ts
td = td.total_seconds() + (
float(td.microseconds) / 1000000)
stats.append(FileLoadStat(
filepath.replace(dag_folder, ''),
td,
len(found_dags),
sum([len(dag.tasks) for dag in found_dags]),
str([dag.dag_id for dag in found_dags]),
))
except Exception as e:
logging.warning(e)
Stats.gauge(
'collect_dags', (datetime.now() - start_dttm).total_seconds(), 1)
Stats.gauge(
'dagbag_size', len(self.dags), 1)
Stats.gauge(
'dagbag_import_errors', len(self.import_errors), 1)
self.dagbag_stats = sorted(
stats, key=lambda x: x.duration, reverse=True)
def dagbag_report(self):
"""Prints a report around DagBag loading stats"""
report = textwrap.dedent("""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}
{table}
""")
stats = self.dagbag_stats
return report.format(
dag_folder=self.dag_folder,
duration=sum([o.duration for o in stats]),
dag_num=sum([o.dag_num for o in stats]),
task_num=sum([o.dag_num for o in stats]),
table=pprinttable(stats),
)
def deactivate_inactive_dags(self):
active_dag_ids = [dag.dag_id for dag in list(self.dags.values())]
session = settings.Session()
for dag in session.query(
DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
dag.is_active = False
session.merge(dag)
session.commit()
session.close()
def paused_dags(self):
session = settings.Session()
dag_ids = [dp.dag_id for dp in session.query(DagModel).filter(
DagModel.is_paused.__eq__(True))]
session.commit()
session.close()
return dag_ids
class User(Base):
    """ORM model for an Airflow web UI user."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True)
    username = Column(String(ID_LEN), unique=True)
    email = Column(String(500))

    # Plain class attribute, not a DB column: default privilege flag.
    superuser = False

    def __repr__(self):
        """The username is the printable form of a user."""
        return self.username

    def get_id(self):
        """Return the primary key as a string."""
        return str(self.id)

    def is_superuser(self):
        """Whether this user has superuser privileges."""
        return self.superuser
class Connection(Base):
    """
    Placeholder to store information about different database instances
    connection information. The idea here is that scripts use references to
    database instances (conn_id) instead of hard coding hostname, logins and
    passwords when using operators or hooks.
    """
    __tablename__ = "connection"

    id = Column(Integer(), primary_key=True)
    conn_id = Column(String(ID_LEN))
    conn_type = Column(String(500))
    host = Column(String(500))
    schema = Column(String(500))
    login = Column(String(500))
    # Password/extra are stored (optionally Fernet-encrypted) in these
    # private columns and exposed through the `password`/`extra` synonyms.
    _password = Column('password', String(5000))
    port = Column(Integer())
    is_encrypted = Column(Boolean, unique=False, default=False)
    is_extra_encrypted = Column(Boolean, unique=False, default=False)
    _extra = Column('extra', String(5000))

    # (value, label) pairs offered by the connection type dropdown.
    _types = [
        ('fs', 'File (path)'),
        ('ftp', 'FTP',),
        ('google_cloud_platform', 'Google Cloud Platform'),
        ('hdfs', 'HDFS',),
        ('http', 'HTTP',),
        ('hive_cli', 'Hive Client Wrapper',),
        ('hive_metastore', 'Hive Metastore Thrift',),
        ('hiveserver2', 'Hive Server 2 Thrift',),
        ('jdbc', 'Jdbc Connection',),
        ('mysql', 'MySQL',),
        ('postgres', 'Postgres',),
        ('oracle', 'Oracle',),
        ('vertica', 'Vertica',),
        ('presto', 'Presto',),
        ('s3', 'S3',),
        ('samba', 'Samba',),
        ('sqlite', 'Sqlite',),
        ('ssh', 'SSH',),
        ('cloudant', 'IBM Cloudant',),
        ('mssql', 'Microsoft SQL Server'),
        ('mesos_framework-id', 'Mesos Framework ID'),
        ('jira', 'JIRA',),
        ('redis', 'Redis',),
        ('wasb', 'Azure Blob Storage'),
        ('databricks', 'Databricks',),
    ]

    def __init__(
            self, conn_id=None, conn_type=None,
            host=None, login=None, password=None,
            schema=None, port=None, extra=None,
            uri=None):
        self.conn_id = conn_id
        if uri:
            # A URI overrides all the individual fields.
            self.parse_from_uri(uri)
        else:
            self.conn_type = conn_type
            self.host = host
            self.login = login
            self.password = password
            self.schema = schema
            self.port = port
            self.extra = extra

    def parse_from_uri(self, uri):
        """Populate the connection fields from a URI string."""
        temp_uri = urlparse(uri)
        hostname = temp_uri.hostname or ''
        # Check case-insensitively: the old test only matched the lowercase
        # escape, so a hostname containing only '%2F' was never unescaped.
        if '%2f' in hostname.lower():
            hostname = hostname.replace('%2f', '/').replace('%2F', '/')
        conn_type = temp_uri.scheme
        if conn_type == 'postgresql':
            conn_type = 'postgres'
        self.conn_type = conn_type
        self.host = hostname
        self.schema = temp_uri.path[1:]
        self.login = temp_uri.username
        self.password = temp_uri.password
        self.port = temp_uri.port

    def get_password(self):
        """Return the password, decrypting it if it is stored encrypted."""
        if self._password and self.is_encrypted:
            try:
                fernet = get_fernet()
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are not swallowed.
                raise AirflowException(
                    "Can't decrypt encrypted password for login={}, \
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
        else:
            return self._password

    def set_password(self, value):
        """Store the password, encrypting it when a Fernet key is available."""
        if value:
            try:
                fernet = get_fernet()
                self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_encrypted = True
            except NameError:
                # crypto support not installed: fall back to plaintext.
                self._password = value
                self.is_encrypted = False

    @declared_attr
    def password(cls):
        return synonym('_password',
                       descriptor=property(cls.get_password, cls.set_password))

    def get_extra(self):
        """Return the `extra` blob, decrypting it if stored encrypted."""
        if self._extra and self.is_extra_encrypted:
            try:
                fernet = get_fernet()
            except Exception:
                # Narrowed from a bare `except:` (see get_password).
                raise AirflowException(
                    "Can't decrypt `extra` params for login={},\
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
        else:
            return self._extra

    def set_extra(self, value):
        """Store the `extra` blob, encrypting it when possible."""
        if value:
            try:
                fernet = get_fernet()
                self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_extra_encrypted = True
            except NameError:
                self._extra = value
                self.is_extra_encrypted = False

    @declared_attr
    def extra(cls):
        return synonym('_extra',
                       descriptor=property(cls.get_extra, cls.set_extra))

    def get_hook(self):
        """
        Return the hook matching this connection's type, or None if the type
        is unknown or the hook's dependencies cannot be imported.
        """
        try:
            if self.conn_type == 'mysql':
                from airflow.hooks.mysql_hook import MySqlHook
                return MySqlHook(mysql_conn_id=self.conn_id)
            elif self.conn_type == 'google_cloud_platform':
                from airflow.contrib.hooks.bigquery_hook import BigQueryHook
                return BigQueryHook(bigquery_conn_id=self.conn_id)
            elif self.conn_type == 'postgres':
                from airflow.hooks.postgres_hook import PostgresHook
                return PostgresHook(postgres_conn_id=self.conn_id)
            elif self.conn_type == 'hive_cli':
                from airflow.hooks.hive_hooks import HiveCliHook
                return HiveCliHook(hive_cli_conn_id=self.conn_id)
            elif self.conn_type == 'presto':
                from airflow.hooks.presto_hook import PrestoHook
                return PrestoHook(presto_conn_id=self.conn_id)
            elif self.conn_type == 'hiveserver2':
                from airflow.hooks.hive_hooks import HiveServer2Hook
                return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
            elif self.conn_type == 'sqlite':
                from airflow.hooks.sqlite_hook import SqliteHook
                return SqliteHook(sqlite_conn_id=self.conn_id)
            elif self.conn_type == 'jdbc':
                from airflow.hooks.jdbc_hook import JdbcHook
                return JdbcHook(jdbc_conn_id=self.conn_id)
            elif self.conn_type == 'mssql':
                from airflow.hooks.mssql_hook import MsSqlHook
                return MsSqlHook(mssql_conn_id=self.conn_id)
            elif self.conn_type == 'oracle':
                from airflow.hooks.oracle_hook import OracleHook
                return OracleHook(oracle_conn_id=self.conn_id)
            elif self.conn_type == 'vertica':
                from airflow.contrib.hooks.vertica_hook import VerticaHook
                return VerticaHook(vertica_conn_id=self.conn_id)
            elif self.conn_type == 'cloudant':
                from airflow.contrib.hooks.cloudant_hook import CloudantHook
                return CloudantHook(cloudant_conn_id=self.conn_id)
            elif self.conn_type == 'jira':
                from airflow.contrib.hooks.jira_hook import JiraHook
                return JiraHook(jira_conn_id=self.conn_id)
            elif self.conn_type == 'redis':
                from airflow.contrib.hooks.redis_hook import RedisHook
                return RedisHook(redis_conn_id=self.conn_id)
            elif self.conn_type == 'wasb':
                from airflow.contrib.hooks.wasb_hook import WasbHook
                return WasbHook(wasb_conn_id=self.conn_id)
        except Exception:
            # Best-effort: narrowed from a bare `except:`. Unknown types and
            # failed hook imports still return None, as before.
            pass

    def __repr__(self):
        return self.conn_id

    @property
    def extra_dejson(self):
        """Returns the extra property by deserializing json."""
        obj = {}
        if self.extra:
            try:
                obj = json.loads(self.extra)
            except Exception as e:
                logging.exception(e)
                logging.error("Failed parsing the json for conn_id %s", self.conn_id)
        return obj
class DagPickle(Base):
    """
    Dags can originate from different places (user repos, master repo, ...)
    and also get executed in different places (different executors). This
    object represents a version of a DAG and becomes a source of truth for
    a BackfillJob execution. A pickle is a native python serialized object,
    and in this case gets stored in the database for the duration of the job.

    The executors pick up the DagPickle id and read the dag definition from
    the database.
    """
    id = Column(Integer, primary_key=True)
    pickle = Column(PickleType(pickler=dill))
    created_dttm = Column(DateTime, default=func.now())
    pickle_hash = Column(Text)

    __tablename__ = "dag_pickle"

    def __init__(self, dag):
        """Capture *dag* for pickling, stripping what cannot be serialized."""
        self.dag_id = dag.dag_id
        # The jinja environment cannot be pickled; drop it from the DAG
        # before it goes into the PickleType column.
        if hasattr(dag, 'template_env'):
            dag.template_env = None
        self.pickle_hash = hash(dag)
        self.pickle = dag
class TaskInstance(Base):
    """
    Task instances store the state of a task instance. This table is the
    authority and single source of truth around what tasks have run and the
    state they are in.

    The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or
    dag model deliberately to have more control over transactions.

    Database transactions on this table should insure double triggers and
    any confusion around what task instances are or aren't ready to run
    even while multiple schedulers may be firing task instances.
    """

    __tablename__ = "task_instance"

    # Composite primary key: a TI is uniquely identified by
    # (task_id, dag_id, execution_date).
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(DateTime, primary_key=True)
    # Wall-clock bookkeeping for the attempt.
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    duration = Column(Float)
    state = Column(String(20))
    # Retry bookkeeping: try_number is bumped each attempt, max_tries is
    # copied from the task's `retries` at construction time.
    try_number = Column(Integer, default=0)
    max_tries = Column(Integer)
    # Where and as whom the task ran.
    hostname = Column(String(1000))
    unixname = Column(String(1000))
    job_id = Column(Integer)
    pool = Column(String(50))
    queue = Column(String(50))
    priority_weight = Column(Integer)
    operator = Column(String(1000))
    queued_dttm = Column(DateTime)
    pid = Column(Integer)

    # Indexes backing the scheduler's frequent state lookups.
    __table_args__ = (
        Index('ti_dag_state', dag_id, state),
        Index('ti_state', state),
        Index('ti_state_lkp', dag_id, task_id, execution_date, state),
        Index('ti_pool', pool, state, priority_weight),
    )
def __init__(self, task, execution_date, state=None):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.execution_date = execution_date
self.task = task
self.queue = task.queue
self.pool = task.pool
self.priority_weight = task.priority_weight_total
self.try_number = 0
self.max_tries = self.task.retries
self.unixname = getpass.getuser()
self.run_as_user = task.run_as_user
if state:
self.state = state
self.hostname = ''
self.init_on_load()
    @reconstructor
    def init_on_load(self):
        """ Initialize the attributes that aren't stored in the DB. """
        # @reconstructor makes this also run when SQLAlchemy materializes a
        # row as an object, so the attribute always exists.
        self.test_mode = False  # can be changed when calling 'run'
def command(
self,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere where airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
return " ".join(self.command_as_list(
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path))
def command_as_list(
self,
mark_success=False,
ignore_all_deps=False,
ignore_task_deps=False,
ignore_depends_on_past=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere where airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
dag = self.task.dag
should_pass_filepath = not pickle_id and dag
if should_pass_filepath and dag.full_filepath != dag.filepath:
path = "DAGS_FOLDER/{}".format(dag.filepath)
elif should_pass_filepath and dag.full_filepath:
path = dag.full_filepath
else:
path = None
return TaskInstance.generate_command(
self.dag_id,
self.task_id,
self.execution_date,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
file_path=path,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path)
@staticmethod
def generate_command(dag_id,
task_id,
execution_date,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
file_path=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None
):
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: unicode
:param task_id: Task ID
:type task_id: unicode
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: bool
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: boolean
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: boolean
:param local: Whether to run the task locally
:type local: bool
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: unicode
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:type pool: unicode
:return: shell command that can be used to run the task instance
"""
iso = execution_date.isoformat()
cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
cmd.extend(["--mark_success"]) if mark_success else None
cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None
cmd.extend(["--job_id", str(job_id)]) if job_id else None
cmd.extend(["-A "]) if ignore_all_deps else None
cmd.extend(["-i"]) if ignore_task_deps else None
cmd.extend(["-I"]) if ignore_depends_on_past else None
cmd.extend(["--force"]) if ignore_ti_state else None
cmd.extend(["--local"]) if local else None
cmd.extend(["--pool", pool]) if pool else None
cmd.extend(["--raw"]) if raw else None
cmd.extend(["-sd", file_path]) if file_path else None
cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None
return cmd
@property
def log_filepath(self):
iso = self.execution_date.isoformat()
log = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
return (
"{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals()))
@property
def log_url(self):
iso = self.execution_date.isoformat()
BASE_URL = configuration.get('webserver', 'BASE_URL')
return BASE_URL + (
"/admin/airflow/log"
"?dag_id={self.dag_id}"
"&task_id={self.task_id}"
"&execution_date={iso}"
).format(**locals())
@property
def mark_success_url(self):
iso = self.execution_date.isoformat()
BASE_URL = configuration.get('webserver', 'BASE_URL')
return BASE_URL + (
"/admin/airflow/action"
"?action=success"
"&task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
@provide_session
def current_state(self, session=None):
"""
Get the very latest state from the database, if a session is passed,
we use and looking up the state becomes part of the session, otherwise
a new session is used.
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
return state
    @provide_session
    def error(self, session=None):
        """
        Forces the task instance's state to FAILED in the database.
        """
        logging.error("Recording the task instance as FAILED")
        self.state = State.FAILED
        # merge (not add): a row with this primary key may already exist.
        session.merge(self)
        session.commit()
@provide_session
def refresh_from_db(self, session=None, lock_for_update=False):
"""
Refreshes the task instance from the database based on the primary key
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
if ti:
self.state = ti.state
self.start_date = ti.start_date
self.end_date = ti.end_date
self.try_number = ti.try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.pid = ti.pid
else:
self.state = None
@provide_session
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit()
@property
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date
    def set_state(self, state, session):
        """Set this TI's state and persist it, stamping start/end to now."""
        self.state = state
        self.start_date = datetime.now()
        self.end_date = datetime.now()
        session.merge(self)
        session.commit()
@property
def is_premature(self):
"""
Returns whether a task is in UP_FOR_RETRY state and its retry interval
has elapsed.
"""
# is the task still in the retry waiting period?
return self.state == State.UP_FOR_RETRY and not self.ready_for_retry()
@provide_session
def are_dependents_done(self, session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids)
    @property
    @provide_session
    def previous_ti(self, session=None):
        """ The task instance for the task that ran before this task instance """
        dag = self.task.dag
        if dag:
            dr = self.get_dagrun(session=session)

            # LEGACY: most likely running from unit tests
            if not dr:
                # Means that this TI is NOT being run from a DR, but from a catchup
                previous_scheduled_date = dag.previous_schedule(self.execution_date)
                if not previous_scheduled_date:
                    return None

                # Synthesize an in-memory TI at the previous schedule; it is
                # not persisted.
                return TaskInstance(task=self.task,
                                    execution_date=previous_scheduled_date)

            dr.dag = dag
            # With catchup, look back to the previous *scheduled* run so
            # manually-triggered runs in between are skipped.
            if dag.catchup:
                last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
            else:
                last_dagrun = dr.get_previous_dagrun(session=session)

            if last_dagrun:
                return last_dagrun.get_task_instance(self.task_id, session=session)

        return None
@provide_session
def are_dependencies_met(
self,
dep_context=None,
session=None,
verbose=False):
"""
Returns whether or not all the conditions are met for this task instance to be run
given the context for the dependencies (e.g. a task instance being force run from
the UI will ignore some dependencies).
:param dep_context: The execution context that determines the dependencies that
should be evaluated.
:type dep_context: DepContext
:param session: database session
:type session: Session
:param verbose: whether or not to print details on failed dependencies
:type verbose: boolean
"""
dep_context = dep_context or DepContext()
failed = False
for dep_status in self.get_failed_dep_statuses(
dep_context=dep_context,
session=session):
failed = True
if verbose:
logging.info("Dependencies not met for {}, dependency '{}' FAILED: {}"
.format(self, dep_status.dep_name, dep_status.reason))
if failed:
return False
if verbose:
logging.info("Dependencies all met for {}".format(self))
return True
@provide_session
def get_failed_dep_statuses(
self,
dep_context=None,
session=None):
dep_context = dep_context or DepContext()
for dep in dep_context.deps | self.task.deps:
for dep_status in dep.get_dep_statuses(
self,
session,
dep_context):
logging.debug("{} dependency '{}' PASSED: {}, {}"
.format(self,
dep_status.dep_name,
dep_status.passed,
dep_status.reason))
if not dep_status.passed:
yield dep_status
def __repr__(self):
return (
"<TaskInstance: {ti.dag_id}.{ti.task_id} "
"{ti.execution_date} [{ti.state}]>"
).format(ti=self)
def next_retry_datetime(self):
"""
Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds.
"""
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id, self.task_id,
self.execution_date, self.try_number).encode('utf-8')).hexdigest(), 16)
# between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay
def ready_for_retry(self):
"""
Checks on whether the task instance is in the right state and timeframe
to be retried.
"""
return (self.state == State.UP_FOR_RETRY and
self.next_retry_datetime() < datetime.now())
@provide_session
def pool_full(self, session):
"""
Returns a boolean as to whether the slot pool has room for this
task to run
"""
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0
@provide_session
def get_dagrun(self, session):
"""
Returns the DagRun for this TaskInstance
:param session:
:return: DagRun
"""
dr = session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == self.execution_date
).first()
return dr
    @provide_session
    def run(
            self,
            verbose=True,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            mark_success=False,
            test_mode=False,
            job_id=None,
            pool=None,
            session=None):
        """
        Runs the task instance.

        :param verbose: whether to turn on more verbose logging
        :type verbose: boolean
        :param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
        :type ignore_all_deps: boolean
        :param ignore_depends_on_past: Ignore depends_on_past DAG attribute
        :type ignore_depends_on_past: boolean
        :param ignore_task_deps: Don't check the dependencies of this TI's task
        :type ignore_task_deps: boolean
        :param ignore_ti_state: Disregards previous task instance state
        :type ignore_ti_state: boolean
        :param mark_success: Don't run the task, mark its state as success
        :type mark_success: boolean
        :param test_mode: Doesn't record success or failure in the DB
        :type test_mode: boolean
        :param pool: specifies the pool to use to run the task instance
        :type pool: str
        """
        task = self.task
        self.pool = pool or task.pool
        self.test_mode = test_mode
        # Lock the row so no other worker can start this TI concurrently.
        self.refresh_from_db(session=session, lock_for_update=True)
        self.job_id = job_id
        self.hostname = socket.getfqdn()
        self.operator = task.__class__.__name__

        if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS:
            Stats.incr('previously_succeeded', 1, 1)
        # First gate: queueing-level dependencies.
        queue_dep_context = DepContext(
            deps=QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_ti_state=ignore_ti_state,
            ignore_depends_on_past=ignore_depends_on_past,
            ignore_task_deps=ignore_task_deps)
        if not self.are_dependencies_met(
                dep_context=queue_dep_context,
                session=session,
                verbose=True):
            session.commit()
            return

        hr = "\n" + ("-" * 80) + "\n"  # Line break

        # For reporting purposes, we report based on 1-indexed,
        # not 0-indexed lists (i.e. Attempt 1 instead of
        # Attempt 0 for the first attempt).
        msg = "Starting attempt {attempt} of {total}".format(
            attempt=self.try_number + 1,
            total=self.max_tries + 1)
        self.start_date = datetime.now()

        # Second gate: run-level dependencies (those not already checked
        # at queueing time).
        dep_context = DepContext(
            deps=RUN_DEPS - QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_depends_on_past=ignore_depends_on_past,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        runnable = self.are_dependencies_met(
            dep_context=dep_context,
            session=session,
            verbose=True)

        if not runnable and not mark_success:
            # FIXME: we might have hit concurrency limits, which means we probably
            # have been running prematurely. This should be handled in the
            # scheduling mechanism.
            self.state = State.NONE
            msg = ("FIXME: Rescheduling due to concurrency limits reached at task "
                   "runtime. Attempt {attempt} of {total}. State set to NONE.").format(
                attempt=self.try_number + 1,
                total=self.max_tries + 1)
            logging.warning(hr + msg + hr)

            self.queued_dttm = datetime.now()
            msg = "Queuing into pool {}".format(self.pool)
            logging.info(msg)
            session.merge(self)
            session.commit()
            return

        # Another worker might have started running this task instance while
        # the current worker process was blocked on refresh_from_db
        if self.state == State.RUNNING:
            msg = "Task Instance already running {}".format(self)
            logging.warning(msg)
            session.commit()
            return

        # print status message
        logging.info(hr + msg + hr)
        self.try_number += 1

        if not test_mode:
            session.add(Log(State.RUNNING, self))
        self.state = State.RUNNING
        self.pid = os.getpid()
        self.end_date = None
        if not test_mode:
            session.merge(self)
        session.commit()

        # Closing all pooled connections to prevent
        # "max number of connections reached"
        settings.engine.dispose()
        if verbose:
            if mark_success:
                msg = "Marking success for "
            else:
                msg = "Executing "
            msg += "{self.task} on {self.execution_date}"

        context = {}
        try:
            logging.info(msg.format(self=self))
            if not mark_success:
                context = self.get_template_context()

                # Work on a copy so handlers can mutate the task freely.
                task_copy = copy.copy(task)
                self.task = task_copy

                def signal_handler(signum, frame):
                    '''Setting kill signal handler'''
                    logging.error("Killing subprocess")
                    task_copy.on_kill()
                    raise AirflowException("Task received SIGTERM signal")
                signal.signal(signal.SIGTERM, signal_handler)

                # Don't clear Xcom until the task is certain to execute
                self.clear_xcom_data()

                self.render_templates()
                task_copy.pre_execute(context=context)

                # If a timeout is specified for the task, make it fail
                # if it goes beyond
                result = None
                if task_copy.execution_timeout:
                    try:
                        with timeout(int(
                                task_copy.execution_timeout.total_seconds())):
                            result = task_copy.execute(context=context)
                    except AirflowTaskTimeout:
                        task_copy.on_kill()
                        raise
                else:
                    result = task_copy.execute(context=context)

                # If the task returns a result, push an XCom containing it
                if result is not None:
                    self.xcom_push(key=XCOM_RETURN_KEY, value=result)

                # TODO remove deprecated behavior in Airflow 2.0
                try:
                    task_copy.post_execute(context=context, result=result)
                except TypeError as e:
                    if 'unexpected keyword argument' in str(e):
                        warnings.warn(
                            'BaseOperator.post_execute() now takes two '
                            'arguments, `context` and `result`, but "{}" only '
                            'expected one. This behavior is deprecated and '
                            'will be removed in a future version of '
                            'Airflow.'.format(self.task_id),
                            category=DeprecationWarning)
                        task_copy.post_execute(context=context)
                    else:
                        raise

                Stats.incr('operator_successes_{}'.format(
                    self.task.__class__.__name__), 1, 1)
            self.state = State.SUCCESS
        except AirflowSkipException:
            self.state = State.SKIPPED
        except (Exception, KeyboardInterrupt) as e:
            self.handle_failure(e, test_mode, context)
            raise

        # Recording SUCCESS
        self.end_date = datetime.now()
        self.set_duration()
        if not test_mode:
            session.add(Log(self.state, self))
            session.merge(self)
        session.commit()

        # Success callback
        try:
            if task.on_success_callback:
                task.on_success_callback(context)
        except Exception as e3:
            # Callbacks are best-effort: never fail a succeeded task here.
            logging.error("Failed when executing success callback")
            logging.exception(e3)

        session.commit()
def dry_run(self):
    """Render this instance's task templates and invoke its dry_run hook."""
    original_task = self.task
    # Work on a shallow copy so template rendering does not mutate the
    # operator object shared with the DAG.
    self.task = copy.copy(original_task)
    self.render_templates()
    self.task.dry_run()
def handle_failure(self, error, test_mode=False, context=None):
    """
    Record a task failure: set end date/duration, emit metrics, record a
    TaskFail row, decide between UP_FOR_RETRY and FAILED, send alert
    emails and run the matching callback.

    :param error: the exception (or message) that caused the failure
    :param test_mode: when True, skip persisting Log entries and TI state
    :param context: template context handed to retry/failure callbacks
    """
    logging.exception(error)
    task = self.task
    session = settings.Session()
    self.end_date = datetime.now()
    self.set_duration()
    Stats.incr('operator_failures_{}'.format(task.__class__.__name__), 1, 1)
    if not test_mode:
        session.add(Log(State.FAILED, self))

    # Log failure duration
    session.add(TaskFail(task, self.execution_date, self.start_date, self.end_date))

    # Let's go deeper
    try:
        # try_number is incremented by 1 during task instance run. So the
        # current task instance try_number is the try_number for the next
        # task instance run. We only mark task instance as FAILED if the
        # next task instance try_number exceeds the max_tries.
        if task.retries and self.try_number <= self.max_tries:
            self.state = State.UP_FOR_RETRY
            logging.info('Marking task as UP_FOR_RETRY')
            if task.email_on_retry and task.email:
                self.email_alert(error, is_retry=True)
        else:
            self.state = State.FAILED
            if task.retries:
                logging.info('All retries failed; marking task as FAILED')
            else:
                logging.info('Marking task as FAILED.')
            if task.email_on_failure and task.email:
                self.email_alert(error, is_retry=False)
    except Exception as e2:
        # Email problems must not mask the original failure handling.
        logging.error(
            'Failed to send email to: ' + str(task.email))
        logging.exception(e2)

    # Handling callbacks pessimistically
    try:
        if self.state == State.UP_FOR_RETRY and task.on_retry_callback:
            task.on_retry_callback(context)
        if self.state == State.FAILED and task.on_failure_callback:
            task.on_failure_callback(context)
    except Exception as e3:
        logging.error("Failed at executing callback")
        logging.exception(e3)

    if not test_mode:
        session.merge(self)
    session.commit()
    logging.error(str(error))
@provide_session
def get_template_context(self, session=None):
    """
    Build the Jinja template context for this task instance: date strings,
    DAG/run objects, params, macros and variable accessors.

    NOTE: ti_key_str below is filled via format(**locals()), so the local
    variable names in this function are load-bearing.
    """
    task = self.task
    from airflow import macros
    tables = None
    if 'tables' in task.params:
        tables = task.params['tables']

    # Date/time strings derived from the execution date.
    ds = self.execution_date.isoformat()[:10]
    ts = self.execution_date.isoformat()
    yesterday_ds = (self.execution_date - timedelta(1)).isoformat()[:10]
    tomorrow_ds = (self.execution_date + timedelta(1)).isoformat()[:10]

    prev_execution_date = task.dag.previous_schedule(self.execution_date)
    next_execution_date = task.dag.following_schedule(self.execution_date)

    ds_nodash = ds.replace('-', '')
    ts_nodash = ts.replace('-', '').replace(':', '')
    yesterday_ds_nodash = yesterday_ds.replace('-', '')
    tomorrow_ds_nodash = tomorrow_ds.replace('-', '')

    ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}"
    ti_key_str = ti_key_str.format(**locals())

    params = {}
    run_id = ''
    dag_run = None
    if hasattr(task, 'dag'):
        if task.dag.params:
            params.update(task.dag.params)

        # Look up the DagRun matching this TI's execution date, if any.
        dag_run = (
            session.query(DagRun)
            .filter_by(
                dag_id=task.dag.dag_id,
                execution_date=self.execution_date)
            .first()
        )
        run_id = dag_run.run_id if dag_run else None
        session.expunge_all()
        session.commit()

    if task.params:
        params.update(task.params)

    class VariableAccessor:
        """
        Wrapper around Variable. This way you can get variables in templates by using
        {var.variable_name}.
        """
        def __init__(self):
            self.var = None

        def __getattr__(self, item):
            self.var = Variable.get(item)
            return self.var

        def __repr__(self):
            return str(self.var)

    class VariableJsonAccessor:
        # Same as VariableAccessor, but deserializes the value as JSON.
        def __init__(self):
            self.var = None

        def __getattr__(self, item):
            self.var = Variable.get(item, deserialize_json=True)
            return self.var

        def __repr__(self):
            return str(self.var)

    return {
        'dag': task.dag,
        'ds': ds,
        'ds_nodash': ds_nodash,
        'ts': ts,
        'ts_nodash': ts_nodash,
        'yesterday_ds': yesterday_ds,
        'yesterday_ds_nodash': yesterday_ds_nodash,
        'tomorrow_ds': tomorrow_ds,
        'tomorrow_ds_nodash': tomorrow_ds_nodash,
        'END_DATE': ds,
        'end_date': ds,
        'dag_run': dag_run,
        'run_id': run_id,
        'execution_date': self.execution_date,
        'prev_execution_date': prev_execution_date,
        'next_execution_date': next_execution_date,
        'latest_date': ds,
        'macros': macros,
        'params': params,
        'tables': tables,
        'task': task,
        'task_instance': self,
        'ti': self,
        'task_instance_key_str': ti_key_str,
        'conf': configuration,
        'test_mode': self.test_mode,
        'var': {
            'value': VariableAccessor(),
            'json': VariableJsonAccessor()
        }
    }
def render_templates(self):
    """Render every templated field of the bound task in place."""
    task = self.task
    jinja_context = self.get_template_context()
    if hasattr(self, 'task') and hasattr(self.task, 'dag'):
        if self.task.dag.user_defined_macros:
            # Make DAG-level user macros available to the templates.
            jinja_context.update(self.task.dag.user_defined_macros)

    render = self.task.render_template
    for field_name in task.__class__.template_fields:
        raw_value = getattr(task, field_name)
        if raw_value:
            setattr(task, field_name, render(field_name, raw_value, jinja_context))
def email_alert(self, exception, is_retry=False):
    """
    Send an alert email for this task instance.

    NOTE: title and body are filled via format(**locals()), so the local
    names below (task, self, exception, ...) are load-bearing.
    NOTE(review): is_retry is accepted but not used in the body; callers
    pass it from handle_failure -- confirm whether it should alter text.

    :param exception: the error being reported; newlines become <br>
    :param is_retry: whether this alert is for a retry (currently unused)
    """
    task = self.task
    title = "Airflow alert: {self}".format(**locals())
    exception = str(exception).replace('\n', '<br>')
    # For reporting purposes, we report based on 1-indexed,
    # not 0-indexed lists (i.e. Try 1 instead of
    # Try 0 for the first attempt).
    body = (
        "Try {try_number} out of {max_tries}<br>"
        "Exception:<br>{exception}<br>"
        "Log: <a href='{self.log_url}'>Link</a><br>"
        "Host: {self.hostname}<br>"
        "Log file: {self.log_filepath}<br>"
        "Mark success: <a href='{self.mark_success_url}'>Link</a><br>"
    ).format(try_number=self.try_number + 1, max_tries=self.max_tries + 1, **locals())
    send_email(task.email, title, body)
def set_duration(self):
    """Store the run duration in seconds, or None when either date is unset."""
    if self.start_date and self.end_date:
        self.duration = (self.end_date - self.start_date).total_seconds()
    else:
        self.duration = None
def xcom_push(
        self,
        key,
        value,
        execution_date=None):
    """
    Make an XCom available for other tasks to pull.

    :param key: key under which the value is stored
    :param value: any pickleable object; it is pickled into the database
    :param execution_date: when given, the XCom is hidden until this
        (future) date, e.g. to message a task on a later schedule
    :raises ValueError: if execution_date is before this TI's own date
    """
    if execution_date is not None and execution_date < self.execution_date:
        raise ValueError(
            'execution_date can not be in the past (current '
            'execution_date is {}; received {})'.format(
                self.execution_date, execution_date))

    XCom.set(
        key=key,
        value=value,
        task_id=self.task_id,
        dag_id=self.dag_id,
        execution_date=execution_date or self.execution_date)
def xcom_pull(
        self,
        task_ids,
        dag_id=None,
        key=XCOM_RETURN_KEY,
        include_prior_dates=False):
    """
    Pull XCom values, optionally filtered by key, DAG and date.

    By default only XComs stored under XCOM_RETURN_KEY (values returned
    by tasks) are considered; pass key=None to lift that filter. With a
    single task id the most recent matching value is returned; with an
    iterable of ids a tuple of values is returned. No match yields None.

    :param task_ids: one task id, an iterable of ids, or None for any task
    :param dag_id: source DAG id; defaults to this task's DAG
    :param key: XCom key filter; None disables key filtering
    :param include_prior_dates: also consider earlier execution dates
    """
    if dag_id is None:
        dag_id = self.dag_id

    def fetch_one(task_id):
        # Delegate the actual lookup to the XCom model.
        return XCom.get_one(
            execution_date=self.execution_date,
            key=key,
            dag_id=dag_id,
            include_prior_dates=include_prior_dates,
            task_id=task_id)

    if is_container(task_ids):
        return tuple(fetch_one(t) for t in task_ids)
    return fetch_one(task_ids)
class TaskFail(Base):
    """
    TaskFail tracks the failed run durations of each task instance.
    """

    __tablename__ = "task_fail"

    # A failure row is identified by the task instance it belongs to.
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(DateTime, primary_key=True)
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    duration = Column(Float)

    def __init__(self, task, execution_date, start_date, end_date):
        """Record one failed attempt of *task* at *execution_date*."""
        self.dag_id = task.dag_id
        self.task_id = task.task_id
        self.execution_date = execution_date
        self.start_date = start_date
        self.end_date = end_date
        # Duration of the failed attempt, in seconds.
        self.duration = (self.end_date - self.start_date).total_seconds()
class Log(Base):
    """
    Used to actively log events to the database
    """

    __tablename__ = "log"

    id = Column(Integer, primary_key=True)
    dttm = Column(DateTime)
    dag_id = Column(String(ID_LEN))
    task_id = Column(String(ID_LEN))
    event = Column(String(30))
    execution_date = Column(DateTime)
    owner = Column(String(500))
    extra = Column(Text)

    def __init__(self, event, task_instance, owner=None, extra=None, **kwargs):
        """
        Build a log row for *event*, taking identifiers from
        *task_instance* when given; explicit kwargs (task_id, dag_id,
        execution_date) override the task instance's values.
        """
        self.dttm = datetime.now()
        self.event = event
        self.extra = extra

        task_owner = None

        if task_instance:
            self.dag_id = task_instance.dag_id
            self.task_id = task_instance.task_id
            self.execution_date = task_instance.execution_date
            task_owner = task_instance.task.owner

        # kwargs take precedence over the task instance's identifiers.
        if 'task_id' in kwargs:
            self.task_id = kwargs['task_id']
        if 'dag_id' in kwargs:
            self.dag_id = kwargs['dag_id']
        if 'execution_date' in kwargs:
            if kwargs['execution_date']:
                self.execution_date = kwargs['execution_date']

        self.owner = owner or task_owner
class SkipMixin(object):
    def skip(self, dag_run, execution_date, tasks):
        """
        Sets tasks instances to skipped from the same dag run.

        :param dag_run: the DagRun for which to set the tasks to skipped
        :param execution_date: execution_date
        :param tasks: tasks to skip (not task_ids)
        """
        if not tasks:
            return

        task_ids = [d.task_id for d in tasks]
        now = datetime.now()
        session = settings.Session()

        if dag_run:
            # Bulk-update all matching TIs in one statement.
            session.query(TaskInstance).filter(
                TaskInstance.dag_id == dag_run.dag_id,
                TaskInstance.execution_date == dag_run.execution_date,
                TaskInstance.task_id.in_(task_ids)
            ).update({TaskInstance.state: State.SKIPPED,
                      TaskInstance.start_date: now,
                      TaskInstance.end_date: now},
                     synchronize_session=False)
            session.commit()
        else:
            assert execution_date is not None, "Execution date is None and no dag run"

            logging.warning("No DAG RUN present this should not happen")
            # this is defensive against dag runs that are not complete
            for task in tasks:
                ti = TaskInstance(task, execution_date=execution_date)
                ti.state = State.SKIPPED
                ti.start_date = now
                ti.end_date = now
                session.merge(ti)

            session.commit()

        session.close()
@functools.total_ordering
class BaseOperator(object):
"""
Abstract base class for all operators. Since operators create objects that
become nodes in the DAG, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:type task_id: string
:param owner: the owner of the task, using the unix username is recommended
:type owner: string
:param retries: the number of retries that should be performed before
failing the task
:type retries: int
:param retry_delay: delay between retries
:type retry_delay: timedelta
:param retry_exponential_backoff: allow progressive longer waits between
retries by using exponential backoff algorithm on retry delay (delay
will be converted into seconds)
:type retry_exponential_backoff: bool
:param max_retry_delay: maximum delay interval between retries
:type max_retry_delay: timedelta
:param start_date: The ``start_date`` for the task, determines
the ``execution_date`` for the first task instance. The best practice
is to have the start_date rounded
to your DAG's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``execution_date`` and adds the ``schedule_interval`` to determine
the next ``execution_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_date are offset in a way that their execution_date don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:type start_date: datetime
:param end_date: if specified, the scheduler won't go beyond this date
:type end_date: datetime
:param depends_on_past: when set to true, task instances will run
sequentially while relying on the previous task's schedule to
succeed. The task instance for the start_date is allowed to run.
:type depends_on_past: bool
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used.
:type wait_for_downstream: bool
:param queue: which queue to target when running this job. Not
all executors implement queue management, the CeleryExecutor
does support targeting specific queues.
:type queue: str
:param dag: a reference to the dag the task is attached to (if any)
:type dag: DAG
:param priority_weight: priority weight of this task against other task.
This allows the executor to trigger higher priority tasks before
others when things get backed up.
:type priority_weight: int
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:type pool: str
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
example if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
The scheduler pays special attention for jobs with an SLA and
sends alert
emails for sla misses. SLA misses are also recorded in the database
for future reference. All tasks that share the same SLA time
get bundled in a single email, sent soon after that time. SLA
notifications are sent once and only once for each task instance.
:type sla: datetime.timedelta
:param execution_timeout: max time allowed for the execution of
this task instance, if it goes beyond it will raise and fail.
:type execution_timeout: datetime.timedelta
:param on_failure_callback: a function to be called when a task instance
of this task fails. a context dictionary is passed as a single
parameter to this function. Context contains references to related
objects to the task instance and is documented under the macros
section of the API.
:type on_failure_callback: callable
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:type on_success_callback: callable
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | one_success |
one_failed | dummy}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:type trigger_rule: str
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:type resources: dict
:param run_as_user: unix username to impersonate while running the task
:type run_as_user: str
"""
# For derived classes to define which fields will get jinjaified
template_fields = []
# Defines which file extensions to look for in the templated fields
template_ext = []
# Defines the color of the operator node in the web UI graph/tree views
ui_color = '#fff'
ui_fgcolor = '#000'
@apply_defaults
def __init__(
        self,
        task_id,
        owner=configuration.get('operators', 'DEFAULT_OWNER'),
        email=None,
        email_on_retry=True,
        email_on_failure=True,
        retries=0,
        retry_delay=timedelta(seconds=300),
        retry_exponential_backoff=False,
        max_retry_delay=None,
        start_date=None,
        end_date=None,
        schedule_interval=None,  # not hooked as of now
        depends_on_past=False,
        wait_for_downstream=False,
        dag=None,
        params=None,
        default_args=None,
        adhoc=False,
        priority_weight=1,
        queue=configuration.get('celery', 'default_queue'),
        pool=None,
        sla=None,
        execution_timeout=None,
        on_failure_callback=None,
        on_success_callback=None,
        on_retry_callback=None,
        trigger_rule=TriggerRule.ALL_SUCCESS,
        resources=None,
        run_as_user=None,
        *args,
        **kwargs):
    """
    Validate arguments and initialize operator state; see the class
    docstring for the meaning of each parameter.

    :raises AirflowException: if trigger_rule is not a valid TriggerRule
    """
    if args or kwargs:
        # TODO remove *args and **kwargs in Airflow 2.0
        warnings.warn(
            'Invalid arguments were passed to {c}. Support for '
            'passing such arguments will be dropped in Airflow 2.0. '
            'Invalid arguments were:'
            '\n*args: {a}\n**kwargs: {k}'.format(
                c=self.__class__.__name__, a=args, k=kwargs),
            category=PendingDeprecationWarning
        )

    validate_key(task_id)
    self.task_id = task_id
    self.owner = owner
    self.email = email
    self.email_on_retry = email_on_retry
    self.email_on_failure = email_on_failure
    self.start_date = start_date
    if start_date and not isinstance(start_date, datetime):
        logging.warning(
            "start_date for {} isn't datetime.datetime".format(self))
    self.end_date = end_date
    if not TriggerRule.is_valid(trigger_rule):
        # BUGFIX: dag may be None at this point; previously `dag.dag_id`
        # raised AttributeError here instead of the intended exception.
        raise AirflowException(
            "The trigger_rule must be one of {all_triggers},"
            "'{d}.{t}'; received '{tr}'."
            .format(all_triggers=TriggerRule.all_triggers,
                    d=dag.dag_id if dag else '', t=task_id, tr=trigger_rule))

    self.trigger_rule = trigger_rule
    self.depends_on_past = depends_on_past
    self.wait_for_downstream = wait_for_downstream
    if wait_for_downstream:
        # wait_for_downstream implies depends_on_past (see class docs).
        self.depends_on_past = True

    if schedule_interval:
        logging.warning(
            "schedule_interval is used for {}, though it has "
            "been deprecated as a task parameter, you need to "
            "specify it as a DAG parameter instead".format(self))
    self._schedule_interval = schedule_interval
    self.retries = retries
    self.queue = queue
    self.pool = pool
    self.sla = sla
    self.execution_timeout = execution_timeout
    self.on_failure_callback = on_failure_callback
    self.on_success_callback = on_success_callback
    self.on_retry_callback = on_retry_callback
    if isinstance(retry_delay, timedelta):
        self.retry_delay = retry_delay
    else:
        logging.debug("retry_delay isn't timedelta object, assuming secs")
        self.retry_delay = timedelta(seconds=retry_delay)
    self.retry_exponential_backoff = retry_exponential_backoff
    self.max_retry_delay = max_retry_delay
    self.params = params or {}  # Available in templates!
    self.adhoc = adhoc
    self.priority_weight = priority_weight
    self.resources = Resources(**(resources or {}))
    self.run_as_user = run_as_user

    # Private attributes
    self._upstream_task_ids = []
    self._downstream_task_ids = []

    # Fall back to the DAG opened by a `with DAG(...)` context manager.
    if not dag and _CONTEXT_MANAGER_DAG:
        dag = _CONTEXT_MANAGER_DAG
    if dag:
        self.dag = dag

    # Attribute names compared by __eq__ and hashed by __hash__.
    self._comps = {
        'task_id',
        'dag_id',
        'owner',
        'email',
        'email_on_retry',
        'retry_delay',
        'retry_exponential_backoff',
        'max_retry_delay',
        'start_date',
        'schedule_interval',
        'depends_on_past',
        'wait_for_downstream',
        'adhoc',
        'priority_weight',
        'sla',
        'execution_timeout',
        'on_failure_callback',
        'on_success_callback',
        'on_retry_callback',
    }
def __eq__(self, other):
    """Operators are equal when same concrete type and all _comps match."""
    if type(self) != type(other):
        return False
    return all(
        self.__dict__.get(name, None) == other.__dict__.get(name, None)
        for name in self._comps)
def __ne__(self, other):
    """Negation of __eq__."""
    return not self == other
def __lt__(self, other):
    """Order operators by task_id; total_ordering derives the rest."""
    return self.task_id < other.task_id
def __hash__(self):
    """Hash from the type plus every _comps value (repr for unhashables)."""
    parts = [type(self)]
    for name in self._comps:
        value = getattr(self, name, None)
        try:
            hash(value)
        except TypeError:
            # Unhashable values (e.g. lists) contribute via their repr.
            parts.append(repr(value))
        else:
            parts.append(value)
    return hash(tuple(parts))
# Composing Operators -----------------------------------------------
def __rshift__(self, other):
    """
    Implements self >> other as self.set_downstream(other).
    When other is a DAG, it is assigned to this operator instead.
    """
    if isinstance(other, DAG):
        # Only (re)assign when not already bound to this exact DAG.
        if not (self.has_dag() and self.dag is other):
            self.dag = other
        return other
    self.set_downstream(other)
    return other
def __lshift__(self, other):
    """
    Implements self << other as self.set_upstream(other).
    When other is a DAG, it is assigned to this operator instead.
    """
    if isinstance(other, DAG):
        # Only (re)assign when not already bound to this exact DAG.
        if not (self.has_dag() and self.dag is other):
            self.dag = other
        return other
    self.set_upstream(other)
    return other
def __rrshift__(self, other):
    """Support [DAG] >> [Operator] (DAG has no __rshift__); mirrors <<."""
    self << other
    return self
def __rlshift__(self, other):
    """Support [DAG] << [Operator] (DAG has no __lshift__); mirrors >>."""
    self >> other
    return self
# /Composing Operators ---------------------------------------------
@property
def dag(self):
    """Return the operator's DAG; raise when no DAG has been assigned."""
    if not self.has_dag():
        raise AirflowException(
            'Operator {} has not been assigned to a DAG yet'.format(self))
    return self._dag
@dag.setter
def dag(self, dag):
    """
    Operators can be assigned to one DAG, one time. Repeat assignments to
    that same DAG are ok.

    :raises TypeError: if dag is not a DAG instance
    :raises AirflowException: if a different DAG was already assigned
    """
    if not isinstance(dag, DAG):
        raise TypeError(
            'Expected DAG; received {}'.format(dag.__class__.__name__))
    elif self.has_dag() and self.dag is not dag:
        raise AirflowException(
            "The DAG assigned to {} can not be changed.".format(self))
    elif self.task_id not in dag.task_dict:
        # Register the task with the DAG on first assignment.
        dag.add_task(self)

    self._dag = dag
def has_dag(self):
    """Return True when a DAG has been assigned to this operator."""
    assigned_dag = getattr(self, '_dag', None)
    return assigned_dag is not None
@property
def dag_id(self):
    """DAG id when attached; otherwise an 'adhoc_<owner>' placeholder."""
    return self.dag.dag_id if self.has_dag() else 'adhoc_' + self.owner
@property
def deps(self):
    """
    Returns the list of dependencies for the operator. These differ from execution
    context dependencies in that they are specific to tasks and can be
    extended/overridden by subclasses.

    :return: a set of dependency objects
    """
    return {
        NotInRetryPeriodDep(),
        PrevDagrunDep(),
        TriggerRuleDep(),
    }
@property
def schedule_interval(self):
    """
    The DAG's schedule interval when one is attached, so that all tasks
    in a DAG line up; otherwise the task's own stored interval.
    """
    if not self.has_dag():
        return self._schedule_interval
    return self.dag._schedule_interval
@property
def priority_weight_total(self):
    """This task's priority weight plus all downstream tasks' weights."""
    downstream = self.get_flat_relatives(upstream=False)
    return self.priority_weight + sum(t.priority_weight for t in downstream)
def pre_execute(self, context):
    """
    This hook is triggered right before self.execute() is called.

    :param context: the same template context dict passed to execute()
    """
    pass
def execute(self, context):
    """
    This is the main method to derive when creating an operator.
    Context is the same dictionary used as when rendering jinja templates.

    Refer to get_template_context for more context.

    :raises NotImplementedError: always, unless overridden by a subclass
    """
    raise NotImplementedError()
def post_execute(self, context, result=None):
    """
    This hook is triggered right after self.execute() is called.
    It is passed the execution context and any results returned by the
    operator.

    :param context: the template context dict used for the run
    :param result: the value returned by execute(), if any
    """
    pass
def on_kill(self):
    """
    Override this method to cleanup subprocesses when a task instance
    gets killed. Any use of the threading, subprocess or multiprocessing
    module within an operator needs to be cleaned up or it will leave
    ghost processes behind.
    """
    pass
def __deepcopy__(self, memo):
    """
    Hack sorting double chained task lists by task_id to avoid hitting
    max_depth on deepcopy operations.
    """
    sys.setrecursionlimit(5000)  # TODO fix this in a better way
    cls = self.__class__
    result = cls.__new__(cls)
    # Register early so cyclic references resolve to the new object.
    memo[id(self)] = result

    for k, v in list(self.__dict__.items()):
        # These attributes are shared by reference, not deep-copied.
        if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
            setattr(result, k, copy.deepcopy(v, memo))

    result.params = self.params
    if hasattr(self, 'user_defined_macros'):
        result.user_defined_macros = self.user_defined_macros
    if hasattr(self, 'user_defined_filters'):
        result.user_defined_filters = self.user_defined_filters
    return result
def render_template_from_field(self, attr, content, context, jinja_env):
    """
    Render a templated field value. Strings are rendered directly; lists,
    tuples and dicts are traversed recursively; any other type raises
    AirflowException.
    """
    rt = self.render_template
    if isinstance(content, six.string_types):
        return jinja_env.from_string(content).render(**context)
    if isinstance(content, (list, tuple)):
        return [rt(attr, element, context) for element in content]
    if isinstance(content, dict):
        return {
            key: rt("{}[{}]".format(attr, key), value, context)
            for key, value in list(content.items())}

    # Unsupported type: report it. format(**locals()) needs param_type/attr.
    param_type = type(content)
    msg = (
        "Type '{param_type}' used for parameter '{attr}' is "
        "not supported for templating").format(**locals())
    raise AirflowException(msg)
def render_template(self, attr, content, context):
    """
    Renders a template either from a file or directly in a field, and returns
    the rendered result.
    """
    # Prefer the DAG's template environment (carries user macros/filters
    # and a file loader) over a bare one.
    jinja_env = self.dag.get_template_env() \
        if hasattr(self, 'dag') \
        else jinja2.Environment(cache_size=0)

    # Strings ending in a registered template extension (e.g. '.sql')
    # are treated as template file names and loaded via the environment.
    exts = self.__class__.template_ext
    if (
            isinstance(content, six.string_types) and
            any([content.endswith(ext) for ext in exts])):
        return jinja_env.get_template(content).render(**context)
    else:
        return self.render_template_from_field(attr, content, context, jinja_env)
def prepare_template(self):
    """
    Hook that is triggered after the templated fields get replaced
    by their content. If you need your operator to alter the
    content of the file before the template is rendered,
    it should override this method to do so.
    """
    pass
def resolve_template_files(self):
    """Replace template-file names in templated fields with file contents."""
    # Getting the content of files for template_field / template_ext
    for attr in self.template_fields:
        content = getattr(self, attr)
        if content is not None and \
                isinstance(content, six.string_types) and \
                any([content.endswith(ext) for ext in self.template_ext]):
            env = self.dag.get_template_env()
            try:
                setattr(self, attr, env.loader.get_source(env, content)[0])
            except Exception as e:
                # Best effort: keep the original value if loading fails.
                logging.exception(e)
    self.prepare_template()
@property
def upstream_list(self):
    """Tasks directly upstream, resolved from their ids via the DAG."""
    return [self.dag.get_task(task_id) for task_id in self._upstream_task_ids]
@property
def upstream_task_ids(self):
    """Ids of tasks directly upstream of this one."""
    return self._upstream_task_ids
@property
def downstream_list(self):
    """Tasks directly downstream, resolved from their ids via the DAG."""
    return [self.dag.get_task(task_id) for task_id in self._downstream_task_ids]
@property
def downstream_task_ids(self):
    """Ids of tasks directly downstream of this one."""
    return self._downstream_task_ids
def clear(self, start_date=None, end_date=None, upstream=False, downstream=False):
    """
    Clears the state of task instances associated with the task, following
    the parameters specified.

    :param start_date: only clear TIs at or after this execution date
    :param end_date: only clear TIs at or before this execution date
    :param upstream: also clear all transitive upstream tasks
    :param downstream: also clear all transitive downstream tasks
    :return: the number of task instances cleared
    """
    session = settings.Session()

    TI = TaskInstance
    qry = session.query(TI).filter(TI.dag_id == self.dag_id)

    if start_date:
        qry = qry.filter(TI.execution_date >= start_date)
    if end_date:
        qry = qry.filter(TI.execution_date <= end_date)

    tasks = [self.task_id]

    if upstream:
        tasks += [
            t.task_id for t in self.get_flat_relatives(upstream=True)]
    if downstream:
        tasks += [
            t.task_id for t in self.get_flat_relatives(upstream=False)]

    qry = qry.filter(TI.task_id.in_(tasks))

    # Count before clearing so the return value reflects the affected rows.
    count = qry.count()

    clear_task_instances(qry.all(), session, dag=self.dag)

    session.commit()
    session.close()
    return count
def get_task_instances(self, session, start_date=None, end_date=None):
    """
    Get a set of task instance related to this task for a specific date
    range.

    NOTE(review): when start_date is None the filter compares the
    execution_date against NULL, which matches no rows in SQL -- it
    looks like callers are expected to always pass start_date; confirm.
    """
    TI = TaskInstance
    end_date = end_date or datetime.now()
    return session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.task_id == self.task_id,
        TI.execution_date >= start_date,
        TI.execution_date <= end_date,
    ).order_by(TI.execution_date).all()
def get_flat_relatives(self, upstream=False, l=None):
    """
    Get a flat list of relatives, either upstream or downstream.

    :param upstream: collect upstream relatives when True, else downstream
    :param l: accumulator list used by the recursion; when provided, the
        relatives found are appended to it in place
    :return: the accumulator list of unique relative tasks
    """
    # BUGFIX: compare against None explicitly. The previous `if not l`
    # also replaced a caller-supplied *empty* list, so the caller's list
    # would silently not receive the accumulated relatives.
    if l is None:
        l = []
    for t in self.get_direct_relatives(upstream):
        if not is_in(t, l):
            l.append(t)
            t.get_flat_relatives(upstream, l)
    return l
def detect_downstream_cycle(self, task=None):
    """
    When invoked, this routine will raise an exception if a cycle is
    detected downstream from self. It is invoked when tasks are added to
    the DAG to detect cycles.

    :param task: the task the search started from (defaults to self)
    :raises AirflowException: when a downstream path leads back to task
    :return: False when no cycle was found
    """
    if not task:
        task = self
    for t in self.get_direct_relatives():
        if task is t:
            msg = "Cycle detected in DAG. Faulty task: {0}".format(task)
            raise AirflowException(msg)
        else:
            t.detect_downstream_cycle(task=task)
    return False
def run(
        self,
        start_date=None,
        end_date=None,
        ignore_first_depends_on_past=False,
        ignore_ti_state=False,
        mark_success=False):
    """
    Run a set of task instances for a date range.

    :param start_date: first execution date (defaults to the task's own)
    :param end_date: last execution date (defaults to the task's end_date
        or now)
    :param ignore_first_depends_on_past: skip the depends_on_past check
        only for the first run in the range
    :param ignore_ti_state: run regardless of previous TI state
    :param mark_success: mark the instances successful without running
    """
    start_date = start_date or self.start_date
    end_date = end_date or self.end_date or datetime.now()

    for dt in self.dag.date_range(start_date, end_date=end_date):
        TaskInstance(self, dt).run(
            mark_success=mark_success,
            ignore_depends_on_past=(
                dt == start_date and ignore_first_depends_on_past),
            ignore_ti_state=ignore_ti_state)
def dry_run(self):
logging.info('Dry run')
for attr in self.template_fields:
content = getattr(self, attr)
if content and isinstance(content, six.string_types):
logging.info('Rendering template for {0}'.format(attr))
logging.info(content)
def get_direct_relatives(self, upstream=False):
"""
Get the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
    def __repr__(self):
        # e.g. "<Task(BashOperator): my_task_id>"
        return "<Task({self.__class__.__name__}): {self.task_id}>".format(
            self=self)
@property
def task_type(self):
return self.__class__.__name__
def append_only_new(self, l, item):
if any([item is t for t in l]):
raise AirflowException(
'Dependency {self}, {item} already registered'
''.format(**locals()))
else:
l.append(item)
    def _set_relatives(self, task_or_task_list, upstream=False):
        """
        Register dependency links between this task and one or more other
        operators, wiring both sides of each relationship and assigning the
        shared DAG to any task that doesn't have one yet. Raises
        AirflowException for non-operators, for tasks spread across multiple
        DAGs, or when no task has a DAG at all.
        """
        try:
            task_list = list(task_or_task_list)
        except TypeError:
            # a single operator was passed rather than an iterable
            task_list = [task_or_task_list]

        for t in task_list:
            if not isinstance(t, BaseOperator):
                raise AirflowException(
                    "Relationships can only be set between "
                    "Operators; received {}".format(t.__class__.__name__))

        # relationships can only be set if the tasks share a single DAG. Tasks
        # without a DAG are assigned to that DAG.
        dags = set(t.dag for t in [self] + task_list if t.has_dag())

        if len(dags) > 1:
            raise AirflowException(
                'Tried to set relationships between tasks in '
                'more than one DAG: {}'.format(dags))
        elif len(dags) == 1:
            dag = list(dags)[0]
        else:
            raise AirflowException(
                "Tried to create relationships between tasks that don't have "
                "DAGs yet. Set the DAG for at least one "
                "task  and try again: {}".format([self] + task_list))

        if dag and not self.has_dag():
            self.dag = dag

        for task in task_list:
            if dag and not task.has_dag():
                task.dag = dag
            # record the link on both ends; append_only_new guards against
            # registering the same dependency twice
            if upstream:
                task.append_only_new(task._downstream_task_ids, self.task_id)
                self.append_only_new(self._upstream_task_ids, task.task_id)
            else:
                self.append_only_new(self._downstream_task_ids, task.task_id)
                task.append_only_new(task._upstream_task_ids, self.task_id)

        # fail fast if the new edges introduced a cycle
        self.detect_downstream_cycle()
def set_downstream(self, task_or_task_list):
"""
Set a task, or a task task to be directly downstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=False)
def set_upstream(self, task_or_task_list):
"""
Set a task, or a task task to be directly upstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=True)
def xcom_push(
self,
context,
key,
value,
execution_date=None):
"""
See TaskInstance.xcom_push()
"""
context['ti'].xcom_push(
key=key,
value=value,
execution_date=execution_date)
def xcom_pull(
self,
context,
task_ids,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=None):
"""
See TaskInstance.xcom_pull()
"""
return context['ti'].xcom_pull(
key=key,
task_ids=task_ids,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
class DagModel(Base):
    """
    These items are stored in the database for state related information
    """
    # The string above was previously a stray statement placed after
    # __tablename__, so it never became the class docstring; moved here so
    # DagModel.__doc__ is set.
    __tablename__ = "dag"

    dag_id = Column(String(ID_LEN), primary_key=True)
    # A DAG can be paused from the UI / DB
    # Set this default value of is_paused based on a configuration value!
    is_paused_at_creation = configuration.getboolean('core',
                                                     'dags_are_paused_at_creation')
    is_paused = Column(Boolean, default=is_paused_at_creation)
    # Whether the DAG is a subdag
    is_subdag = Column(Boolean, default=False)
    # Whether that DAG was seen on the last DagBag load
    is_active = Column(Boolean, default=False)
    # Last time the scheduler started
    last_scheduler_run = Column(DateTime)
    # Last time this DAG was pickled
    last_pickled = Column(DateTime)
    # Time when the DAG last received a refresh signal
    # (e.g. the DAG's "refresh" button was clicked in the web UI)
    last_expired = Column(DateTime)
    # Whether (one of) the scheduler is scheduling this DAG at the moment
    scheduler_lock = Column(Boolean)
    # Foreign key to the latest pickle_id
    pickle_id = Column(Integer)
    # The location of the file containing the DAG object
    fileloc = Column(String(2000))
    # String representing the owners
    owners = Column(String(2000))

    def __repr__(self):
        return "<DAG: {self.dag_id}>".format(self=self)

    @classmethod
    def get_current(cls, dag_id):
        """
        Return the DagModel row for ``dag_id`` as a detached object, or
        None if no such DAG is recorded.
        """
        session = settings.Session()
        try:
            obj = session.query(cls).filter(cls.dag_id == dag_id).first()
            session.expunge_all()
            session.commit()
        finally:
            # previously the session leaked if the query raised
            session.close()
        return obj
@functools.total_ordering
class DAG(BaseDag, LoggingMixin):
    """
    A dag (directed acyclic graph) is a collection of tasks with directional
    dependencies. A dag also has a schedule, a start and an end date
    (optional). For each schedule, (say daily or hourly), the DAG needs to run
    each individual tasks as their dependencies are met. Certain tasks have
    the property of depending on their own past, meaning that they can't run
    until their previous schedule (and upstream tasks) are completed.

    DAGs essentially act as namespaces for tasks. A task_id can only be
    added once to a DAG.

    :param dag_id: The id of the DAG
    :type dag_id: string
    :param description: The description for the DAG to e.g. be shown on the webserver
    :type description: string
    :param schedule_interval: Defines how often that DAG runs, this
        timedelta object gets added to your latest task instance's
        execution_date to figure out the next schedule
    :type schedule_interval: datetime.timedelta or
        dateutil.relativedelta.relativedelta or str that acts as a cron
        expression
    :param start_date: The timestamp from which the scheduler will
        attempt to backfill
    :type start_date: datetime.datetime
    :param end_date: A date beyond which your DAG won't run, leave to None
        for open ended scheduling
    :type end_date: datetime.datetime
    :param template_searchpath: This list of folders (non relative)
        defines where jinja will look for your templates. Order matters.
        Note that jinja/airflow includes the path of your DAG file by
        default
    :type template_searchpath: string or list of strings
    :param user_defined_macros: a dictionary of macros that will be exposed
        in your jinja templates. For example, passing ``dict(foo='bar')``
        to this argument allows you to ``{{ foo }}`` in all jinja
        templates related to this DAG. Note that you can pass any
        type of object here.
    :type user_defined_macros: dict
    :param user_defined_filters: a dictionary of filters that will be exposed
        in your jinja templates. For example, passing
        ``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
        you to ``{{ 'world' | hello }}`` in all jinja templates related to
        this DAG.
    :type user_defined_filters: dict
    :param default_args: A dictionary of default parameters to be used
        as constructor keyword parameters when initialising operators.
        Note that operators have the same hook, and precede those defined
        here, meaning that if your dict contains `'depends_on_past': True`
        here and `'depends_on_past': False` in the operator's call
        `default_args`, the actual value will be `False`.
    :type default_args: dict
    :param params: a dictionary of DAG level parameters that are made
        accessible in templates, namespaced under `params`. These
        params can be overridden at the task level.
    :type params: dict
    :param concurrency: the number of task instances allowed to run
        concurrently
    :type concurrency: int
    :param max_active_runs: maximum number of active DAG runs, beyond this
        number of DAG runs in a running state, the scheduler won't create
        new active DAG runs
    :type max_active_runs: int
    :param dagrun_timeout: specify how long a DagRun should be up before
        timing out / failing, so that new DagRuns can be created
    :type dagrun_timeout: datetime.timedelta
    :param sla_miss_callback: specify a function to call when reporting SLA
        timeouts.
    :type sla_miss_callback: types.FunctionType
    :param default_view: Specify DAG default view (tree, graph, duration, gantt, landing_times)
    :type default_view: string
    :param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT)
    :type orientation: string
    :param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
    :type catchup: bool
    """

    def __init__(
            self, dag_id,
            description='',
            schedule_interval=timedelta(days=1),
            start_date=None, end_date=None,
            full_filepath=None,
            template_searchpath=None,
            user_defined_macros=None,
            user_defined_filters=None,
            default_args=None,
            # NOTE(review): these configuration-backed defaults are evaluated
            # once at import time, not per DAG instantiation
            concurrency=configuration.getint('core', 'dag_concurrency'),
            max_active_runs=configuration.getint(
                'core', 'max_active_runs_per_dag'),
            dagrun_timeout=None,
            sla_miss_callback=None,
            default_view=configuration.get('webserver', 'dag_default_view').lower(),
            orientation=configuration.get('webserver', 'dag_orientation'),
            catchup=configuration.getboolean('scheduler', 'catchup_by_default'),
            params=None):
        self.user_defined_macros = user_defined_macros
        self.user_defined_filters = user_defined_filters
        self.default_args = default_args or {}
        self.params = params or {}

        # merging potentially conflicting default_args['params'] into params
        if 'params' in self.default_args:
            self.params.update(self.default_args['params'])
            del self.default_args['params']

        validate_key(dag_id)

        # Properties from BaseDag
        self._dag_id = dag_id
        self._full_filepath = full_filepath if full_filepath else ''
        self._concurrency = concurrency
        self._pickle_id = None

        self._description = description
        # set file location to caller source path
        self.fileloc = inspect.getsourcefile(inspect.stack()[1][0])
        self.task_dict = dict()
        self.start_date = start_date
        self.end_date = end_date
        self.schedule_interval = schedule_interval
        # normalize preset aliases (e.g. '@daily') to cron expressions;
        # '@once' maps to None, i.e. no recurring schedule
        if schedule_interval in cron_presets:
            self._schedule_interval = cron_presets.get(schedule_interval)
        elif schedule_interval == '@once':
            self._schedule_interval = None
        else:
            self._schedule_interval = schedule_interval
        if isinstance(template_searchpath, six.string_types):
            template_searchpath = [template_searchpath]
        self.template_searchpath = template_searchpath
        self.parent_dag = None  # Gets set when DAGs are loaded
        self.last_loaded = datetime.now()
        self.safe_dag_id = dag_id.replace('.', '__dot__')
        self.max_active_runs = max_active_runs
        self.dagrun_timeout = dagrun_timeout
        self.sla_miss_callback = sla_miss_callback
        self.default_view = default_view
        self.orientation = orientation
        self.catchup = catchup
        self.is_subdag = False  # DagBag.bag_dag() will set this to True if appropriate

        self.partial = False

        # attributes participating in equality and hashing (see __eq__ /
        # __hash__ below)
        self._comps = {
            'dag_id',
            'task_ids',
            'parent_dag',
            'start_date',
            'schedule_interval',
            'full_filepath',
            'template_searchpath',
            'last_loaded',
        }

    def __repr__(self):
        return "<DAG: {self.dag_id}>".format(self=self)

    def __eq__(self, other):
        return (
            type(self) == type(other) and
            # Use getattr() instead of __dict__ as __dict__ doesn't return
            # correct values for properties.
            all(getattr(self, c, None) == getattr(other, c, None)
                for c in self._comps))

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from __eq__ + __lt__
        return self.dag_id < other.dag_id

    def __hash__(self):
        hash_components = [type(self)]
        for c in self._comps:
            # task_ids returns a list and lists can't be hashed
            if c == 'task_ids':
                val = tuple(self.task_dict.keys())
            else:
                val = getattr(self, c, None)
            try:
                hash(val)
                hash_components.append(val)
            except TypeError:
                # unhashable components fall back to their repr
                hash_components.append(repr(val))
        return hash(tuple(hash_components))

    # Context Manager -----------------------------------------------

    def __enter__(self):
        # entering a `with DAG(...)` block makes this DAG the implicit
        # default for operators instantiated inside the block
        global _CONTEXT_MANAGER_DAG
        self._old_context_manager_dag = _CONTEXT_MANAGER_DAG
        _CONTEXT_MANAGER_DAG = self
        return self

    def __exit__(self, _type, _value, _tb):
        # restore whichever DAG (possibly None) was active before __enter__
        global _CONTEXT_MANAGER_DAG
        _CONTEXT_MANAGER_DAG = self._old_context_manager_dag

    # /Context Manager ----------------------------------------------

    def date_range(self, start_date, num=None, end_date=datetime.now()):
        # NOTE(review): the end_date default is evaluated once at import
        # time, so it is frozen at process start rather than "now" at call
        # time -- callers should pass end_date explicitly
        if num:
            end_date = None
        return utils_date_range(
            start_date=start_date, end_date=end_date,
            num=num, delta=self._schedule_interval)

    def following_schedule(self, dttm):
        """
        Return the next scheduled datetime after ``dttm``; implicitly
        returns None when there is no recurring schedule (e.g. '@once').
        """
        if isinstance(self._schedule_interval, six.string_types):
            cron = croniter(self._schedule_interval, dttm)
            return cron.get_next(datetime)
        elif isinstance(self._schedule_interval, timedelta):
            return dttm + self._schedule_interval

    def previous_schedule(self, dttm):
        """
        Return the scheduled datetime before ``dttm``; implicitly returns
        None when there is no recurring schedule (e.g. '@once').
        """
        if isinstance(self._schedule_interval, six.string_types):
            cron = croniter(self._schedule_interval, dttm)
            return cron.get_prev(datetime)
        elif isinstance(self._schedule_interval, timedelta):
            return dttm - self._schedule_interval

    def normalize_schedule(self, dttm):
        """
        Returns dttm + interval unless dttm is first interval then it returns dttm
        """
        following = self.following_schedule(dttm)

        # in case of @once
        if not following:
            return dttm
        if self.previous_schedule(following) != dttm:
            return following

        return dttm

    @provide_session
    def get_last_dagrun(self, session=None, include_externally_triggered=False):
        """
        Returns the last dag run for this dag, None if there was none.
        Last dag run can be any type of run eg. scheduled or backfilled.
        Overridden DagRuns are ignored
        """
        DR = DagRun
        qry = session.query(DR).filter(
            DR.dag_id == self.dag_id,
        )
        if not include_externally_triggered:
            qry = qry.filter(DR.external_trigger.__eq__(False))

        qry = qry.order_by(DR.execution_date.desc())

        last = qry.first()

        return last

    @property
    def dag_id(self):
        return self._dag_id

    @dag_id.setter
    def dag_id(self, value):
        self._dag_id = value

    @property
    def full_filepath(self):
        return self._full_filepath

    @full_filepath.setter
    def full_filepath(self, value):
        self._full_filepath = value

    @property
    def concurrency(self):
        return self._concurrency

    @concurrency.setter
    def concurrency(self, value):
        self._concurrency = value

    @property
    def description(self):
        return self._description

    @property
    def pickle_id(self):
        return self._pickle_id

    @pickle_id.setter
    def pickle_id(self, value):
        self._pickle_id = value

    @property
    def tasks(self):
        # returns a *new* list on every access; mutating it does not change
        # the DAG (see the setter below and add_task())
        return list(self.task_dict.values())

    @tasks.setter
    def tasks(self, val):
        raise AttributeError(
            'DAG.tasks can not be modified. Use dag.add_task() instead.')

    @property
    def task_ids(self):
        return list(self.task_dict.keys())

    @property
    def active_task_ids(self):
        # ids of tasks not flagged adhoc
        return list(k for k, v in self.task_dict.items() if not v.adhoc)

    @property
    def active_tasks(self):
        # tasks not flagged adhoc
        return [t for t in self.tasks if not t.adhoc]

    @property
    def filepath(self):
        """
        File location of where the dag object is instantiated
        """
        fn = self.full_filepath.replace(settings.DAGS_FOLDER + '/', '')
        fn = fn.replace(os.path.dirname(__file__) + '/', '')
        return fn

    @property
    def folder(self):
        """
        Folder location of where the dag object is instantiated
        """
        return os.path.dirname(self.full_filepath)

    @property
    def owner(self):
        # comma-separated set of distinct task owners
        return ", ".join(list(set([t.owner for t in self.tasks])))

    @property
    @provide_session
    def concurrency_reached(self, session=None):
        """
        Returns a boolean indicating whether the concurrency limit for this DAG
        has been reached
        """
        # @provide_session under @property supplies `session` even though
        # property access passes no arguments
        TI = TaskInstance
        qry = session.query(func.count(TI.task_id)).filter(
            TI.dag_id == self.dag_id,
            TI.task_id.in_(self.task_ids),
            TI.state == State.RUNNING,
        )
        return qry.scalar() >= self.concurrency

    @property
    @provide_session
    def is_paused(self, session=None):
        """
        Returns a boolean indicating whether this DAG is paused
        """
        qry = session.query(DagModel).filter(
            DagModel.dag_id == self.dag_id)
        return qry.value('is_paused')

    @provide_session
    def get_active_runs(self, session=None):
        """
        Returns the execution dates of this DAG's DagRuns in the RUNNING state

        :param session:
        :return: List of execution dates
        """
        runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)

        active_dates = []
        for run in runs:
            active_dates.append(run.execution_date)

        return active_dates

    @provide_session
    def get_dagrun(self, execution_date, session=None):
        """
        Returns the dag run for a given execution date if it exists, otherwise
        none.

        :param execution_date: The execution date of the DagRun to find.
        :param session:
        :return: The DagRun if found, otherwise None.
        """
        dagrun = (
            session.query(DagRun)
            .filter(
                DagRun.dag_id == self.dag_id,
                DagRun.execution_date == execution_date)
            .first())

        return dagrun

    @property
    def latest_execution_date(self):
        """
        Returns the latest date for which at least one dag run exists
        """
        session = settings.Session()
        execution_date = session.query(func.max(DagRun.execution_date)).filter(
            DagRun.dag_id == self.dag_id
        ).scalar()
        session.commit()
        session.close()
        return execution_date

    def descendants(self, dagbag, task_ids=None, include_downstream=False,
                    include_upstream=False, recursive=False):
        """
        Return sub-DAGs of other DAGs in ``dagbag`` that depend on this DAG
        via an ExternalTaskSensor pointing at one of ``task_ids`` (defaults
        to all of this DAG's task ids), optionally recursing through their
        own descendants.
        """
        from airflow.operators.sensors import ExternalTaskSensor
        if not task_ids:
            task_ids = self.task_ids
        descendants = []
        for _, dag in dagbag.dags.items():
            # sensors in `dag` that wait on tasks belonging to *this* dag
            tasks = [task for task in dag.tasks if
                     isinstance(task, ExternalTaskSensor) and
                     task.external_dag_id == self.dag_id and
                     task.external_task_id in task_ids]
            if not tasks:
                continue
            # anchor each sensor's task_id so sub_dag matches it exactly
            task_regex = "|".join(map(
                lambda x: "^{0}$".format(x.task_id), tasks))
            dependent_dag = dag.sub_dag(
                task_regex=r"{0}".format(task_regex),
                include_downstream=include_downstream,
                include_upstream=include_upstream)
            descendants.append(dependent_dag)
            if recursive:
                descendants.extend(dependent_dag.descendants(
                    dagbag,
                    include_downstream=include_downstream,
                    include_upstream=include_upstream,
                    recursive=recursive))
        return descendants

    @property
    def subdags(self):
        """
        Returns a list of the subdag objects associated to this DAG
        """
        # Check SubDag for class but don't check class directly, see
        # https://github.com/airbnb/airflow/issues/1168
        from airflow.operators.subdag_operator import SubDagOperator
        l = []
        for task in self.tasks:
            if (isinstance(task, SubDagOperator) or
                    #TODO remove in Airflow 2.0
                    type(task).__name__ == 'SubDagOperator'):
                l.append(task.subdag)
                l += task.subdag.subdags
        return l

    def resolve_template_files(self):
        # delegate template-file resolution to every task
        for t in self.tasks:
            t.resolve_template_files()

    def get_template_env(self):
        """
        Returns a jinja2 Environment while taking into account the DAGs
        template_searchpath, user_defined_macros and user_defined_filters
        """
        searchpath = [self.folder]
        if self.template_searchpath:
            searchpath += self.template_searchpath

        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath),
            extensions=["jinja2.ext.do"],
            cache_size=0)
        if self.user_defined_macros:
            env.globals.update(self.user_defined_macros)
        if self.user_defined_filters:
            env.filters.update(self.user_defined_filters)

        return env

    def set_dependency(self, upstream_task_id, downstream_task_id):
        """
        Simple utility method to set dependency between two tasks that
        already have been added to the DAG using add_task()
        """
        self.get_task(upstream_task_id).set_downstream(
            self.get_task(downstream_task_id))

    def get_task_instances(
            self, session, start_date=None, end_date=None, state=None):
        """
        Fetch task instances belonging to this DAG's tasks within a date
        window (start defaults to 30 days ago at midnight, end defaults to
        now), optionally filtered by state, ordered by execution date.
        """
        TI = TaskInstance
        if not start_date:
            start_date = (datetime.today() - timedelta(30)).date()
            start_date = datetime.combine(start_date, datetime.min.time())
        end_date = end_date or datetime.now()
        tis = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date >= start_date,
            TI.execution_date <= end_date,
            TI.task_id.in_([t.task_id for t in self.tasks]),
        )
        if state:
            tis = tis.filter(TI.state == state)
        tis = tis.order_by(TI.execution_date).all()
        return tis

    @property
    def roots(self):
        # tasks that have no downstream tasks; used as the starting points
        # of tree_view()
        return [t for t in self.tasks if not t.downstream_list]

    def topological_sort(self):
        """
        Sorts tasks in topographical order, such that a task comes after any of its
        upstream dependencies.

        Heavily inspired by:
        http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/

        :return: list of tasks in topological order
        """

        # copy the tasks so we leave it unmodified
        graph_unsorted = self.tasks[:]

        graph_sorted = []

        # special case
        if len(self.tasks) == 0:
            return tuple(graph_sorted)

        # Run until the unsorted graph is empty.
        while graph_unsorted:
            # Go through each of the node/edges pairs in the unsorted
            # graph. If a set of edges doesn't contain any nodes that
            # haven't been resolved, that is, that are still in the
            # unsorted graph, remove the pair from the unsorted graph,
            # and append it to the sorted graph. Note here that by using
            # using the items() method for iterating, a copy of the
            # unsorted graph is used, allowing us to modify the unsorted
            # graph as we move through it. We also keep a flag for
            # checking that graph is acyclic, which is true if any
            # nodes are resolved during each pass through the graph. If
            # not, we need to bail out as the graph therefore can't be
            # sorted.
            acyclic = False
            for node in list(graph_unsorted):
                for edge in node.upstream_list:
                    if edge in graph_unsorted:
                        break
                # no edges in upstream tasks
                else:
                    acyclic = True
                    graph_unsorted.remove(node)
                    graph_sorted.append(node)

            if not acyclic:
                raise AirflowException("A cyclic dependency occurred in dag: {}"
                                       .format(self.dag_id))

        return tuple(graph_sorted)

    @provide_session
    def set_dag_runs_state(
            self, state=State.RUNNING, session=None):
        # NOTE(review): this queries DagModel but assigns `.state` on the
        # results -- it looks like it was meant to operate on DagRun rows;
        # verify before relying on it
        drs = session.query(DagModel).filter_by(dag_id=self.dag_id).all()
        dirty_ids = []
        for dr in drs:
            dr.state = state
            dirty_ids.append(dr.dag_id)
        DagStat.update(dirty_ids, session=session)

    def clear(
            self, start_date=None, end_date=None,
            only_failed=False,
            only_running=False,
            confirm_prompt=False,
            include_subdags=True,
            reset_dag_runs=True,
            dry_run=False):
        """
        Clears a set of task instances associated with the current dag for
        a specified date range.
        """
        session = settings.Session()
        TI = TaskInstance
        tis = session.query(TI)
        if include_subdags:
            # Crafting the right filter for dag_id and task_ids combo
            conditions = []
            for dag in self.subdags + [self]:
                if dag.task_ids:
                    conditions.append(
                        TI.dag_id.like(dag.dag_id) &
                        TI.task_id.in_(dag.task_ids)
                    )
            tis = tis.filter(or_(*conditions))
        else:
            tis = session.query(TI).filter(TI.dag_id == self.dag_id)
            tis = tis.filter(TI.task_id.in_(self.task_ids))

        if start_date:
            tis = tis.filter(TI.execution_date >= start_date)
        if end_date:
            tis = tis.filter(TI.execution_date <= end_date)
        if only_failed:
            tis = tis.filter(TI.state == State.FAILED)
        if only_running:
            tis = tis.filter(TI.state == State.RUNNING)

        if dry_run:
            # return the matching, detached instances without clearing
            # NOTE(review): the session is not closed on this early return
            tis = tis.all()
            session.expunge_all()
            return tis

        count = tis.count()
        do_it = True
        if count == 0:
            # NOTE(review): the session is not closed on this early return
            print("Nothing to clear.")
            return 0
        if confirm_prompt:
            ti_list = "\n".join([str(t) for t in tis])
            question = (
                "You are about to delete these {count} tasks:\n"
                "{ti_list}\n\n"
                "Are you sure? (yes/no): ").format(**locals())
            do_it = utils.helpers.ask_yesno(question)

        if do_it:
            clear_task_instances(tis.all(), session, dag=self)
            if reset_dag_runs:
                self.set_dag_runs_state(session=session)
        else:
            count = 0
            print("Bail. Nothing was cleared.")

        session.commit()
        session.close()
        return count

    def __deepcopy__(self, memo):
        # Switcharoo to go around deepcopying objects coming through the
        # backdoor
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in list(self.__dict__.items()):
            if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
                setattr(result, k, copy.deepcopy(v, memo))

        # these may hold arbitrary user objects; share by reference rather
        # than deep-copying
        result.user_defined_macros = self.user_defined_macros
        result.user_defined_filters = self.user_defined_filters
        result.params = self.params
        return result

    def sub_dag(self, task_regex, include_downstream=False,
                include_upstream=True):
        """
        Returns a subset of the current dag as a deep copy of the current dag
        based on a regex that should match one or many tasks, and includes
        upstream and downstream neighbours based on the flag passed.
        """

        dag = copy.deepcopy(self)

        regex_match = [
            t for t in dag.tasks if re.findall(task_regex, t.task_id)]
        also_include = []
        for t in regex_match:
            if include_downstream:
                also_include += t.get_flat_relatives(upstream=False)
            if include_upstream:
                also_include += t.get_flat_relatives(upstream=True)

        # Compiling the unique list of tasks that made the cut
        dag.task_dict = {t.task_id: t for t in regex_match + also_include}
        for t in dag.tasks:
            # Removing upstream/downstream references to tasks that did not
            # make the cut
            t._upstream_task_ids = [
                tid for tid in t._upstream_task_ids if tid in dag.task_ids]
            t._downstream_task_ids = [
                tid for tid in t._downstream_task_ids if tid in dag.task_ids]

        if len(dag.tasks) < len(self.tasks):
            dag.partial = True

        return dag

    def has_task(self, task_id):
        return task_id in (t.task_id for t in self.tasks)

    def get_task(self, task_id):
        """Return the task with ``task_id``; raise AirflowException if absent."""
        if task_id in self.task_dict:
            return self.task_dict[task_id]
        raise AirflowException("Task {task_id} not found".format(**locals()))

    @provide_session
    def pickle_info(self, session=None):
        """
        Report whether this DAG can be pickled, with the pickle size and
        duration on success, or the stacktrace on failure.
        """
        d = {}
        d['is_picklable'] = True
        try:
            dttm = datetime.now()
            pickled = pickle.dumps(self)
            d['pickle_len'] = len(pickled)
            d['pickling_duration'] = "{}".format(datetime.now() - dttm)
        except Exception as e:
            logging.debug(e)
            d['is_picklable'] = False
            d['stacktrace'] = traceback.format_exc()
        return d

    @provide_session
    def pickle(self, session=None):
        """
        Persist a DagPickle of this DAG, reusing the stored one when it
        still matches, and record its id on self.pickle_id.
        """
        dag = session.query(
            DagModel).filter(DagModel.dag_id == self.dag_id).first()
        dp = None
        if dag and dag.pickle_id:
            dp = session.query(DagPickle).filter(
                DagPickle.id == dag.pickle_id).first()
        if not dp or dp.pickle != self:
            dp = DagPickle(dag=self)
            session.add(dp)
            self.last_pickled = datetime.now()
            session.commit()
            self.pickle_id = dp.id

        return dp

    def tree_view(self):
        """
        Shows an ascii tree representation of the DAG
        """
        def get_downstream(task, level=0):
            # despite the name, this recurses through upstream_list,
            # starting from self.roots (tasks with no downstream tasks)
            print((" " * level * 4) + str(task))
            level += 1
            for t in task.upstream_list:
                get_downstream(t, level)

        for t in self.roots:
            get_downstream(t)

    def add_task(self, task):
        """
        Add a task to the DAG

        :param task: the task you want to add
        :type task: task
        """
        if not self.start_date and not task.start_date:
            raise AirflowException("Task is missing the start_date parameter")
        # if the task has no start date, assign it the same as the DAG
        elif not task.start_date:
            task.start_date = self.start_date
        # otherwise, the task will start on the later of its own start date and
        # the DAG's start date
        elif self.start_date:
            task.start_date = max(task.start_date, self.start_date)

        # if the task has no end date, assign it the same as the dag
        if not task.end_date:
            task.end_date = self.end_date
        # otherwise, the task will end on the earlier of its own end date and
        # the DAG's end date
        elif task.end_date and self.end_date:
            task.end_date = min(task.end_date, self.end_date)

        if task.task_id in self.task_dict:
            # TODO: raise an error in Airflow 2.0
            warnings.warn(
                'The requested task could not be added to the DAG because a '
                'task with task_id {} is already in the DAG. Starting in '
                'Airflow 2.0, trying to overwrite a task will raise an '
                'exception.'.format(task.task_id),
                category=PendingDeprecationWarning)
        else:
            # NOTE(review): `self.tasks` is a property returning a fresh
            # list, so this append is a no-op; the task_dict assignment
            # below is what actually registers the task
            self.tasks.append(task)
            self.task_dict[task.task_id] = task
            task.dag = self

        self.task_count = len(self.tasks)

    def add_tasks(self, tasks):
        """
        Add a list of tasks to the DAG

        :param tasks: a list of tasks you want to add
        :type tasks: list of tasks
        """
        for task in tasks:
            self.add_task(task)

    def db_merge(self):
        # replace this DAG's operator rows in the DB, then merge the DAG
        BO = BaseOperator
        session = settings.Session()
        tasks = session.query(BO).filter(BO.dag_id == self.dag_id).all()
        for t in tasks:
            session.delete(t)
        session.commit()
        session.merge(self)
        session.commit()

    def run(
            self,
            start_date=None,
            end_date=None,
            mark_success=False,
            include_adhoc=False,
            local=False,
            executor=None,
            donot_pickle=configuration.getboolean('core', 'donot_pickle'),
            ignore_task_deps=False,
            ignore_first_depends_on_past=False,
            pool=None):
        """
        Runs the DAG.
        """
        from airflow.jobs import BackfillJob
        # `local` only selects the LocalExecutor when no explicit executor
        # was passed
        if not executor and local:
            executor = LocalExecutor()
        elif not executor:
            executor = GetDefaultExecutor()
        job = BackfillJob(
            self,
            start_date=start_date,
            end_date=end_date,
            mark_success=mark_success,
            include_adhoc=include_adhoc,
            executor=executor,
            donot_pickle=donot_pickle,
            ignore_task_deps=ignore_task_deps,
            ignore_first_depends_on_past=ignore_first_depends_on_past,
            pool=pool)
        job.run()

    def cli(self):
        """
        Exposes a CLI specific to this DAG
        """
        from airflow.bin import cli
        parser = cli.CLIFactory.get_parser(dag_parser=True)
        args = parser.parse_args()
        args.func(args, self)

    @provide_session
    def create_dagrun(self,
                      run_id,
                      state,
                      execution_date=None,
                      start_date=None,
                      external_trigger=False,
                      conf=None,
                      session=None):
        """
        Creates a dag run from this dag including the tasks associated with this dag.
        Returns the dag run.

        :param run_id: defines the run id for this dag run
        :type run_id: string
        :param execution_date: the execution date of this dag run
        :type execution_date: datetime
        :param state: the state of the dag run
        :type state: State
        :param start_date: the date this dag run should be evaluated
        :type start_date: datetime
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param session: database session
        :type session: Session
        """
        run = DagRun(
            dag_id=self.dag_id,
            run_id=run_id,
            execution_date=execution_date,
            start_date=start_date,
            external_trigger=external_trigger,
            conf=conf,
            state=state
        )
        session.add(run)

        DagStat.set_dirty(dag_id=self.dag_id, session=session)

        session.commit()

        run.dag = self

        # create the associated task instances
        # state is None at the moment of creation
        run.verify_integrity(session=session)

        run.refresh_from_db()

        return run

    @staticmethod
    @provide_session
    def sync_to_db(dag, owner, sync_time, session=None):
        """
        Save attributes about this DAG to the DB. Note that this method
        can be called for both DAGs and SubDAGs. A SubDag is actually a
        SubDagOperator.

        :param dag: the DAG object to save to the DB
        :type dag: DAG
        :param owner: string stored as the DAG's owners
        :type owner: string
        :param sync_time: The time that the DAG should be marked as sync'ed
        :type sync_time: datetime
        :return: None
        """
        orm_dag = session.query(
            DagModel).filter(DagModel.dag_id == dag.dag_id).first()
        if not orm_dag:
            orm_dag = DagModel(dag_id=dag.dag_id)
            logging.info("Creating ORM DAG for %s",
                         dag.dag_id)
        orm_dag.fileloc = dag.fileloc
        orm_dag.is_subdag = dag.is_subdag
        orm_dag.owners = owner
        orm_dag.is_active = True
        orm_dag.last_scheduler_run = sync_time
        session.merge(orm_dag)
        session.commit()

        # sync any subdags as well, recursively
        for subdag in dag.subdags:
            DAG.sync_to_db(subdag, owner, sync_time, session=session)

    @staticmethod
    @provide_session
    def deactivate_unknown_dags(active_dag_ids, session=None):
        """
        Given a list of known DAGs, deactivate any other DAGs that are
        marked as active in the ORM

        :param active_dag_ids: list of DAG IDs that are active
        :type active_dag_ids: list[unicode]
        :return: None
        """

        # safety guard: an empty "known" list would otherwise deactivate
        # every DAG
        if len(active_dag_ids) == 0:
            return
        for dag in session.query(
                DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
            dag.is_active = False
            session.merge(dag)

    @staticmethod
    @provide_session
    def deactivate_stale_dags(expiration_date, session=None):
        """
        Deactivate any DAGs that were last touched by the scheduler before
        the expiration date. These DAGs were likely deleted.

        :param expiration_date: set inactive DAGs that were touched before this
            time
        :type expiration_date: datetime
        :return: None
        """
        for dag in session.query(
                DagModel).filter(DagModel.last_scheduler_run < expiration_date,
                                 DagModel.is_active).all():
            logging.info("Deactivating DAG ID %s since it was last touched "
                         "by the scheduler at %s",
                         dag.dag_id,
                         dag.last_scheduler_run.isoformat())
            dag.is_active = False
            session.merge(dag)
            session.commit()

    @staticmethod
    @provide_session
    def get_num_task_instances(dag_id, task_ids, states=None, session=None):
        """
        Returns the number of task instances in the given DAG.

        :param session: ORM session
        :param dag_id: ID of the DAG to get the task concurrency of
        :type dag_id: unicode
        :param task_ids: A list of valid task IDs for the given DAG
        :type task_ids: list[unicode]
        :param states: A list of states to filter by if supplied
        :type states: list[state]
        :return: The number of running tasks
        :rtype: int
        """
        qry = session.query(func.count(TaskInstance.task_id)).filter(
            TaskInstance.dag_id == dag_id,
            TaskInstance.task_id.in_(task_ids))
        if states is not None:
            if None in states:
                # NULL rows are not matched by IN(); include them explicitly
                qry = qry.filter(or_(
                    TaskInstance.state.in_(states),
                    TaskInstance.state.is_(None)))
            else:
                qry = qry.filter(TaskInstance.state.in_(states))
        return qry.scalar()
class Chart(Base):
    # Stored definition of a chart: label, connection, SQL and display options.
    __tablename__ = "chart"

    id = Column(Integer, primary_key=True)
    label = Column(String(200))
    # connection used to run this chart's SQL
    conn_id = Column(String(ID_LEN), nullable=False)
    user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)
    chart_type = Column(String(100), default="line")
    sql_layout = Column(String(50), default="series")
    sql = Column(Text, default="SELECT series, x, y FROM table")
    y_log_scale = Column(Boolean)
    show_datatable = Column(Boolean)
    show_sql = Column(Boolean, default=True)
    height = Column(Integer, default=600)
    default_params = Column(String(5000), default="{}")
    owner = relationship(
        "User", cascade=False, cascade_backrefs=False, backref='charts')
    x_is_date = Column(Boolean, default=True)
    iteration_no = Column(Integer, default=0)
    last_modified = Column(DateTime, default=func.now())

    def __repr__(self):
        return self.label
class KnownEventType(Base):
    """Lookup table of known-event categories (e.g. outage, marketing)."""
    __tablename__ = "known_event_type"

    id = Column(Integer, primary_key=True)
    # NOTE(review): "know_event_type" looks like a typo for
    # "known_event_type", but it is also the DB column name, so renaming
    # it would require a schema migration — left as-is.
    know_event_type = Column(String(200))

    def __repr__(self):
        return self.know_event_type
class KnownEvent(Base):
    """A dated event (with reporter and type) that can annotate charts."""
    __tablename__ = "known_event"

    id = Column(Integer, primary_key=True)
    label = Column(String(200))
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    user_id = Column(Integer(), ForeignKey('users.id'),)
    known_event_type_id = Column(Integer(), ForeignKey('known_event_type.id'),)
    # User that reported this event.
    reported_by = relationship(
        "User", cascade=False, cascade_backrefs=False, backref='known_events')
    event_type = relationship(
        "KnownEventType",
        cascade=False,
        cascade_backrefs=False, backref='known_events')
    description = Column(Text)

    def __repr__(self):
        return self.label
class Variable(Base):
    """
    A generic key/value store persisted in the metadata database.

    Values can optionally be encrypted at rest (via Fernet) and/or
    stored JSON-serialized.
    """
    __tablename__ = "variable"

    id = Column(Integer, primary_key=True)
    key = Column(String(ID_LEN), unique=True)
    _val = Column('val', Text)
    is_encrypted = Column(Boolean, unique=False, default=False)

    def __repr__(self):
        # Hiding the value
        return '{} : {}'.format(self.key, self._val)

    def get_val(self):
        """Return the stored value, decrypting with Fernet when needed."""
        if self._val and self.is_encrypted:
            try:
                fernet = get_fernet()
            except Exception:
                # Narrowed from a bare "except:" so SystemExit /
                # KeyboardInterrupt are no longer swallowed.
                raise AirflowException(
                    "Can't decrypt _val for key={}, FERNET_KEY configuration \
missing".format(self.key))
            return fernet.decrypt(bytes(self._val, 'utf-8')).decode()
        else:
            return self._val

    def set_val(self, value):
        """Encrypt (when Fernet is available) and store the value."""
        if value:
            try:
                fernet = get_fernet()
                self._val = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_encrypted = True
            except NameError:
                # Presumably raised by get_fernet when the crypto
                # dependency is absent — fall back to plaintext.
                self._val = value
                self.is_encrypted = False

    @declared_attr
    def val(cls):
        return synonym('_val',
                       descriptor=property(cls.get_val, cls.set_val))

    @classmethod
    def setdefault(cls, key, default, deserialize_json=False):
        """
        Like a Python builtin dict object, setdefault returns the current value
        for a key, and if it isn't there, stores the default value and returns it.

        :param key: Dict key for this Variable
        :type key: String
        :param default: Default value to set and return if the variable
            isn't already in the DB
        :type default: Mixed
        :param deserialize_json: Store this as a JSON encoded value in the DB
            and un-encode it when retrieving a value
        :return: Mixed
        """
        default_sentinel = object()
        # BUG FIX: Variable.get returns the (optionally deserialized)
        # value itself, not a Variable instance, so the previous
        # "return json.loads(obj.val)" / "return obj.val" raised
        # AttributeError whenever the key already existed.  Let get()
        # do the deserialization and return its result directly.
        obj = Variable.get(key, default_var=default_sentinel,
                           deserialize_json=deserialize_json)
        if obj is default_sentinel:
            if default is not None:
                Variable.set(key, default, serialize_json=deserialize_json)
                return default
            else:
                raise ValueError('Default Value must be set')
        else:
            return obj

    @classmethod
    @provide_session
    def get(cls, key, default_var=None, deserialize_json=False, session=None):
        """Fetch a variable's value, or default_var when missing.

        :raises KeyError: when the key is absent and no default is given
        """
        obj = session.query(cls).filter(cls.key == key).first()
        if obj is None:
            if default_var is not None:
                return default_var
            else:
                raise KeyError('Variable {} does not exist'.format(key))
        else:
            if deserialize_json:
                return json.loads(obj.val)
            else:
                return obj.val

    @classmethod
    @provide_session
    def set(cls, key, value, serialize_json=False, session=None):
        """Upsert a variable (delete any existing row, then insert)."""
        if serialize_json:
            stored_value = json.dumps(value)
        else:
            stored_value = value
        session.query(cls).filter(cls.key == key).delete()
        session.add(Variable(key=key, val=stored_value))
        session.flush()
class XCom(Base):
    """
    Base class for XCom objects.

    XComs ("cross-communications") let tasks exchange small pickled
    values, keyed by (dag_id, task_id, execution_date, key).
    """
    __tablename__ = "xcom"

    id = Column(Integer, primary_key=True)
    key = Column(String(512))
    # Pickled with dill so richer objects (e.g. lambdas) round-trip.
    value = Column(PickleType(pickler=dill))
    timestamp = Column(
        DateTime, default=func.now(), nullable=False)
    execution_date = Column(DateTime, nullable=False)

    # source information
    task_id = Column(String(ID_LEN), nullable=False)
    dag_id = Column(String(ID_LEN), nullable=False)

    __table_args__ = (
        Index('idx_xcom_dag_task_date', dag_id, task_id, execution_date, unique=False),
    )

    def __repr__(self):
        return '<XCom "{key}" ({task_id} @ {execution_date})>'.format(
            key=self.key,
            task_id=self.task_id,
            execution_date=self.execution_date)

    @classmethod
    @provide_session
    def set(
            cls,
            key,
            value,
            execution_date,
            task_id,
            dag_id,
            session=None):
        """
        Store an XCom value, replacing any existing entry with the same
        (key, execution_date, task_id, dag_id).
        """
        session.expunge_all()
        # remove any duplicate XComs
        session.query(cls).filter(
            cls.key == key,
            cls.execution_date == execution_date,
            cls.task_id == task_id,
            cls.dag_id == dag_id).delete()
        # Commit the delete before inserting so the unique tuple is free.
        session.commit()
        # insert new XCom
        session.add(XCom(
            key=key,
            value=value,
            execution_date=execution_date,
            task_id=task_id,
            dag_id=dag_id))
        session.commit()

    @classmethod
    @provide_session
    def get_one(
            cls,
            execution_date,
            key=None,
            task_id=None,
            dag_id=None,
            include_prior_dates=False,
            session=None):
        """
        Retrieve an XCom value, optionally meeting certain criteria.
        Returns the most recent matching value, or None when nothing
        matches.
        """
        filters = []
        if key:
            filters.append(cls.key == key)
        if task_id:
            filters.append(cls.task_id == task_id)
        if dag_id:
            filters.append(cls.dag_id == dag_id)
        if include_prior_dates:
            filters.append(cls.execution_date <= execution_date)
        else:
            filters.append(cls.execution_date == execution_date)
        # Latest execution_date (then latest timestamp) wins.
        query = (
            session.query(cls.value)
            .filter(and_(*filters))
            .order_by(cls.execution_date.desc(), cls.timestamp.desc())
            .limit(1))
        result = query.first()
        if result:
            return result.value

    @classmethod
    @provide_session
    def get_many(
            cls,
            execution_date,
            key=None,
            task_ids=None,
            dag_ids=None,
            include_prior_dates=False,
            limit=100,
            session=None):
        """
        Retrieve up to ``limit`` XCom rows, newest first, optionally
        meeting certain criteria.
        """
        filters = []
        if key:
            filters.append(cls.key == key)
        if task_ids:
            filters.append(cls.task_id.in_(as_tuple(task_ids)))
        if dag_ids:
            filters.append(cls.dag_id.in_(as_tuple(dag_ids)))
        if include_prior_dates:
            filters.append(cls.execution_date <= execution_date)
        else:
            filters.append(cls.execution_date == execution_date)
        query = (
            session.query(cls)
            .filter(and_(*filters))
            .order_by(cls.execution_date.desc(), cls.timestamp.desc())
            .limit(limit))
        return query.all()

    @classmethod
    @provide_session
    def delete(cls, xcoms, session=None):
        """Delete the given XCom object or list of objects and commit."""
        if isinstance(xcoms, XCom):
            xcoms = [xcoms]
        for xcom in xcoms:
            if not isinstance(xcom, XCom):
                raise TypeError(
                    'Expected XCom; received {}'.format(xcom.__class__.__name__)
                )
            session.delete(xcom)
        session.commit()
class DagStat(Base):
    """Cached per-(dag_id, state) counts of DagRuns, refreshed lazily."""
    __tablename__ = "dag_stats"

    dag_id = Column(String(ID_LEN), primary_key=True)
    state = Column(String(50), primary_key=True)
    count = Column(Integer, default=0)
    # True when the cached count may be stale and needs recomputation.
    dirty = Column(Boolean, default=False)

    def __init__(self, dag_id, state, count=0, dirty=False):
        self.dag_id = dag_id
        self.state = state
        self.count = count
        self.dirty = dirty

    @staticmethod
    @provide_session
    def set_dirty(dag_id, session=None):
        """
        Mark every stat row of a dag as needing a recount.

        :param dag_id: the dag_id to mark dirty
        :param session: database session
        :return:
        """
        # Ensure a row exists for every dag state before flagging them.
        DagStat.create(dag_id=dag_id, session=session)
        try:
            stats = session.query(DagStat).filter(
                DagStat.dag_id == dag_id
            ).with_for_update().all()
            for stat in stats:
                stat.dirty = True
            session.commit()
        except Exception as e:
            # Best-effort: stats are advisory, so log and continue.
            session.rollback()
            logging.warning("Could not update dag stats for {}".format(dag_id))
            logging.exception(e)

    @staticmethod
    @provide_session
    def update(dag_ids=None, dirty_only=True, session=None):
        """
        Updates the stats for dirty/out-of-sync dags

        :param dag_ids: dag_ids to be updated
        :type dag_ids: list
        :param dirty_only: only updated for marked dirty, defaults to True
        :type dirty_only: bool
        :param session: db session to use
        :type session: Session
        """
        try:
            qry = session.query(DagStat)
            if dag_ids:
                qry = qry.filter(DagStat.dag_id.in_(set(dag_ids)))
            if dirty_only:
                qry = qry.filter(DagStat.dirty == True)
            # Row-lock the stats being recomputed so concurrent updaters
            # don't clobber each other.
            qry = qry.with_for_update().all()
            ids = set([dag_stat.dag_id for dag_stat in qry])
            # avoid querying with an empty IN clause
            if len(ids) == 0:
                session.commit()
                return
            dagstat_states = set(itertools.product(ids, State.dag_states))
            qry = (
                session.query(DagRun.dag_id, DagRun.state, func.count('*'))
                .filter(DagRun.dag_id.in_(ids))
                .group_by(DagRun.dag_id, DagRun.state)
            )
            counts = {(dag_id, state): count for dag_id, state, count in qry}
            # Write a fresh, clean (dirty=False) row for every state,
            # including states with a zero count.
            for dag_id, state in dagstat_states:
                count = 0
                if (dag_id, state) in counts:
                    count = counts[(dag_id, state)]
                session.merge(
                    DagStat(dag_id=dag_id, state=state, count=count, dirty=False)
                )
            session.commit()
        except Exception as e:
            session.rollback()
            logging.warning("Could not update dag stat table")
            logging.exception(e)

    @staticmethod
    @provide_session
    def create(dag_id, session=None):
        """
        Creates the missing states the stats table for the dag specified

        :param dag_id: dag id of the dag to create stats for
        :param session: database session
        :return:
        """
        # unfortunately sqlalchemy does not know upsert
        qry = session.query(DagStat).filter(DagStat.dag_id == dag_id).all()
        states = [dag_stat.state for dag_stat in qry]
        for state in State.dag_states:
            if state not in states:
                try:
                    session.merge(DagStat(dag_id=dag_id, state=state))
                    session.commit()
                except Exception as e:
                    # A concurrent creator may have won the race; ignore.
                    session.rollback()
                    logging.warning("Could not create stat record")
                    logging.exception(e)
class DagRun(Base):
    """
    DagRun describes an instance of a Dag. It can be created
    by the scheduler (for regular runs) or by an external trigger
    """
    __tablename__ = "dag_run"

    ID_PREFIX = 'scheduled__'
    ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
    # Dep context used by update_state's deadlock check; it ignores the
    # retry-period dep because a task waiting to retry is not deadlocked.
    DEADLOCK_CHECK_DEP_CONTEXT = DepContext(ignore_in_retry_period=True)

    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN))
    execution_date = Column(DateTime, default=func.now())
    start_date = Column(DateTime, default=func.now())
    end_date = Column(DateTime)
    _state = Column('state', String(50), default=State.RUNNING)
    run_id = Column(String(ID_LEN))
    external_trigger = Column(Boolean, default=True)
    conf = Column(PickleType)
    # Transient reference to the DAG object; not persisted.
    dag = None

    __table_args__ = (
        Index('dr_run_id', dag_id, run_id, unique=True),
    )

    def __repr__(self):
        return (
            '<DagRun {dag_id} @ {execution_date}: {run_id}, '
            'externally triggered: {external_trigger}>'
        ).format(
            dag_id=self.dag_id,
            execution_date=self.execution_date,
            run_id=self.run_id,
            external_trigger=self.external_trigger)

    def get_state(self):
        return self._state

    def set_state(self, state):
        # Only on an actual transition: flag the dag's stats as stale.
        if self._state != state:
            self._state = state
            if self.dag_id is not None:
                # something really weird goes on here: if you try to close the session
                # dag runs will end up detached
                session = settings.Session()
                DagStat.set_dirty(self.dag_id, session=session)

    @declared_attr
    def state(self):
        return synonym('_state',
                       descriptor=property(self.get_state, self.set_state))

    @classmethod
    def id_for_date(cls, date, prefix=ID_FORMAT_PREFIX):
        """Build a run_id from a date (isoformat truncated to seconds)."""
        return prefix.format(date.isoformat()[:19])

    @provide_session
    def refresh_from_db(self, session=None):
        """
        Reloads the current dagrun from the database

        :param session: database session
        """
        DR = DagRun
        exec_date = func.cast(self.execution_date, DateTime)
        dr = session.query(DR).filter(
            DR.dag_id == self.dag_id,
            func.cast(DR.execution_date, DateTime) == exec_date,
            DR.run_id == self.run_id
        ).one()
        self.id = dr.id
        self.state = dr.state

    @staticmethod
    @provide_session
    def find(dag_id=None, run_id=None, execution_date=None,
             state=None, external_trigger=None, no_backfills=False,
             session=None):
        """
        Returns a set of dag runs for the given search criteria.

        :param dag_id: the dag_id to find dag runs for
        :type dag_id: integer, list
        :param run_id: defines the the run id for this dag run
        :type run_id: string
        :param execution_date: the execution date
        :type execution_date: datetime
        :param state: the state of the dag run
        :type state: State
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param no_backfills: return no backfills (True), return all (False).
            Defaults to False
        :type no_backfills: bool
        :param session: database session
        :type session: Session
        """
        DR = DagRun
        qry = session.query(DR)
        if dag_id:
            qry = qry.filter(DR.dag_id == dag_id)
        if run_id:
            qry = qry.filter(DR.run_id == run_id)
        if execution_date:
            if isinstance(execution_date, list):
                qry = qry.filter(DR.execution_date.in_(execution_date))
            else:
                qry = qry.filter(DR.execution_date == execution_date)
        if state:
            qry = qry.filter(DR.state == state)
        if external_trigger is not None:
            qry = qry.filter(DR.external_trigger == external_trigger)
        if no_backfills:
            # in order to prevent a circular dependency
            from airflow.jobs import BackfillJob
            qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
        dr = qry.order_by(DR.execution_date).all()
        return dr

    @provide_session
    def get_task_instances(self, state=None, session=None):
        """
        Returns the task instances for this dag run
        """
        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
        )
        if state:
            if isinstance(state, six.string_types):
                tis = tis.filter(TI.state == state)
            else:
                # this is required to deal with NULL values
                if None in state:
                    tis = tis.filter(
                        or_(TI.state.in_(state),
                            TI.state.is_(None))
                    )
                else:
                    tis = tis.filter(TI.state.in_(state))
        # For partial dags, only the tasks that belong to the subset.
        if self.dag and self.dag.partial:
            tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
        return tis.all()

    @provide_session
    def get_task_instance(self, task_id, session=None):
        """
        Returns the task instance specified by task_id for this dag run

        :param task_id: the task id
        """
        TI = TaskInstance
        ti = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
            TI.task_id == task_id
        ).first()
        return ti

    def get_dag(self):
        """
        Returns the Dag associated with this DagRun.

        :return: DAG
        :raises AirflowException: when self.dag was never assigned
        """
        if not self.dag:
            raise AirflowException("The DAG (.dag) for {} needs to be set"
                                   .format(self))
        return self.dag

    @provide_session
    def get_previous_dagrun(self, session=None):
        """The previous DagRun, if there is one"""
        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date < self.execution_date
        ).order_by(
            DagRun.execution_date.desc()
        ).first()

    @provide_session
    def get_previous_scheduled_dagrun(self, session=None):
        """The previous, SCHEDULED DagRun, if there is one"""
        dag = self.get_dag()
        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date == dag.previous_schedule(self.execution_date)
        ).first()

    @provide_session
    def update_state(self, session=None):
        """
        Determines the overall state of the DagRun based on the state
        of its TaskInstances.

        :return: State
        """
        dag = self.get_dag()
        tis = self.get_task_instances(session=session)
        logging.info("Updating state for {} considering {} task(s)"
                     .format(self, len(tis)))
        for ti in list(tis):
            # skip in db?
            if ti.state == State.REMOVED:
                tis.remove(ti)
            else:
                ti.task = dag.get_task(ti.task_id)
        # pre-calculate
        # db is faster
        start_dttm = datetime.now()
        unfinished_tasks = self.get_task_instances(
            state=State.unfinished(),
            session=session
        )
        none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
        # small speed up
        # NOTE: no_dependencies_met is only bound inside this branch; the
        # deadlock elif below re-tests the same condition, so it is never
        # read unbound.
        if unfinished_tasks and none_depends_on_past:
            # todo: this can actually get pretty slow: one task costs between 0.01-015s
            no_dependencies_met = all(
                # Use a special dependency context that ignores task's up for retry
                # dependency, since a task that is up for retry is not necessarily
                # deadlocked.
                not t.are_dependencies_met(dep_context=self.DEADLOCK_CHECK_DEP_CONTEXT,
                                           session=session)
                for t in unfinished_tasks)
        duration = (datetime.now() - start_dttm).total_seconds() * 1000
        Stats.timing("dagrun.dependency-check.{}.{}".
                     format(self.dag_id, self.execution_date), duration)
        # future: remove the check on adhoc tasks (=active_tasks)
        if len(tis) == len(dag.active_tasks):
            root_ids = [t.task_id for t in dag.roots]
            roots = [t for t in tis if t.task_id in root_ids]
            # if all roots finished and at least on failed, the run failed
            if (not unfinished_tasks and
                    any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
                logging.info('Marking run {} failed'.format(self))
                self.state = State.FAILED
            # if all roots succeeded and no unfinished tasks, the run succeeded
            elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
                                              for r in roots):
                logging.info('Marking run {} successful'.format(self))
                self.state = State.SUCCESS
            # if *all tasks* are deadlocked, the run failed
            elif unfinished_tasks and none_depends_on_past and no_dependencies_met:
                logging.info(
                    'Deadlock; marking run {} failed'.format(self))
                self.state = State.FAILED
            # finally, if the roots aren't done, the dag is still running
            else:
                self.state = State.RUNNING
        # todo: determine we want to use with_for_update to make sure to lock the run
        session.merge(self)
        session.commit()
        return self.state

    @provide_session
    def verify_integrity(self, session=None):
        """
        Verifies the DagRun by checking for removed tasks or tasks that are not in the
        database yet. It will set state to removed or add the task if required.
        """
        dag = self.get_dag()
        tis = self.get_task_instances(session=session)
        # check for removed tasks
        task_ids = []
        for ti in tis:
            task_ids.append(ti.task_id)
            try:
                dag.get_task(ti.task_id)
            except AirflowException:
                # The task vanished from the DAG definition.
                if self.state is not State.RUNNING and not dag.partial:
                    ti.state = State.REMOVED
        # check for missing tasks
        for task in dag.tasks:
            if task.adhoc:
                continue
            if task.task_id not in task_ids:
                ti = TaskInstance(task, self.execution_date)
                session.add(ti)
        session.commit()

    @staticmethod
    def get_run(session, dag_id, execution_date):
        """
        Fetch the scheduler-created (non externally triggered) run.

        :param dag_id: DAG ID
        :type dag_id: unicode
        :param execution_date: execution date
        :type execution_date: datetime
        :return: DagRun corresponding to the given dag_id and execution date
            if one exists. None otherwise.
        :rtype: DagRun
        """
        qry = session.query(DagRun).filter(
            DagRun.dag_id == dag_id,
            DagRun.external_trigger == False,
            DagRun.execution_date == execution_date,
        )
        return qry.first()

    @property
    def is_backfill(self):
        # Backfill runs are identified purely by their run_id prefix.
        from airflow.jobs import BackfillJob
        return self.run_id.startswith(BackfillJob.ID_PREFIX)

    @classmethod
    @provide_session
    def get_latest_runs(cls, session):
        """Returns the latest running DagRun for each DAG. """
        subquery = (
            session
            .query(
                cls.dag_id,
                func.max(cls.execution_date).label('execution_date'))
            .filter(cls.state == State.RUNNING)
            .group_by(cls.dag_id)
            .subquery()
        )
        dagruns = (
            session
            .query(cls)
            .join(subquery,
                  and_(cls.dag_id == subquery.c.dag_id,
                       cls.execution_date == subquery.c.execution_date))
            .all()
        )
        return dagruns
class Pool(Base):
    """A named pool of worker slots that task instances can occupy."""
    __tablename__ = "slot_pool"

    id = Column(Integer, primary_key=True)
    pool = Column(String(50), unique=True)
    slots = Column(Integer, default=0)
    description = Column(Text)

    def __repr__(self):
        return self.pool

    def to_json(self):
        """Serialize the pool definition as a plain dict."""
        return dict(
            id=self.id,
            pool=self.pool,
            slots=self.slots,
            description=self.description,
        )

    @provide_session
    def used_slots(self, session):
        """Return how many slots are occupied by RUNNING task instances."""
        running_query = session.query(TaskInstance).filter(
            TaskInstance.pool == self.pool,
            TaskInstance.state == State.RUNNING)
        return running_query.count()

    @provide_session
    def queued_slots(self, session):
        """Return how many slots are claimed by QUEUED task instances."""
        queued_query = session.query(TaskInstance).filter(
            TaskInstance.pool == self.pool,
            TaskInstance.state == State.QUEUED)
        return queued_query.count()

    @provide_session
    def open_slots(self, session):
        """Return the number of slots still available right now."""
        occupied = (self.used_slots(session=session) +
                    self.queued_slots(session=session))
        return self.slots - occupied
class SlaMiss(Base):
    """
    Model that stores a history of the SLA that have been missed.
    It is used to keep track of SLA failures over time and to avoid double
    triggering alert emails.
    """
    __tablename__ = "sla_miss"

    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(DateTime, primary_key=True)
    # True once the alert email for this miss has gone out.
    email_sent = Column(Boolean, default=False)
    timestamp = Column(DateTime)
    description = Column(Text)
    notification_sent = Column(Boolean, default=False)

    def __repr__(self):
        return str((
            self.dag_id, self.task_id, self.execution_date.isoformat()))
class ImportError(Base):
    """
    Records a DAG-file import failure so it can be surfaced in the UI.

    NOTE(review): the class name shadows the builtin ImportError inside
    this module; renaming would break external references, so it is
    left as-is.
    """
    __tablename__ = "import_error"
    id = Column(Integer, primary_key=True)
    timestamp = Column(DateTime)
    filename = Column(String(1024))
    stacktrace = Column(Text)
|
#!/usr/bin/env python
import argparse
import requests
import time
import libcloud.security
from threepio import logger
from authentication.protocol.ldap import is_atmo_user, get_members
from core.models import AtmosphereUser as User
from core.models import Provider, Quota
from service.driver import get_account_driver
libcloud.security.VERIFY_SSL_CERT = False
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--provider-list", action="store_true",
help="List of provider names and IDs")
parser.add_argument("--quota-list", action="store_true",
help="List of provider names and IDs")
parser.add_argument("--provider-id", type=int,
help="Atmosphere provider ID"
" to use when importing users.")
parser.add_argument("--quota-id",
help="Atmosphere Quota ID to assign (Optional)")
parser.add_argument("--groups",
help="LDAP groups to import. (comma separated)")
parser.add_argument("--dry-run", action="store_true",
help="A 'dry-run' so you know what will happen,"
" before it happens")
parser.add_argument("--users",
help="LDAP usernames to import. (comma separated)")
parser.add_argument("--admin", action="store_true",
help="ALL Users addded are treated as admin and staff "
"users. They also receive the maximum quota.")
args = parser.parse_args()
make_admins = args.admin
dry_run = args.dry_run
users = None
quota = None
if args.provider_list:
print "ID\tName"
for p in Provider.objects.all().order_by('id'):
print "%d\t%s" % (p.id, p.location)
return
elif args.quota_list:
print "ID\tSpecs"
for q in Quota.objects.all().order_by('id'):
print "%s\t%s" % (q.id, q)
return
#Debugging args
if dry_run:
print "Dry run initialized.."
#Optional args
if args.quota_id:
quota = Quota.objects.get(id=args.quota_id)
if not args.provider_id:
print "ERROR: provider-id is required. To get a list of providers use"\
" --provider-list"
provider = Provider.objects.get(id=args.provider_id)
print "Provider Selected:%s" % provider
acct_driver = get_account_driver(provider)
groups = args.groups.split(",") if args.groups else []
total_added = process_groups(acct_driver, groups, quota, make_admins)
users = args.users.split(",") if args.users else []
total_added += process_users(acct_driver, users, quota, make_admins)
print "Processing complete. %d users processed." % total_added
def process_groups(acct_driver, groups, quota=None, make_admin=False):
total_added = 0
for groupname in groups:
group_add = 0
users = get_members(groupname)
print "Total users in group %s:%s" % (groupname, len(users))
group_add = process_users(acct_driver, users, quota, make_admin)
total_added += group_add
return total_added
def process_users(acct_driver, users, quota=None, admin_user=False):
total_added = 0
for user in users:
success = process_user(acct_driver, user, quota=quota,
admin_user=admin_user)
if success:
total_added += 1
print "Total users added:%s" % (total_added)
return total_added
def process_user(acct_driver, username, quota=None, admin_user=False):
try:
if not atmo_user(username):
print "%s is not in the LDAP atmosphere group (atmo-user)." %\
(username)
return False
if not dry_run:
acct_driver.create_account(username,
quota=quota,
# Admin users get maximum quota
max_quota=admin_user)
if admin_user:
if not dry_run:
make_admin(username)
print "%s added as admin." % (username)
else:
print "%s added." % (username)
return True
except Exception as e:
print "Problem adding %s." % (username)
print e.message
return False
def make_admin(user):
    """Grant Django superuser and staff status to the given username."""
    account = User.objects.get(username=user)
    account.is_superuser = True
    account.is_staff = True
    account.save()
if __name__ == "__main__":
main()
Fix typo: call is_atmo_user(), not atmo_user().
modified: scripts/import_users_from_ldap.py
#!/usr/bin/env python
import argparse
import requests
import time
import libcloud.security
from threepio import logger
from authentication.protocol.ldap import is_atmo_user, get_members
from core.models import AtmosphereUser as User
from core.models import Provider, Quota
from service.driver import get_account_driver
libcloud.security.VERIFY_SSL_CERT = False
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--provider-list", action="store_true",
help="List of provider names and IDs")
parser.add_argument("--quota-list", action="store_true",
help="List of provider names and IDs")
parser.add_argument("--provider-id", type=int,
help="Atmosphere provider ID"
" to use when importing users.")
parser.add_argument("--quota-id",
help="Atmosphere Quota ID to assign (Optional)")
parser.add_argument("--groups",
help="LDAP groups to import. (comma separated)")
parser.add_argument("--dry-run", action="store_true",
help="A 'dry-run' so you know what will happen,"
" before it happens")
parser.add_argument("--users",
help="LDAP usernames to import. (comma separated)")
parser.add_argument("--admin", action="store_true",
help="ALL Users addded are treated as admin and staff "
"users. They also receive the maximum quota.")
args = parser.parse_args()
make_admins = args.admin
dry_run = args.dry_run
users = None
quota = None
if args.provider_list:
print "ID\tName"
for p in Provider.objects.all().order_by('id'):
print "%d\t%s" % (p.id, p.location)
return
elif args.quota_list:
print "ID\tSpecs"
for q in Quota.objects.all().order_by('id'):
print "%s\t%s" % (q.id, q)
return
#Debugging args
if dry_run:
print "Dry run initialized.."
#Optional args
if args.quota_id:
quota = Quota.objects.get(id=args.quota_id)
if not args.provider_id:
print "ERROR: provider-id is required. To get a list of providers use"\
" --provider-list"
provider = Provider.objects.get(id=args.provider_id)
print "Provider Selected:%s" % provider
acct_driver = get_account_driver(provider)
groups = args.groups.split(",") if args.groups else []
total_added = process_groups(acct_driver, groups, quota, make_admins)
users = args.users.split(",") if args.users else []
total_added += process_users(acct_driver, users, quota, make_admins)
print "Processing complete. %d users processed." % total_added
def process_groups(acct_driver, groups, quota=None, make_admin=False):
total_added = 0
for groupname in groups:
group_add = 0
users = get_members(groupname)
print "Total users in group %s:%s" % (groupname, len(users))
group_add = process_users(acct_driver, users, quota, make_admin)
total_added += group_add
return total_added
def process_users(acct_driver, users, quota=None, admin_user=False):
total_added = 0
for user in users:
success = process_user(acct_driver, user, quota=quota,
admin_user=admin_user)
if success:
total_added += 1
print "Total users added:%s" % (total_added)
return total_added
def process_user(acct_driver, username, quota=None, admin_user=False):
    """
    Create an account for one LDAP username; return True on success.
    """
    try:
        if not is_atmo_user(username):
            print "%s is not in the LDAP atmosphere group (atmo-user)." %\
                (username)
            return False
        # NOTE(review): dry_run is a local inside main() and is never
        # defined at module scope, so this lookup raises NameError at
        # runtime (swallowed by the except below) -- confirm and pass
        # dry_run through as a parameter instead.
        if not dry_run:
            acct_driver.create_account(username,
                                       quota=quota,
                                       # Admin users get maximum quota
                                       max_quota=admin_user)
        if admin_user:
            if not dry_run:
                make_admin(username)
            print "%s added as admin." % (username)
        else:
            print "%s added." % (username)
        return True
    except Exception as e:
        # Broad on purpose (best-effort batch import), but note it also
        # hides the dry_run NameError above.
        print "Problem adding %s." % (username)
        print e.message
        return False
def make_admin(user):
    """Grant Django superuser and staff status to the given username."""
    account = User.objects.get(username=user)
    account.is_superuser = True
    account.is_staff = True
    account.save()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from os import chdir, environ
from os.path import dirname, abspath
from collections import Counter
from datetime import datetime, timedelta
from random import choice, randint
from re import findall
from json import loads
from pytz import utc
from flask import Flask, render_template, make_response, send_from_directory, request, redirect, jsonify, abort
from flask_cache import Cache # Caching
from flask_sslify import SSLify # Ensure HTTPS
from flask_wtf.csrf import CSRFProtect # CSRF
from whitenoise import WhiteNoise # Easy static serve
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from cachecontrol import CacheControl
# Flip to True for local development (enables non-secure cookies below).
debug = False

app = Flask(__name__, template_folder='templates')
# NOTE(review): when SECRET_KEY is absent from the environment a random
# key is generated per process, which invalidates all sessions and CSRF
# tokens on every restart -- confirm SECRET_KEY is always set in prod.
app.config['SECRET_KEY'] = environ.get("SECRET_KEY", "".join(choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)") for _ in range(50)))
app.wsgi_app = WhiteNoise(app.wsgi_app, root="static/")
if not debug:
    app.config['REMEMBER_COOKIE_SECURE'] = True
    app.config['SESSION_COOKIE_SECURE'] = True
# Redis-backed response cache, CSRF protection, HTTPS redirect.
cache = Cache(app, config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_URL': environ.get("REDIS_URL")})
csrf = CSRFProtect(app)
sslify = SSLify(app)
'''
Required functionality
'''
def timeDate(typeDate, offset):
    """
    Return one component (as str) of the next relevant school day.

    The reference moment is UTC shifted by ``offset`` hours.  After
    15:00 the next day is used, Saturdays after 15:00 skip ahead two
    days to Monday, and Sundays always roll over to Monday.

    :param typeDate: 'day', 'month' or 'year'
    :param offset: timezone offset from UTC, in hours
    :return: the requested component as str, or None for any other
        typeDate value (unchanged behaviour)
    """
    # BUG FIX: the original re-evaluated datetime.now() for every check,
    # so the weekday/hour tests and the final value could disagree when
    # the clock ticked over between calls; evaluate it exactly once.
    now = datetime.now(tz=utc) + timedelta(hours=offset)
    if now.weekday() == 6:
        # Sunday: always show Monday.
        time = now + timedelta(days=1)
    elif now.weekday() + 1 == 6:
        # Saturday: before 15:00 show today, after that skip to Monday.
        if now.hour < 15:
            time = now
        else:
            time = now + timedelta(days=2)
    else:
        # Weekday: before 15:00 show today, otherwise tomorrow.
        if now.hour < 15:
            time = now
        else:
            time = now + timedelta(days=1)
    if typeDate == 'day':
        return str(time.day)
    elif typeDate == 'month':
        return str(time.month)
    elif typeDate == 'year':
        return str(time.year)
def coloring(mood=None):
    """Map a mood/mark label to the CSS color used in the templates."""
    palette = {
        "Good": "teal",
        "Average": "#FF5722",
        "О": "#FF5722",
        "Bad": "red",
        "Н": "red",
        "Б": "#01579B",
        "П": "#01579B",
    }
    return palette.get(mood, "#212121")
def kaomoji(mood=None):
    """Return the kaomoji face displayed next to a mood label."""
    faces = {
        "Good": "( ˙꒳˙ )",
        "Average": "(--_--)",
        "Bad": "(・・ )",
    }
    return faces.get(mood, "ヽ(ー_ー )ノ")
'''
Template handling
'''
@app.route("/", methods=['GET'])
def index():
    """Render the landing page with hardening headers applied."""
    security_headers = {
        'X-Content-Type-Options': 'nosniff',
        'X-Frame-Options': 'DENY',
        'X-XSS-Protection': '1; mode=block',
        'Strict-Transport-Security': 'max-age=31536000',
    }
    response = make_response(render_template('index.html'))
    for header, value in security_headers.items():
        response.headers[header] = value
    # Expire any stale timetable offset cookie.
    response.set_cookie('Offset', value='', max_age=0, expires=0)
    return response
@app.route("/main", methods=['GET'])
def main():
    """Render the logged-in landing page for students and parents."""
    if 'AccessToken' in request.cookies:
        # Retrying, cache-aware HTTP session for the Dnevnik.ru API.
        s = CacheControl(Session())
        s.mount('http://', HTTPAdapter(max_retries=5))
        s.mount('https://', HTTPAdapter(max_retries=5))
        offline = False
        try:
            access_token = request.cookies.get('AccessToken')
            response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
            user_data = loads(response.text)
        except ConnectionError:
            # Upstream API unreachable: fall back to placeholder data.
            offline = True
        if offline:
            user = "товарищ Тестер"
        else:
            user = user_data['firstName']
        if request.cookies.get("AccountType") == 'Student':
            response = make_response(render_template('index_logged_in.html', user=user))
        elif request.cookies.get("AccountType") == 'Parent':
            if offline:
                opts = [{"Профилактические работы": str(randint(0, 2000))}]
            else:
                # One selectable option per child on the parent account.
                options = user_data['children']
                opts = []
                for option in options:
                    opts.append({f"{option['firstName']} {option['lastName']}": option['personId']})
            response = make_response(render_template('index_logged_in.html', opts=opts, user=user))
        else:
            response = make_response(render_template('index_logged_in.html'))
    else:
        # No access token: send the visitor back to the login page.
        response = make_response(redirect("/"))
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers['X-Frame-Options'] = 'DENY'
    response.headers['X-XSS-Protection'] = '1; mode=block'
    response.headers['Strict-Transport-Security'] = 'max-age=31536000'
    response.set_cookie('Offset', value='', max_age=0, expires=0)
    return response
@app.route("/stats", methods=['POST'])
def stats():
if 'AccessToken' in request.cookies:
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
termPeriod = request.form.get('term', '1')
try:
access_token = request.cookies.get('AccessToken')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
user_data = loads(response.text)
except ConnectionError:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Кажется, Дневник.Ру ушел в оффлайн :> <br>'
html_out += 'Если вы сумели успешно запросить данные ранее, то сделайте длинное нажатие по кнопке запроса.'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
try:
if request.cookies.get('AccountType') == 'Student':
response = s.get(f"https://api.dnevnik.ru/mobile/v2/allMarks?personId={user_data['personId']}&groupId={user_data['groupIds'][0]}&access_token={access_token}")
elif request.cookies.get('AccountType') == 'Parent':
childId = request.form.get('child', '')
for child in user_data['children']:
if childId == child['personId']:
response = s.get(f"https://api.dnevnik.ru/mobile/v2/allMarks?personId={childId}&groupId={child['groupIds'][0]}&access_token={access_token}")
marks_data = loads(response.text)["AllMarks"]
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
for markData in marks_data:
if termPeriod in markData["Period"]["Text"]:
for subjectData in markData["SubjectMarks"]:
subjectId = subjectData["SubjectId"]
markCollection = []
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div><i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;">format_list_bulleted</i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
html_out += f'<h5 style="font-weight:600">{subjectData["Name"]}</h5>'
for mark in subjectData["Marks"]:
markCollection.append((mark["Values"][0]["Value"], mark["Values"][0]["Mood"]))
markCollectionCounted = (*Counter(sorted(markCollection)).items(),)
markSum = 0
markTotal = len(markCollection)
for markTuple in markCollectionCounted:
try:
html_out += f'<h8 style="color:{coloring(markTuple[0][1])};">{markTuple[0][0]}: {markTuple[1]}</h8><br>'
markSum += int(markTuple[0][0]) * int(markTuple[1])
except (KeyError, IndexError):
pass
try:
html_out += f'<h8 style="color:{coloring()};">Среднее значение: {round(markSum / markTotal, 2)}</h8><br>'
except ZeroDivisionError:
html_out += f'<h8 style="color:{coloring()};">Среднее значение: n/a</h8><br>'
try:
html_out += f'<h8 style="color:{coloring(subjectData["FinalMark"]["Values"][0]["Mood"])};">Итоговое значение: {subjectData["FinalMark"]["Values"][0]["Value"]}</h8><br>'
except (KeyError, IndexError, TypeError):
pass
html_out += '<div style="display:block; height:5px; clear:both;"></div>'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
except ConnectionError:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Кажется, Дневник.Ру ушел в оффлайн :> <br>'
html_out += 'Если вы сумели успешно запросить данные ранее, то сделайте длинное нажатие по кнопке запроса.'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
else:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Залогиньтесь ¯\_(ツ)_/¯</h5>'
html_out += 'Вы явно такого не ожидали, не правда ли?'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
@app.route("/dnevnik", methods=['POST'])
def dnevnik():
if 'AccessToken' in request.cookies:
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
timeMonth = request.form.get('month', '')
timeDay = request.form.get('day', '')
offset = int(request.cookies.get('Offset'))
try:
access_token = request.cookies.get('AccessToken')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
user_data = loads(response.text)
if timeDay is '':
day = timeDate('day', offset=offset, )
else:
day = timeDay
if timeMonth is '':
month = timeDate('month', offset)
else:
month = timeMonth
year = timeDate('year', offset)
if request.cookies.get('AccountType') == 'Student':
response = s.get(f"https://api.dnevnik.ru/mobile/v2/schedule?startDate={year}-{month}-{day}&endDate={year}-{month}-{day}&personId={user_data['personId']}&groupId={user_data['groupIds'][0]}&access_token={access_token}")
elif request.cookies.get('AccountType') == 'Parent':
childId = request.form.get('child', '')
for child in user_data['children']:
if childId == child['personId']:
response = s.get(f"https://api.dnevnik.ru/mobile/v2/schedule?startDate={year}-{month}-{day}&endDate={year}-{month}-{day}&personId={childId}&groupId={child['groupIds'][0]}&access_token={access_token}")
try:
lesson_data = loads(response.text)['Days'][0]['Schedule']
except (KeyError, IndexError):
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Уроков нет :>'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
if lesson_data == []:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Уроков нет :>'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
for lesson in lesson_data:
if lesson['Status'] == 'NotInitialised':
continue
else:
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div><i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;">format_list_bulleted</i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
html_out += f'<h5 style="font-weight:600">{lesson["Subject"]["Name"]}</h5>'
for mark in lesson['Marks']:
if mark:
if mark["MarkType"] == 'LogEntry':
html_out += f'<h8 style="color:{coloring(mark["Values"][0]["Value"])};">Присутствие: {mark["MarkTypeText"]}.</h8><br>'
elif mark["MarkType"] == "Mark":
if len(mark['Values']) > 1:
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
for markValue in mark['Values']:
html_out += f'<h8 style="color:{coloring(markValue["Mood"])};">Оценка: {markValue["Value"]} ({mark["MarkTypeText"]}) {kaomoji(markValue["Mood"])}</h8><br>'
if len(mark['Values']) > 1:
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
try:
html_out += f'<h8 style="color:{coloring()};">Урок: {lesson["Theme"]} ({lesson["ImportantWorks"][0]["WorkType"]})</h8><br>'
except (KeyError, IndexError):
try:
html_out += f'<h8 style="color:{coloring()};">Урок: {lesson["Theme"]}</h8><br>'
except (KeyError, IndexError):
pass
if lesson["HomeworksText"] != "":
hw = lesson["HomeworksText"]
links = list(set(findall(r"http[s]?:\/\/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", hw)))
for link in links:
hw = hw.replace(link, f'<a href="{link}" target="_blank">[ссылка]</a>')
html_out += f'<h8 style="color:{coloring()};">ДЗ: {hw}</h8><br>'
html_out += '<div style="display:block; height:5px; clear:both;"></div>'
html_out += '</div>'
except ConnectionError:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'API Дневник.Ру в оффлайне :> <br>'
html_out += 'Если вы сумели успешно запросить данные ранее, то сделайте длинное нажатие по кнопке запроса.'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
else:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Залогиньтесь ¯\_(ツ)_/¯</h5>'
html_out += 'Вы явно такого не ожидали, не правда ли?'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
@app.route("/login", methods=['GET'])
def log_in():
accounttype = None
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
try:
access_token = request.cookies.get('AccessToken_Temp')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
try:
s.cookies.get_dict()['dnevnik_sst']
except KeyError:
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
return response
user_data = loads(response.text)
except ConnectionError:
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
return response
try:
type_block = user_data['roles']
except KeyError:
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
return response
if "EduStudent" in type_block:
accounttype = "Student"
elif "EduParent" in type_block:
accounttype = "Parent"
else:
return jsonify("Пора задуматься о том, куда катится ваша жизнь.")
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
response.set_cookie('AccountType', value=accounttype, max_age=2592000, expires=2592000)
response.set_cookie('AccessToken', value=access_token, max_age=2592000, expires=2592000)
return response
@app.route("/apply", methods=['POST'])
def apply():
color = request.form.get('color', '')
html_out = ""
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
html_out += '<p style="text-align:center; color:green;">Смена цветовой схемы успешна ^^</p>'
response = make_response(jsonify(html_out))
response.set_cookie('Theme', value=color, max_age=2592000, expires=2592000)
return response
@app.route("/logout", methods=['GET'])
def log_out():
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
s.headers.update({'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ru-RU,en-US;q=0.8,ru;q=0.6,en;q=0.4'})
offline = False
try:
s.get('https://dnevnik.ru/')
except ConnectionError:
offline = True
response = make_response(redirect('/'))
if 'AccessToken' in request.cookies and not offline:
response.set_cookie('AccessToken', value='', max_age=0, expires=0)
response.set_cookie('AccountType', value='', max_age=0, expires=0)
response.set_cookie('Offset', value='', max_age=0, expires=0)
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-Frame-Options'] = 'DENY'
response.headers['X-XSS-Protection'] = '1; mode=block'
response.headers['Strict-Transport-Security'] = 'max-age=31536000'
return response
@app.route('/sw.js', methods=['GET'])
def serviceworker():
    """Serve the service-worker script from the sw/ directory at the site root."""
    sw_response = send_from_directory('sw', 'sw.js')
    return sw_response
@app.route('/sw/<path:path>', methods=['GET'])
def serve_sw(path):
    """Serve assets under sw/, except sw.js itself (that one lives at /sw.js)."""
    if path == 'sw.js':
        abort(404)
    return send_from_directory('sw', path)
if __name__ == "__main__":
chdir(dirname(abspath(__file__)))
app.run(debug=debug, use_reloader=True)
# Fixes: corrected version of the same application follows.
# -*- coding: utf-8 -*-
from os import chdir, environ
from os.path import dirname, abspath
from collections import Counter
from datetime import datetime, timedelta
from random import choice, randint
from re import findall, match
from json import loads
from secrets import choice as secrets_choice
from pytz import utc
from flask import Flask, render_template, make_response, send_from_directory, request, redirect, jsonify, abort
from flask_cache import Cache  # Caching
from flask_sslify import SSLify  # Ensure HTTPS
from flask_wtf.csrf import CSRFProtect  # CSRF
from whitenoise import WhiteNoise  # Easy static serve
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from cachecontrol import CacheControl

debug = False

app = Flask(__name__, template_folder='templates')
# Fall back to a random key only when SECRET_KEY is unset. Use the
# cryptographically secure `secrets` module for it — `random.choice` is
# predictable and unsuitable for session-signing keys.
app.config['SECRET_KEY'] = environ.get("SECRET_KEY", "".join(secrets_choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)") for _ in range(50)))
app.wsgi_app = WhiteNoise(app.wsgi_app, root="static/")
if not debug:
    # Production only: require HTTPS for the session/remember cookies.
    app.config['REMEMBER_COOKIE_SECURE'] = True
    app.config['SESSION_COOKIE_SECURE'] = True
cache = Cache(app, config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_URL': environ.get("REDIS_URL")})
csrf = CSRFProtect(app)
sslify = SSLify(app)
'''
Required functionality
'''
def timeDate(typeDate, offset):
    """Return the day/month/year (as str) of the school day to display.

    The client's clock is approximated as UTC plus `offset` hours. After 15:00
    the next school day is shown; Saturday evening skips to Monday and Sunday
    always shows Monday.

    :param typeDate: 'day', 'month' or 'year' — which component to return.
    :param offset: client UTC offset in whole hours.
    :return: the requested component as a string, or None for an unknown
        selector (preserves the original implicit-None behaviour).
    """
    # Compute "now" exactly once: the original re-evaluated datetime.now() for
    # every weekday/hour check, which could straddle an hour/day boundary and
    # mix readings from different instants.
    now = datetime.now(tz=utc) + timedelta(hours=offset)
    if now.weekday() == 6:  # Sunday -> show Monday
        time = now + timedelta(days=1)
    elif now.weekday() == 5:  # Saturday -> today before 15:00, else Monday
        time = now if now.hour < 15 else now + timedelta(days=2)
    else:  # Mon-Fri -> today before 15:00, else tomorrow
        time = now if now.hour < 15 else now + timedelta(days=1)
    if typeDate == 'day':
        return str(time.day)
    elif typeDate == 'month':
        return str(time.month)
    elif typeDate == 'year':
        return str(time.year)
    return None
def coloring(mood=None):
    """Map a mark mood or attendance letter to its CSS colour.

    "Good" is teal; "Average"/"О" orange; "Bad"/"Н" red; "Б"/"П" blue;
    anything else (including no argument) a neutral dark grey.
    """
    if mood == "Good":
        return "teal"
    if mood in ("Average", "О"):
        return "#FF5722"
    if mood in ("Bad", "Н"):
        return "red"
    if mood in ("Б", "П"):
        return "#01579B"
    return "#212121"
def kaomoji(mood=None):
    """Return a kaomoji face matching the mark mood (shrug for anything else)."""
    faces = {
        "Good": "( ˙꒳˙ )",
        "Average": "(--_--)",
        "Bad": "(・・ )",
    }
    return faces.get(mood, "ヽ(ー_ー )ノ")
'''
Template handling
'''
@app.route("/", methods=['GET'])
def index():
response = make_response(render_template('index.html'))
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-Frame-Options'] = 'DENY'
response.headers['X-XSS-Protection'] = '1; mode=block'
response.headers['Strict-Transport-Security'] = 'max-age=31536000'
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
@app.route("/main", methods=['GET'])
def main():
if 'AccessToken' in request.cookies:
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
offline = False
try:
access_token = request.cookies.get('AccessToken')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
user_data = loads(response.text)
except ConnectionError:
offline = True
if offline:
user = "товарищ Тестер"
else:
user = user_data['firstName']
if request.cookies.get("AccountType") == 'Student':
response = make_response(render_template('index_logged_in.html', user=user))
elif request.cookies.get("AccountType") == 'Parent':
if offline:
opts = [{"Профилактические работы": str(randint(0, 2000))}]
else:
options = user_data['children']
opts = []
for option in options:
opts.append({f"{option['firstName']} {option['lastName']}": option['personId']})
response = make_response(render_template('index_logged_in.html', opts=opts, user=user))
else:
response = make_response(render_template('index_logged_in.html'))
else:
response = make_response(redirect("/"))
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-Frame-Options'] = 'DENY'
response.headers['X-XSS-Protection'] = '1; mode=block'
response.headers['Strict-Transport-Security'] = 'max-age=31536000'
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
@app.route("/stats", methods=['POST'])
def stats():
if 'AccessToken' in request.cookies:
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
termPeriod = request.form.get('term', '1')
try:
access_token = request.cookies.get('AccessToken')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
user_data = loads(response.text)
except ConnectionError:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Кажется, Дневник.Ру ушел в оффлайн :> <br>'
html_out += 'Если вы сумели успешно запросить данные ранее, то сделайте длинное нажатие по кнопке запроса.'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
try:
if request.cookies.get('AccountType') == 'Student':
response = s.get(f"https://api.dnevnik.ru/mobile/v2/allMarks?personId={user_data['personId']}&groupId={user_data['groupIds'][0]}&access_token={access_token}")
elif request.cookies.get('AccountType') == 'Parent':
childId = request.form.get('child', '')
for child in user_data['children']:
if childId == child['personId']:
response = s.get(f"https://api.dnevnik.ru/mobile/v2/allMarks?personId={childId}&groupId={child['groupIds'][0]}&access_token={access_token}")
marks_data = loads(response.text)["AllMarks"]
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
for markData in marks_data:
if termPeriod in markData["Period"]["Text"]:
for subjectData in markData["SubjectMarks"]:
subjectId = subjectData["SubjectId"]
markCollection = []
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div><i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;">format_list_bulleted</i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
html_out += f'<h5 style="font-weight:600">{subjectData["Name"]}</h5>'
for mark in subjectData["Marks"]:
markCollection.append((mark["Values"][0]["Value"], mark["Values"][0]["Mood"]))
markCollectionCounted = (*Counter(sorted(markCollection)).items(),)
markSum = 0
markTotal = len(markCollection)
for markTuple in markCollectionCounted:
try:
html_out += f'<h8 style="color:{coloring(markTuple[0][1])};">{markTuple[0][0]}: {markTuple[1]}</h8><br>'
markSum += int(markTuple[0][0]) * int(markTuple[1])
except (KeyError, IndexError):
pass
try:
html_out += f'<h8 style="color:{coloring()};">Среднее значение: {round(markSum / markTotal, 2)}</h8><br>'
except ZeroDivisionError:
html_out += f'<h8 style="color:{coloring()};">Среднее значение: n/a</h8><br>'
try:
html_out += f'<h8 style="color:{coloring(subjectData["FinalMark"]["Values"][0]["Mood"])};">Итоговое значение: {subjectData["FinalMark"]["Values"][0]["Value"]}</h8><br>'
except (KeyError, IndexError, TypeError):
pass
html_out += '<div style="display:block; height:5px; clear:both;"></div>'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
except ConnectionError:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Кажется, Дневник.Ру ушел в оффлайн :> <br>'
html_out += 'Если вы сумели успешно запросить данные ранее, то сделайте длинное нажатие по кнопке запроса.'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
else:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Статистика</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Залогиньтесь ¯\_(ツ)_/¯</h5>'
html_out += 'Вы явно такого не ожидали, не правда ли?'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
@app.route("/dnevnik", methods=['POST'])
def dnevnik():
if 'AccessToken' in request.cookies:
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
timeMonth = request.form.get('month', '')
timeDay = request.form.get('day', '')
offset = int(request.cookies.get('Offset'))
try:
access_token = request.cookies.get('AccessToken')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
user_data = loads(response.text)
if timeDay is '':
day = str(timeDate('day', offset=offset))
else:
day = timeDay
if timeMonth is '':
month = str(timeDate('month', offset=offset))
else:
month = timeMonth
year = str(timeDate('year', offset=offset))
day = "0" + day if match(r"^\d{1}$", day) else day
month = "0" + month if match(r"^\d{1}$", month) else month
if request.cookies.get('AccountType') == 'Student':
response = s.get(f"https://api.dnevnik.ru/mobile/v2/schedule?startDate={year}-{month}-{day}&endDate={year}-{month}-{day}&personId={user_data['personId']}&groupId={user_data['groupIds'][0]}&access_token={access_token}")
elif request.cookies.get('AccountType') == 'Parent':
childId = request.form.get('child', '')
for child in user_data['children']:
if childId == child['personId']:
response = s.get(f"https://api.dnevnik.ru/mobile/v2/schedule?startDate={year}-{month}-{day}&endDate={year}-{month}-{day}&personId={childId}&groupId={child['groupIds'][0]}&access_token={access_token}")
try:
lesson_data = loads(response.text)['Days'][0]['Schedule']
except (KeyError, IndexError):
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Уроков нет :>'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
if lesson_data == []:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'Уроков нет :>'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
for lesson in lesson_data:
if lesson['Status'] == 'NotInitialised':
continue
else:
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div><i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;">format_list_bulleted</i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
html_out += f'<h5 style="font-weight:600">{lesson["Subject"]["Name"]}</h5>'
for mark in lesson['Marks']:
if mark:
if mark["MarkType"] == 'LogEntry':
html_out += f'<h8 style="color:{coloring(mark["Values"][0]["Value"])};">Присутствие: {mark["MarkTypeText"]}.</h8><br>'
elif mark["MarkType"] == "Mark":
if len(mark['Values']) > 1:
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
for markValue in mark['Values']:
html_out += f'<h8 style="color:{coloring(markValue["Mood"])};">Оценка: {markValue["Value"]} ({mark["MarkTypeText"]}) {kaomoji(markValue["Mood"])}</h8><br>'
if len(mark['Values']) > 1:
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
try:
html_out += f'<h8 style="color:{coloring()};">Урок: {lesson["Theme"]} ({lesson["ImportantWorks"][0]["WorkType"]})</h8><br>'
except (KeyError, IndexError):
try:
html_out += f'<h8 style="color:{coloring()};">Урок: {lesson["Theme"]}</h8><br>'
except (KeyError, IndexError):
pass
if lesson["HomeworksText"] != "":
hw = lesson["HomeworksText"]
links = list(set(findall(r"http[s]?:\/\/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", hw)))
for link in links:
hw = hw.replace(link, f'<a href="{link}" target="_blank">[ссылка]</a>')
html_out += f'<h8 style="color:{coloring()};">ДЗ: {hw}</h8><br>'
html_out += '<div style="display:block; height:5px; clear:both;"></div>'
html_out += '</div>'
except ConnectionError:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Данные не получены ¯\_(ツ)_/¯</h5>'
html_out += 'API Дневник.Ру в оффлайне :> <br>'
html_out += 'Если вы сумели успешно запросить данные ранее, то сделайте длинное нажатие по кнопке запроса.'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
else:
html_out = ""
html_out += '<h4 class="mdl-cell mdl-cell--12-col">Дневник</h4>'
html_out += '<div class="section__circle-container mdl-cell mdl-cell--2-col mdl-cell--1-col-phone">'
html_out += '<i class="material-icons mdl-list__item-avatar mdl-color--primary" style="font-size:32px; padding-top:2.5px; text-align:center;"></i>'
html_out += '</div>'
html_out += '<div class="section__text mdl-cell mdl-cell--10-col-desktop mdl-cell--6-col-tablet mdl-cell--3-col-phone">'
html_out += '<h5>Залогиньтесь ¯\_(ツ)_/¯</h5>'
html_out += 'Вы явно такого не ожидали, не правда ли?'
html_out += '</div>'
response = make_response(jsonify(html_out))
response.set_cookie('Offset', value='', max_age=0, expires=0)
return response
@app.route("/login", methods=['GET'])
def log_in():
accounttype = None
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
try:
access_token = request.cookies.get('AccessToken_Temp')
response = s.get(f"https://api.dnevnik.ru/v1/users/me/context?access_token={access_token}")
try:
s.cookies.get_dict()['dnevnik_sst']
except KeyError:
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
return response
user_data = loads(response.text)
except ConnectionError:
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
return response
try:
type_block = user_data['roles']
except KeyError:
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
return response
if "EduStudent" in type_block:
accounttype = "Student"
elif "EduParent" in type_block:
accounttype = "Parent"
else:
return jsonify("Пора задуматься о том, куда катится ваша жизнь.")
response = make_response(redirect("/"))
response.set_cookie('AccessToken_Temp', value='', max_age=0, expires=0)
response.set_cookie('AccountType', value=accounttype, max_age=2592000, expires=2592000)
response.set_cookie('AccessToken', value=access_token, max_age=2592000, expires=2592000)
return response
@app.route("/apply", methods=['POST'])
def apply():
color = request.form.get('color', '')
html_out = ""
html_out += '<div style="display:block; height:2px; clear:both;"></div>'
html_out += '<p style="text-align:center; color:green;">Смена цветовой схемы успешна ^^</p>'
response = make_response(jsonify(html_out))
response.set_cookie('Theme', value=color, max_age=2592000, expires=2592000)
return response
@app.route("/logout", methods=['GET'])
def log_out():
s = CacheControl(Session())
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
s.headers.update({'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ru-RU,en-US;q=0.8,ru;q=0.6,en;q=0.4'})
offline = False
try:
s.get('https://dnevnik.ru/')
except ConnectionError:
offline = True
response = make_response(redirect('/'))
if 'AccessToken' in request.cookies and not offline:
response.set_cookie('AccessToken', value='', max_age=0, expires=0)
response.set_cookie('AccountType', value='', max_age=0, expires=0)
response.set_cookie('Offset', value='', max_age=0, expires=0)
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-Frame-Options'] = 'DENY'
response.headers['X-XSS-Protection'] = '1; mode=block'
response.headers['Strict-Transport-Security'] = 'max-age=31536000'
return response
@app.route('/sw.js', methods=['GET'])
def serviceworker():
    """Serve the service-worker script from the 'sw' directory at the site root."""
    return send_from_directory('sw', 'sw.js')
@app.route('/sw/<path:path>', methods=['GET'])
def serve_sw(path):
    """Serve service-worker assets; sw.js itself is only served at /sw.js."""
    if path == 'sw.js':
        abort(404)
    return send_from_directory('sw', path)
if __name__ == "__main__":
chdir(dirname(abspath(__file__)))
app.run(debug=debug, use_reloader=True)
|
# Start, stop, query, or configure TPV logging on a set of servers
# Example: wsadmin -username wsadmin -password wsadmin -lang jython -f tpvlogging.py -userprefs wsadmin -action start -server server1
def usage():
    """Print command-line help and terminate the wsadmin script."""
    print "usage: wsadmin -lang jython -f tpvlogging.py -action [start|stop|list|setlevel] -userprefs USER [-node NODE] [-server SERVER] [-pmilevel NEWLEVEL]"
    print " -userprefs is required and you can just pass in the same user as -username for wsadmin, or any name otherwise"
    print " -pmilevel is only used with -action setlevel. Valid values are none, basic, extended, all"
    sys.exit()
import sys
import com.ibm.ws.tpv.engine.UserPreferences as UserPreferences
import com.ibm.ws.tpv.engine.utils.ServerBean as ServerBean
import jarray
import javax.management as mgmt
sType = "APPLICATION_SERVER"
action = "start"
targetNode = ""
targetApplicationServer = ""
user = ""
filename = "tpv"
duration = 300000000
fileSize = 10485760
numFiles = 20
outputType = "bin" # or "xml"
bufferSize = 40
pmilevel = "extended" # only if -action setlevel
help = 0
refreshRate = 30
affectedCount = 0
verbose = 0
l = len(sys.argv)
i = 0
while i < l:
arg = sys.argv[i]
if arg == "-help" or arg == "-h" or arg == "-usage" or arg == "-?":
help = 1
if arg == "-action":
action = sys.argv[i + 1]
if arg == "-node":
targetNode = sys.argv[i + 1]
if arg == "-server":
targetApplicationServer = sys.argv[i + 1]
if arg == "-userprefs":
user = sys.argv[i + 1]
if arg == "-filename":
filename = sys.argv[i + 1]
if arg == "-duration":
duration = int(sys.argv[i + 1])
if arg == "-filesize":
fileSize = int(sys.argv[i + 1])
if arg == "-numfiles":
numFiles = int(sys.argv[i + 1])
if arg == "-buffersize":
bufferSize = int(sys.argv[i + 1])
if arg == "-refreshrate":
refreshRate = int(sys.argv[i + 1])
if arg == "-outputtype":
outputType = sys.argv[i + 1]
if arg == "-pmilevel":
pmilevel = sys.argv[i + 1]
if arg == "-verbose":
verbose = 1
i = i + 1
if help == 1:
usage()
if len(user) == 0:
print ""
print "ERROR: -userprefs must be specified (see usage below)"
print ""
usage()
def getExceptionText(typ, value, tb):
    """Format an exception triple (from sys.exc_info()) as a one-line summary.

    Returns "<repr of value> Function=.. Line=.. File=.." when the location
    can be extracted from the traceback dump.
    """
    # Backquotes are Jython/Python 2 repr() syntax.
    value = `value`
    sd = `tb.dumpStack()`
    sd = sd.replace("\\\\","/")
    # Locate the last "File ... line ... in ..." fragment of the dump.
    i = sd.rfind(" File ")
    j = sd.rfind(", line ")
    k = sd.rfind(", in ")
    locn = ""
    if(i>0 and j>0 and k>0):
        file = sd[i+7:j]
        line = sd[j+7:k]
        func = sd[k+4:-3]
        locn = "Function="+func+" Line="+line+" File="+file
    return value+" "+locn
def convertToList( inlist ):
    """Convert a wsadmin string listing into a Python list of entries.

    Handles both the bracketed, space-separated form ("[a b c]", possibly
    with quoted entries) and the newline-separated form.
    """
    outlist = []
    clist = None
    if (len(inlist) > 0):
        if (inlist[0] == '[' and inlist[len(inlist) - 1] == ']'):
            if (inlist[1] == "\"" and inlist[len(inlist)-2] == "\""):
                # Quoted entries: split on the closing quote+space pair.
                clist = inlist[1:len(inlist) -1].split(")\" ")
            else:
                clist = inlist[1:len(inlist) - 1].split(" ")
        else:
            # Plain multi-line listing.
            clist = inlist.split(java.lang.System.getProperty("line.separator"))
    if clist != None:
        for elem in clist:
            elem = elem.rstrip();
            if (len(elem) > 0):
                # Restore the closing )" that the quoted split consumed.
                if (elem[0] == "\"" and elem[len(elem) -1] != "\""):
                    elem = elem+")\""
                outlist.append(elem)
    return outlist
def listNodes():
    """Return the cell's nodes as a Python list of wsadmin config IDs."""
    return convertToList(AdminConfig.list("Node"))
def listServers(serverType="", nodeName=""):
optionalParamList = []
if (len(serverType) > 0):
optionalParamList = ['-serverType', serverType]
if (len(nodeName) > 0):
node = AdminConfig.getid("/Node:" +nodeName+"/")
optionalParamList = optionalParamList + ['-nodeName', nodeName]
servers = AdminTask.listServers(optionalParamList)
servers = convertToList(servers)
newservers = []
for aServer in servers:
sname = aServer[0:aServer.find("(")]
nname = aServer[aServer.find("nodes/")+6:aServer.find("servers/")-1]
sid = AdminConfig.getid("/Node:"+nname+"/Server:"+sname)
if (newservers.count(sid) <= 0):
newservers.append(sid)
return newservers
print "Action: " + action
print "User: " + user
print "Node: " + targetNode
print "Server: " + targetApplicationServer
print "File name: " + filename
print "Duration: " + str(duration)
print "File Size: " + str(fileSize)
print "Historical Files: " + str(numFiles)
print "Output type: " + outputType
print "Refresh Rate: " + str(refreshRate)
nodeList = listNodes()
for nodeObject in nodeList:
nodeName = nodeObject.split("(")[0]
if len(targetNode) > 0 and targetNode.lower() != nodeName.lower():
print "Skipping node " + nodeName + " because it did not match targetNode"
continue
print ""
print "Processing node: " + nodeName
try:
# build list of Application Servers in the Node
serverList = listServers(sType,nodeName)
except:
typ, val, tb = sys.exc_info()
value = `val`
sd = `tb.dumpStack()`
sd = sd.replace("\\\\","/")
print "Could not process node. Probably the DMGR (which is ok to skip)? Continuing with the other nodes... " + value + " " + sd
continue
if verbose:
print "Number of servers: " + str(len(serverList))
for serverObject in serverList:
serverName = serverObject.split("(")[0]
if len(targetApplicationServer) > 0 and targetApplicationServer.lower() != serverName.lower():
if verbose:
print "Skipping server " + serverName + " (node " + nodeName + ")"
continue
prefs = UserPreferences()
prefs.setServerName(serverName)
prefs.setNodeName(nodeName)
prefs.setLoggingDuration(duration)
prefs.setLogFileSize(fileSize)
prefs.setNumLogFiles(numFiles)
prefs.setTpvLogFormat(outputType)
prefs.setLogFileName(filename)
prefs.setBufferSize(bufferSize)
prefs.setUserId(user)
prefs.setRefreshRate(refreshRate)
params = [prefs]
sig = ["com.ibm.ws.tpv.engine.UserPreferences"]
target = "node=" + nodeName
name = AdminControl.completeObjectName("type=TivoliPerfEngine," + target + ",*")
mbeanObjectName = mgmt.ObjectName(name)
display = nodeName + "\\" + serverName
if action == "start":
print "Calling TivoliPerfEngine.monitorServer on " + display
AdminControl.invoke_jmx(mbeanObjectName, "monitorServer", params, sig)
print "Calling TivoliPerfEngine.startLogging on " + display
AdminControl.invoke_jmx(mbeanObjectName, "startLogging", params, sig)
affectedCount = affectedCount + 1
elif action == "stop":
print "Calling TivoliPerfEngine.stopLogging on " + display
AdminControl.invoke_jmx(mbeanObjectName, "stopLogging", params, sig)
print "Calling TivoliPerfEngine.disableServer on " + display
AdminControl.invoke_jmx(mbeanObjectName, "disableServer", params, sig)
affectedCount = affectedCount + 1
elif action == "list":
print "Monitored Servers (by " + user + ")"
print "======================"
servers = AdminControl.invoke(name, "getMonitoredServers", user)
if len(servers) > 0:
isLoggingSig = ["com.ibm.ws.tpv.engine.utils.ServerBean"]
for server in servers.split("\n"):
pieces = server.split(".")
bean = ServerBean(pieces[0], pieces[1])
isLoggingParams = [bean]
res = AdminControl.invoke_jmx(mbeanObjectName, "isServerLogging", isLoggingParams, isLoggingSig)
perftarget = "node=" + nodeName + ",process=" + pieces[1]
perfname = AdminControl.completeObjectName("type=Perf," + perftarget + ",*")
print server + " ; Logging=" + str(res) + " ; Level=" + AdminControl.invoke(perfname, "getStatisticSet")
break # otherwise we'll do the list for each server in the node -- TODO break outter loop too?
elif action == "setlevel":
target = target + ",process=" + serverName
perfname = AdminControl.completeObjectName("type=Perf," + target + ",*")
# none, basic, extended, all, custom
print "Setting PMI level to " + pmilevel + " on " + serverName
AdminControl.invoke(perfname, "setStatisticSet", pmilevel)
AdminControl.invoke(perfname, "savePMIConfiguration")
affectedCount = affectedCount + 1
elif action == "debug":
print "Debug"
else:
print "Unknown action " + action
print ""
print "Script finished. " + str(affectedCount) + " servers affected."
Change default log type to XML for tpvlogging.py
# Start, stop, query, or configure TPV logging on a set of servers
# Example: wsadmin -username wsadmin -password wsadmin -lang jython -f tpvlogging.py -userprefs wsadmin -action start -server server1
def usage():
    """Print command-line help and terminate the wsadmin script."""
    print "usage: wsadmin -lang jython -f tpvlogging.py -action [start|stop|list|setlevel] -userprefs USER [-node NODE] [-server SERVER] [-pmilevel none|basic|extended|all]"
    print " -userprefs is required and you can just pass in the same user as -username for wsadmin, or any name otherwise"
    print " -pmilevel is only used with -action setlevel. Valid values are none, basic, extended, all"
    print " If neither -node nor -server are specified, then all application servers on all nodes will be executed"
    print " If -node is specified but -server isn't, then all application servers on the node will be executed"
    print " This script does not yet support a custom statistics set for -action setlevel"
    sys.exit()
import sys
import com.ibm.ws.tpv.engine.UserPreferences as UserPreferences
import com.ibm.ws.tpv.engine.utils.ServerBean as ServerBean
import jarray
import javax.management as mgmt
sType = "APPLICATION_SERVER"
action = "start"
targetNode = ""
targetApplicationServer = ""
user = ""
filename = "tpv"
duration = 999999
fileSize = 52428800
numFiles = 5
outputType = "xml" # or "bin"
bufferSize = 40
pmilevel = "extended" # only if -action setlevel
help = 0
refreshRate = 30
affectedCount = 0
verbose = 0
l = len(sys.argv)
i = 0
while i < l:
arg = sys.argv[i]
if arg == "-help" or arg == "-h" or arg == "-usage" or arg == "-?":
help = 1
if arg == "-action":
action = sys.argv[i + 1]
if arg == "-node":
targetNode = sys.argv[i + 1]
if arg == "-server":
targetApplicationServer = sys.argv[i + 1]
if arg == "-userprefs":
user = sys.argv[i + 1]
if arg == "-filename":
filename = sys.argv[i + 1]
if arg == "-duration":
duration = int(sys.argv[i + 1])
if arg == "-filesize":
fileSize = int(sys.argv[i + 1])
if arg == "-numfiles":
numFiles = int(sys.argv[i + 1])
if arg == "-buffersize":
bufferSize = int(sys.argv[i + 1])
if arg == "-refreshrate":
refreshRate = int(sys.argv[i + 1])
if arg == "-outputtype":
outputType = sys.argv[i + 1]
if arg == "-pmilevel":
pmilevel = sys.argv[i + 1]
if arg == "-verbose":
verbose = 1
i = i + 1
if help == 1:
usage()
if len(user) == 0:
print ""
print "ERROR: -userprefs must be specified (see usage below)"
print ""
usage()
def getExceptionText(typ, value, tb):
    """Format an exception triple (from sys.exc_info()) as a one-line summary.

    Returns "<repr of value> Function=.. Line=.. File=.." when the location
    can be extracted from the traceback dump.
    """
    # Backquotes are Jython/Python 2 repr() syntax.
    value = `value`
    sd = `tb.dumpStack()`
    sd = sd.replace("\\\\","/")
    # Locate the last "File ... line ... in ..." fragment of the dump.
    i = sd.rfind(" File ")
    j = sd.rfind(", line ")
    k = sd.rfind(", in ")
    locn = ""
    if(i>0 and j>0 and k>0):
        file = sd[i+7:j]
        line = sd[j+7:k]
        func = sd[k+4:-3]
        locn = "Function="+func+" Line="+line+" File="+file
    return value+" "+locn
def convertToList( inlist ):
    """Convert a wsadmin string listing into a Python list of entries.

    Handles both the bracketed, space-separated form ("[a b c]", possibly
    with quoted entries) and the newline-separated form.
    """
    outlist = []
    clist = None
    if (len(inlist) > 0):
        if (inlist[0] == '[' and inlist[len(inlist) - 1] == ']'):
            if (inlist[1] == "\"" and inlist[len(inlist)-2] == "\""):
                # Quoted entries: split on the closing quote+space pair.
                clist = inlist[1:len(inlist) -1].split(")\" ")
            else:
                clist = inlist[1:len(inlist) - 1].split(" ")
        else:
            # Plain multi-line listing.
            clist = inlist.split(java.lang.System.getProperty("line.separator"))
    if clist != None:
        for elem in clist:
            elem = elem.rstrip();
            if (len(elem) > 0):
                # Restore the closing )" that the quoted split consumed.
                if (elem[0] == "\"" and elem[len(elem) -1] != "\""):
                    elem = elem+")\""
                outlist.append(elem)
    return outlist
def listNodes():
    """Return the cell's nodes as a Python list of wsadmin config IDs."""
    return convertToList(AdminConfig.list("Node"))
def listServers(serverType="", nodeName=""):
optionalParamList = []
if (len(serverType) > 0):
optionalParamList = ['-serverType', serverType]
if (len(nodeName) > 0):
node = AdminConfig.getid("/Node:" +nodeName+"/")
optionalParamList = optionalParamList + ['-nodeName', nodeName]
servers = AdminTask.listServers(optionalParamList)
servers = convertToList(servers)
newservers = []
for aServer in servers:
sname = aServer[0:aServer.find("(")]
nname = aServer[aServer.find("nodes/")+6:aServer.find("servers/")-1]
sid = AdminConfig.getid("/Node:"+nname+"/Server:"+sname)
if (newservers.count(sid) <= 0):
newservers.append(sid)
return newservers
print "Action: " + action
print "User: " + user
print "Node: " + targetNode
print "Server: " + targetApplicationServer
print "File name: " + filename
print "Duration: " + str(duration)
print "File Size: " + str(fileSize)
print "Historical Files: " + str(numFiles)
print "Output type: " + outputType
print "Refresh Rate: " + str(refreshRate)
nodeList = listNodes()
for nodeObject in nodeList:
nodeName = nodeObject.split("(")[0]
if len(targetNode) > 0 and targetNode.lower() != nodeName.lower():
print "Skipping node " + nodeName + " because it did not match targetNode"
continue
print ""
print "Processing node: " + nodeName
try:
# build list of Application Servers in the Node
serverList = listServers(sType,nodeName)
except:
typ, val, tb = sys.exc_info()
value = `val`
sd = `tb.dumpStack()`
sd = sd.replace("\\\\","/")
print "Could not process node. Probably the DMGR (which is ok to skip)? Continuing with the other nodes... " + value + " " + sd
continue
if verbose:
print "Number of servers: " + str(len(serverList))
for serverObject in serverList:
serverName = serverObject.split("(")[0]
if len(targetApplicationServer) > 0 and targetApplicationServer.lower() != serverName.lower():
if verbose:
print "Skipping server " + serverName + " (node " + nodeName + ")"
continue
prefs = UserPreferences()
prefs.setServerName(serverName)
prefs.setNodeName(nodeName)
prefs.setLoggingDuration(duration)
prefs.setLogFileSize(fileSize)
prefs.setNumLogFiles(numFiles)
prefs.setTpvLogFormat(outputType)
prefs.setLogFileName(filename)
prefs.setBufferSize(bufferSize)
prefs.setUserId(user)
prefs.setRefreshRate(refreshRate)
params = [prefs]
sig = ["com.ibm.ws.tpv.engine.UserPreferences"]
target = "node=" + nodeName
name = AdminControl.completeObjectName("type=TivoliPerfEngine," + target + ",*")
mbeanObjectName = mgmt.ObjectName(name)
display = nodeName + "\\" + serverName
if action == "start":
print "Calling TivoliPerfEngine.monitorServer on " + display
AdminControl.invoke_jmx(mbeanObjectName, "monitorServer", params, sig)
print "Calling TivoliPerfEngine.startLogging on " + display
AdminControl.invoke_jmx(mbeanObjectName, "startLogging", params, sig)
affectedCount = affectedCount + 1
elif action == "stop":
print "Calling TivoliPerfEngine.stopLogging on " + display
AdminControl.invoke_jmx(mbeanObjectName, "stopLogging", params, sig)
print "Calling TivoliPerfEngine.disableServer on " + display
AdminControl.invoke_jmx(mbeanObjectName, "disableServer", params, sig)
affectedCount = affectedCount + 1
elif action == "list":
print "Monitored Servers (by " + user + ")"
print "======================"
servers = AdminControl.invoke(name, "getMonitoredServers", user)
if len(servers) > 0:
isLoggingSig = ["com.ibm.ws.tpv.engine.utils.ServerBean"]
for server in servers.split("\n"):
pieces = server.split(".")
bean = ServerBean(pieces[0], pieces[1])
isLoggingParams = [bean]
res = AdminControl.invoke_jmx(mbeanObjectName, "isServerLogging", isLoggingParams, isLoggingSig)
perftarget = "node=" + nodeName + ",process=" + pieces[1]
perfname = AdminControl.completeObjectName("type=Perf," + perftarget + ",*")
print server + " ; Logging=" + str(res) + " ; Level=" + AdminControl.invoke(perfname, "getStatisticSet")
break # otherwise we'll do the list for each server in the node -- TODO break outter loop too?
elif action == "setlevel":
target = target + ",process=" + serverName
perfname = AdminControl.completeObjectName("type=Perf," + target + ",*")
# none, basic, extended, all, custom
print "Setting PMI level to " + pmilevel + " on " + serverName
AdminControl.invoke(perfname, "setStatisticSet", pmilevel)
AdminControl.invoke(perfname, "savePMIConfiguration")
affectedCount = affectedCount + 1
elif action == "debug":
print "Debug"
else:
print "Unknown action " + action
print ""
print "Script finished. " + str(affectedCount) + " servers affected."
|
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
from argparse import ArgumentParser
import lxml.etree as etree
import codecs
from androguard.core import androconf
from androguard.core.bytecodes import apk
from androguard.util import read
def main(arscobj, outp=None, package=None, typ=None, locale=None):
    """Dump one resource type of a parsed resources.arsc as pretty XML.

    arscobj -- ARSCParser-like object exposing get_<type>_resources()
    outp    -- output filename; written to stdout when None
    package -- package to dump (default: first package in the arsc)
    typ     -- resource type to dump (default: "public")
    locale  -- locale to dump (default: the neutral '\\x00\\x00' locale)
    """
    package = package or arscobj.get_packages_names()[0]
    ttype = typ or "public"
    locale = locale or '\x00\x00'

    # TODO: be able to dump all locales of a specific type
    # TODO: be able to recreate the structure of files when developing, eg a res
    # folder with all the XML files
    if not hasattr(arscobj, "get_{}_resources".format(ttype)):
        print("No decoder found for type: '{}'! Please open a bug report.".format(ttype), file=sys.stderr)
        sys.exit(1)

    x = getattr(arscobj, "get_" + ttype + "_resources")(package, locale)
    buff = etree.tostring(etree.fromstring(x), pretty_print=True, encoding="UTF-8")
    if outp:
        # BUG FIX: etree.tostring() returns bytes when an encoding is given,
        # so the file must be opened in binary mode; text mode ("w") raises
        # TypeError on Python 3.
        with open(outp, "wb") as fd:
            fd.write(buff)
    else:
        print(buff)
if __name__ == "__main__":
parser = ArgumentParser(description="Decode resources.arsc either directly"
"from a given file or from an APK.")
parser.add_argument("--version", "-v", action="store_true", default=False,
help="Print androguard version and exit")
parser.add_argument("--input", "-i",
help="resources.arsc or APK to parse (legacy option)")
parser.add_argument("file", nargs="?",
help="resources.arsc or APK to parse")
parser.add_argument("--output", "-o",
help="filename to save the decoded resources to")
parser.add_argument("--package", "-p",
help="Show only resources for the given package name (default: the first package name found)")
parser.add_argument("--locale", "-l",
help="Show only resources for the given locale (default: '\\x00\\x00')")
parser.add_argument("--type", "-t",
help="Show only resources of the given type (default: public)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--list-packages", action="store_true", default=False,
help="List all package names and exit")
group.add_argument("--list-locales", action="store_true", default=False,
help="List all locales and exit")
group.add_argument("--list-types", action="store_true", default=False,
help="List all types and exit")
args = parser.parse_args()
if args.file and args.input:
print("Can not give --input and positional argument! Please use only one of them!")
sys.exit(1)
if args.version:
print("Androaxml version %s" % androconf.ANDROGUARD_VERSION)
sys.exit(0)
if not args.input and not args.file:
print("Give one file to decode!")
sys.exit(1)
if args.input:
fname = args.input
else:
fname = args.file
ret_type = androconf.is_android(fname)
if ret_type == "APK":
a = apk.APK(fname)
arscobj = a.get_android_resources()
elif ret_type == "ARSC":
arscobj = apk.ARSCParser(read(fname))
else:
print("Unknown file type")
sys.exit(1)
if args.list_packages:
print("\n".join(arscobj.get_packages_names()))
sys.exit(0)
if args.list_locales:
for p in arscobj.get_packages_names():
print("In Package:", p)
print("\n".join(map(lambda x: " \\x00\\x00" if x == "\x00\x00" else
" {}".format(x), sorted(arscobj.get_locales(p)))))
sys.exit(0)
if args.list_types:
for p in arscobj.get_packages_names():
print("In Package:", p)
for locale in sorted(arscobj.get_locales(p)):
print(" In Locale: {}".format("\\x00\\x00" if locale == "\x00\x00" else
locale))
print("\n".join(map(" {}".format, sorted(arscobj.get_types(p,
locale)))))
sys.exit(0)
main(arscobj, package=args.package, typ=args.type, locale=args.locale)
Print androarsc error messages to stderr instead of stdout
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
from argparse import ArgumentParser
import lxml.etree as etree
from androguard.core import androconf
from androguard.core.bytecodes import apk
from androguard.util import read
def main(arscobj, outp=None, package=None, typ=None, locale=None):
    """Dump one resource type of a parsed resources.arsc as pretty XML.

    arscobj -- ARSCParser-like object exposing get_<type>_resources()
    outp    -- output filename; written to stdout when None
    package -- package to dump (default: first package in the arsc)
    typ     -- resource type to dump (default: "public")
    locale  -- locale to dump (default: the neutral '\\x00\\x00' locale)
    """
    package = package or arscobj.get_packages_names()[0]
    ttype = typ or "public"
    locale = locale or '\x00\x00'

    # TODO: be able to dump all locales of a specific type
    # TODO: be able to recreate the structure of files when developing, eg a res
    # folder with all the XML files
    if not hasattr(arscobj, "get_{}_resources".format(ttype)):
        print("No decoder found for type: '{}'! Please open a bug report.".format(ttype), file=sys.stderr)
        sys.exit(1)

    x = getattr(arscobj, "get_" + ttype + "_resources")(package, locale)
    buff = etree.tostring(etree.fromstring(x), pretty_print=True, encoding="UTF-8")
    if outp:
        # BUG FIX: etree.tostring() returns bytes when an encoding is given,
        # so the file must be opened in binary mode; text mode ("w") raises
        # TypeError on Python 3.
        with open(outp, "wb") as fd:
            fd.write(buff)
    else:
        print(buff)
if __name__ == "__main__":
parser = ArgumentParser(description="Decode resources.arsc either directly"
"from a given file or from an APK.")
parser.add_argument("--version", "-v", action="store_true", default=False,
help="Print androguard version and exit")
parser.add_argument("--input", "-i",
help="resources.arsc or APK to parse (legacy option)")
parser.add_argument("file", nargs="?",
help="resources.arsc or APK to parse")
parser.add_argument("--output", "-o",
help="filename to save the decoded resources to")
parser.add_argument("--package", "-p",
help="Show only resources for the given package name (default: the first package name found)")
parser.add_argument("--locale", "-l",
help="Show only resources for the given locale (default: '\\x00\\x00')")
parser.add_argument("--type", "-t",
help="Show only resources of the given type (default: public)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--list-packages", action="store_true", default=False,
help="List all package names and exit")
group.add_argument("--list-locales", action="store_true", default=False,
help="List all locales and exit")
group.add_argument("--list-types", action="store_true", default=False,
help="List all types and exit")
args = parser.parse_args()
if args.file and args.input:
print("Can not give --input and positional argument! Please use only one of them!", file=sys.stderr)
sys.exit(1)
if args.version:
print("Androaxml version %s" % androconf.ANDROGUARD_VERSION)
sys.exit(0)
if not args.input and not args.file:
print("Give one file to decode!", file=sys.stderr)
sys.exit(1)
if args.input:
fname = args.input
else:
fname = args.file
ret_type = androconf.is_android(fname)
if ret_type == "APK":
a = apk.APK(fname)
arscobj = a.get_android_resources()
elif ret_type == "ARSC":
arscobj = apk.ARSCParser(read(fname))
else:
print("Unknown file type!", file=sys.stderr)
sys.exit(1)
if args.list_packages:
print("\n".join(arscobj.get_packages_names()))
sys.exit(0)
if args.list_locales:
for p in arscobj.get_packages_names():
print("In Package:", p)
print("\n".join(map(lambda x: " \\x00\\x00" if x == "\x00\x00" else
" {}".format(x), sorted(arscobj.get_locales(p)))))
sys.exit(0)
if args.list_types:
for p in arscobj.get_packages_names():
print("In Package:", p)
for locale in sorted(arscobj.get_locales(p)):
print(" In Locale: {}".format("\\x00\\x00" if locale == "\x00\x00" else
locale))
print("\n".join(map(" {}".format, sorted(arscobj.get_types(p,
locale)))))
sys.exit(0)
main(arscobj, package=args.package, typ=args.type, locale=args.locale)
|
#!/usr/bin/env python
from __future__ import print_function
# Py2/3 compatibility: under Python 2, use raw_input so the sequence is
# read as a plain string rather than eval'd.
try: input = raw_input
except: pass

import sys

# For various kmer sizes, find the number of sequencing errors that would add a
# new edges between two existing kmers
# Note: there are 3*reflen possible mutations
cov = None            # read coverage (optional CLI arg)
seqn_err_rate = None  # per-base sequencing error rate (optional CLI arg)
def usage():
    """Print usage to stderr and terminate with a non-zero status."""
    # cov and seqn-err-rate are optional (the script also runs with no
    # arguments), so show them bracketed in the usage line.
    print("usage: python count-bad-edges.py [<cov> <seqn-err-rate>]",file=sys.stderr)
    exit(-1)
# Accept either no arguments or exactly <cov> <seqn-err-rate>.
if len(sys.argv) != 1 and len(sys.argv) != 3: usage()
if len(sys.argv) == 3:
    try: cov,seqn_err_rate = int(sys.argv[1]),float(sys.argv[2])
    except: usage()

# The reference sequence is read from stdin, one line.
s = input().strip().upper()
# k values to evaluate.
kmers = [21,31,41,51,61,71,81,91,99]
def est_num_of_added_edges(reflen, nerror_edges, cov, seqn_err_rate):
    """Expected number of sequencing errors that create a brand-new edge."""
    total_errors = cov * reflen * seqn_err_rate
    # Fraction of the 3*reflen possible point mutations that add an edge.
    edge_adding_fraction = nerror_edges / (3.0 * reflen)
    return int(total_errors * edge_adding_fraction)
print("# The number of sequencing errors that would add a new edge between two")
print("# existing kmers. Note: there are 3*reflen possible mutations")
cols = ["kmer","reflen","nkmers","nedges","nerror_edges"]
if cov is not None: cols.extend(["cov","err_rate","est_bad_edges"])
print(",".join([str(x) for x in cols]))
for k in kmers:
kmers = set()
edges = set()
for i in range(len(s)-k):
kmers.add(s[i:i+k])
edges.add(s[i:i+k+1])
kmers.add(s[-k:])
err_edges = 0
pk = s[0:k] # first kmer
for i in range(1,len(s)-k+1):
nextb = s[i+k-1]
for c in "ACGT":
err_edges += (c != nextb and pk[1:]+c in kmers and pk+c not in edges)
pk = s[i:i+k]
cols = [k,len(s),len(kmers),len(edges),err_edges]
if cov is not None:
cols.extend([cov, seqn_err_rate,
est_num_of_added_edges(len(s),err_edges,cov,seqn_err_rate)])
print(",".join([str(x) for x in cols]))
Remove non-ascii values from python script [skip CI]
#!/usr/bin/env python
from __future__ import print_function
# Py2/3 compatibility: under Python 2, use raw_input so the sequence is
# read as a plain string rather than eval'd.
try: input = raw_input
except: pass

import sys

# For various kmer sizes, find the number of sequencing errors that would add a
# new edges between two existing kmers
# Note: there are 3*reflen possible mutations
cov = None            # read coverage (optional CLI arg)
seqn_err_rate = None  # per-base sequencing error rate (optional CLI arg)
def usage():
    """Print usage to stderr and terminate with a non-zero status."""
    message = "usage: python count-bad-edges.py [<cov> <seqn-err-rate>]"
    print(message, file=sys.stderr)
    exit(-1)
# Accept either no arguments or exactly <cov> <seqn-err-rate>.
if len(sys.argv) != 1 and len(sys.argv) != 3: usage()
if len(sys.argv) == 3:
    try: cov,seqn_err_rate = int(sys.argv[1]),float(sys.argv[2])
    except: usage()

# The reference sequence is read from stdin, one line.
s = input().strip().upper()
# k values to evaluate.
kmers = [21,31,41,51,61,71,81,91,99]
def est_num_of_added_edges(reflen, nerror_edges, cov, seqn_err_rate):
    """Expected number of sequencing errors that create a brand-new edge."""
    total_errors = cov * reflen * seqn_err_rate
    # Fraction of the 3*reflen possible point mutations that add an edge.
    edge_adding_fraction = nerror_edges / (3.0 * reflen)
    return int(total_errors * edge_adding_fraction)
print("# The number of sequencing errors that would add a new edge between two")
print("# existing kmers. Note: there are 3*reflen possible mutations")
cols = ["kmer","reflen","nkmers","nedges","nerror_edges"]
if cov is not None: cols.extend(["cov","err_rate","est_bad_edges"])
print(",".join([str(x) for x in cols]))
for k in kmers:
kmers = set()
edges = set()
for i in range(len(s)-k):
kmers.add(s[i:i+k])
edges.add(s[i:i+k+1])
kmers.add(s[-k:])
err_edges = 0
pk = s[0:k] # first kmer
for i in range(1,len(s)-k+1):
nextb = s[i+k-1]
for c in "ACGT":
err_edges += (c != nextb and pk[1:]+c in kmers and pk+c not in edges)
pk = s[i:i+k]
cols = [k,len(s),len(kmers),len(edges),err_edges]
if cov is not None:
cols.extend([cov, seqn_err_rate,
est_num_of_added_edges(len(s),err_edges,cov,seqn_err_rate)])
print(",".join([str(x) for x in cols]))
|
import json
import flask
import markdown
from time import gmtime, strftime
from flask import Flask, request, redirect, url_for, g, render_template, flash, session, abort,make_response, Markup, send_from_directory,send_file
from werkzeug.utils import secure_filename
from random import randrange
import sys,os
from mimetypes import MimeTypes
import urllib
import binascii
from rauth import OAuth2Service
sys.path.append('sys/controller')
sys.path.append('sys/model')
from AuthorHelper import *
from DatabaseAdapter import *
from PostHelper import *
from RequestHelper import *
from CircleHelper import *
from PostController import *
from AuthorController import *
from RequestController import *
from CommentController import *
DEBUG = True

# Create and connect the shared database adapter used by all controllers.
dbAdapter = DatabaseAdapter()
dbAdapter.connect()
dbAdapter.setAutoCommit()

ahelper = AuthorHelper(dbAdapter)
aController = AuthorController(dbAdapter)
# use the conneted dbAdapter to initialize postHelper obj
postHelper = PostHelper(dbAdapter)
postcontroller = PostController(dbAdapter)
# Friend-request handling.
reController = RequestController(dbAdapter)
# Circles (friend groups).
circleHelper = CircleHelper(dbAdapter)
circleController = CircleController(dbAdapter)
# Comments on posts.
commentController = CommentController(dbAdapter)

# Allowed file extensions for image uploads.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])

app = Flask(__name__)
app.config.from_object(__name__)
# Upload destination for profile images.
UPLOAD_FOLDER='upload/image'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# NOTE: random key per process -- all sessions are invalidated on restart.
app.secret_key = os.urandom(24)

admin_id = '000000'
admin_name='admin'
admin_model = False
error = None

# SECURITY NOTE(review): OAuth client secret is hard-coded in source; it
# should be loaded from the environment or a config file instead.
GITHUB_CLIENT_ID = '02b57f045e11c12db42c'
GITHUB_CLIENT_SECRET = 'b759b58460b2f81cfef696f7bf157be9460517f2'
github = OAuth2Service(
    client_id=GITHUB_CLIENT_ID,
    client_secret=GITHUB_CLIENT_SECRET,
    name='github',
    authorize_url='https://github.com/login/oauth/authorize',
    access_token_url='https://github.com/login/oauth/access_token',
    base_url='https://api.github.com/')
def flaskPostToJson():
    '''Return the JSON body of the current request, wherever Flask put it.

    Clients send the payload in one of three places; try each in turn:
    the parsed request.json, the raw request.data, or (last resort) a
    payload smuggled in as the first form key.'''
    parsed = request.json
    if parsed is not None:
        return parsed
    raw = request.data
    if raw is not None and raw != '':
        return json.loads(raw)
    return json.loads(request.form.keys()[0])
# default path
@app.route('/', methods=['GET', 'POST'])
def root():
    # everything funnels through the login view
    return redirect(url_for('login'))
@app.route('/<aid>', methods=['GET', 'POST'])
def author_view(aid):
    """Render the header/dashboard page for the logged-in author ``aid``."""
    if 'logged_in' in session and aid ==session['logged_id']:
        username = session['logged_in']  # NOTE(review): unused local
        msgCount = reController.getRequestCountByAid(aid)
        countnumber = json.loads(msgCount)['count']
        return render_template('header.html',msgCount = countnumber)
    else:
        return redirect(url_for('login'))
@app.route('/<aid>/profile',methods=['GET'])
def view_profile(aid):
    # static shell; the page fetches profile.json via ajax
    return render_template('profile.html')
@app.route('/<aid>/profile/image/<imagename>',methods=['GET'])
def view_profile_image(aid,imagename):
    """Serve a profile image straight out of the upload folder."""
    print imagename
    return send_from_directory(app.config['UPLOAD_FOLDER'],imagename, as_attachment=False)
@app.route('/<aid>/profile.json',methods=['GET'])
def get_profile(aid):
    """Return the JSON profile of the author given by the ``aid`` query arg."""
    if 'logged_in' in session and aid ==session['logged_id']:
        try:
            re_aid = request.args.get("aid")
            re = aController.getAuthorByAid(re_aid)
            print re
            if re != False:
                return re
            # NOTE(review): url_for('/') raises BuildError -- '/' is not an
            # endpoint name; probably meant url_for('root'). Same pattern
            # recurs throughout this file -- confirm before changing.
            return redirect(url_for('/'))
        except KeyError:
            return redirect(url_for('/'))
    return redirect(url_for('/'))
@app.route('/<aid>/profile/change',methods=['POST'])
def change_profile(aid):
    """Update profile information or password, selected by the ``type`` query arg."""
    if 'logged_in' in session and aid ==session['logged_id']:
        try:
            # NOTE(review): request.args.get never raises KeyError; the
            # except branch below is dead -- confirm.
            keyword = request.args.get('type')
        except KeyError:
            return "Wrong URL",404
        if keyword == "information":
            gender=""
            filename=""
            email = request.form['email']
            #parse optional information
            nickName=request.form['nick_name']
            birthday =request.form['birthday']
            city = request.form['city']
            try:
                file = request.files['profile_image']
                filename = file.filename
                print "--"+file.filename
            except KeyError:
                file =None
            try:
                gender = request.form['gender']
            except KeyError:
                gender = ""
            if file!=None and filename!="":
                # persist upload, then store the resulting filename
                filename = save_image(aid,file)
            if ahelper.updateAuthorInfo(aid,email,gender,city,birthday,filename):
                re = make_response("OK")
            else:
                re = make_response("Failed")
            return re
        elif keyword == "password":
            new_pwd = request.form['register_pwd']
            if ahelper.updatePasswordByAid(aid,new_pwd):
                re = make_response("OK")
            else:
                re = make_response("Error")
            return re
    else:
        return redirect(url_for('/'))
@app.route('/ajax/aid')
def getuid():
    """Ajax helper: return the logged-in author's id as text/plain."""
    if 'logged_in' not in session:
        return redirect(url_for('login'))
    else:
        re = make_response(session['logged_id'])
        re.headers['Content-Type']='text/plain'
        return re
@app.route('/ajax/author_name')
def getaname():
    """Ajax helper: return the logged-in author's name as text/plain."""
    if 'logged_in' not in session:
        return redirect(url_for('login'))
    else:
        re = make_response(session['logged_in'])
        re.headers['Content-Type']='text/plain'
        return re
# login page
@app.route('/login', methods=['GET', 'POST'])
def login():
    """POST: authenticate and populate the session; GET: render the login page
    with a GitHub OAuth authorize link."""
    if request.method == 'POST':
        authorName =request.form['username']
        password =request.form['password']
        json_str = ahelper.authorAuthenticate(authorName,password)
        if json_str == False:
            re = make_response("False")
            re.headers['Content-Type']='text/plain'
            return re ,200
        else:
            session['logged_in'] = authorName
            session['logged_id'] = json.loads(json_str)['aid']
            if(session['logged_id']==admin_id):
                session['admin_model']= admin_id;
            return json_str,200
    else:
        # GET: hand out a GitHub authorize URL tied to a per-session state
        if not session.get('oauth_state'):
            session['oauth_state'] = binascii.hexlify(os.urandom(24))
        authorize_url = github.get_authorize_url(scope='user,notifications', state=session.get('oauth_state'))
        return render_template('header.html',authorize_url=authorize_url)
    # NOTE(review): both branches above return, so everything below is
    # unreachable dead code -- confirm and remove.
    if "logged_in" in session:
        aid = session['logged_id']
        msgCount = reController.getRequestCountByAid(aid)
        countnumber = json.loads(msgCount)['count']
        return render_template('header.html',msgCount = countnumber)
    else:
        return render_template('header.html')
# register page
@app.route('/register', methods=['PUT', 'POST'])
def register():
    """Create an author account from the registration form and log it in."""
    if request.method == 'POST':
        #parse require information
        gender=""
        email = request.form['email']
        authorName=request.form['author_name']
        password=request.form['register_pwd']
        #parse optional information
        file = request.files['profile_image']
        print "--"+file.filename
        nickName=request.form['nick_name']
        birthday =request.form['birthday']
        city = request.form['city']
        try:
            gender = request.form['gender']
        except KeyError:
            gender = ""
        aid_json = ahelper.addAuthor(authorName,password,nickName)
        if aid_json == False:
            # duplicate name (or other insert failure): plain "False"
            re = make_response("False")
            re.headers['Content-Type']='text/plain'
            return re
        else:
            aid = json.loads(aid_json)['aid']
            session['logged_in'] = authorName
            session['logged_id'] = aid
            path =""
            if(file!=None and file.filename!=""):
                path = save_image(aid,file)
            if ahelper.updateAuthorInfo(aid,email,gender,city,birthday,path) ==False:
                abort(500)
            return aid_json
    return redirect(url_for('/'))
def save_image(aid,file):
    """Store an uploaded image as <aid>.<ext> in the upload folder and
    return the stored filename."""
    # NOTE(review): trusts the client-supplied extension; secure_filename is
    # used elsewhere but not here -- confirm acceptable.
    filename = aid+"."+file.filename.rsplit('.', 1)[1]
    path = os.path.join(app.config['UPLOAD_FOLDER'],filename)
    file.save(path)
    return filename
@app.route('/image/view/<name>',methods=['GET'])
def view_imagin(name):
    """Serve an uploaded image by file name.

    Bug fix: the route declares a <name> URL parameter but the view
    accepted no argument, so Flask raised a TypeError on every request
    (and the body was a bare ``pass``, returning None). Accept the
    parameter and serve the file from the upload folder, mirroring
    view_profile_image above.
    """
    return send_from_directory(app.config['UPLOAD_FOLDER'], name, as_attachment=False)
@app.route('/<aid>/recommended_authorlist.json', methods=['GET'])
def authorlist(aid):
    """JSON list of authors recommended to ``aid`` (friend suggestions)."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    re = aController.getRecommendedAuthor(aid)
    return re
# search authors with keyword
@app.route('/<aid>/author/search',methods=['GET'])
def search_author(aid):
    """Search authors whose name matches the ``key`` query arg."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    try:
        keyword = request.args.get('key')
    except KeyError:
        return redirect(url_for('/'))
    if keyword!=None and keyword!="":
        re = aController.searchAuthorByString(keyword)
        return re
    # NOTE(review): empty/missing keyword falls through and returns None
@app.route('/<aid>/authorlist.json',methods=['GET'])
def allauthorlist(aid):
    """JSON list of every author other than ``aid``."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    re = aController.getOtherAuthor(aid)
    print re
    return re
@app.route('/<aid>/circlelist.json',methods=['GET'])
def circleauthorlist(aid):
    """JSON list of authors in ``aid``'s friend circle."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    re = circleController.getFriendList(aid)
    print re
    return re
@app.route('/<aid>/circle',methods=['GET'])
def render_circle_modal(aid):
    """Render the modal dialog listing ``aid``'s circles."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    return render_template('view_circles_modal.html')
@app.route('/<aid>/circle/delete',methods=['GET'])
def delete_friends(aid):
    """Remove the friend given by the ``aid`` query arg from ``aid``'s circle."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    try:
        keyword = request.args.get('aid')
        if circleController.deleteFriendOfAuthor(aid,keyword):
            re =make_response("OK")
        else:
            re =make_response("Failed")
        return re
    except KeyError:
        return redirect(url_for('/'))
@app.route('/<aid>/messages.json', methods=['GET'])
def messages(aid):
    """JSON list of pending friend requests addressed to ``aid``."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        abort(404)
    else:
        jsonstring = reController.getAllRequestByAid(aid)
        print jsonstring
        return jsonstring
# logout
@app.route('/logout')
def logout():
    # NOTE(review): only 'logged_in' is popped; 'logged_id' and any
    # admin/oauth keys survive logout -- confirm intended.
    session.pop('logged_in', None)
    return redirect(url_for('login'))
# make request
@app.route('/<aid>/author/request',methods=['GET'])
def addfriend(aid):
    """Send a friend request from ``aid`` to the ``recipient`` query arg."""
    if ('logged_in' not in session) or (session['logged_id'] != aid):
        abort(400)
    else:
        try:
            request_aid = request.args.get('recipient')
            if reController.sendRequest(aid,request_aid) is True:
                re = make_response("OK")
                return re
            else:
                # request already exists between the two authors
                re = make_response("Existed")
                return re
        except KeyError:
            return redirect(url_for(aid))
#accept request
@app.route('/<aid>/author/request/accept',methods=['GET'])
def accept_request(aid):
    """Accept the pending friend request from the ``sender`` query arg."""
    if ('logged_in' not in session) or (session['logged_id'] != aid):
        return redirect(url_for('/'))
    else:
        try:
            accept_aid = request.args.get('sender')
            if reController.acceptRequestFromSender(aid,accept_aid):
                re = make_response("OK")
            else:
                re = make_response("Fail")
            return re
        except KeyError:
            # NOTE(review): url_for('aid') is a literal endpoint name,
            # not the variable -- likely a bug; confirm.
            return redirect(url_for('aid'))
# deny request
@app.route('/<aid>/author/request/deny',methods=['GET'])
def deny_request(aid):
    """Deny (delete) the pending friend request sent to ``aid``.

    Query string: ``sender`` -- the aid of the author whose request is denied.
    Returns a plain "OK"/"Fail" response, or redirects when the caller is
    not authenticated as ``aid``.
    """
    if ('logged_in' not in session) or (session['logged_id'] != aid):
        return redirect(url_for('/'))
    else:
        try:
            deny_aid = request.args.get('sender')
            # Bug fix: previously referenced the undefined name `accept_aid`
            # (copy/paste from accept_request), raising NameError on every call.
            if reController.deleteRequest(deny_aid,aid):
                re = make_response("OK")
            else:
                re = make_response("Fail")
            # Bug fix: the response was built but never returned, so the view
            # returned None and Flask raised an error.
            return re
        except KeyError:
            return redirect(url_for('aid'))
@app.route('/author/<authorName>')
def renderStruct(authorName):
    """Render the main page skeleton for the logged-in author."""
    if ('logged_in' in session) and (session['logged_in'] == authorName):
        return render_template('struct.html')
    else:
        return abort(404)
# get all the new posts that a specific author can view from the server
@app.route('/<aid>/pull/')
def getPostForAuthor(aid):
    # NOTE(review): compares session['logged_in'] (author NAME) against the
    # URL's aid; the sibling views compare 'logged_id' -- confirm which is right.
    if ('logged_in' in session) and (session['logged_in'] == aid):
        aid = session['logged_id']
        if aid == None:
            return json.dumps({'status':None}),200
        else:
            post = postcontroller.getPost(aid)
            return post,200
    else:
        return abort(404)
@app.route('/markdown',methods=['GET','POST'])
def index():
    """Markdown preview: POST renders the submitted text, GET shows the form."""
    if request.method == 'POST':
        content = request.form['postMarkDown']
        content = Markup(markdown.markdown(content))
        return render_template('markdown.html', **locals())
    return render_template('markdown_input.html')
@app.route('/author/<aid>/post/<pid>/comments',methods=['GET','POST'])
def getAllCommentsForPost(aid,pid):
    """JSON list of comments on post ``pid``."""
    if ('logged_in' in session) and (session['logged_in'] == aid):
        return commentController.getAllCommentsForPost(pid),200
    # NOTE(review): falls through and returns None when not authenticated
def allowed_file(filename):
    """True when ``filename`` carries one of the whitelisted image extensions."""
    return '.' in filename and filename.rsplit('.' ,1)[1] in app.config['ALLOWED_EXTENSIONS']
@app.route('/test')
def test():
    # manual test page for the upload flow
    return render_template('upload_image.html')
@app.route('/upload',methods=['POST'])
def upload():
    """Save an uploaded file and redirect to its serving URL."""
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))
        return redirect(url_for('uploadImage',filename=filename))
    # NOTE(review): disallowed files fall through and return None
@app.route('/uploads/<filename>')
def uploadImage(filename):
    """Serve a previously uploaded file."""
    return send_from_directory(app.config['UPLOAD_FOLDER'],filename)
@app.route('/<authorName>/post/',methods=['PUT','POST'])
def uploadPostToServer(authorName):
    """Create a new post from the JSON body (title/message/type/permission)."""
    if ('logged_in' in session) and (session['logged_in'] == authorName):
        aid = session['logged_id']
        #aid = ahelper.getAidByAuthorName(authorName)
        postName = authorName
        postObj = flaskPostToJson()
        postTitle = postObj['title']
        postMsg = postObj['message']
        postType = postObj['type']
        postPermission = postObj['permission']
        postDate = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        if aid == None:
            return json.dumps({'status':None}),200
        else:
            # NOTE(review): newPost is constructed but never used; only the
            # helper call below persists anything -- confirm dead code.
            newPost = Post(None,aid,postName,postDate,postTitle,postMsg,postType,postPermission)
            result = postHelper.addPost(aid,postTitle,postMsg,postType,postPermission)
            return json.dumps({'status':result}),200
    else:
        return abort(404)
'''
Retrive the posting permission information for a specific author by authorName
'''
@app.route('/<authorName>/post/getPermissionList/',methods=['GET'])
def getPermissionList(authorName):
    """Return the friend / friend-of-friend list used to pick post visibility."""
    if ('logged_in' in session) and (session['logged_in'] == authorName):
        if request.method == 'GET':
            aid = session['logged_id']
            # Get the permission: friend or fof, from parameter
            permission = request.args.get('option')
            if permission == "friend" or permission == "friends":
                friendlist = circleHelper.getFriendList(aid)
                if friendlist != None:
                    return json.dumps(friendlist),200
            elif permission == "fof":
                fof = circleHelper.getFriendOfFriend(aid)
                if fof != None:
                    return json.dumps(fof),200
            else:
                return "null",200
            return "null",200
    else:
        return abort(404)
'''
Get all the comments for a specific post
'''
@app.route('/author/<aid>/post/<pid>/comment/',methods=['GET','PUT'])
def getCommentsForPost(aid,pid):
    """JSON list of comments on post ``pid`` (duplicate of getAllCommentsForPost)."""
    if('logged_id' in session) and (session['logged_id'] == aid):
        result = commentController.getAllCommentsForPost(pid)
        print result
        return result,200
    else:
        return abort(404)
@app.route('/get_image/<authorName>/<path>')
def get_image(authorName,path):
    """Serve an image from upload/image/<authorName>/<path> with a guessed MIME type."""
    if ('logged_in' in session):
        # NOTE(review): path comes from the URL and is joined by string
        # concatenation -- potential path traversal; confirm and sanitize.
        path = 'upload/image/'+authorName+'/'+path
        mime = MimeTypes()
        url = urllib.pathname2url(path)
        mime_type = mime.guess_type(url)
        return send_file(path, mimetype=mime_type[0])
    else:
        return abort(404)
# get all the new posts that a specific author can view from the server
@app.route('/<authorName>/github/notification')
def getNotification(authorName):
    """Fetch the author's GitHub notifications via OAuth, format them into a
    text blob, then mark them read (PUT /notifications)."""
    authorToken = authorName + '_authToken'
    if ('logged_in' in session) and (session['logged_in'] == authorName) and (authorToken in session):
        # get author auth token
        authorAuthToken=session[authorToken]
        # get auth session
        auth_session = github.get_session(token=authorAuthToken)
        aid = session['logged_id']
        if aid == None:
            return json.dumps({'status':None}),200
        else:
            r = auth_session.get('/notifications')
            for i in range(0,len(r.json())):
                postMsg=''
                for key,value in r.json()[i].iteritems():
                    if key == "updated_at":
                        postMsg=postMsg+"updatet time: " + value +"\n"
                        print "updatet time: " + value
                    elif key == "subject":
                        for key1,value1 in value.iteritems():
                            if key1 == "url":
                                postMsg=postMsg+"update at: " + value1 +"\n"
                                print "update at: " + value1
                            elif key1 == "title":
                                postMsg=postMsg+"title :" + value1 +"\n"
                                print "title :" + value1
                # NOTE(review): persisting the notification as a post is
                # disabled; postMsg is currently built but discarded.
                #newPost = Post(None,aid,None,'Github Notification',postMsg,'text','me')
                #result = postHelper.addPost(aid,'Github Notification',postMsg,'text','me')
            # mark all notifications as read
            r = auth_session.put('/notifications')
            return "a"
    else:
        return abort(404)
@app.route('/github/callback')
def callback():
    """GitHub OAuth callback: exchange the code for a token, then register or
    log in a local account named after the GitHub login."""
    code = request.args['code']
    state = request.args['state'].encode('utf-8')
    # NOTE(review): the CSRF state check is commented out -- the callback
    # accepts any state; confirm before shipping.
    #if state!=session.get('oauth_state'):
    #return render_template('header.html')
    # get auth session
    auth_session = github.get_auth_session(data={'code': code})
    # get author name
    r = auth_session.get('/user')
    authorName = r.json()['login']
    # store author token
    authorToken = authorName + '_authToken'
    session[authorToken] = auth_session.access_token
    # try to register account (NOTE(review): fixed password 123 for all
    # OAuth-created accounts -- anyone can log in as them via the form)
    aid_json = ahelper.addAuthor(authorName,123,authorName)
    if aid_json!= False:
        aid = json.loads(aid_json)['aid']
        session['logged_in'] = authorName
        session['logged_id'] = aid
    else:
        # try to log in
        json_str = ahelper.authorAuthenticate(authorName,123)
        if json_str!=False:
            session['logged_in'] = authorName
            session['logged_id'] = json.loads(json_str)['aid']
        else:
            re = make_response("False")
            re.headers['Content-Type']='text/plain'
            return re
    return redirect(url_for('login'))
if __name__ == '__main__':
    app.debug = True
    # listen on all interfaces (development server)
    app.run(host='0.0.0.0')
# Fixed a minor bug in comments
import json
import flask
import markdown
from time import gmtime, strftime
from flask import Flask, request, redirect, url_for, g, render_template, flash, session, abort,make_response, Markup, send_from_directory,send_file
from werkzeug.utils import secure_filename
from random import randrange
import sys,os
from mimetypes import MimeTypes
import urllib
import binascii
from rauth import OAuth2Service
sys.path.append('sys/controller')
sys.path.append('sys/model')
from AuthorHelper import *
from DatabaseAdapter import *
from PostHelper import *
from RequestHelper import *
from CircleHelper import *
from PostController import *
from AuthorController import *
from RequestController import *
from CommentController import *
DEBUG = True
# create a new database obj
dbAdapter = DatabaseAdapter()
# connect (module import time: the whole app fails fast if the DB is down)
dbAdapter.connect()
dbAdapter.setAutoCommit()
# helpers/controllers all share the single connected adapter
ahelper = AuthorHelper(dbAdapter)
aController = AuthorController(dbAdapter)
# use the connected dbAdapter to initialize postHelper obj
postHelper = PostHelper(dbAdapter)
postcontroller = PostController(dbAdapter)
# friend-request handling
reController = RequestController(dbAdapter)
# friend circles
circleHelper = CircleHelper(dbAdapter)
circleController = CircleController(dbAdapter)
# post comments
commentController = CommentController(dbAdapter)
# Allowed file extensions for uploaded images
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
# picks up the UPPER_CASE module globals above (DEBUG, ALLOWED_EXTENSIONS, ...)
app.config.from_object(__name__)
# add upload
UPLOAD_FOLDER='upload/image'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# NOTE(review): a fresh random secret per process invalidates all cookies on
# every restart and breaks multi-process deployments -- confirm intended.
app.secret_key = os.urandom(24)
admin_id = '000000'
admin_name='admin'
admin_model = False
error = None
# NOTE(review): OAuth client secret committed in source -- should be rotated
# and moved to environment/config.
GITHUB_CLIENT_ID = '02b57f045e11c12db42c'
GITHUB_CLIENT_SECRET = 'b759b58460b2f81cfef696f7bf157be9460517f2'
github = OAuth2Service(
    client_id=GITHUB_CLIENT_ID,
    client_secret=GITHUB_CLIENT_SECRET,
    name='github',
    authorize_url='https://github.com/login/oauth/authorize',
    access_token_url='https://github.com/login/oauth/access_token',
    base_url='https://api.github.com/')
def flaskPostToJson():
    '''Return the JSON body of the current request, wherever Flask put it.

    Clients send the payload in one of three places; try each in turn:
    the parsed request.json, the raw request.data, or (last resort) a
    payload smuggled in as the first form key.'''
    parsed = request.json
    if parsed is not None:
        return parsed
    raw = request.data
    if raw is not None and raw != '':
        return json.loads(raw)
    return json.loads(request.form.keys()[0])
# default path
@app.route('/', methods=['GET', 'POST'])
def root():
    # everything funnels through the login view
    return redirect(url_for('login'))
@app.route('/<aid>', methods=['GET', 'POST'])
def author_view(aid):
    """Render the header/dashboard page for the logged-in author ``aid``."""
    if 'logged_in' in session and aid ==session['logged_id']:
        username = session['logged_in']  # NOTE(review): unused local
        msgCount = reController.getRequestCountByAid(aid)
        countnumber = json.loads(msgCount)['count']
        return render_template('header.html',msgCount = countnumber)
    else:
        return redirect(url_for('login'))
@app.route('/<aid>/profile',methods=['GET'])
def view_profile(aid):
    # static shell; the page fetches profile.json via ajax
    return render_template('profile.html')
@app.route('/<aid>/profile/image/<imagename>',methods=['GET'])
def view_profile_image(aid,imagename):
    """Serve a profile image straight out of the upload folder."""
    print imagename
    return send_from_directory(app.config['UPLOAD_FOLDER'],imagename, as_attachment=False)
@app.route('/<aid>/profile.json',methods=['GET'])
def get_profile(aid):
    """Return the JSON profile of the author given by the ``aid`` query arg."""
    if 'logged_in' in session and aid ==session['logged_id']:
        try:
            re_aid = request.args.get("aid")
            re = aController.getAuthorByAid(re_aid)
            print re
            if re != False:
                return re
            # NOTE(review): url_for('/') raises BuildError -- '/' is not an
            # endpoint name; probably meant url_for('root'). Same pattern
            # recurs throughout this file -- confirm before changing.
            return redirect(url_for('/'))
        except KeyError:
            return redirect(url_for('/'))
    return redirect(url_for('/'))
@app.route('/<aid>/profile/change',methods=['POST'])
def change_profile(aid):
    """Update profile information or password, selected by the ``type`` query arg."""
    if 'logged_in' in session and aid ==session['logged_id']:
        try:
            # NOTE(review): request.args.get never raises KeyError; the
            # except branch below is dead -- confirm.
            keyword = request.args.get('type')
        except KeyError:
            return "Wrong URL",404
        if keyword == "information":
            gender=""
            filename=""
            email = request.form['email']
            #parse optional information
            nickName=request.form['nick_name']
            birthday =request.form['birthday']
            city = request.form['city']
            try:
                file = request.files['profile_image']
                filename = file.filename
                print "--"+file.filename
            except KeyError:
                file =None
            try:
                gender = request.form['gender']
            except KeyError:
                gender = ""
            if file!=None and filename!="":
                # persist upload, then store the resulting filename
                filename = save_image(aid,file)
            if ahelper.updateAuthorInfo(aid,email,gender,city,birthday,filename):
                re = make_response("OK")
            else:
                re = make_response("Failed")
            return re
        elif keyword == "password":
            new_pwd = request.form['register_pwd']
            if ahelper.updatePasswordByAid(aid,new_pwd):
                re = make_response("OK")
            else:
                re = make_response("Error")
            return re
    else:
        return redirect(url_for('/'))
@app.route('/ajax/aid')
def getuid():
    """Ajax helper: return the logged-in author's id as text/plain."""
    if 'logged_in' not in session:
        return redirect(url_for('login'))
    else:
        re = make_response(session['logged_id'])
        re.headers['Content-Type']='text/plain'
        return re
@app.route('/ajax/author_name')
def getaname():
    """Ajax helper: return the logged-in author's name as text/plain."""
    if 'logged_in' not in session:
        return redirect(url_for('login'))
    else:
        re = make_response(session['logged_in'])
        re.headers['Content-Type']='text/plain'
        return re
# login page
@app.route('/login', methods=['GET', 'POST'])
def login():
    """POST: authenticate and populate the session; GET: render the login page
    with a GitHub OAuth authorize link."""
    if request.method == 'POST':
        authorName =request.form['username']
        password =request.form['password']
        json_str = ahelper.authorAuthenticate(authorName,password)
        if json_str == False:
            re = make_response("False")
            re.headers['Content-Type']='text/plain'
            return re ,200
        else:
            session['logged_in'] = authorName
            session['logged_id'] = json.loads(json_str)['aid']
            if(session['logged_id']==admin_id):
                session['admin_model']= admin_id;
            return json_str,200
    else:
        # GET: hand out a GitHub authorize URL tied to a per-session state
        if not session.get('oauth_state'):
            session['oauth_state'] = binascii.hexlify(os.urandom(24))
        authorize_url = github.get_authorize_url(scope='user,notifications', state=session.get('oauth_state'))
        return render_template('header.html',authorize_url=authorize_url)
    # NOTE(review): both branches above return, so everything below is
    # unreachable dead code -- confirm and remove.
    if "logged_in" in session:
        aid = session['logged_id']
        msgCount = reController.getRequestCountByAid(aid)
        countnumber = json.loads(msgCount)['count']
        return render_template('header.html',msgCount = countnumber)
    else:
        return render_template('header.html')
# register page
@app.route('/register', methods=['PUT', 'POST'])
def register():
    """Create an author account from the registration form and log it in."""
    if request.method == 'POST':
        #parse require information
        gender=""
        email = request.form['email']
        authorName=request.form['author_name']
        password=request.form['register_pwd']
        #parse optional information
        file = request.files['profile_image']
        print "--"+file.filename
        nickName=request.form['nick_name']
        birthday =request.form['birthday']
        city = request.form['city']
        try:
            gender = request.form['gender']
        except KeyError:
            gender = ""
        aid_json = ahelper.addAuthor(authorName,password,nickName)
        if aid_json == False:
            # duplicate name (or other insert failure): plain "False"
            re = make_response("False")
            re.headers['Content-Type']='text/plain'
            return re
        else:
            aid = json.loads(aid_json)['aid']
            session['logged_in'] = authorName
            session['logged_id'] = aid
            path =""
            if(file!=None and file.filename!=""):
                path = save_image(aid,file)
            if ahelper.updateAuthorInfo(aid,email,gender,city,birthday,path) ==False:
                abort(500)
            return aid_json
    return redirect(url_for('/'))
def save_image(aid,file):
    """Store an uploaded image as <aid>.<ext> in the upload folder and
    return the stored filename."""
    # NOTE(review): trusts the client-supplied extension; secure_filename is
    # used elsewhere but not here -- confirm acceptable.
    filename = aid+"."+file.filename.rsplit('.', 1)[1]
    path = os.path.join(app.config['UPLOAD_FOLDER'],filename)
    file.save(path)
    return filename
@app.route('/image/view/<name>',methods=['GET'])
def view_imagin(name):
    """Serve an uploaded image by file name.

    Bug fix: the route declares a <name> URL parameter but the view
    accepted no argument, so Flask raised a TypeError on every request
    (and the body was a bare ``pass``, returning None). Accept the
    parameter and serve the file from the upload folder, mirroring
    view_profile_image above.
    """
    return send_from_directory(app.config['UPLOAD_FOLDER'], name, as_attachment=False)
@app.route('/<aid>/recommended_authorlist.json', methods=['GET'])
def authorlist(aid):
    """JSON list of authors recommended to ``aid`` (friend suggestions)."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    re = aController.getRecommendedAuthor(aid)
    return re
# search authors with keyword
@app.route('/<aid>/author/search',methods=['GET'])
def search_author(aid):
    """Search authors whose name matches the ``key`` query arg."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    try:
        keyword = request.args.get('key')
    except KeyError:
        return redirect(url_for('/'))
    if keyword!=None and keyword!="":
        re = aController.searchAuthorByString(keyword)
        return re
    # NOTE(review): empty/missing keyword falls through and returns None
@app.route('/<aid>/authorlist.json',methods=['GET'])
def allauthorlist(aid):
    """JSON list of every author other than ``aid``."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    re = aController.getOtherAuthor(aid)
    print re
    return re
@app.route('/<aid>/circlelist.json',methods=['GET'])
def circleauthorlist(aid):
    """JSON list of authors in ``aid``'s friend circle."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    re = circleController.getFriendList(aid)
    print re
    return re
@app.route('/<aid>/circle',methods=['GET'])
def render_circle_modal(aid):
    """Render the modal dialog listing ``aid``'s circles."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    return render_template('view_circles_modal.html')
@app.route('/<aid>/circle/delete',methods=['GET'])
def delete_friends(aid):
    """Remove the friend given by the ``aid`` query arg from ``aid``'s circle."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        return redirect(url_for('/'))
    try:
        keyword = request.args.get('aid')
        if circleController.deleteFriendOfAuthor(aid,keyword):
            re =make_response("OK")
        else:
            re =make_response("Failed")
        return re
    except KeyError:
        return redirect(url_for('/'))
@app.route('/<aid>/messages.json', methods=['GET'])
def messages(aid):
    """JSON list of pending friend requests addressed to ``aid``."""
    if ('logged_in' not in session) or (aid !=session['logged_id']):
        abort(404)
    else:
        jsonstring = reController.getAllRequestByAid(aid)
        print jsonstring
        return jsonstring
# logout
@app.route('/logout')
def logout():
    # NOTE(review): only 'logged_in' is popped; 'logged_id' and any
    # admin/oauth keys survive logout -- confirm intended.
    session.pop('logged_in', None)
    return redirect(url_for('login'))
# make request
@app.route('/<aid>/author/request',methods=['GET'])
def addfriend(aid):
    """Send a friend request from ``aid`` to the ``recipient`` query arg."""
    if ('logged_in' not in session) or (session['logged_id'] != aid):
        abort(400)
    else:
        try:
            request_aid = request.args.get('recipient')
            if reController.sendRequest(aid,request_aid) is True:
                re = make_response("OK")
                return re
            else:
                # request already exists between the two authors
                re = make_response("Existed")
                return re
        except KeyError:
            return redirect(url_for(aid))
#accept request
@app.route('/<aid>/author/request/accept',methods=['GET'])
def accept_request(aid):
    """Accept the pending friend request from the ``sender`` query arg."""
    if ('logged_in' not in session) or (session['logged_id'] != aid):
        return redirect(url_for('/'))
    else:
        try:
            accept_aid = request.args.get('sender')
            if reController.acceptRequestFromSender(aid,accept_aid):
                re = make_response("OK")
            else:
                re = make_response("Fail")
            return re
        except KeyError:
            # NOTE(review): url_for('aid') is a literal endpoint name,
            # not the variable -- likely a bug; confirm.
            return redirect(url_for('aid'))
# deny request
@app.route('/<aid>/author/request/deny',methods=['GET'])
def deny_request(aid):
    """Deny (delete) the pending friend request sent to ``aid``.

    Query string: ``sender`` -- the aid of the author whose request is denied.
    Returns a plain "OK"/"Fail" response, or redirects when the caller is
    not authenticated as ``aid``.
    """
    if ('logged_in' not in session) or (session['logged_id'] != aid):
        return redirect(url_for('/'))
    else:
        try:
            deny_aid = request.args.get('sender')
            # Bug fix: previously referenced the undefined name `accept_aid`
            # (copy/paste from accept_request), raising NameError on every call.
            if reController.deleteRequest(deny_aid,aid):
                re = make_response("OK")
            else:
                re = make_response("Fail")
            # Bug fix: the response was built but never returned, so the view
            # returned None and Flask raised an error.
            return re
        except KeyError:
            return redirect(url_for('aid'))
@app.route('/author/<authorName>')
def renderStruct(authorName):
    """Render the main page skeleton for the logged-in author."""
    if ('logged_in' in session) and (session['logged_in'] == authorName):
        return render_template('struct.html')
    else:
        return abort(404)
# get all the new posts that a specific author can view from the server
@app.route('/<aid>/pull/')
def getPostForAuthor(aid):
    # NOTE(review): compares session['logged_in'] (author NAME) against the
    # URL's aid; the sibling views compare 'logged_id' -- confirm which is right.
    if ('logged_in' in session) and (session['logged_in'] == aid):
        aid = session['logged_id']
        if aid == None:
            return json.dumps({'status':None}),200
        else:
            post = postcontroller.getPost(aid)
            return post,200
    else:
        return abort(404)
@app.route('/markdown',methods=['GET','POST'])
def index():
    """Markdown preview: POST renders the submitted text, GET shows the form."""
    if request.method == 'POST':
        content = request.form['postMarkDown']
        content = Markup(markdown.markdown(content))
        return render_template('markdown.html', **locals())
    return render_template('markdown_input.html')
@app.route('/author/<aid>/post/<pid>/comments',methods=['GET','POST'])
def getAllCommentsForPost(aid,pid):
    """JSON list of comments on post ``pid``."""
    if ('logged_in' in session) and (session['logged_in'] == aid):
        return commentController.getAllCommentsForPost(pid),200
    # NOTE(review): falls through and returns None when not authenticated
def allowed_file(filename):
    """True when ``filename`` carries one of the whitelisted image extensions."""
    return '.' in filename and filename.rsplit('.' ,1)[1] in app.config['ALLOWED_EXTENSIONS']
@app.route('/test')
def test():
    # manual test page for the upload flow
    return render_template('upload_image.html')
@app.route('/upload',methods=['POST'])
def upload():
    """Save an uploaded file and redirect to its serving URL."""
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))
        return redirect(url_for('uploadImage',filename=filename))
    # NOTE(review): disallowed files fall through and return None
@app.route('/uploads/<filename>')
def uploadImage(filename):
    """Serve a previously uploaded file."""
    return send_from_directory(app.config['UPLOAD_FOLDER'],filename)
@app.route('/<authorName>/post/',methods=['PUT','POST'])
def uploadPostToServer(authorName):
    """Create a new post from the JSON body (title/message/type/permission)."""
    if ('logged_in' in session) and (session['logged_in'] == authorName):
        aid = session['logged_id']
        #aid = ahelper.getAidByAuthorName(authorName)
        postName = authorName
        postObj = flaskPostToJson()
        postTitle = postObj['title']
        postMsg = postObj['message']
        postType = postObj['type']
        postPermission = postObj['permission']
        postDate = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        if aid == None:
            return json.dumps({'status':False}),200
        else:
            # NOTE(review): newPost is constructed but never used; only the
            # helper call below persists anything -- confirm dead code.
            newPost = Post(None,aid,postName,postDate,postTitle,postMsg,postType,postPermission)
            result = postHelper.addPost(aid,postTitle,postMsg,postType,postPermission)
            return json.dumps({'status':result}),200
    else:
        return abort(404)
'''
Retrive the posting permission information for a specific author by authorName
'''
@app.route('/<authorName>/post/getPermissionList/',methods=['GET'])
def getPermissionList(authorName):
    """Return the friend / friend-of-friend list used to pick post visibility."""
    if ('logged_in' in session) and (session['logged_in'] == authorName):
        if request.method == 'GET':
            aid = session['logged_id']
            # Get the permission: friend or fof, from parameter
            permission = request.args.get('option')
            if permission == "friend" or permission == "friends":
                friendlist = circleHelper.getFriendList(aid)
                if friendlist != None:
                    return json.dumps(friendlist),200
            elif permission == "fof":
                fof = circleHelper.getFriendOfFriend(aid)
                if fof != None:
                    return json.dumps(fof),200
            else:
                return "null",200
            return "null",200
    else:
        return abort(404)
'''
Get all the comments for a specific post
'''
@app.route('/author/<aid>/posts/<pid>/comments/',methods=['GET','PUT'])
def getCommentsForPost(aid,pid):
    """JSON list of comments on post ``pid`` (duplicate of getAllCommentsForPost)."""
    if('logged_id' in session) and (session['logged_id'] == aid):
        result = commentController.getAllCommentsForPost(pid)
        print result
        return result,200
    else:
        return abort(404)
@app.route('/get_image/<authorName>/<path>')
def get_image(authorName,path):
    """Serve an image from upload/image/<authorName>/<path> with a guessed MIME type."""
    if ('logged_in' in session):
        # NOTE(review): path comes from the URL and is joined by string
        # concatenation -- potential path traversal; confirm and sanitize.
        path = 'upload/image/'+authorName+'/'+path
        mime = MimeTypes()
        url = urllib.pathname2url(path)
        mime_type = mime.guess_type(url)
        return send_file(path, mimetype=mime_type[0])
    else:
        return abort(404)
# get all the new posts that a specific author can view from the server
@app.route('/<authorName>/github/notification')
def getNotification(authorName):
    """Fetch the author's GitHub notifications via OAuth, format them into a
    text blob, then mark them read (PUT /notifications)."""
    authorToken = authorName + '_authToken'
    if ('logged_in' in session) and (session['logged_in'] == authorName) and (authorToken in session):
        # get author auth token
        authorAuthToken=session[authorToken]
        # get auth session
        auth_session = github.get_session(token=authorAuthToken)
        aid = session['logged_id']
        if aid == None:
            return json.dumps({'status':None}),200
        else:
            r = auth_session.get('/notifications')
            for i in range(0,len(r.json())):
                postMsg=''
                for key,value in r.json()[i].iteritems():
                    if key == "updated_at":
                        postMsg=postMsg+"updatet time: " + value +"\n"
                        print "updatet time: " + value
                    elif key == "subject":
                        for key1,value1 in value.iteritems():
                            if key1 == "url":
                                postMsg=postMsg+"update at: " + value1 +"\n"
                                print "update at: " + value1
                            elif key1 == "title":
                                postMsg=postMsg+"title :" + value1 +"\n"
                                print "title :" + value1
                # NOTE(review): persisting the notification as a post is
                # disabled; postMsg is currently built but discarded.
                #newPost = Post(None,aid,None,'Github Notification',postMsg,'text','me')
                #result = postHelper.addPost(aid,'Github Notification',postMsg,'text','me')
            # mark all notifications as read
            r = auth_session.put('/notifications')
            return "a"
    else:
        return abort(404)
@app.route('/github/callback')
def callback():
    """GitHub OAuth callback: exchange the code for a token, then register or
    log in a local account named after the GitHub login."""
    code = request.args['code']
    state = request.args['state'].encode('utf-8')
    # NOTE(review): the CSRF state check is commented out -- the callback
    # accepts any state; confirm before shipping.
    #if state!=session.get('oauth_state'):
    #return render_template('header.html')
    # get auth session
    auth_session = github.get_auth_session(data={'code': code})
    # get author name
    r = auth_session.get('/user')
    authorName = r.json()['login']
    # store author token
    authorToken = authorName + '_authToken'
    session[authorToken] = auth_session.access_token
    # try to register account (NOTE(review): fixed password 123 for all
    # OAuth-created accounts -- anyone can log in as them via the form)
    aid_json = ahelper.addAuthor(authorName,123,authorName)
    if aid_json!= False:
        aid = json.loads(aid_json)['aid']
        session['logged_in'] = authorName
        session['logged_id'] = aid
    else:
        # try to log in
        json_str = ahelper.authorAuthenticate(authorName,123)
        if json_str!=False:
            session['logged_in'] = authorName
            session['logged_id'] = json.loads(json_str)['aid']
        else:
            re = make_response("False")
            re.headers['Content-Type']='text/plain'
            return re
    return redirect(url_for('login'))
if __name__ == '__main__':
    app.debug = True
    # listen on all interfaces (development server)
    app.run(host='0.0.0.0')
|
#!/usr/bin/env python
import logging
import subprocess
import sys
import textwrap
import xmlrpclib
USAGE = 'Usage: year_in_review.py <YEAR>'
HEADER = 'year_in_review.py: find out what happened!'
# Note: Most of the Bugzilla api code comes from Scrumbugz.
# In-process stand-in for a cache backend, used by SessionTransport.
cache = {}
log = logging.getLogger(__name__)
BZ_URL = 'http://bugzilla.mozilla.org/xmlrpc.cgi'
SESSION_COOKIES_CACHE_KEY = 'bugzilla-session-cookies'
# Bugzilla products this report covers
PRODUCTS = ['support.mozilla.org']
# Known Bugzilla resolutions ('' = unresolved)
BZ_RESOLUTIONS = ['', 'FIXED', 'INVALID', 'WONTFIX', 'DUPLICATE',
                  'WORKSFORME', 'INCOMPLETE', 'SUPPORT', 'EXPIRED',
                  'MOVED']
# Default fields requested for every bug fetch
BZ_FIELDS = [
    'id',
    'status',
    'resolution',
    'summary',
    'whiteboard',
    'assigned_to',
    'priority',
    'severity',
    'product',
    'component',
    'blocks',
    'depends_on',
    'creator',
    'creation_time',
    'last_change_time',
    'target_milestone',
]
# Component fields to drop (not referenced elsewhere in this script)
UNWANTED_COMPONENT_FIELDS = [
    'sort_key',
    'is_active',
    'default_qa_contact',
    'default_assigned_to',
    'description'
]
class SessionTransport(xmlrpclib.SafeTransport):
    """
    XML-RPC HTTPS transport that stores auth cookies in the cache.
    """
    # Lazily-populated list of "name=value" cookie strings (see get_cookies)
    _session_cookies = None
    @property
    def session_cookies(self):
        # Fall back to the module-level cache on first access
        if self._session_cookies is None:
            cookie = cache.get(SESSION_COOKIES_CACHE_KEY)
            if cookie:
                self._session_cookies = cookie
        return self._session_cookies
    def parse_response(self, response):
        # Capture Set-Cookie headers before normal XML-RPC response parsing
        cookies = self.get_cookies(response)
        if cookies:
            self._session_cookies = cookies
            # NOTE(review): `cache` is a plain dict and has no .set(); this
            # raises AttributeError if reached -- the call signature looks
            # copied from a Django-style cache object.
            cache.set(SESSION_COOKIES_CACHE_KEY,
                      self._session_cookies, 0)
            log.debug('Got cookie: %s', self._session_cookies)
        return xmlrpclib.Transport.parse_response(self, response)
    def send_host(self, connection, host):
        # Replay stored cookies on every outgoing request
        cookies = self.session_cookies
        if cookies:
            for cookie in cookies:
                connection.putheader('Cookie', cookie)
                log.debug('Sent cookie: %s', cookie)
        return xmlrpclib.Transport.send_host(self, connection, host)
    def get_cookies(self, response):
        """Return the "name=value" parts of the response's Set-Cookie headers,
        or None when the response carries none."""
        cookie_headers = None
        if hasattr(response, 'msg'):
            cookies = response.msg.getheaders('set-cookie')
            if cookies:
                log.debug('Full cookies: %s', cookies)
                cookie_headers = [c.split(';', 1)[0] for c in cookies]
        return cookie_headers
class BugzillaAPI(xmlrpclib.ServerProxy):
    """ServerProxy wrapper adding convenience search/history/comment calls."""
    def get_bug_ids(self, **kwargs):
        """Return list of ids of bugs from a search."""
        kwargs.update({
            'include_fields': ['id'],
        })
        log.debug('Searching bugs with kwargs: %s', kwargs)
        bugs = self.Bug.search(kwargs)
        return [bug['id'] for bug in bugs.get('bugs', [])]
    def get_bugs(self, **kwargs):
        """Fetch bugs by explicit ids or by search, optionally mixing in
        per-bug history and comments.

        The 'history' and 'comments' kwargs (default True) are consumed
        here; everything else is forwarded to the Bugzilla RPC call.
        """
        defaults = {
            'include_fields': BZ_FIELDS,
        }
        get_history = kwargs.pop('history', True)
        get_comments = kwargs.pop('comments', True)
        defaults.update(kwargs)
        if 'ids' in defaults:
            # permissive: don't fault on individual invalid ids
            defaults['permissive'] = True
            log.debug('Getting bugs with kwargs: %s', defaults)
            bugs = self.Bug.get(defaults)
        else:
            log.debug('Searching bugs with kwargs: %s', defaults)
            bugs = self.Bug.search(defaults)
        bug_ids = [bug['id'] for bug in bugs.get('bugs', [])]
        if not bug_ids:
            return bugs
        # mix in history and comments
        history = comments = {}
        if get_history:
            history = self.get_history(bug_ids)
        if get_comments:
            comments = self.get_comments(bug_ids)
        for bug in bugs['bugs']:
            bug['history'] = history.get(bug['id'], [])
            bug['comments'] = comments.get(bug['id'], {}).get('comments', [])
            bug['comments_count'] = len(comments.get(bug['id'], {})
                                        .get('comments', []))
        return bugs
    def get_history(self, bug_ids):
        """Return {bug_id: history_list}; empty dict on RPC failure."""
        log.debug('Getting history for bugs: %s', bug_ids)
        try:
            history = self.Bug.history({'ids': bug_ids}).get('bugs')
        except xmlrpclib.Fault:
            log.exception('Problem getting history for bug ids: %s', bug_ids)
            return {}
        return dict((h['id'], h['history']) for h in history)
    def get_comments(self, bug_ids):
        """Return {bug_id: comments_payload}; empty dict on RPC failure."""
        log.debug('Getting comments for bugs: %s', bug_ids)
        try:
            comments = self.Bug.comments({
                'ids': bug_ids,
                'include_fields': ['id', 'creator', 'time', 'text'],
            }).get('bugs')
        except xmlrpclib.Fault:
            log.exception('Problem getting comments for bug ids: %s', bug_ids)
            return {}
        # iteritems: this script targets Python 2 (see print statements below)
        return dict((int(bid), cids) for bid, cids in comments.iteritems())
def wrap(text, indent='    '):
    """Fill each blank-line-separated paragraph of *text*, prefixing
    every output line with *indent*, and rejoin with blank lines."""
    filled = []
    for paragraph in text.split('\n\n'):
        filled.append(textwrap.fill(paragraph, expand_tabs=True,
                                    initial_indent=indent,
                                    subsequent_indent=indent))
    return '\n\n'.join(filled)
def parse_whiteboard(whiteboard):
    """Extract u=/c=/p=/s= tokens from a whiteboard string.

    Tokens that are not exactly "key=value" or whose key is unknown are
    ignored; missing keys map to ''.
    """
    parsed = {key: '' for key in ('u', 'c', 'p', 's')}
    for token in whiteboard.split(' '):
        pieces = token.split('=')
        if len(pieces) == 2 and pieces[0] in parsed:
            parsed[pieces[0]] = pieces[1]
    return parsed
def print_bugzilla_stats(year):
    """Print created/resolved bug counts and top-10 people for *year*."""
    stats = {}
    bugzilla = BugzillaAPI(
        BZ_URL,
        transport=SessionTransport(use_datetime=True),
        allow_none=True)
    # created in year
    bugs = bugzilla.get_bugs(
        product=PRODUCTS,
        creation_time='%s-01-01' % year,
        include_fields=['id', 'creator', 'creation_time'],
        history=False,
        comments=False)
    bugs = bugs['bugs']
    total = 0
    creators = {}
    for bug in bugs:
        # We can only get creation_time >= somedate, so we need to nix
        # the bugs that are after the year we're looking for.
        if bug['creation_time'].year != int(year):
            continue
        total += 1
        creators[bug['creator']] = creators.get(bug['creator'], 0) + 1
    stats['created'] = total
    # Python 2: items() returns a list, sortable in place
    creators = creators.items()
    creators.sort(key=lambda item: item[1])
    creators.reverse()
    stats['created_by'] = creators[:10]
    # resolved in year
    bugs = bugzilla.get_bugs(
        product=PRODUCTS,
        last_change_time='%s-01-01' % year,
        include_fields=['id', 'assigned_to', 'last_change_time', 'resolution'],
        status=['RESOLVED', 'VERIFIED', 'CLOSED'],
        history=True,
        comments=False)
    bugs = bugs['bugs']
    total = 0
    # person -> {resolution: count}
    peeps = {}
    resolutions = {}
    for bug in bugs:
        # We can only get last_change_time >= somedate, so we need to
        # nix the bugs that are after the year we're looking for.
        if bug['last_change_time'].year != int(year):
            continue
        for hist in bug['history']:
            for change in hist['changes']:
                if not change['field_name'] == 'resolution':
                    continue
                # I think this history item comes from clearing the
                # resolution. i.e. reopening.
                if change['added'] == '':
                    continue
                total += 1
                # If the bug is marked FIXED, we assume that whoever
                # it was assigned to should get the "credit". If it
                # wasn't marked FIXED, then it's probably someone
                # doing triage and so whoever changed the resolution
                # should get "credit".
                if (change['added'] == 'FIXED'
                        and not 'nobody' in bug['assigned_to']):
                    person = bug['assigned_to']
                else:
                    person = hist['who']
                peeps_dict = peeps.setdefault(person, {})
                key = change['added']
                peeps_dict[key] = peeps_dict.get(key, 0) + 1
                resolutions[change['added']] = resolutions.get(
                    change['added'], 0) + 1
    # Rank people by total resolutions, descending
    peeps = peeps.items()
    peeps.sort(key=lambda item: sum(item[1].values()))
    peeps.reverse()
    stats['resolved'] = total
    stats['resolved_people'] = peeps[:10]
    resolutions = resolutions.items()
    resolutions.sort(key=lambda item: item[1])
    stats['resolved_resolutions'] = resolutions
    print 'Bugs created:', stats['created']
    print ''
    for mem in stats['created_by']:
        # Strip the email domain for display
        person = mem[0].split('@')[0]
        print ' %20s : %s' % (person, mem[1])
    print ''
    print 'Bugs resolved:', stats['resolved']
    print ''
    for mem in stats['resolved_people']:
        person = mem[0].split('@')[0]
        print ' %20s : %d' % (person, sum(mem[1].values()))
        for res, count in mem[1].items():
            print ' %20s : %10s %d' % ('', res, count)
    print ''
    for mem in stats['resolved_resolutions']:
        print ' %20s : %s' % (mem[0], mem[1])
def print_git_stats(year):
    """Print total commit count and per-committer counts for *year*."""
    stats = {}
    # One author name (%an) per commit in the year's range
    commits = subprocess.check_output(
        ['git', 'log',
         '--after=%s-01-01' % year,
         '--before=%s-01-01' % (int(year) + 1),
         '--format=%an'])
    commits = commits.splitlines()
    stats['commits'] = len(commits)
    committers = {}
    for mem in commits:
        committers[mem] = committers.get(mem, 0) + 1
    # Sort by commit count, descending (Python 2: items() is a list)
    committers = committers.items()
    committers.sort(key=lambda item: item[1])
    committers.reverse()
    stats['committers'] = committers
    print 'Total commits:', stats['commits']
    print ''
    for mem in stats['committers']:
        print ' %20s : %s' % (mem[0], mem[1])
def print_header(text):
    """Print *text* as a section header, underlined with '='."""
    print ''
    print text
    print '=' * len(text)
    print ''
def main(argv):
    """CLI driver; *argv* is sys.argv[1:]. Returns 1 on usage error."""
    # XXX: This helps debug bugzilla xmlrpc bits.
    # logging.basicConfig(level=logging.DEBUG)
    if not argv:
        print USAGE
        print 'Error: Must specify the year. e.g. 2012'
        return 1
    year = argv[0]
    print HEADER
    print_header('Twas the year: %s' % year)
    print_header('Bugzilla')
    print_bugzilla_stats(year)
    print_header('git')
    print_git_stats(year)
# Script entry point: forward CLI args (minus program name) to main()
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
Update year-in-review script
* adds lines added, lines deleted, and number of files changed to the git section
* adds a tracker bug list to the bugzilla section
* adds a research bug list to the bugzilla section
#!/usr/bin/env python
import logging
import subprocess
import sys
import textwrap
import xmlrpclib
USAGE = 'Usage: year_in_review.py <YEAR>'
HEADER = 'year_in_review.py: find out what happened!'
# Note: Most of the Bugzilla api code comes from Scrumbugz.
# In-process stand-in for a cache backend, used by SessionTransport.
cache = {}
log = logging.getLogger(__name__)
BZ_URL = 'http://bugzilla.mozilla.org/xmlrpc.cgi'
SESSION_COOKIES_CACHE_KEY = 'bugzilla-session-cookies'
# Bugzilla products this report covers
PRODUCTS = [
    'support.mozilla.org'
]
# Known Bugzilla resolutions ('' = unresolved)
BZ_RESOLUTIONS = [
    '',
    'FIXED',
    'INVALID',
    'WONTFIX',
    'DUPLICATE',
    'WORKSFORME',
    'INCOMPLETE',
    'SUPPORT',
    'EXPIRED',
    'MOVED'
]
# Default fields requested for every bug fetch
BZ_FIELDS = [
    'id',
    'status',
    'resolution',
    'summary',
    'whiteboard',
    'assigned_to',
    'priority',
    'severity',
    'product',
    'component',
    'blocks',
    'depends_on',
    'creator',
    'creation_time',
    'last_change_time',
    'target_milestone',
]
# Component fields to drop (not referenced elsewhere in this script)
UNWANTED_COMPONENT_FIELDS = [
    'sort_key',
    'is_active',
    'default_qa_contact',
    'default_assigned_to',
    'description'
]
class SessionTransport(xmlrpclib.SafeTransport):
    """
    XML-RPC HTTPS transport that stores auth cookies in the cache.
    """
    # Lazily-populated list of "name=value" cookie strings (see get_cookies)
    _session_cookies = None
    @property
    def session_cookies(self):
        # Fall back to the module-level cache on first access
        if self._session_cookies is None:
            cookie = cache.get(SESSION_COOKIES_CACHE_KEY)
            if cookie:
                self._session_cookies = cookie
        return self._session_cookies
    def parse_response(self, response):
        # Capture Set-Cookie headers before normal XML-RPC response parsing
        cookies = self.get_cookies(response)
        if cookies:
            self._session_cookies = cookies
            # NOTE(review): `cache` is a plain dict and has no .set(); this
            # raises AttributeError if reached -- the call signature looks
            # copied from a Django-style cache object.
            cache.set(SESSION_COOKIES_CACHE_KEY,
                      self._session_cookies, 0)
            log.debug('Got cookie: %s', self._session_cookies)
        return xmlrpclib.Transport.parse_response(self, response)
    def send_host(self, connection, host):
        # Replay stored cookies on every outgoing request
        cookies = self.session_cookies
        if cookies:
            for cookie in cookies:
                connection.putheader('Cookie', cookie)
                log.debug('Sent cookie: %s', cookie)
        return xmlrpclib.Transport.send_host(self, connection, host)
    def get_cookies(self, response):
        """Return the "name=value" parts of the response's Set-Cookie headers,
        or None when the response carries none."""
        cookie_headers = None
        if hasattr(response, 'msg'):
            cookies = response.msg.getheaders('set-cookie')
            if cookies:
                log.debug('Full cookies: %s', cookies)
                cookie_headers = [c.split(';', 1)[0] for c in cookies]
        return cookie_headers
class BugzillaAPI(xmlrpclib.ServerProxy):
    """ServerProxy wrapper adding convenience search/history/comment calls."""
    def get_bug_ids(self, **kwargs):
        """Return list of ids of bugs from a search."""
        kwargs.update({
            'include_fields': ['id'],
        })
        log.debug('Searching bugs with kwargs: %s', kwargs)
        bugs = self.Bug.search(kwargs)
        return [bug['id'] for bug in bugs.get('bugs', [])]
    def get_bugs(self, **kwargs):
        """Fetch bugs by explicit ids or by search, optionally mixing in
        per-bug history and comments.

        The 'history' and 'comments' kwargs (default True) are consumed
        here; everything else is forwarded to the Bugzilla RPC call.
        """
        defaults = {
            'include_fields': BZ_FIELDS,
        }
        get_history = kwargs.pop('history', True)
        get_comments = kwargs.pop('comments', True)
        defaults.update(kwargs)
        if 'ids' in defaults:
            # permissive: don't fault on individual invalid ids
            defaults['permissive'] = True
            log.debug('Getting bugs with kwargs: %s', defaults)
            bugs = self.Bug.get(defaults)
        else:
            log.debug('Searching bugs with kwargs: %s', defaults)
            bugs = self.Bug.search(defaults)
        bug_ids = [bug['id'] for bug in bugs.get('bugs', [])]
        if not bug_ids:
            return bugs
        # mix in history and comments
        history = comments = {}
        if get_history:
            history = self.get_history(bug_ids)
        if get_comments:
            comments = self.get_comments(bug_ids)
        for bug in bugs['bugs']:
            bug['history'] = history.get(bug['id'], [])
            bug['comments'] = comments.get(bug['id'], {}).get('comments', [])
            bug['comments_count'] = len(comments.get(bug['id'], {})
                                        .get('comments', []))
        return bugs
    def get_history(self, bug_ids):
        """Return {bug_id: history_list}; empty dict on RPC failure."""
        log.debug('Getting history for bugs: %s', bug_ids)
        try:
            history = self.Bug.history({'ids': bug_ids}).get('bugs')
        except xmlrpclib.Fault:
            log.exception('Problem getting history for bug ids: %s', bug_ids)
            return {}
        return dict((h['id'], h['history']) for h in history)
    def get_comments(self, bug_ids):
        """Return {bug_id: comments_payload}; empty dict on RPC failure."""
        log.debug('Getting comments for bugs: %s', bug_ids)
        try:
            comments = self.Bug.comments({
                'ids': bug_ids,
                'include_fields': ['id', 'creator', 'time', 'text'],
            }).get('bugs')
        except xmlrpclib.Fault:
            log.exception('Problem getting comments for bug ids: %s', bug_ids)
            return {}
        # iteritems: this script targets Python 2 (see print statements below)
        return dict((int(bid), cids) for bid, cids in comments.iteritems())
def wrap(text, indent='    '):
    """Fill each blank-line-separated paragraph of *text*, prefixing
    every output line with *indent*, and rejoin with blank lines."""
    filled = []
    for paragraph in text.split('\n\n'):
        filled.append(textwrap.fill(paragraph, expand_tabs=True,
                                    initial_indent=indent,
                                    subsequent_indent=indent))
    return '\n\n'.join(filled)
def parse_whiteboard(whiteboard):
    """Extract u=/c=/p=/s= tokens from a whiteboard string.

    Tokens that are not exactly "key=value" or whose key is unknown are
    ignored; missing keys map to ''.
    """
    parsed = {key: '' for key in ('u', 'c', 'p', 's')}
    for token in whiteboard.split(' '):
        pieces = token.split('=')
        if len(pieces) == 2 and pieces[0] in parsed:
            parsed[pieces[0]] = pieces[1]
    return parsed
def print_bugzilla_stats(year):
    """Print created/resolved bug stats plus research and tracker lists."""
    stats = {}
    bugzilla = BugzillaAPI(
        BZ_URL,
        transport=SessionTransport(use_datetime=True),
        allow_none=True)
    # -------------------------------------------
    # Bugs created this year
    # -------------------------------------------
    bugs = bugzilla.get_bugs(
        product=PRODUCTS,
        creation_time='%s-01-01' % year,
        include_fields=['id', 'creator', 'creation_time'],
        history=False,
        comments=False)
    bugs = bugs['bugs']
    total = 0
    creators = {}
    for bug in bugs:
        # We can only get creation_time >= somedate, so we need to nix
        # the bugs that are after the year we're looking for.
        if bug['creation_time'].year != int(year):
            continue
        total += 1
        creators[bug['creator']] = creators.get(bug['creator'], 0) + 1
    stats['created'] = total
    # Python 2: items() returns a list, sortable in place
    creators = creators.items()
    creators.sort(key=lambda item: item[1])
    creators.reverse()
    stats['created_by'] = creators[:10]
    print ''
    print 'Bugs created:', stats['created']
    print ''
    for mem in stats['created_by']:
        # Strip the email domain for display
        person = mem[0].split('@')[0]
        print ' %20s : %s' % (person, mem[1])
    # -------------------------------------------
    # Bugs resolved this year
    # -------------------------------------------
    bugs = bugzilla.get_bugs(
        product=PRODUCTS,
        last_change_time='%s-01-01' % year,
        include_fields=['id', 'summary', 'assigned_to', 'last_change_time', 'resolution'],
        status=['RESOLVED', 'VERIFIED', 'CLOSED'],
        history=True,
        comments=False)
    bugs = bugs['bugs']
    total = 0
    # person -> {resolution: count}
    peeps = {}
    resolutions = {}
    # NOTE(review): traceback_bugs is collected but never reported below
    traceback_bugs = []
    research_bugs = []
    tracker_bugs = []
    for bug in bugs:
        # We can only get last_change_time >= somedate, so we need to
        # nix the bugs that are after the year we're looking for.
        if bug['last_change_time'].year != int(year):
            continue
        if bug['summary'].lower().startswith('[traceback]'):
            traceback_bugs.append(bug)
        if bug['summary'].lower().startswith('[research]'):
            research_bugs.append(bug)
        if bug['summary'].lower().startswith('[tracker]'):
            tracker_bugs.append(bug)
        for hist in bug['history']:
            for change in hist['changes']:
                if not change['field_name'] == 'resolution':
                    continue
                # I think this history item comes from clearing the
                # resolution. i.e. reopening.
                if change['added'] == '':
                    continue
                total += 1
                # If the bug is marked FIXED, we assume that whoever
                # it was assigned to should get the "credit". If it
                # wasn't marked FIXED, then it's probably someone
                # doing triage and so whoever changed the resolution
                # should get "credit".
                if (change['added'] == 'FIXED'
                        and not 'nobody' in bug['assigned_to']):
                    person = bug['assigned_to']
                else:
                    person = hist['who']
                peeps_dict = peeps.setdefault(person, {})
                key = change['added']
                peeps_dict[key] = peeps_dict.get(key, 0) + 1
                resolutions[change['added']] = resolutions.get(
                    change['added'], 0) + 1
    # Rank people by total resolutions, descending
    peeps = peeps.items()
    peeps.sort(key=lambda item: sum(item[1].values()))
    peeps.reverse()
    stats['resolved'] = total
    stats['resolved_people'] = peeps[:10]
    resolutions = resolutions.items()
    resolutions.sort(key=lambda item: item[1])
    stats['resolved_resolutions'] = resolutions
    print ''
    print 'Bugs resolved:', stats['resolved']
    print ''
    for mem in stats['resolved_people']:
        person = mem[0].split('@')[0]
        print ' %20s : %d' % (person, sum(mem[1].values()))
        for res, count in mem[1].items():
            print ' %20s : %10s %d' % ('', res, count)
    # -------------------------------------------
    # Resolution stats
    # -------------------------------------------
    print ''
    for mem in stats['resolved_resolutions']:
        print ' %20s : %s' % (mem[0], mem[1])
    # -------------------------------------------
    # Research bugs
    # -------------------------------------------
    print ''
    print 'Research bugs:', len(research_bugs)
    print ''
    for bug in research_bugs:
        print '{0}: {1}'.format(bug['id'], bug['summary'])
    # -------------------------------------------
    # Trackers
    # -------------------------------------------
    print ''
    print 'Tracker bugs:', len(tracker_bugs)
    print ''
    for bug in tracker_bugs:
        print '{0}: {1}'.format(bug['id'], bug['summary'])
def git(*args):
    """Run the given command line (argv items) and return its stdout."""
    command = list(args)
    return subprocess.check_output(command)
def print_git_stats(year):
    """Print commit counts plus added/deleted/files-changed totals per author."""
    # Get the shas for all the commits we're going to look at.
    all_commits = subprocess.check_output([
        'git', 'log',
        '--after=%s-01-01' % year,
        '--before=%s-01-01' % (int(year) + 1),
        '--format=%H'
    ])
    all_commits = all_commits.splitlines()
    # Person -> # commits
    committers = {}
    # Person -> (# lines added, # lines deleted, # files changed)
    changes = {}
    for commit in all_commits:
        # NOTE(review): '<sha>~..<sha>' fails for a root commit (no parent)
        author = git('git', 'log', '--format=%an',
                     '{0}~..{1}'.format(commit, commit))
        author = author.strip()
        # FIXME - this is lame. what's going on is that there are
        # merge commits which have multiple authors, so we just grab
        # the second one.
        if '\n' in author:
            author = author.splitlines()[1]
        committers[author] = committers.get(author, 0) + 1
        diff_data = git('git', 'diff', '--numstat', '--find-copies-harder',
                        '{0}~..{1}'.format(commit, commit))
        total_added = 0
        total_deleted = 0
        total_files = 0
        for line in diff_data.splitlines():
            added, deleted, fn = line.split('\t')
            # Vendored code doesn't count toward anyone's totals
            if fn.startswith('vendor/'):
                continue
            # git numstat prints '-' for binary files
            if added != '-':
                total_added += int(added)
            if deleted != '-':
                total_deleted += int(deleted)
            total_files += 1
        old_changes = changes.get(author, (0, 0, 0))
        changes[author] = (
            old_changes[0] + total_added,
            old_changes[1] + total_deleted,
            old_changes[2] + total_files
        )
    print 'Total commits:', len(all_commits)
    print ''
    committers = sorted(
        committers.items(), key=lambda item: item[1], reverse=True)
    for person, count in committers:
        print ' %20s : %s (+%s, -%s, files %s)' % (
            person, count, changes[person][0], changes[person][1], changes[person][2])
    # This is goofy summing, but whatevs.
    print ''
    print 'Total lines added:', sum([item[0] for item in changes.values()])
    print 'Total lines deleted:', sum([item[1] for item in changes.values()])
    print 'Total files changed:', sum([item[2] for item in changes.values()])
def print_header(text):
    """Print *text* as a section header, underlined with '='."""
    print ''
    print text
    print '=' * len(text)
    print ''
def main(argv):
    """CLI driver; *argv* is sys.argv[1:]. Returns 1 on usage error."""
    # XXX: This helps debug bugzilla xmlrpc bits.
    # logging.basicConfig(level=logging.DEBUG)
    if not argv:
        print USAGE
        print 'Error: Must specify the year. e.g. 2012'
        return 1
    year = argv[0]
    print HEADER
    print_header('Twas the year: %s' % year)
    print_header('Bugzilla')
    print_bugzilla_stats(year)
    print_header('git')
    print_git_stats(year)
# Script entry point: forward CLI args (minus program name) to main()
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
#!/usr/bin/env python3
import sys
import re
import random
import argparse
def read_tab_file_handle_sorted(tab_file_handle, factor_index=0):
    """
    Parse a tab-separated file that is sorted on one column and yield,
    one group at a time, the list of split rows sharing the value found
    at *factor_index*. The handle is closed when exhausted.
    """
    current_group = []
    last_factor = ''
    for raw_line in tab_file_handle:
        # Skip header lines
        if raw_line.startswith('@'):
            continue
        stripped = raw_line.strip()
        # Skip blank lines
        if not stripped:
            continue
        fields = stripped.split()
        factor = fields[factor_index]
        # A new factor value closes the previous group
        if factor != last_factor and last_factor:
            yield current_group
            current_group = []
        current_group.append(fields)
        last_factor = factor
    # Emit the final group (an empty input yields one empty list, as before)
    yield current_group
    tab_file_handle.close()
def read_fasta_file_handle(fasta_file_handle):
    """
    Parse a fasta file handle and yield (header, sequence) tuples.
    The handle is closed when exhausted.
    """
    current_header = ''
    chunks = []
    records_seen = 0
    for line in fasta_file_handle:
        if line[0] == '>':
            # Flush the previous record before starting a new one
            if records_seen:
                yield (current_header, ''.join(chunks))
                chunks = []
            current_header = line[1:].rstrip()
            records_seen += 1
        else:
            chunks.append(line.strip())
    # Last record (('', '') for an empty file, matching the original)
    yield (current_header, ''.join(chunks))
    fasta_file_handle.close()
def decode_cigar(cigar_values, current_pos):
    """
    Return the set of reference positions covered by the given CIGAR
    components, starting from zero-based position *current_pos*.
    """
    covered = set()
    pos = current_pos
    for op in cigar_values:
        length = int(op[:-1])
        letter = op[-1].upper()
        if letter in ('M', 'X', '='):
            # Aligned bases consume and cover reference positions
            covered.update(range(pos, pos + length))
            pos += length
        elif letter in ('N', 'D'):
            # Skips/deletions consume the reference without covering it
            pos += length
    return covered
def split_cigar(cigar_string):
    """
    Split a CIGAR string into its components.

    A component is a run of digits followed by a single operation letter
    or '='. The previous pattern, "[\\d]+[a-zA-Z|=]", also accepted a
    literal '|' (inside a character class '|' is not alternation), which
    is not a valid CIGAR operation; that is fixed here.
    """
    cigar_pattern = re.compile(r"\d+[A-Za-z=]")
    return cigar_pattern.findall(cigar_string)
def compute_depth(reads_by_pos, discarded_reads=None):
    """Per-position depth, counting only reads not in *discarded_reads*."""
    discarded = discarded_reads if discarded_reads is not None else []
    return [sum(1 for read in reads if read not in discarded)
            for reads in reads_by_pos]
def restrict_depth(reads_by_pos, threshold):
    """
    Return the set of read ids to discard so that no reference position
    keeps more than *threshold* contributing reads. Reads are picked at
    random, position by position.
    """
    discarded = set()
    for reads in reads_by_pos:
        current_depth = len([r for r in reads if r not in discarded])
        while current_depth > threshold:
            # Randomly pick a read; retry if it was already discarded
            candidate = random.choice(reads)
            if candidate in discarded:
                continue
            discarded.add(candidate)
            current_depth = len([r for r in reads if r not in discarded])
    return discarded
def get_reads_by_pos(alignment_tabs_list, ref_len):
    """
    For each position of the reference (length *ref_len*), collect the ids
    of the reads covering it, from a list of split SAM alignment rows.

    A list (not a set) is kept per position because the very same read can
    be mapped more than once at the same position
    (https://github.com/biocore/sortmerna/issues/137).
    """
    reads_by_pos = [[] for _ in range(ref_len)]
    for fields in alignment_tabs_list:
        read_id = fields[0]
        cigar = fields[5]
        # SAM POS is 1-based; 0 means unmapped, so skip those rows
        start = int(fields[3]) - 1
        if start < 0:
            continue
        for position in decode_cigar(split_cigar(cigar), start):
            reads_by_pos[position].append(read_id)
    return reads_by_pos
def sample_by_depth(sam_handler, fasta_ref_handler, threshold, sampled_out_sam_handler):
    """
    Down-sample a reference/position-sorted SAM stream so that per-position
    depth stays at or below *threshold*, writing kept lines to the output.
    """
    # Map reference id -> upper-cased sequence (only its length is used)
    references = dict()
    for header, sequence in read_fasta_file_handle(fasta_ref_handler):
        references[header.split()[0]] = sequence.upper()
    # Process the SAM stream one reference at a time (column 2 = RNAME)
    for alignment_tabs_list in read_tab_file_handle_sorted(sam_handler, 2):
        ref_id = alignment_tabs_list[0][2]
        ref_len = len(references[ref_id])
        reads_by_pos = get_reads_by_pos(alignment_tabs_list, ref_len)
        reads_to_discard = restrict_depth(reads_by_pos, threshold)
        for fields in alignment_tabs_list:
            read_id = fields[0]
            cigar = fields[5]
            leftmost_mapping_pos = int(fields[3]) - 1
            if read_id not in reads_to_discard:
                print('{}\n'.format('\t'.join(fields)),
                      file=sampled_out_sam_handler, end='')
if __name__ == '__main__':
    # Command-line interface: down-sample a sorted SAM file so that
    # per-position coverage does not exceed the requested threshold.
    parser = argparse.ArgumentParser(description='')
    # -i / --input_sam
    parser.add_argument('-i', '--input_sam',
                        metavar='INSAM',
                        type=argparse.FileType('r'),
                        default='-',
                        help='Input sam file, sorted by reference and position')
    # -o / --output_sam
    parser.add_argument('-o', '--output_sam',
                        metavar='OUTSAM',
                        type=argparse.FileType('w'),
                        default='-',
                        help='Output filtered sam file')
    # -r / --references
    parser.add_argument('-r', '--references',
                        metavar='REF',
                        type=argparse.FileType('r'),
                        required=True,
                        help='References fasta file')
    # -c / --cov_threshold
    # FIX: the old help string said "Identity threshold", but this value
    # is compared against per-position depth in restrict_depth().
    parser.add_argument('-c', '--cov_threshold',
                        metavar='COV',
                        type=float,
                        default=50,
                        help='Coverage depth threshold. '
                             'Default is %(default)s')
    args = parser.parse_args()
    sample_by_depth(args.input_sam, args.references, args.cov_threshold, args.output_sam)
Fix the type of --cov_threshold argument #34
#!/usr/bin/env python3
import sys
import re
import random
import argparse
def read_tab_file_handle_sorted(tab_file_handle, factor_index=0):
    """
    Parse a tab-separated file that is sorted on one column and yield,
    one group at a time, the list of split rows sharing the value found
    at *factor_index*. The handle is closed when exhausted.
    """
    current_group = []
    last_factor = ''
    for raw_line in tab_file_handle:
        # Skip header lines
        if raw_line.startswith('@'):
            continue
        stripped = raw_line.strip()
        # Skip blank lines
        if not stripped:
            continue
        fields = stripped.split()
        factor = fields[factor_index]
        # A new factor value closes the previous group
        if factor != last_factor and last_factor:
            yield current_group
            current_group = []
        current_group.append(fields)
        last_factor = factor
    # Emit the final group (an empty input yields one empty list, as before)
    yield current_group
    tab_file_handle.close()
def read_fasta_file_handle(fasta_file_handle):
    """
    Parse a fasta file handle and yield (header, sequence) tuples.
    The handle is closed when exhausted.
    """
    current_header = ''
    chunks = []
    records_seen = 0
    for line in fasta_file_handle:
        if line[0] == '>':
            # Flush the previous record before starting a new one
            if records_seen:
                yield (current_header, ''.join(chunks))
                chunks = []
            current_header = line[1:].rstrip()
            records_seen += 1
        else:
            chunks.append(line.strip())
    # Last record (('', '') for an empty file, matching the original)
    yield (current_header, ''.join(chunks))
    fasta_file_handle.close()
def decode_cigar(cigar_values, current_pos):
    """
    Return the set of reference positions covered by the given CIGAR
    components, starting from zero-based position *current_pos*.
    """
    covered = set()
    pos = current_pos
    for op in cigar_values:
        length = int(op[:-1])
        letter = op[-1].upper()
        if letter in ('M', 'X', '='):
            # Aligned bases consume and cover reference positions
            covered.update(range(pos, pos + length))
            pos += length
        elif letter in ('N', 'D'):
            # Skips/deletions consume the reference without covering it
            pos += length
    return covered
def split_cigar(cigar_string):
    """
    Split a CIGAR string into its components.

    A component is a run of digits followed by a single operation letter
    or '='. The previous pattern, "[\\d]+[a-zA-Z|=]", also accepted a
    literal '|' (inside a character class '|' is not alternation), which
    is not a valid CIGAR operation; that is fixed here.
    """
    cigar_pattern = re.compile(r"\d+[A-Za-z=]")
    return cigar_pattern.findall(cigar_string)
def compute_depth(reads_by_pos, discarded_reads=None):
    """Per-position depth, counting only reads not in *discarded_reads*."""
    discarded = discarded_reads if discarded_reads is not None else []
    return [sum(1 for read in reads if read not in discarded)
            for reads in reads_by_pos]
def restrict_depth(reads_by_pos, threshold):
    """
    Return the set of read ids to discard so that no reference position
    keeps more than *threshold* contributing reads. Reads are picked at
    random, position by position.
    """
    discarded = set()
    for reads in reads_by_pos:
        current_depth = len([r for r in reads if r not in discarded])
        while current_depth > threshold:
            # Randomly pick a read; retry if it was already discarded
            candidate = random.choice(reads)
            if candidate in discarded:
                continue
            discarded.add(candidate)
            current_depth = len([r for r in reads if r not in discarded])
    return discarded
def get_reads_by_pos(alignment_tabs_list, ref_len):
    """
    For each position of the reference (length *ref_len*), collect the ids
    of the reads covering it, from a list of split SAM alignment rows.

    A list (not a set) is kept per position because the very same read can
    be mapped more than once at the same position
    (https://github.com/biocore/sortmerna/issues/137).
    """
    reads_by_pos = [[] for _ in range(ref_len)]
    for fields in alignment_tabs_list:
        read_id = fields[0]
        cigar = fields[5]
        # SAM POS is 1-based; 0 means unmapped, so skip those rows
        start = int(fields[3]) - 1
        if start < 0:
            continue
        for position in decode_cigar(split_cigar(cigar), start):
            reads_by_pos[position].append(read_id)
    return reads_by_pos
def sample_by_depth(sam_handler, fasta_ref_handler, threshold, sampled_out_sam_handler):
    """
    Down-sample a reference/position-sorted SAM stream so that per-position
    depth stays at or below *threshold*, writing kept lines to the output.
    """
    # Map reference id -> upper-cased sequence (only its length is used)
    references = dict()
    for header, sequence in read_fasta_file_handle(fasta_ref_handler):
        references[header.split()[0]] = sequence.upper()
    # Process the SAM stream one reference at a time (column 2 = RNAME)
    for alignment_tabs_list in read_tab_file_handle_sorted(sam_handler, 2):
        ref_id = alignment_tabs_list[0][2]
        ref_len = len(references[ref_id])
        reads_by_pos = get_reads_by_pos(alignment_tabs_list, ref_len)
        reads_to_discard = restrict_depth(reads_by_pos, threshold)
        for fields in alignment_tabs_list:
            read_id = fields[0]
            cigar = fields[5]
            leftmost_mapping_pos = int(fields[3]) - 1
            if read_id not in reads_to_discard:
                print('{}\n'.format('\t'.join(fields)),
                      file=sampled_out_sam_handler, end='')
if __name__ == '__main__':
    # Command-line interface: down-sample a sorted SAM file so that
    # per-position coverage does not exceed the requested threshold.
    parser = argparse.ArgumentParser(description='')
    # -i / --input_sam
    parser.add_argument('-i', '--input_sam',
                        metavar='INSAM',
                        type=argparse.FileType('r'),
                        default='-',
                        help='Input sam file, sorted by reference and position')
    # -o / --output_sam
    parser.add_argument('-o', '--output_sam',
                        metavar='OUTSAM',
                        type=argparse.FileType('w'),
                        default='-',
                        help='Output filtered sam file')
    # -r / --references
    parser.add_argument('-r', '--references',
                        metavar='REF',
                        type=argparse.FileType('r'),
                        required=True,
                        help='References fasta file')
    # -c / --cov_threshold
    # FIX: the old help string said "Identity threshold", but this value
    # is compared against per-position depth in restrict_depth().
    parser.add_argument('-c', '--cov_threshold',
                        metavar='COV',
                        type=int,
                        default=500,
                        help='Coverage depth threshold. '
                             'Default is %(default)s')
    args = parser.parse_args()
    sample_by_depth(args.input_sam, args.references, args.cov_threshold, args.output_sam)
|
from flask import Flask, request, render_template, make_response
from flask.ext.restful import Api, Resource, reqparse, abort
import json
import string
import random
from datetime import datetime
# define our priority levels; the index is the numeric priority value
PRIORITIES = ( 'closed', 'low', 'normal', 'high' )
# load help requests data from disk
# NOTE: the name `data` is rebound from the open file handle to the
# parsed JSON-LD dict; everything below reads/writes this dict in memory.
with open('data.jsonld') as data:
    data = json.load(data)
#
# define some helper functions
#
def generate_id(size=6, chars=string.ascii_lowercase + string.digits):
    """Return a random id of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def error_if_helprequest_not_found(helprequest_id):
    """Abort the request with a 404 if the help-request id is unknown."""
    if helprequest_id in data["helprequests"]:
        return
    abort(404, message="Help request {} doesn't exist".format(helprequest_id))
def filter_and_sort_helprequests(q='', sort_by='time'):
    """Return (id, helprequest) pairs whose title+description contain *q*
    (case-insensitive), sorted descending on the *sort_by* field."""
    matches = [item for item in data["helprequests"].items()
               if q.lower() in (item[1]['title'] + item[1]['description']).lower()]
    return sorted(matches, key=lambda item: item[1][sort_by], reverse=True)
def render_helprequest_as_html(helprequest):
    """Render one help request with the single-request HTML template."""
    # Priorities as (index, name) pairs, highest first, for the template
    priority_choices = reversed(list(enumerate(PRIORITIES)))
    return render_template(
        'helprequest+microdata+rdfa.html',
        helprequest=helprequest,
        priorities=priority_choices)
def render_helprequest_list_as_html(helprequests):
    """Render the help-request list page."""
    return render_template('helprequests.html',
                           helprequests=helprequests,
                           priorities=PRIORITIES)
def nonempty_string(x):
    """reqparse type helper: coerce *x* to str and reject empty values.

    FIX: the old code tested len(x) instead of len(s), so non-sized
    inputs (e.g. an int) raised TypeError even though their string form
    is non-empty. The length of the converted string is what matters.
    """
    s = str(x)
    if len(s) == 0:
        raise ValueError('string is empty')
    return s
#
# specify the data we need to create a new help request
#
new_helprequest_parser = reqparse.RequestParser()
# All three fields are mandatory, non-empty strings
for arg in ['from', 'title', 'description']:
    new_helprequest_parser.add_argument(
        arg, type=nonempty_string, required=True,
        help="'{}' is a required value".format(arg))
#
# specify the data we need to update an existing help request
#
update_helprequest_parser = reqparse.RequestParser()
# Priority defaults to the index of 'normal' in PRIORITIES
update_helprequest_parser.add_argument(
    'priority', type=int, default=PRIORITIES.index('normal'))
update_helprequest_parser.add_argument(
    'comment', type=str, default='')
#
# specify the parameters for filtering and sorting help requests
#
query_parser = reqparse.RequestParser()
query_parser.add_argument(
    'q', type=str, default='')
query_parser.add_argument(
    'sort-by', type=str, choices=('priority', 'time'), default='time')
#
# define our (kinds of) resources
#
class HelpRequest(Resource):
    """Single help request, rendered as HTML."""

    def get(self, helprequest_id):
        """Return the HTML page for one help request (404 if unknown)."""
        error_if_helprequest_not_found(helprequest_id)
        helprequest = data["helprequests"][helprequest_id]
        return make_response(render_helprequest_as_html(helprequest), 200)

    def patch(self, helprequest_id):
        """Update the priority and optionally append a comment."""
        error_if_helprequest_not_found(helprequest_id)
        helprequest = data["helprequests"][helprequest_id]
        update = update_helprequest_parser.parse_args()
        helprequest['priority'] = update['priority']
        comment = update['comment']
        # ignore blank / whitespace-only comments
        if comment.strip():
            helprequest.setdefault('comments', []).append(comment)
        return make_response(render_helprequest_as_html(helprequest), 200)
class HelpRequestAsJSON(Resource):
    """Single help request as JSON-LD."""
    def get(self, helprequest_id):
        """Return the help request dict with the JSON-LD @context attached."""
        error_if_helprequest_not_found(helprequest_id)
        helprequest = data["helprequests"][helprequest_id]
        # NOTE(review): this mutates the stored request by attaching
        # "@context" in-place -- presumably intentional; confirm
        helprequest["@context"] = data["@context"]
        return helprequest
class HelpRequestList(Resource):
    """Collection of help requests: list/filter (GET) and create (POST)."""
    def get(self):
        """Return the filtered, sorted help request list as HTML."""
        query = query_parser.parse_args()
        return make_response(
            render_helprequest_list_as_html(
                filter_and_sort_helprequests(
                    q=query['q'], sort_by=query['sort-by'])), 200)
    def post(self):
        """Create a help request and return the updated list (201)."""
        helprequest = new_helprequest_parser.parse_args()
        helprequest['time'] = datetime.isoformat(datetime.now())
        helprequest['priority'] = PRIORITIES.index('normal')
        # BUG FIX: the bare name `helprequests` was never defined and made
        # every POST fail with NameError; store into the shared data dict.
        data["helprequests"][generate_id()] = helprequest
        return make_response(
            render_helprequest_list_as_html(
                filter_and_sort_helprequests()), 201)
class HelpRequestListAsJSON(Resource):
    """Whole dataset (JSON-LD context + all help requests) as JSON."""
    def get(self):
        return data
#
# assign URL paths to our resources
#
app = Flask(__name__)
api = Api(app)
# HTML and JSON views of the collection and of individual requests
api.add_resource(HelpRequestList, '/requests')
api.add_resource(HelpRequestListAsJSON, '/requests.json')
api.add_resource(HelpRequest, '/request/<string:helprequest_id>')
api.add_resource(HelpRequestAsJSON, '/request/<string:helprequest_id>.json')
# start the server
if __name__ == '__main__':
    # debug mode: auto-reload and in-browser tracebacks (development only)
    app.run(debug=True)
# To run on port 5000 and accept external connections, use:
# app.run(host='0.0.0.0', port=5000)
from flask import Flask, request, render_template, make_response
from flask.ext.restful import Api, Resource, reqparse, abort
import json
import string
import random
from datetime import datetime
# define our priority levels (index order doubles as severity rank)
PRIORITIES = ( 'closed', 'low', 'normal', 'high' )
# load help requests data from disk
# NOTE: the name `data` is first the open file handle, then rebound to the
# parsed JSON-LD dict that the rest of the module reads and mutates
with open('data.jsonld') as data:
    data = json.load(data)
#
# define some helper functions
#
def generate_id(size=6, chars=string.ascii_lowercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    return ''.join(random.choice(chars) for _ in range(size))
def error_if_helprequest_not_found(helprequest_id):
    """Abort the request with a 404 (and a JSON message) for unknown ids."""
    if helprequest_id not in data["helprequests"]:
        message = "Help request {} doesn't exist".format(helprequest_id)
        abort(404, message=message)
def filter_and_sort_helprequests(q='', sort_by='time'):
    """Return (id, request) pairs whose title+description contain *q*
    (case-insensitively), sorted descending by the *sort_by* field."""
    filter_function = lambda x: q.lower() in (
        x[1]['title'] + x[1]['description']).lower()
    filtered_helprequests = filter(filter_function,
                                   data["helprequests"].items())
    key_function = lambda x: x[1][sort_by]
    return sorted(filtered_helprequests, key=key_function, reverse=True)
def render_helprequest_as_html(helprequest):
    """Render one help request with the microdata/RDFa template.

    Priorities are passed as (index, name) pairs, highest first."""
    return render_template(
        'helprequest+microdata+rdfa.html',
        helprequest=helprequest,
        priorities=reversed(list(enumerate(PRIORITIES))))
def render_helprequest_list_as_html(helprequests):
    """Render the list page for the given (id, request) pairs."""
    return render_template(
        'helprequests.html',
        helprequests=helprequests,
        priorities=PRIORITIES)
def nonempty_string(x):
    """Coerce *x* to str, raising ValueError if the result is empty.

    Used as a reqparse ``type=`` validator so required text fields
    cannot be blank.
    """
    s = str(x)
    # BUG FIX: check the coerced string, not the raw input -- len(x)
    # raises TypeError for inputs without __len__ (e.g. numbers).
    if len(s) == 0:
        raise ValueError('string is empty')
    return s
#
# specify the data we need to create a new help request
#
new_helprequest_parser = reqparse.RequestParser()
# a new request must carry a non-empty sender, title and description
for arg in ['from', 'title', 'description']:
    new_helprequest_parser.add_argument(
        arg, type=nonempty_string, required=True,
        help="'{}' is a required value".format(arg))
#
# specify the data we need to update an existing help request
#
update_helprequest_parser = reqparse.RequestParser()
update_helprequest_parser.add_argument(
    'priority', type=int, default=PRIORITIES.index('normal'))
update_helprequest_parser.add_argument(
    'comment', type=str, default='')
#
# specify the parameters for filtering and sorting help requests
#
query_parser = reqparse.RequestParser()
query_parser.add_argument(
    'q', type=str, default='')
query_parser.add_argument(
    'sort-by', type=str, choices=('priority', 'time'), default='time')
#
# define our (kinds of) resources
#
class HelpRequest(Resource):
    """Single help request, rendered as HTML."""
    def get(self, helprequest_id):
        """Return the HTML page for one help request (404 if unknown)."""
        error_if_helprequest_not_found(helprequest_id)
        return make_response(
            render_helprequest_as_html(
                data["helprequests"][helprequest_id]), 200)
    def patch(self, helprequest_id):
        """Update the priority and optionally append a comment."""
        error_if_helprequest_not_found(helprequest_id)
        helprequest = data["helprequests"][helprequest_id]
        update = update_helprequest_parser.parse_args()
        helprequest['priority'] = update['priority']
        # only store non-blank comments
        if len(update['comment'].strip()) > 0:
            helprequest.setdefault('comments', []).append(update['comment'])
        return make_response(
            render_helprequest_as_html(helprequest), 200)
class HelpRequestAsJSON(Resource):
    """Single help request as JSON-LD."""
    def get(self, helprequest_id):
        """Return the help request dict with the JSON-LD @context attached."""
        error_if_helprequest_not_found(helprequest_id)
        helprequest = data["helprequests"][helprequest_id]
        # NOTE(review): this mutates the stored request by attaching
        # "@context" in-place -- presumably intentional; confirm
        helprequest["@context"] = data["@context"]
        return helprequest
class HelpRequestList(Resource):
    """Collection of help requests: list/filter (GET) and create (POST)."""
    def get(self):
        """Return the filtered, sorted help request list as HTML."""
        query = query_parser.parse_args()
        return make_response(
            render_helprequest_list_as_html(
                filter_and_sort_helprequests(
                    q=query['q'], sort_by=query['sort-by'])), 200)
    def post(self):
        """Create a help request and return the updated list (201)."""
        helprequest = new_helprequest_parser.parse_args()
        helprequest['time'] = datetime.isoformat(datetime.now())
        helprequest['priority'] = PRIORITIES.index('normal')
        # BUG FIX: the bare name `helprequests` was never defined and made
        # every POST fail with NameError; store into the shared data dict.
        data["helprequests"][generate_id()] = helprequest
        return make_response(
            render_helprequest_list_as_html(
                filter_and_sort_helprequests()), 201)
class HelpRequestListAsJSON(Resource):
    """Whole dataset (JSON-LD context + all help requests) as JSON."""
    def get(self):
        return data
#
# assign URL paths to our resources
#
app = Flask(__name__)
api = Api(app)
# HTML and JSON views of the collection and of individual requests
api.add_resource(HelpRequestList, '/requests')
api.add_resource(HelpRequestListAsJSON, '/requests.json')
api.add_resource(HelpRequest, '/request/<string:helprequest_id>')
api.add_resource(HelpRequestAsJSON, '/request/<string:helprequest_id>.json')
# start the server
if __name__ == '__main__':
    # bind to all interfaces so the service is reachable externally
    app.run(host='0.0.0.0', port=5000)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
#######################################################################################################################
#
#
# Gray matter segmentation - new implementation
#
# ----------------------------------------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Sara Dupont
# Modified: 2016-06-14
#
# About the license: see the file LICENSE.TXT
########################################################################################################################
'''
INFORMATION:
The model used in this function is compound of:
- a dictionary: a list of slices of WM/GM contrasted images with their manual segmentations [slices.pklz]
- a model representing this dictionary in a reduced space (a PCA or an isomap model as implemented in sk-learn) [fitted_model.pklz]
- the dictionary data fitted to this model (i.e. in the model space) [fitted_data.pklz]
- the averaged median intensity in the white and gray matter in the model [intensities.pklz]
- an information file indicating which parameters were used to construct this model, and te date of computation [info.txt]
A constructed model is provided in the toolbox here: $PATH_SCT/data/gm_model.
It's made from T2* images of 80 subjects and computed with the parameters that gives the best gray matter segmentation results.
However you can compute you own model with your own data or with other parameters and use it to segment gray matter by using the flag -model path_new_gm_model/.
To do so, you should have a folder (path_to_dataset/) containing for each subject (with a folder per subject):
- a WM/GM contrasted image (for ex T2*-w) containing 'im' in its name
- a segmentation of the spinal cord containing 'seg' in its name
- a (or several) manual segmentation(s) of the gray matter containing 'gm' in its(their) name(s)
- a level file containing 'level' in its name : it can be an image containing a level label per slice indicating at wich vertebral level correspond this slice (usually obtained by registering the PAM50 template to the WM/GM contrasted image) or a text file indicating the level of each slice.
For more information on the parameters available to compute the model, type:
msct_multiatlas_seg -h
to compute the model, use the following command line :
msct_multiatlas_seg -path-data path_to_dataset/
Then use the folder gm_model/ (output from msct_multiatlas_seg) in this function the flag -model gm_model/
'''
from msct_multiatlas_seg import Param, ParamData, ParamModel, Model
from msct_gmseg_utils import pre_processing, register_data, apply_transfo, normalize_slice, average_gm_wm, binarize
from sct_utils import printv, tmp_create, extract_fname, add_suffix, slash_at_the_end, run
import sct_image
from sct_image import set_orientation
from msct_image import Image
from msct_parser import *
import sct_maths, sct_register_multimodal
from math import exp
import numpy as np
import shutil, os, sys, time
import msct_qc
def get_parser():
    """Build and return the command-line argument Parser for this script.

    Declares the mandatory inputs (-i image, -s spinal cord segmentation),
    the optional vertebral-level file, segmentation tuning options, output
    options and misc flags. Default values are taken from the ParamSeg /
    ParamData / Param parameter classes.
    """
    # Initialize the parser
    parser = Parser(__file__)
    parser.usage.set_description('Segmentation of the white and gray matter.'
                                 ' The segmentation is based on a multi-atlas method that uses a dictionary of pre-segmented gray matter images (already included in SCT) and finds the most similar images for identifying the gray matter using label fusion approach. The model used by this method contains: a template of the white/gray matter segmentation along the cervical spinal cord, and a PCA reduced space to describe the variability of intensity in that template.'
                                 ' This method was inspired from [Asman et al., Medical Image Analysis 2014] and features the following additions:\n'
                                 '- possibility to add information from vertebral levels for improved accuracy\n'
                                 '- intensity normalization of the image to segment (allows the segmentation of any kind of contrast)\n'
                                 '- pre-registration based on non-linear transformations')
    parser.add_option(name="-i",
                      type_value="file",
                      description="Image to segment",
                      mandatory=True,
                      example='t2star.nii.gz')
    parser.add_option(name="-s",
                      type_value="file",
                      description="Spinal cord segmentation",
                      mandatory=True,
                      example='sc_seg.nii.gz')
    parser.add_option(name="-vertfile",
                      type_value="file",
                      description='Labels of vertebral levels. This could either be an image (e.g., label/template/PAM50_levels.nii.gz) or a text file that specifies "slice,level" at each line. Example:\n'
                                  "0,3\n"
                                  "1,3\n"
                                  "2,4\n"
                                  "3,4\n"
                                  "4,4\n",
                      mandatory=False,
                      default_value=ParamSeg().fname_level)
    # deprecated aliases kept for backward compatibility
    parser.add_option(name="-vert",
                      mandatory=False,
                      deprecated_by='-vertfile')
    parser.add_option(name="-l",
                      mandatory=False,
                      deprecated_by='-vertfile')
    parser.usage.addSection('SEGMENTATION OPTIONS')
    parser.add_option(name="-denoising",
                      type_value='multiple_choice',
                      description="1: Adaptative denoising from F. Coupe algorithm, 0: no WARNING: It affects the model you should use (if denoising is applied to the target, the model should have been computed with denoising too)",
                      mandatory=False,
                      default_value=int(ParamData().denoising),
                      example=['0', '1'])
    parser.add_option(name="-normalization",
                      type_value='multiple_choice',
                      description="Normalization of the target image's intensity using median intensity values of the WM and the GM, recomended with MT images or other types of contrast than T2*",
                      mandatory=False,
                      default_value=int(ParamData().normalization),
                      example=['0', '1'])
    parser.add_option(name="-p",
                      type_value='str',
                      description="Registration parameters to register the image to segment on the model data. Use the same format as for sct_register_to_template and sct_register_multimodal.",
                      mandatory=False,
                      default_value=ParamData().register_param,
                      example='step=1,type=seg,algo=centermassrot,metric=MeanSquares,smooth=2,iter=1:step=2,type=seg,algo=columnwise,metric=MeanSquares,smooth=3,iter=1:step=3,type=seg,algo=bsplinesyn,metric=MeanSquares,iter=3')
    parser.add_option(name="-w-levels",
                      type_value='float',
                      description="Weight parameter on the level differences to compute the similarities",
                      mandatory=False,
                      default_value=ParamSeg().weight_level,
                      example=2.0)
    parser.add_option(name="-w-coordi",
                      type_value='float',
                      description="Weight parameter on the euclidean distance (based on images coordinates in the reduced sapce) to compute the similarities ",
                      mandatory=False,
                      default_value=ParamSeg().weight_coord,
                      example=0.005)
    parser.add_option(name="-thr-sim",
                      type_value='float',
                      description="Threshold to select the dictionary slices most similar to the slice to segment (similarities are normalized to 1)",
                      mandatory=False,
                      default_value=ParamSeg().thr_similarity,
                      example=0.6)
    parser.add_option(name="-model",
                      type_value="folder",
                      description="Path to the computed model",
                      mandatory=False,
                      example='/home/jdoe/gm_seg_model/')
    parser.usage.addSection('\nOUTPUT OTIONS')
    parser.add_option(name="-res-type",
                      type_value='multiple_choice',
                      description="Type of result segmentation : binary or probabilistic",
                      mandatory=False,
                      default_value=ParamSeg().type_seg,
                      example=['bin', 'prob'])
    parser.add_option(name="-ratio",
                      type_value='multiple_choice',
                      description="Compute GM/WM CSA ratio by slice or by vertebral level (average across levels)",
                      mandatory=False,
                      default_value=ParamSeg().ratio,
                      example=['0', 'slice', 'level'])
    parser.add_option(name="-ref",
                      type_value="file",
                      description="Reference segmentation of the gray matter for segmentation validation --> output Dice coefficient and Hausdorff's and median distances)",
                      mandatory=False,
                      example='manual_gm_seg.nii.gz')
    parser.add_option(name="-ofolder",
                      type_value="folder_creation",
                      description="Output folder",
                      mandatory=False,
                      default_value=ParamSeg().path_results,
                      example='gm_segmentation_results/')
    parser.usage.addSection('MISC')
    parser.add_option(name='-qc',
                      type_value='multiple_choice',
                      description='Output images for quality control.',
                      mandatory=False,
                      example=['0', '1'],
                      default_value=str(int(ParamSeg().qc)))
    parser.add_option(name="-r",
                      type_value="multiple_choice",
                      description='Remove temporary files.',
                      mandatory=False,
                      default_value=str(int(Param().rm_tmp)),
                      example=['0', '1'])
    parser.add_option(name="-v",
                      type_value='multiple_choice',
                      description="Verbose: 0 = nothing, 1 = classic, 2 = expended",
                      mandatory=False,
                      example=['0', '1', '2'],
                      default_value=str(Param().verbose))
    parser.add_option(name="-param-qc",
                      type_value=[[','], 'str'],
                      description=msct_qc.Qc_Params.get_qc_params_description(["ofolder", "autoview", "generate", "ncol", "thresh"]),
                      mandatory=False)
    return parser
class ParamSeg:
    """Parameters controlling the gray-matter segmentation itself."""
    def __init__(self):
        # input / output file names
        self.fname_im = None
        self.fname_im_original = None
        self.fname_seg = None
        self.fname_level = 'label/template/PAM50_levels.nii.gz'
        self.fname_manual_gmseg = None
        self.path_results = './'
        # parameters used when computing slice similarities
        self.weight_level = 2.5  # gamma: weight on vertebral-level distance
        # tau: weight on reduced-space distance --> need to be validated for specific dataset
        self.weight_coord = 0.0065
        # epsilon, but applied to similarities normalized to 1
        # (by slice of dic and slice of target)
        self.thr_similarity = 0.0005
        # TODO = find the best thr
        # output options
        self.type_seg = 'prob'  # 'prob' or 'bin'
        self.ratio = '0'  # '0', 'slice' or 'level'
        self.qc = True
class SegmentGM:
    def __init__(self, param_seg=None, param_model=None, param_data=None, param=None):
        """Set up segmentation state; build the Model and a tmp directory.

        Each ``param_*`` argument falls back to a freshly constructed
        default parameter object when None.
        """
        self.param_seg = param_seg if param_seg is not None else ParamSeg()
        self.param_model = param_model if param_model is not None else ParamModel()
        self.param_data = param_data if param_data is not None else ParamData()
        self.param = param if param is not None else Param()
        # create model:
        self.model = Model(param_model=self.param_model, param_data=self.param_data, param=self.param)
        # create tmp directory
        self.tmp_dir = tmp_create(verbose=self.param.verbose) # path to tmp directory
        self.target_im = None # list of slices
        self.info_preprocessing = None # dic containing {'orientation': 'xxx', 'im_sc_seg_rpi': im, 'interpolated_images': [list of im = interpolated image data per slice]}
        self.projected_target = None # list of coordinates of the target slices in the model reduced space
        # result segmentations, filled by segment()/post_processing()
        self.im_res_gmseg = None
        self.im_res_wmseg = None
def segment(self):
self.copy_data_to_tmp()
# go to tmp directory
os.chdir(self.tmp_dir)
# load model
self.model.load_model()
self.target_im, self.info_preprocessing = pre_processing(self.param_seg.fname_im, self.param_seg.fname_seg, self.param_seg.fname_level, new_res=self.param_data.axial_res, square_size_size_mm=self.param_data.square_size_size_mm, denoising=self.param_data.denoising, verbose=self.param.verbose, rm_tmp=self.param.rm_tmp)
printv('\nRegister target image to model data...', self.param.verbose, 'normal')
# register target image to model dictionary space
path_warp = self.register_target()
printv('\nNormalize intensity of target image...', self.param.verbose, 'normal')
self.normalize_target()
printv('\nProject target image into the model reduced space...', self.param.verbose, 'normal')
self.project_target()
printv('\nCompute similarities between target slices and model slices using model reduced space...', self.param.verbose, 'normal')
list_dic_indexes_by_slice = self.compute_similarities()
printv('\nLabel fusion of model slices most similar to target slices...', self.param.verbose, 'normal')
self.label_fusion(list_dic_indexes_by_slice)
printv('\nWarp back segmentation into image space...', self.param.verbose, 'normal')
self.warp_back_seg(path_warp)
printv('\nPost-processing...', self.param.verbose, 'normal')
self.im_res_gmseg, self.im_res_wmseg = self.post_processing()
if (self.param_seg.path_results != './') and (not os.path.exists('../'+self.param_seg.path_results)):
# create output folder
printv('\nCreate output folder ...', self.param.verbose, 'normal')
os.chdir('..')
os.mkdir(self.param_seg.path_results)
os.chdir(self.tmp_dir)
if self.param_seg.fname_manual_gmseg is not None:
# compute validation metrics
printv('\nCompute validation metrics...', self.param.verbose, 'normal')
self.validation()
if self.param_seg.ratio is not '0':
printv('\nCompute GM/WM CSA ratio...', self.param.verbose, 'normal')
self.compute_ratio()
# go back to original directory
os.chdir('..')
printv('\nSave resulting GM and WM segmentations...', self.param.verbose, 'normal')
fname_res_gmseg = self.param_seg.path_results+add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_gmseg')
fname_res_wmseg = self.param_seg.path_results+add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_wmseg')
self.im_res_gmseg.setFileName(fname_res_gmseg)
self.im_res_wmseg.setFileName(fname_res_wmseg)
self.im_res_gmseg.save()
self.im_res_wmseg.save()
# save quality control and print info
if self.param_seg.type_seg == 'bin':
wm_col = 'Red'
gm_col = 'Blue'
b = '0,1'
else:
wm_col = 'Blue-Lightblue'
gm_col = 'Red-Yellow'
b = '0.4,1'
if self.param_seg.qc:
# output QC image
printv('\nSave quality control images...', self.param.verbose, 'normal')
im = Image(self.tmp_dir+self.param_seg.fname_im)
im.save_quality_control(plane='axial', n_slices=5, seg=self.im_res_gmseg, thr=float(b.split(',')[0]), cmap_col='red-yellow', path_output=self.param_seg.path_results)
printv('\nDone! To view results, type:', self.param.verbose)
printv('fslview '+self.param_seg.fname_im_original+' '+fname_res_gmseg+' -b '+b+' -l '+gm_col+' -t 0.7 '+fname_res_wmseg+' -b '+b+' -l '+wm_col+' -t 0.7 & \n', self.param.verbose, 'info')
if self.param.rm_tmp:
# remove tmp_dir
shutil.rmtree(self.tmp_dir)
    def copy_data_to_tmp(self):
        """Copy input, SC seg, level and optional manual GM seg into the tmp
        directory, and strip each fname down to 'name.ext' (no path).
        """
        # copy input image
        if self.param_seg.fname_im is not None:
            shutil.copy(self.param_seg.fname_im, self.tmp_dir)
            self.param_seg.fname_im = ''.join(extract_fname(self.param_seg.fname_im)[1:])
        else:
            printv('ERROR: No input image', self.param.verbose, 'error')
        # copy sc seg image
        if self.param_seg.fname_seg is not None:
            shutil.copy(self.param_seg.fname_seg, self.tmp_dir)
            self.param_seg.fname_seg = ''.join(extract_fname(self.param_seg.fname_seg)[1:])
        else:
            printv('ERROR: No SC segmentation image', self.param.verbose, 'error')
        # copy level file
        if self.param_seg.fname_level is not None:
            shutil.copy(self.param_seg.fname_level, self.tmp_dir)
            self.param_seg.fname_level = ''.join(extract_fname(self.param_seg.fname_level)[1:])
        # copy manual GM seg if provided (used by validation())
        if self.param_seg.fname_manual_gmseg is not None:
            shutil.copy(self.param_seg.fname_manual_gmseg, self.tmp_dir)
            self.param_seg.fname_manual_gmseg = ''.join(extract_fname(self.param_seg.fname_manual_gmseg)[1:])
    def get_im_from_list(self, data):
        """Build a 3D Image from an array of 2D slices with correct header.

        The pixel size is set to the model axial resolution, then the image
        is re-oriented (IRP, then PIL with data inversion) so the slice
        stack matches the orientation expected by the registration step.
        """
        im = Image(data)
        # set pix dimension
        im.hdr.structarr['pixdim'][1] = self.param_data.axial_res
        im.hdr.structarr['pixdim'][2] = self.param_data.axial_res
        # set the correct orientation
        im.setFileName('im_to_orient.nii.gz')
        im.save()
        im = set_orientation(im, 'IRP')
        im = set_orientation(im, 'PIL', data_inversion=True)
        return im
    def register_target(self):
        """Register the target slices onto the model mean image.

        Stores the moved image per slice (``im_m``) and returns the path of
        the folder holding the forward/backward warping fields.
        """
        # create dir to store warping fields
        path_warping_fields = 'warp_target/'
        if not os.path.exists(path_warping_fields):
            os.mkdir(path_warping_fields)
        # get 3D images from list of slices
        # (destination = the model mean image repeated once per target slice)
        im_dest = self.get_im_from_list(np.array([self.model.mean_image for target_slice in self.target_im]))
        im_src = self.get_im_from_list(np.array([target_slice.im for target_slice in self.target_im]))
        # register list of target slices on list of model mean image
        im_src_reg, fname_src2dest, fname_dest2src = register_data(im_src, im_dest, param_reg=self.param_data.register_param, path_copy_warp=path_warping_fields, rm_tmp=self.param.rm_tmp)
        # rename warping fields
        fname_src2dest_save = 'warp_target2dic.nii.gz'
        fname_dest2src_save = 'warp_dic2target.nii.gz'
        shutil.move(path_warping_fields+fname_src2dest, path_warping_fields+fname_src2dest_save)
        shutil.move(path_warping_fields+fname_dest2src, path_warping_fields+fname_dest2src_save)
        #
        for i, target_slice in enumerate(self.target_im):
            # set moved image for each slice
            target_slice.set(im_m=im_src_reg.data[i])
        return path_warping_fields
    def normalize_target(self):
        """Normalize each registered target slice's intensity using the
        model's per-level median GM/WM intensities.
        """
        # get gm seg from model by level
        gm_seg_model, wm_seg_model = self.model.get_gm_wm_by_level()
        # for each target slice: normalize
        for target_slice in self.target_im:
            level_int = int(round(target_slice.level))
            # fall back to level 0 when the slice level is not in the model
            if level_int not in self.model.intensities.index:
                level_int = 0
            norm_im_M = normalize_slice(target_slice.im_M, gm_seg_model[level_int], wm_seg_model[level_int], self.model.intensities['GM'][level_int], self.model.intensities['WM'][level_int], val_min=self.model.intensities['MIN'][level_int], val_max=self.model.intensities['MAX'][level_int])
            target_slice.set(im_m=norm_im_M)
def project_target(self):
projected_target_slices = []
for target_slice in self.target_im:
# get slice data in the good shape
slice_data = target_slice.im_M.flatten()
slice_data = slice_data.reshape(1, -1) # data with single sample
# project slice data into the model
slice_data_projected = self.model.fitted_model.transform(slice_data)
projected_target_slices.append(slice_data_projected)
# store projected target slices
self.projected_target = projected_target_slices
def compute_similarities(self):
list_dic_indexes_by_slice = []
for i, target_coord in enumerate(self.projected_target):
list_dic_similarities = []
for j, dic_coord in enumerate(self.model.fitted_data):
# compute square norm using coordinates in the model space
square_norm = np.linalg.norm((target_coord - dic_coord), 2)
# compute similarity with or without levels
if self.param_seg.fname_level is not None:
# EQUATION WITH LEVELS
similarity = exp(-self.param_seg.weight_level * abs(self.target_im[i].level - self.model.slices[j].level)) * exp(-self.param_seg.weight_coord * square_norm)
else:
# EQUATION WITHOUT LEVELS
similarity = exp(-self.param_seg.weight_coord * square_norm)
# add similarity to list
list_dic_similarities.append(similarity)
list_norm_similarities = [float(s)/sum(list_dic_similarities) for s in list_dic_similarities]
# select indexes of most similar slices
list_dic_indexes = []
for j, norm_sim in enumerate(list_norm_similarities):
if norm_sim >= self.param_seg.thr_similarity:
list_dic_indexes.append(j)
# save list of indexes into list by slice
list_dic_indexes_by_slice.append(list_dic_indexes)
return list_dic_indexes_by_slice
    def label_fusion(self, list_dic_indexes_by_slice):
        """Fuse the selected model slices into a GM/WM segmentation per slice.

        For each target slice, averages the GM and WM masks of the selected
        dictionary slices; for 'bin' output the averages are thresholded
        at 0.5. Results are stored on each slice as gm_seg_m / wm_seg_m.
        """
        for target_slice in self.target_im:
            # get list of slices corresponding to the indexes
            list_dic_slices = [self.model.slices[j] for j in list_dic_indexes_by_slice[target_slice.id]]
            # average slices GM and WM
            data_mean_gm, data_mean_wm = average_gm_wm(list_dic_slices)
            if self.param_seg.type_seg == 'bin':
                # binarize GM seg
                data_mean_gm[data_mean_gm >= 0.5] = 1
                data_mean_gm[data_mean_gm < 0.5] = 0
                # binarize WM seg
                data_mean_wm[data_mean_wm >= 0.5] = 1
                data_mean_wm[data_mean_wm < 0.5] = 0
            # store segmentation into target_im
            target_slice.set(gm_seg_m=data_mean_gm, wm_seg_m=data_mean_wm)
    def warp_back_seg(self, path_warp):
        """Warp the GM/WM segmentations from model space back to slice space.

        Uses the dic->target warping field saved by register_target();
        nearest-neighbour interpolation for binary output, linear otherwise.
        """
        # get 3D images from list of slices
        im_dest = self.get_im_from_list(np.array([target_slice.im for target_slice in self.target_im]))
        im_src_gm = self.get_im_from_list(np.array([target_slice.gm_seg_M for target_slice in self.target_im]))
        im_src_wm = self.get_im_from_list(np.array([target_slice.wm_seg_M for target_slice in self.target_im]))
        #
        fname_dic_space2slice_space = slash_at_the_end(path_warp, slash=1)+'warp_dic2target.nii.gz'
        interpolation = 'nn' if self.param_seg.type_seg == 'bin' else 'linear'
        # warp GM
        im_src_gm_reg = apply_transfo(im_src_gm, im_dest, fname_dic_space2slice_space, interp=interpolation, rm_tmp=self.param.rm_tmp)
        # warp WM
        im_src_wm_reg = apply_transfo(im_src_wm, im_dest, fname_dic_space2slice_space, interp=interpolation, rm_tmp=self.param.rm_tmp)
        for i, target_slice in enumerate(self.target_im):
            # set GM and WM for each slice
            target_slice.set(gm_seg=im_src_gm_reg.data[i], wm_seg=im_src_wm_reg.data[i])
    def post_processing(self):
        """Interpolate slice-wise segmentations back into the original image.

        Rebuilds full-size GM and WM segmentation volumes in the original
        RPI space (re-aligning each square slice's header onto the spinal
        cord center before interpolating), then reorients both volumes to
        the native orientation. Returns (im_res_gmseg, im_res_wmseg).
        """
        ## DO INTERPOLATION BACK TO ORIGINAL IMAGE
        # get original SC segmentation oriented in RPI
        im_sc_seg_original_rpi = self.info_preprocessing['im_sc_seg_rpi'].copy()
        nx_ref, ny_ref, nz_ref, nt_ref, px_ref, py_ref, pz_ref, pt_ref = im_sc_seg_original_rpi.dim
        # create res GM seg image
        im_res_gmseg = im_sc_seg_original_rpi.copy()
        im_res_gmseg.data = np.zeros(im_res_gmseg.data.shape)
        # create res WM seg image
        im_res_wmseg = im_sc_seg_original_rpi.copy()
        im_res_wmseg.data = np.zeros(im_res_wmseg.data.shape)
        printv('  Interpolate result back into original space...', self.param.verbose, 'normal')
        for iz, im_iz_preprocessed in enumerate(self.info_preprocessing['interpolated_images']):
            # im gmseg for slice iz
            im_gmseg = im_iz_preprocessed.copy()
            im_gmseg.data = np.zeros(im_gmseg.data.shape)
            im_gmseg.data = self.target_im[iz].gm_seg
            # im wmseg for slice iz
            im_wmseg = im_iz_preprocessed.copy()
            im_wmseg.data = np.zeros(im_wmseg.data.shape)
            im_wmseg.data = self.target_im[iz].wm_seg
            for im_res_slice, im_res_tot in [(im_gmseg, im_res_gmseg), (im_wmseg, im_res_wmseg)]:
                # get reference image for this slice
                # (use only one slice to accelerate interpolation)
                im_ref = im_sc_seg_original_rpi.copy()
                im_ref.data = im_ref.data[:, :, iz]
                im_ref.dim = (nx_ref, ny_ref, 1, nt_ref, px_ref, py_ref, pz_ref, pt_ref)
                # correct reference header for this slice
                [[x_0_ref, y_0_ref, z_0_ref]] = im_ref.transfo_pix2phys(coordi=[[0, 0, iz]])
                im_ref.hdr.as_analyze_map()['qoffset_x'] = x_0_ref
                im_ref.hdr.as_analyze_map()['qoffset_y'] = y_0_ref
                im_ref.hdr.as_analyze_map()['qoffset_z'] = z_0_ref
                im_ref.hdr.set_sform(im_ref.hdr.get_qform())
                im_ref.hdr.set_qform(im_ref.hdr.get_qform())
                # set im_res_slice header with im_sc_seg_original_rpi origin
                im_res_slice.hdr.as_analyze_map()['qoffset_x'] = x_0_ref
                im_res_slice.hdr.as_analyze_map()['qoffset_y'] = y_0_ref
                im_res_slice.hdr.as_analyze_map()['qoffset_z'] = z_0_ref
                im_res_slice.hdr.set_sform(im_res_slice.hdr.get_qform())
                im_res_slice.hdr.set_qform(im_res_slice.hdr.get_qform())
                # get physical coordinates of center of sc
                x_seg, y_seg = (im_sc_seg_original_rpi.data[:, :, iz] > 0).nonzero()
                x_center, y_center = np.mean(x_seg), np.mean(y_seg)
                [[x_center_phys, y_center_phys, z_center_phys]] = im_sc_seg_original_rpi.transfo_pix2phys(coordi=[[x_center, y_center, iz]])
                # get physical coordinates of center of square WITH im_res_slice WITH SAME ORIGIN AS im_sc_seg_original_rpi
                sq_size_pix = int(self.param_data.square_size_size_mm / self.param_data.axial_res)
                [[x_square_center_phys, y_square_center_phys, z_square_center_phys]] = im_res_slice.transfo_pix2phys(
                    coordi=[[int(sq_size_pix / 2), int(sq_size_pix / 2), 0]])
                # set im_res_slice header by adding center of SC and center of square (in the correct space) to origin
                im_res_slice.hdr.as_analyze_map()['qoffset_x'] += x_center_phys - x_square_center_phys
                im_res_slice.hdr.as_analyze_map()['qoffset_y'] += y_center_phys - y_square_center_phys
                im_res_slice.hdr.as_analyze_map()['qoffset_z'] += z_center_phys
                im_res_slice.hdr.set_sform(im_res_slice.hdr.get_qform())
                im_res_slice.hdr.set_qform(im_res_slice.hdr.get_qform())
                # reshape data
                im_res_slice.data = im_res_slice.data.reshape((sq_size_pix, sq_size_pix, 1))
                # interpolate to reference image
                # nearest-neighbour for binary output, linear otherwise
                interp = 0 if self.param_seg.type_seg == 'bin' else 1
                im_res_slice_interp = im_res_slice.interpolate_from_image(im_ref, interpolation_mode=interp, border='nearest')
                # set correct slice of total image with this slice
                if len(im_res_slice_interp.data.shape) == 3:
                    shape_x, shape_y, shape_z = im_res_slice_interp.data.shape
                    im_res_slice_interp.data = im_res_slice_interp.data.reshape((shape_x, shape_y))
                im_res_tot.data[:, :, iz] = im_res_slice_interp.data
        printv('  Reorient resulting segmentations to native orientation...', self.param.verbose, 'normal')
        ## PUT RES BACK IN ORIGINAL ORIENTATION
        im_res_gmseg.setFileName('res_gmseg.nii.gz')
        im_res_gmseg.save()
        im_res_gmseg = set_orientation(im_res_gmseg, self.info_preprocessing['orientation'])
        im_res_wmseg.setFileName('res_wmseg.nii.gz')
        im_res_wmseg.save()
        im_res_wmseg = set_orientation(im_res_wmseg, self.info_preprocessing['orientation'])
        return im_res_gmseg, im_res_wmseg
def validation(self):
tmp_dir_val = 'tmp_validation/'
if not os.path.exists(tmp_dir_val):
os.mkdir(tmp_dir_val)
# copy data into tmp dir val
shutil.copy(self.param_seg.fname_manual_gmseg, tmp_dir_val)
shutil.copy(self.param_seg.fname_seg, tmp_dir_val)
os.chdir(tmp_dir_val)
fname_manual_gmseg = ''.join(extract_fname(self.param_seg.fname_manual_gmseg)[1:])
fname_seg = ''.join(extract_fname(self.param_seg.fname_seg)[1:])
im_gmseg = self.im_res_gmseg.copy()
im_wmseg = self.im_res_wmseg.copy()
if self.param_seg.type_seg == 'prob':
im_gmseg = binarize(im_gmseg, thr_max=0.5, thr_min=0.5)
im_wmseg = binarize(im_wmseg, thr_max=0.5, thr_min=0.5)
fname_gmseg = 'res_gmseg.nii.gz'
im_gmseg.setFileName(fname_gmseg)
im_gmseg.save()
fname_wmseg = 'res_wmseg.nii.gz'
im_wmseg.setFileName(fname_wmseg)
im_wmseg.save()
# get manual WM seg:
fname_manual_wmseg = 'manual_wmseg.nii.gz'
sct_maths.main(args=['-i', fname_seg,
'-sub', fname_manual_gmseg,
'-o', fname_manual_wmseg])
## compute DC:
try:
status_gm, output_gm = run('sct_dice_coefficient -i ' + fname_manual_gmseg + ' -d ' + fname_gmseg + ' -2d-slices 2',error_exit='warning', raise_exception=True)
status_wm, output_wm = run('sct_dice_coefficient -i ' + fname_manual_wmseg + ' -d ' + fname_wmseg + ' -2d-slices 2',error_exit='warning', raise_exception=True)
except Exception:
# put ref and res in the same space if needed
fname_manual_gmseg_corrected = add_suffix(fname_manual_gmseg, '_reg')
sct_register_multimodal.main(args=['-i', fname_manual_gmseg,
'-d', fname_gmseg,
'-identity', '1'])
sct_maths.main(args=['-i', fname_manual_gmseg_corrected,
'-bin', '0.1',
'-o', fname_manual_gmseg_corrected])
#
fname_manual_wmseg_corrected = add_suffix(fname_manual_wmseg, '_reg')
sct_register_multimodal.main(args=['-i', fname_manual_wmseg,
'-d', fname_wmseg,
'-identity', '1'])
sct_maths.main(args=['-i', fname_manual_wmseg_corrected,
'-bin', '0.1',
'-o', fname_manual_wmseg_corrected])
# recompute DC
status_gm, output_gm = run('sct_dice_coefficient -i ' + fname_manual_gmseg_corrected + ' -d ' + fname_gmseg + ' -2d-slices 2',error_exit='warning', raise_exception=True)
status_wm, output_wm = run('sct_dice_coefficient -i ' + fname_manual_wmseg_corrected + ' -d ' + fname_wmseg + ' -2d-slices 2',error_exit='warning', raise_exception=True)
# save results to a text file
fname_dc = 'dice_coefficient_' + sct.extract_fname(self.param_seg.fname_im)[1] + '.txt'
file_dc = open(fname_dc, 'w')
if self.param_seg.type_seg == 'prob':
file_dc.write('WARNING : the probabilistic segmentations were binarized with a threshold at 0.5 to compute the dice coefficient \n')
file_dc.write('\n--------------------------------------------------------------\nDice coefficient on the Gray Matter segmentation:\n')
file_dc.write(output_gm)
file_dc.write('\n\n--------------------------------------------------------------\nDice coefficient on the White Matter segmentation:\n')
file_dc.write(output_wm)
file_dc.close()
## compute HD and MD:
fname_hd = 'hausdorff_dist_' + sct.extract_fname(self.param_seg.fname_im)[1] + '.txt'
run('sct_compute_hausdorff_distance -i ' + fname_gmseg + ' -d ' + fname_manual_gmseg + ' -thinning 1 -o ' + fname_hd + ' -v ' + str(self.param.verbose))
# get out of tmp dir to copy results to output folder
os.chdir('../..')
shutil.copy(self.tmp_dir+tmp_dir_val+'/'+fname_dc, self.param_seg.path_results)
shutil.copy(self.tmp_dir + tmp_dir_val + '/' + fname_hd, self.param_seg.path_results)
os.chdir(self.tmp_dir)
if self.param.rm_tmp:
shutil.rmtree(tmp_dir_val)
def compute_ratio(self):
    """
    Compute the GM/WM cross-sectional area (CSA) ratio, either per slice or
    averaged per vertebral level, and copy the resulting text file
    ('ratio_by_<slice|level>.txt') to the output folder.

    Reads self.im_res_gmseg / self.im_res_wmseg (segmentation results) and,
    for the per-level ratio, the vertebral level of each target slice.
    """
    type_ratio = self.param_seg.ratio
    # work inside a dedicated sub-directory of the tmp dir
    tmp_dir_ratio = 'tmp_ratio/'
    os.mkdir(tmp_dir_ratio)
    os.chdir(tmp_dir_ratio)

    fname_gmseg = self.im_res_gmseg.absolutepath
    fname_wmseg = self.im_res_wmseg.absolutepath
    self.im_res_gmseg.save()
    self.im_res_wmseg.save()
    # sct_process_segmentation expects RPI-oriented data.
    # BUGFIX: the original used "is not 'RPI'" — an identity test against a
    # string literal that only works by accident of CPython interning
    # (and raises SyntaxWarning on recent Pythons); use != instead.
    if self.im_res_gmseg.orientation != 'RPI':
        im_res_gmseg = set_orientation(self.im_res_gmseg, 'RPI')
        im_res_wmseg = set_orientation(self.im_res_wmseg, 'RPI')
        fname_gmseg = im_res_gmseg.absolutepath
        fname_wmseg = im_res_wmseg.absolutepath

    # compute the CSA per slice for each tissue
    run('sct_process_segmentation -i ' + fname_gmseg + ' -p csa -ofolder gm_csa')
    run('sct_process_segmentation -i ' + fname_wmseg + ' -p csa -ofolder wm_csa')

    # read back the per-slice CSA tables (context managers guarantee close)
    with open('gm_csa/csa_per_slice.txt', 'r') as gm_csa:
        gm_csa_lines = gm_csa.readlines()
    with open('wm_csa/csa_per_slice.txt', 'r') as wm_csa:
        wm_csa_lines = wm_csa.readlines()

    fname_ratio = 'ratio_by_' + type_ratio + '.txt'
    with open(fname_ratio, 'w') as file_ratio:
        file_ratio.write(type_ratio + ', ratio GM/WM CSA\n')
        # (GM area, WM area) pairs gathered per vertebral level (0..24)
        csa_gm_wm_by_level = {lvl: [] for lvl in range(25)}
        # skip the header line of each CSA file
        for gm_line, wm_line in zip(gm_csa_lines[1:], wm_csa_lines[1:]):
            i, gm_area, gm_angle = gm_line.split(',')
            j, wm_area, wm_angle = wm_line.split(',')
            assert i == j
            if type_ratio == 'level':
                level_slice = int(self.target_im[int(i)].level)
                csa_gm_wm_by_level[level_slice].append((float(gm_area), float(wm_area)))
            else:
                file_ratio.write(i + ', ' + str(float(gm_area) / float(wm_area)) + '\n')
        if type_ratio == 'level':
            for l, gm_wm_list in sorted(csa_gm_wm_by_level.items()):
                if gm_wm_list:
                    # ratio of mean GM CSA over mean WM CSA for this level
                    csa_gm = np.mean([gm for gm, wm in gm_wm_list])
                    csa_wm = np.mean([wm for gm, wm in gm_wm_list])
                    file_ratio.write(str(l) + ', ' + str(csa_gm / csa_wm) + '\n')

    shutil.copy(fname_ratio, '../../' + self.param_seg.path_results + '/' + fname_ratio)
    os.chdir('..')
########################################################################################################################
# ------------------------------------------------------ MAIN ------------------------------------------------------- #
########################################################################################################################
def main(args=None):
    """
    Command-line entry point: parse the arguments, run the gray matter
    segmentation, optionally generate a QC report, and print the elapsed time.

    :param args: list of command-line arguments; defaults to sys.argv[1:]
    """
    if args is None:
        args = sys.argv[1:]
    # create param objects (defaults, selectively overridden by CLI flags below)
    param_seg = ParamSeg()
    param_data = ParamData()
    param_model = ParamModel()
    param = Param()
    # get parser
    parser = get_parser()
    arguments = parser.parse(args)
    # set param attributes as inputted by user
    param_seg.fname_im = arguments["-i"]
    # keep a second copy of the input path: fname_im is later rewritten to the
    # tmp-dir basename, while the original path is still needed for QC naming
    param_seg.fname_im_original = arguments["-i"]
    param_seg.fname_seg = arguments["-s"]
    if '-vertfile' in arguments:
        param_seg.fname_level = arguments['-vertfile']
    if '-denoising' in arguments:
        param_data.denoising = bool(int(arguments['-denoising']))
    if '-normalization' in arguments:
        param_data.normalization = arguments['-normalization']
    if '-p' in arguments:
        param_data.register_param = arguments['-p']
    if '-w-levels' in arguments:
        param_seg.weight_level = arguments['-w-levels']
    if '-w-coordi' in arguments:
        param_seg.weight_coord = arguments['-w-coordi']
    if '-thr-sim' in arguments:
        param_seg.thr_similarity = arguments['-thr-sim']
    if '-model' in arguments:
        param_model.path_model_to_load = os.path.abspath(arguments['-model'])
    if '-res-type' in arguments:
        param_seg.type_seg= arguments['-res-type']
    if '-ratio' in arguments:
        param_seg.ratio = arguments['-ratio']
    if '-ref' in arguments:
        param_seg.fname_manual_gmseg = arguments['-ref']
    if '-ofolder' in arguments:
        param_seg.path_results= arguments['-ofolder']
    if '-qc' in arguments:
        param_seg.qc = bool(int(arguments['-qc']))
    if '-r' in arguments:
        param.rm_tmp= bool(int(arguments['-r']))
    if '-v' in arguments:
        param.verbose= arguments['-v']
    # the default level file may not exist: fall back to level-less segmentation
    if not os.path.isfile(param_seg.fname_level):
        param_seg.fname_level = None
    # parse parameters
    # TODO refactor
    fname_in = param_seg.fname_im_original
    seg_gm = SegmentGM(param_seg=param_seg, param_data=param_data, param_model=param_model, param=param)
    # time the whole segmentation for the final report
    start = time.time()
    seg_gm.segment()
    end = time.time()
    t = end - start
    # Decode the parameters of -param-qc, verification done here because if name of param-qc changes, easier to change here
    qcParams = None
    if '-param-qc' in arguments:
        qcParams = msct_qc.Qc_Params(arguments['-param-qc'])
    # Need to verify in the case that "generate" arg is provided and means false else we will generate qc
    if qcParams is None or qcParams.generate_report is True:
        printv("\nPreparing QC Report...\n")
        # There are no way to get the name easily this is why this is hard coded...
        # TODO: find a way to get the name
        output_filename = param_seg.fname_im_original.split(".")[0]+"_gmseg.nii.gz"
        # Qc_Report generates and contains the useful infos for qc generation
        qcReport = msct_qc.Qc_Report("sct_segment_graymatter", qcParams, sys.argv[1:], parser.usage.description)

        @msct_qc.Qc(qcReport, action_list=[msct_qc.Qc.sequential_seg, msct_qc.Qc.colorbar])
        def grayseg_qc(sct_slice, nb_column, thr = 0.5):
            """
            Build the QC mosaic and its thresholded segmentation overlay.

            :param sct_slice: slice object giving access to the image/seg data
            :param nb_column: number of columns in the mosaic
            :param thr: threshold to apply to the segmentation
            :return: (image mosaic, thresholded segmentation mosaic)
            """
            # Chosen axe to generate image
            img, seg = sct_slice.mosaic(nb_column=nb_column)
            # zero-out low-probability voxels so the overlay stays readable
            seg[seg < thr] = 0
            return img, seg
        # the wrapped function
        grayseg_qc( msct_qc.axial(fname_in, output_filename),qcReport.qc_params.nb_column)
    printv('Done in ' + str(int(round(t / 60))) + ' min, ' + str(round(t % 60,1)) + ' sec', param.verbose, 'info')
# run main() only when executed as a script (not on import)
if __name__ == "__main__":
    main()
Add threshold
#!/usr/bin/env python
#######################################################################################################################
#
#
# Gray matter segmentation - new implementation
#
# ----------------------------------------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Sara Dupont
# Modified: 2016-06-14
#
# About the license: see the file LICENSE.TXT
########################################################################################################################
'''
INFORMATION:
The model used in this function is composed of:
- a dictionary: a list of slices of WM/GM contrasted images with their manual segmentations [slices.pklz]
- a model representing this dictionary in a reduced space (a PCA or an isomap model as implemented in sk-learn) [fitted_model.pklz]
- the dictionary data fitted to this model (i.e. in the model space) [fitted_data.pklz]
- the averaged median intensity in the white and gray matter in the model [intensities.pklz]
- an information file indicating which parameters were used to construct this model, and the date of computation [info.txt]
A constructed model is provided in the toolbox here: $PATH_SCT/data/gm_model.
It's made from T2* images of 80 subjects and computed with the parameters that gives the best gray matter segmentation results.
However, you can compute your own model with your own data or with other parameters, and use it to segment gray matter by using the flag -model path_new_gm_model/.
To do so, you should have a folder (path_to_dataset/) containing for each subject (with a folder per subject):
- a WM/GM contrasted image (for ex T2*-w) containing 'im' in its name
- a segmentation of the spinal cord containing 'seg' in its name
- a (or several) manual segmentation(s) of the gray matter containing 'gm' in its(their) name(s)
- a level file containing 'level' in its name: it can be an image containing a level label per slice, indicating which vertebral level this slice corresponds to (usually obtained by registering the PAM50 template to the WM/GM contrasted image), or a text file indicating the level of each slice.
For more information on the parameters available to compute the model, type:
msct_multiatlas_seg -h
to compute the model, use the following command line :
msct_multiatlas_seg -path-data path_to_dataset/
Then use the folder gm_model/ (output from msct_multiatlas_seg) in this function the flag -model gm_model/
'''
from msct_multiatlas_seg import Param, ParamData, ParamModel, Model
from msct_gmseg_utils import pre_processing, register_data, apply_transfo, normalize_slice, average_gm_wm, binarize
from sct_utils import printv, tmp_create, extract_fname, add_suffix, slash_at_the_end, run
import sct_image
from sct_image import set_orientation
from msct_image import Image
from msct_parser import *
import sct_maths, sct_register_multimodal
from math import exp
import numpy as np
import shutil, os, sys, time
import msct_qc
def get_parser():
    """
    Build the command-line parser for the gray matter segmentation script.

    Fixes several typos in user-facing help strings ("OTIONS", "Adaptative",
    "recomended", "sapce", "expended", stray parenthesis in -ref).

    :return: Parser with all options registered
    """
    # Initialize the parser
    parser = Parser(__file__)
    parser.usage.set_description('Segmentation of the white and gray matter.'
                                 ' The segmentation is based on a multi-atlas method that uses a dictionary of pre-segmented gray matter images (already included in SCT) and finds the most similar images for identifying the gray matter using label fusion approach. The model used by this method contains: a template of the white/gray matter segmentation along the cervical spinal cord, and a PCA reduced space to describe the variability of intensity in that template.'
                                 ' This method was inspired from [Asman et al., Medical Image Analysis 2014] and features the following additions:\n'
                                 '- possibility to add information from vertebral levels for improved accuracy\n'
                                 '- intensity normalization of the image to segment (allows the segmentation of any kind of contrast)\n'
                                 '- pre-registration based on non-linear transformations')
    # mandatory inputs
    parser.add_option(name="-i",
                      type_value="file",
                      description="Image to segment",
                      mandatory=True,
                      example='t2star.nii.gz')
    parser.add_option(name="-s",
                      type_value="file",
                      description="Spinal cord segmentation",
                      mandatory=True,
                      example='sc_seg.nii.gz')
    parser.add_option(name="-vertfile",
                      type_value="file",
                      description='Labels of vertebral levels. This could either be an image (e.g., label/template/PAM50_levels.nii.gz) or a text file that specifies "slice,level" at each line. Example:\n'
                                  "0,3\n"
                                  "1,3\n"
                                  "2,4\n"
                                  "3,4\n"
                                  "4,4\n",
                      mandatory=False,
                      default_value=ParamSeg().fname_level)
    # deprecated aliases kept for backward compatibility
    parser.add_option(name="-vert",
                      mandatory=False,
                      deprecated_by='-vertfile')
    parser.add_option(name="-l",
                      mandatory=False,
                      deprecated_by='-vertfile')
    parser.usage.addSection('SEGMENTATION OPTIONS')
    parser.add_option(name="-denoising",
                      type_value='multiple_choice',
                      description="1: Adaptive denoising from F. Coupe algorithm, 0: no WARNING: It affects the model you should use (if denoising is applied to the target, the model should have been computed with denoising too)",
                      mandatory=False,
                      default_value=int(ParamData().denoising),
                      example=['0', '1'])
    parser.add_option(name="-normalization",
                      type_value='multiple_choice',
                      description="Normalization of the target image's intensity using median intensity values of the WM and the GM, recommended with MT images or other types of contrast than T2*",
                      mandatory=False,
                      default_value=int(ParamData().normalization),
                      example=['0', '1'])
    parser.add_option(name="-p",
                      type_value='str',
                      description="Registration parameters to register the image to segment on the model data. Use the same format as for sct_register_to_template and sct_register_multimodal.",
                      mandatory=False,
                      default_value=ParamData().register_param,
                      example='step=1,type=seg,algo=centermassrot,metric=MeanSquares,smooth=2,iter=1:step=2,type=seg,algo=columnwise,metric=MeanSquares,smooth=3,iter=1:step=3,type=seg,algo=bsplinesyn,metric=MeanSquares,iter=3')
    parser.add_option(name="-w-levels",
                      type_value='float',
                      description="Weight parameter on the level differences to compute the similarities",
                      mandatory=False,
                      default_value=ParamSeg().weight_level,
                      example=2.0)
    parser.add_option(name="-w-coordi",
                      type_value='float',
                      description="Weight parameter on the euclidean distance (based on images coordinates in the reduced space) to compute the similarities ",
                      mandatory=False,
                      default_value=ParamSeg().weight_coord,
                      example=0.005)
    parser.add_option(name="-thr-sim",
                      type_value='float',
                      description="Threshold to select the dictionary slices most similar to the slice to segment (similarities are normalized to 1)",
                      mandatory=False,
                      default_value=ParamSeg().thr_similarity,
                      example=0.6)
    parser.add_option(name="-model",
                      type_value="folder",
                      description="Path to the computed model",
                      mandatory=False,
                      example='/home/jdoe/gm_seg_model/')
    parser.usage.addSection('\nOUTPUT OPTIONS')
    parser.add_option(name="-res-type",
                      type_value='multiple_choice',
                      description="Type of result segmentation : binary or probabilistic",
                      mandatory=False,
                      default_value=ParamSeg().type_seg,
                      example=['bin', 'prob'])
    parser.add_option(name="-ratio",
                      type_value='multiple_choice',
                      description="Compute GM/WM CSA ratio by slice or by vertebral level (average across levels)",
                      mandatory=False,
                      default_value=ParamSeg().ratio,
                      example=['0', 'slice', 'level'])
    parser.add_option(name="-ref",
                      type_value="file",
                      description="Reference segmentation of the gray matter for segmentation validation --> output Dice coefficient and Hausdorff's and median distances",
                      mandatory=False,
                      example='manual_gm_seg.nii.gz')
    parser.add_option(name="-ofolder",
                      type_value="folder_creation",
                      description="Output folder",
                      mandatory=False,
                      default_value=ParamSeg().path_results,
                      example='gm_segmentation_results/')
    parser.usage.addSection('MISC')
    parser.add_option(name='-qc',
                      type_value='multiple_choice',
                      description='Output images for quality control.',
                      mandatory=False,
                      example=['0', '1'],
                      default_value=str(int(ParamSeg().qc)))
    parser.add_option(name="-r",
                      type_value="multiple_choice",
                      description='Remove temporary files.',
                      mandatory=False,
                      default_value=str(int(Param().rm_tmp)),
                      example=['0', '1'])
    parser.add_option(name="-v",
                      type_value='multiple_choice',
                      description="Verbose: 0 = nothing, 1 = classic, 2 = expanded",
                      mandatory=False,
                      example=['0', '1', '2'],
                      default_value=str(Param().verbose))
    parser.add_option(name="-param-qc",
                      type_value=[[','], 'str'],
                      description=msct_qc.Qc_Params.get_qc_params_description(["ofolder", "autoview", "generate", "ncol", "thresh"]),
                      mandatory=False)
    return parser
class ParamSeg:
    """Parameters for the target segmentation: input files, similarity weights, output options."""
    def __init__(self):
        # input files
        self.fname_im = None  # image to segment (rewritten to tmp-dir basename during processing)
        self.fname_im_original = None  # original input path (kept for output/QC naming)
        self.fname_seg = None  # spinal cord segmentation
        self.fname_level = 'label/template/PAM50_levels.nii.gz'  # vertebral levels (image or text file); set to None when unavailable
        self.fname_manual_gmseg = None  # reference manual GM seg, used for validation when provided
        self.path_results = './'  # output folder
        # param to compute similarities:
        self.weight_level = 2.5  # gamma
        self.weight_coord = 0.0065  # tau --> need to be validated for specific dataset
        self.thr_similarity = 0.0005  # epsilon but on normalized to 1 similarities (by slice of dic and slice of target)
        # TODO = find the best thr
        self.type_seg = 'prob'  # 'prob' or 'bin'
        self.ratio = '0'  # '0', 'slice' or 'level'
        self.qc = True  # output quality-control images
class SegmentGM:
    """
    Multi-atlas gray/white matter segmentation pipeline: registers the target
    image to the model space, selects the most similar dictionary slices,
    fuses their segmentations and warps the result back to the image space.
    """
    def __init__(self, param_seg=None, param_model=None, param_data=None, param=None):
        # store parameter objects, falling back to their defaults when absent
        self.param_seg = param_seg if param_seg is not None else ParamSeg()
        self.param_model = param_model if param_model is not None else ParamModel()
        self.param_data = param_data if param_data is not None else ParamData()
        self.param = param if param is not None else Param()
        # create model:
        self.model = Model(param_model=self.param_model, param_data=self.param_data, param=self.param)
        # create tmp directory (side effect: a new directory on disk)
        self.tmp_dir = tmp_create(verbose=self.param.verbose)  # path to tmp directory
        # pipeline state, filled in by segment():
        self.target_im = None  # list of slices
        self.info_preprocessing = None  # dic containing {'orientation': 'xxx', 'im_sc_seg_rpi': im, 'interpolated_images': [list of im = interpolated image data per slice]}
        self.projected_target = None  # list of coordinates of the target slices in the model reduced space
        self.im_res_gmseg = None  # resulting GM segmentation (Image), set by segment()
        self.im_res_wmseg = None  # resulting WM segmentation (Image), set by segment()
def segment(self):
    """
    Run the full segmentation pipeline inside the tmp directory and save the
    resulting GM and WM segmentations to the output folder; optionally
    compute validation metrics, the GM/WM CSA ratio and QC images.
    """
    self.copy_data_to_tmp()
    # go to tmp directory
    os.chdir(self.tmp_dir)
    # load model
    self.model.load_model()
    # crop/resample/denoise the target and gather preprocessing info
    self.target_im, self.info_preprocessing = pre_processing(self.param_seg.fname_im, self.param_seg.fname_seg, self.param_seg.fname_level, new_res=self.param_data.axial_res, square_size_size_mm=self.param_data.square_size_size_mm, denoising=self.param_data.denoising, verbose=self.param.verbose, rm_tmp=self.param.rm_tmp)
    printv('\nRegister target image to model data...', self.param.verbose, 'normal')
    # register target image to model dictionary space
    path_warp = self.register_target()
    printv('\nNormalize intensity of target image...', self.param.verbose, 'normal')
    self.normalize_target()
    printv('\nProject target image into the model reduced space...', self.param.verbose, 'normal')
    self.project_target()
    printv('\nCompute similarities between target slices and model slices using model reduced space...', self.param.verbose, 'normal')
    list_dic_indexes_by_slice = self.compute_similarities()
    printv('\nLabel fusion of model slices most similar to target slices...', self.param.verbose, 'normal')
    self.label_fusion(list_dic_indexes_by_slice)
    printv('\nWarp back segmentation into image space...', self.param.verbose, 'normal')
    self.warp_back_seg(path_warp)
    printv('\nPost-processing...', self.param.verbose, 'normal')
    self.im_res_gmseg, self.im_res_wmseg = self.post_processing()
    if (self.param_seg.path_results != './') and (not os.path.exists('../' + self.param_seg.path_results)):
        # create output folder
        printv('\nCreate output folder ...', self.param.verbose, 'normal')
        os.chdir('..')
        os.mkdir(self.param_seg.path_results)
        os.chdir(self.tmp_dir)
    if self.param_seg.fname_manual_gmseg is not None:
        # compute validation metrics
        printv('\nCompute validation metrics...', self.param.verbose, 'normal')
        self.validation()
    # BUGFIX: was "self.param_seg.ratio is not '0'" — identity comparison with
    # a str literal, which only works via CPython interning (SyntaxWarning on
    # recent Pythons); use != for value comparison.
    if self.param_seg.ratio != '0':
        printv('\nCompute GM/WM CSA ratio...', self.param.verbose, 'normal')
        self.compute_ratio()
    # go back to original directory
    os.chdir('..')
    printv('\nSave resulting GM and WM segmentations...', self.param.verbose, 'normal')
    fname_res_gmseg = self.param_seg.path_results + add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_gmseg')
    fname_res_wmseg = self.param_seg.path_results + add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_wmseg')
    self.im_res_gmseg.setFileName(fname_res_gmseg)
    self.im_res_wmseg.setFileName(fname_res_wmseg)
    self.im_res_gmseg.save()
    self.im_res_wmseg.save()
    # save quality control and print info
    # colormaps / display range for the fslview hint below
    if self.param_seg.type_seg == 'bin':
        wm_col = 'Red'
        gm_col = 'Blue'
        b = '0,1'
    else:
        wm_col = 'Blue-Lightblue'
        gm_col = 'Red-Yellow'
        b = '0.4,1'
    if self.param_seg.qc:
        # output QC image
        printv('\nSave quality control images...', self.param.verbose, 'normal')
        im = Image(self.tmp_dir + self.param_seg.fname_im)
        im.save_quality_control(plane='axial', n_slices=5, seg=self.im_res_gmseg, thr=float(b.split(',')[0]), cmap_col='red-yellow', path_output=self.param_seg.path_results)
    printv('\nDone! To view results, type:', self.param.verbose)
    printv('fslview ' + self.param_seg.fname_im_original + ' ' + fname_res_gmseg + ' -b ' + b + ' -l ' + gm_col + ' -t 0.7 ' + fname_res_wmseg + ' -b ' + b + ' -l ' + wm_col + ' -t 0.7 & \n', self.param.verbose, 'info')
    if self.param.rm_tmp:
        # remove tmp_dir
        shutil.rmtree(self.tmp_dir)
def copy_data_to_tmp(self):
    """
    Copy the input files into the tmp directory and rewrite each filename
    attribute to its basename (name + extension, path stripped).

    Emits an error through printv when a mandatory input is missing.
    """
    params = self.param_seg
    # input image (mandatory)
    if params.fname_im is None:
        printv('ERROR: No input image', self.param.verbose, 'error')
    else:
        shutil.copy(params.fname_im, self.tmp_dir)
        params.fname_im = ''.join(extract_fname(params.fname_im)[1:])
    # spinal cord segmentation (mandatory)
    if params.fname_seg is None:
        printv('ERROR: No SC segmentation image', self.param.verbose, 'error')
    else:
        shutil.copy(params.fname_seg, self.tmp_dir)
        params.fname_seg = ''.join(extract_fname(params.fname_seg)[1:])
    # vertebral level file (optional)
    if params.fname_level is not None:
        shutil.copy(params.fname_level, self.tmp_dir)
        params.fname_level = ''.join(extract_fname(params.fname_level)[1:])
    # manual GM segmentation for validation (optional)
    if params.fname_manual_gmseg is not None:
        shutil.copy(params.fname_manual_gmseg, self.tmp_dir)
        params.fname_manual_gmseg = ''.join(extract_fname(params.fname_manual_gmseg)[1:])
def get_im_from_list(self, data):
    """
    Build a 3D Image from an array of stacked 2D slices and set its
    resolution and orientation to match the model space.

    :param data: numpy array of stacked axial slices
    :return: Image oriented for the model space
    """
    im = Image(data)
    # set pix dimension
    im.hdr.structarr['pixdim'][1] = self.param_data.axial_res
    im.hdr.structarr['pixdim'][2] = self.param_data.axial_res
    # set the correct orientation
    # NOTE(review): the image must exist on disk for set_orientation, hence
    # the save under a throwaway name before the two reorientations
    im.setFileName('im_to_orient.nii.gz')
    im.save()
    im = set_orientation(im, 'IRP')
    # second pass also inverts the data axes (data_inversion=True)
    im = set_orientation(im, 'PIL', data_inversion=True)
    return im
def register_target(self):
    """
    Register the stack of target slices on the model mean image, store the
    moved image of each slice, and keep the forward/backward warping fields
    on disk under stable names.

    :return: path to the directory containing the warping fields
    """
    # create dir to store warping fields
    path_warping_fields = 'warp_target/'
    if not os.path.exists(path_warping_fields):
        os.mkdir(path_warping_fields)
    # get 3D images from list of slices
    # destination = the model mean image repeated once per target slice
    im_dest = self.get_im_from_list(np.array([self.model.mean_image for target_slice in self.target_im]))
    im_src = self.get_im_from_list(np.array([target_slice.im for target_slice in self.target_im]))
    # register list of target slices on list of model mean image
    im_src_reg, fname_src2dest, fname_dest2src = register_data(im_src, im_dest, param_reg=self.param_data.register_param, path_copy_warp=path_warping_fields, rm_tmp=self.param.rm_tmp)
    # rename warping fields to stable names used later by warp_back_seg()
    fname_src2dest_save = 'warp_target2dic.nii.gz'
    fname_dest2src_save = 'warp_dic2target.nii.gz'
    shutil.move(path_warping_fields+fname_src2dest, path_warping_fields+fname_src2dest_save)
    shutil.move(path_warping_fields+fname_dest2src, path_warping_fields+fname_dest2src_save)
    #
    for i, target_slice in enumerate(self.target_im):
        # set moved image for each slice
        target_slice.set(im_m=im_src_reg.data[i])
    return path_warping_fields
def normalize_target(self):
    """
    Normalize the intensity of each registered target slice using the model's
    median GM/WM intensities for the slice's vertebral level.
    """
    # get gm seg from model by level
    gm_seg_model, wm_seg_model = self.model.get_gm_wm_by_level()
    # for each target slice: normalize
    for target_slice in self.target_im:
        level_int = int(round(target_slice.level))
        # fall back to level 0 (i.e. "no level") when the slice level is
        # not present in the model intensities table
        if level_int not in self.model.intensities.index:
            level_int = 0
        norm_im_M = normalize_slice(target_slice.im_M, gm_seg_model[level_int], wm_seg_model[level_int], self.model.intensities['GM'][level_int], self.model.intensities['WM'][level_int], val_min=self.model.intensities['MIN'][level_int], val_max=self.model.intensities['MAX'][level_int])
        target_slice.set(im_m=norm_im_M)
def project_target(self):
    """
    Project each registered target slice into the model's reduced space
    (fitted PCA/isomap) and store the resulting coordinates.
    """
    # each slice is flattened into a single-sample row vector (shape (1, n))
    # before being transformed by the fitted model
    self.projected_target = [
        self.model.fitted_model.transform(target_slice.im_M.flatten().reshape(1, -1))
        for target_slice in self.target_im
    ]
def compute_similarities(self):
    """
    Compute, for each target slice, a normalized similarity with every
    dictionary slice (using coordinates in the model reduced space) and
    keep the indexes of the dictionary slices whose normalized similarity
    reaches the threshold.

    :return: list (one entry per target slice) of lists of dictionary indexes
    """
    use_levels = self.param_seg.fname_level is not None
    list_dic_indexes_by_slice = []
    for i, target_coord in enumerate(self.projected_target):
        similarities = []
        for j, dic_coord in enumerate(self.model.fitted_data):
            # distance between target and dictionary slice in the model space
            square_norm = np.linalg.norm((target_coord - dic_coord), 2)
            sim = exp(-self.param_seg.weight_coord * square_norm)
            if use_levels:
                # penalize dictionary slices from distant vertebral levels
                sim = sim * exp(-self.param_seg.weight_level * abs(self.target_im[i].level - self.model.slices[j].level))
            similarities.append(sim)
        # normalize similarities to sum to 1, then keep indexes above threshold
        total = sum(similarities)
        selected = [j for j, s in enumerate(similarities)
                    if float(s) / total >= self.param_seg.thr_similarity]
        list_dic_indexes_by_slice.append(selected)
    return list_dic_indexes_by_slice
def label_fusion(self, list_dic_indexes_by_slice):
    """
    Fuse, for each target slice, the GM/WM segmentations of its selected
    dictionary slices by averaging, binarizing the result when a binary
    segmentation was requested, and store the fused maps on the slice.

    :param list_dic_indexes_by_slice: per-slice lists of dictionary indexes
    """
    binarize_result = self.param_seg.type_seg == 'bin'
    for target_slice in self.target_im:
        # gather the dictionary slices selected for this target slice
        selected_slices = [self.model.slices[j] for j in list_dic_indexes_by_slice[target_slice.id]]
        # average their GM and WM segmentations (soft / probabilistic maps)
        data_mean_gm, data_mean_wm = average_gm_wm(selected_slices)
        if binarize_result:
            # threshold both averaged maps at 0.5, in place
            for data in (data_mean_gm, data_mean_wm):
                above = data >= 0.5
                data[above] = 1
                data[~above] = 0
        # store segmentation into target_im
        target_slice.set(gm_seg_m=data_mean_gm, wm_seg_m=data_mean_wm)
def warp_back_seg(self, path_warp):
    """
    Warp the fused GM/WM segmentations from the model space back into the
    (preprocessed) target space, using the dic->target warping field.

    :param path_warp: directory containing 'warp_dic2target.nii.gz'
    """
    # get 3D images from list of slices
    im_dest = self.get_im_from_list(np.array([target_slice.im for target_slice in self.target_im]))
    im_src_gm = self.get_im_from_list(np.array([target_slice.gm_seg_M for target_slice in self.target_im]))
    im_src_wm = self.get_im_from_list(np.array([target_slice.wm_seg_M for target_slice in self.target_im]))
    #
    fname_dic_space2slice_space = slash_at_the_end(path_warp, slash=1)+'warp_dic2target.nii.gz'
    # nearest-neighbor keeps a binary segmentation binary; linear preserves probabilities
    interpolation = 'nn' if self.param_seg.type_seg == 'bin' else 'linear'
    # warp GM
    im_src_gm_reg = apply_transfo(im_src_gm, im_dest, fname_dic_space2slice_space, interp=interpolation, rm_tmp=self.param.rm_tmp)
    # warp WM
    im_src_wm_reg = apply_transfo(im_src_wm, im_dest, fname_dic_space2slice_space, interp=interpolation, rm_tmp=self.param.rm_tmp)
    for i, target_slice in enumerate(self.target_im):
        # set GM and WM for each slice
        target_slice.set(gm_seg=im_src_gm_reg.data[i], wm_seg=im_src_wm_reg.data[i])
def post_processing(self):
    """
    Interpolate the per-slice GM/WM results back onto the original image grid
    (slice by slice, by aligning NIfTI headers) and reorient the resulting
    volumes to the native orientation of the input image.

    :return: (im_res_gmseg, im_res_wmseg) Image objects in native orientation
    """
    ## DO INTERPOLATION BACK TO ORIGINAL IMAGE
    # get original SC segmentation oriented in RPI
    im_sc_seg_original_rpi = self.info_preprocessing['im_sc_seg_rpi'].copy()
    nx_ref, ny_ref, nz_ref, nt_ref, px_ref, py_ref, pz_ref, pt_ref = im_sc_seg_original_rpi.dim
    # create res GM seg image (zero-filled, on the original grid)
    im_res_gmseg = im_sc_seg_original_rpi.copy()
    im_res_gmseg.data = np.zeros(im_res_gmseg.data.shape)
    # create res WM seg image
    im_res_wmseg = im_sc_seg_original_rpi.copy()
    im_res_wmseg.data = np.zeros(im_res_wmseg.data.shape)
    printv(' Interpolate result back into original space...', self.param.verbose, 'normal')
    for iz, im_iz_preprocessed in enumerate(self.info_preprocessing['interpolated_images']):
        # im gmseg for slice iz
        im_gmseg = im_iz_preprocessed.copy()
        im_gmseg.data = np.zeros(im_gmseg.data.shape)
        im_gmseg.data = self.target_im[iz].gm_seg
        # im wmseg for slice iz
        im_wmseg = im_iz_preprocessed.copy()
        im_wmseg.data = np.zeros(im_wmseg.data.shape)
        im_wmseg.data = self.target_im[iz].wm_seg
        # process GM and WM identically for this slice
        for im_res_slice, im_res_tot in [(im_gmseg, im_res_gmseg), (im_wmseg, im_res_wmseg)]:
            # get reference image for this slice
            # (use only one slice to accelerate interpolation)
            im_ref = im_sc_seg_original_rpi.copy()
            im_ref.data = im_ref.data[:, :, iz]
            im_ref.dim = (nx_ref, ny_ref, 1, nt_ref, px_ref, py_ref, pz_ref, pt_ref)
            # correct reference header for this slice
            [[x_0_ref, y_0_ref, z_0_ref]] = im_ref.transfo_pix2phys(coordi=[[0, 0, iz]])
            im_ref.hdr.as_analyze_map()['qoffset_x'] = x_0_ref
            im_ref.hdr.as_analyze_map()['qoffset_y'] = y_0_ref
            im_ref.hdr.as_analyze_map()['qoffset_z'] = z_0_ref
            # keep sform and qform consistent after editing the offsets
            im_ref.hdr.set_sform(im_ref.hdr.get_qform())
            im_ref.hdr.set_qform(im_ref.hdr.get_qform())
            # set im_res_slice header with im_sc_seg_original_rpi origin
            im_res_slice.hdr.as_analyze_map()['qoffset_x'] = x_0_ref
            im_res_slice.hdr.as_analyze_map()['qoffset_y'] = y_0_ref
            im_res_slice.hdr.as_analyze_map()['qoffset_z'] = z_0_ref
            im_res_slice.hdr.set_sform(im_res_slice.hdr.get_qform())
            im_res_slice.hdr.set_qform(im_res_slice.hdr.get_qform())
            # get physical coordinates of center of sc
            x_seg, y_seg = (im_sc_seg_original_rpi.data[:, :, iz] > 0).nonzero()
            x_center, y_center = np.mean(x_seg), np.mean(y_seg)
            [[x_center_phys, y_center_phys, z_center_phys]] = im_sc_seg_original_rpi.transfo_pix2phys(coordi=[[x_center, y_center, iz]])
            # get physical coordinates of center of square WITH im_res_slice WITH SAME ORIGIN AS im_sc_seg_original_rpi
            sq_size_pix = int(self.param_data.square_size_size_mm / self.param_data.axial_res)
            [[x_square_center_phys, y_square_center_phys, z_square_center_phys]] = im_res_slice.transfo_pix2phys(
                coordi=[[int(sq_size_pix / 2), int(sq_size_pix / 2), 0]])
            # set im_res_slice header by adding center of SC and center of square (in the correct space) to origin
            im_res_slice.hdr.as_analyze_map()['qoffset_x'] += x_center_phys - x_square_center_phys
            im_res_slice.hdr.as_analyze_map()['qoffset_y'] += y_center_phys - y_square_center_phys
            im_res_slice.hdr.as_analyze_map()['qoffset_z'] += z_center_phys
            im_res_slice.hdr.set_sform(im_res_slice.hdr.get_qform())
            im_res_slice.hdr.set_qform(im_res_slice.hdr.get_qform())
            # reshape data
            im_res_slice.data = im_res_slice.data.reshape((sq_size_pix, sq_size_pix, 1))
            # interpolate to reference image
            # interpolation_mode: 0 for binary output, 1 otherwise
            # (presumably nearest-neighbor vs linear — confirm in msct_image)
            interp = 0 if self.param_seg.type_seg == 'bin' else 1
            im_res_slice_interp = im_res_slice.interpolate_from_image(im_ref, interpolation_mode=interp, border='nearest')
            # set correct slice of total image with this slice
            if len(im_res_slice_interp.data.shape) == 3:
                shape_x, shape_y, shape_z = im_res_slice_interp.data.shape
                im_res_slice_interp.data = im_res_slice_interp.data.reshape((shape_x, shape_y))
            im_res_tot.data[:, :, iz] = im_res_slice_interp.data
    printv(' Reorient resulting segmentations to native orientation...', self.param.verbose, 'normal')
    ## PUT RES BACK IN ORIGINAL ORIENTATION
    im_res_gmseg.setFileName('res_gmseg.nii.gz')
    im_res_gmseg.save()
    im_res_gmseg = set_orientation(im_res_gmseg, self.info_preprocessing['orientation'])
    im_res_wmseg.setFileName('res_wmseg.nii.gz')
    im_res_wmseg.save()
    im_res_wmseg = set_orientation(im_res_wmseg, self.info_preprocessing['orientation'])
    return im_res_gmseg, im_res_wmseg
def validation(self):
    """
    Validate the automatic GM/WM segmentations against the reference manual
    GM segmentation: compute Dice coefficients and Hausdorff/median distances
    and copy the resulting text files into the output folder.
    """
    tmp_dir_val = 'tmp_validation/'
    if not os.path.exists(tmp_dir_val):
        os.mkdir(tmp_dir_val)
    # copy data into tmp dir val
    shutil.copy(self.param_seg.fname_manual_gmseg, tmp_dir_val)
    shutil.copy(self.param_seg.fname_seg, tmp_dir_val)
    os.chdir(tmp_dir_val)
    fname_manual_gmseg = ''.join(extract_fname(self.param_seg.fname_manual_gmseg)[1:])
    fname_seg = ''.join(extract_fname(self.param_seg.fname_seg)[1:])

    im_gmseg = self.im_res_gmseg.copy()
    im_wmseg = self.im_res_wmseg.copy()
    # probabilistic results are binarized at 0.5 before computing the metrics
    if self.param_seg.type_seg == 'prob':
        im_gmseg = binarize(im_gmseg, thr_max=0.5, thr_min=0.5)
        im_wmseg = binarize(im_wmseg, thr_max=0.5, thr_min=0.5)
    fname_gmseg = 'res_gmseg.nii.gz'
    im_gmseg.setFileName(fname_gmseg)
    im_gmseg.save()
    fname_wmseg = 'res_wmseg.nii.gz'
    im_wmseg.setFileName(fname_wmseg)
    im_wmseg.save()

    # get manual WM seg = SC seg minus manual GM seg
    fname_manual_wmseg = 'manual_wmseg.nii.gz'
    sct_maths.main(args=['-i', fname_seg,
                         '-sub', fname_manual_gmseg,
                         '-o', fname_manual_wmseg])

    ## compute DC:
    try:
        status_gm, output_gm = run('sct_dice_coefficient -i ' + fname_manual_gmseg + ' -d ' + fname_gmseg + ' -2d-slices 2', error_exit='warning', raise_exception=True)
        status_wm, output_wm = run('sct_dice_coefficient -i ' + fname_manual_wmseg + ' -d ' + fname_wmseg + ' -2d-slices 2', error_exit='warning', raise_exception=True)
    except Exception:
        # put ref and res in the same space if needed: identity registration
        # of the reference onto the result, then re-binarization
        fname_manual_gmseg_corrected = add_suffix(fname_manual_gmseg, '_reg')
        sct_register_multimodal.main(args=['-i', fname_manual_gmseg,
                                           '-d', fname_gmseg,
                                           '-identity', '1'])
        sct_maths.main(args=['-i', fname_manual_gmseg_corrected,
                             '-bin', '0.1',
                             '-o', fname_manual_gmseg_corrected])
        #
        fname_manual_wmseg_corrected = add_suffix(fname_manual_wmseg, '_reg')
        sct_register_multimodal.main(args=['-i', fname_manual_wmseg,
                                           '-d', fname_wmseg,
                                           '-identity', '1'])
        sct_maths.main(args=['-i', fname_manual_wmseg_corrected,
                             '-bin', '0.1',
                             '-o', fname_manual_wmseg_corrected])
        # recompute DC
        status_gm, output_gm = run('sct_dice_coefficient -i ' + fname_manual_gmseg_corrected + ' -d ' + fname_gmseg + ' -2d-slices 2', error_exit='warning', raise_exception=True)
        status_wm, output_wm = run('sct_dice_coefficient -i ' + fname_manual_wmseg_corrected + ' -d ' + fname_wmseg + ' -2d-slices 2', error_exit='warning', raise_exception=True)

    # save results to a text file
    # BUGFIX: the original called sct.extract_fname(), but this module never
    # imports `sct` (extract_fname is imported directly from sct_utils),
    # which raised NameError at runtime.
    fname_dc = 'dice_coefficient_' + extract_fname(self.param_seg.fname_im)[1] + '.txt'
    with open(fname_dc, 'w') as file_dc:
        if self.param_seg.type_seg == 'prob':
            file_dc.write('WARNING : the probabilistic segmentations were binarized with a threshold at 0.5 to compute the dice coefficient \n')
        file_dc.write('\n--------------------------------------------------------------\nDice coefficient on the Gray Matter segmentation:\n')
        file_dc.write(output_gm)
        file_dc.write('\n\n--------------------------------------------------------------\nDice coefficient on the White Matter segmentation:\n')
        file_dc.write(output_wm)

    ## compute HD and MD:
    fname_hd = 'hausdorff_dist_' + extract_fname(self.param_seg.fname_im)[1] + '.txt'
    run('sct_compute_hausdorff_distance -i ' + fname_gmseg + ' -d ' + fname_manual_gmseg + ' -thinning 1 -o ' + fname_hd + ' -v ' + str(self.param.verbose))

    # get out of tmp dir to copy results to output folder
    os.chdir('../..')
    shutil.copy(self.tmp_dir + tmp_dir_val + '/' + fname_dc, self.param_seg.path_results)
    shutil.copy(self.tmp_dir + tmp_dir_val + '/' + fname_hd, self.param_seg.path_results)
    os.chdir(self.tmp_dir)
    if self.param.rm_tmp:
        shutil.rmtree(tmp_dir_val)
def compute_ratio(self):
type_ratio = self.param_seg.ratio
tmp_dir_ratio = 'tmp_ratio/'
os.mkdir(tmp_dir_ratio)
os.chdir(tmp_dir_ratio)
fname_gmseg = self.im_res_gmseg.absolutepath
fname_wmseg = self.im_res_wmseg.absolutepath
self.im_res_gmseg.save()
self.im_res_wmseg.save()
if self.im_res_gmseg.orientation is not 'RPI':
im_res_gmseg = set_orientation(self.im_res_gmseg, 'RPI')
im_res_wmseg = set_orientation(self.im_res_wmseg, 'RPI')
fname_gmseg = im_res_gmseg.absolutepath
fname_wmseg = im_res_wmseg.absolutepath
#sct_process_segmentation.main(['-i', fname_gmseg, '-p', 'csa', '-ofolder', 'gm_csa'])
run('sct_process_segmentation -i ' + fname_gmseg + ' -p csa -ofolder gm_csa')
#sct_process_segmentation.main(['-i', fname_wmseg, '-p', 'csa', '-ofolder', 'wm_csa'])
run('sct_process_segmentation -i ' + fname_wmseg + ' -p csa -ofolder wm_csa')
gm_csa = open('gm_csa/csa_per_slice.txt', 'r')
wm_csa = open('wm_csa/csa_per_slice.txt', 'r')
gm_csa_lines = gm_csa.readlines()
wm_csa_lines = wm_csa.readlines()
gm_csa.close()
wm_csa.close()
fname_ratio = 'ratio_by_'+type_ratio+'.txt'
file_ratio = open(fname_ratio, 'w')
file_ratio.write(type_ratio + ', ratio GM/WM CSA\n')
csa_gm_wm_by_level = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [], 12: [], 13: [], 14: [], 15: [], 16: [], 17: [], 18: [], 19: [], 20: [], 21: [], 22: [], 23: [], 24: []}
for gm_line, wm_line in zip(gm_csa_lines[1:], wm_csa_lines[1:]):
i, gm_area, gm_angle = gm_line.split(',')
j, wm_area, wm_angle = wm_line.split(',')
assert i == j
if type_ratio == 'level':
level_slice = int(self.target_im[int(i)].level)
csa_gm_wm_by_level[level_slice].append((float(gm_area), float(wm_area)))
else:
file_ratio.write(i + ', ' + str(float(gm_area) / float(wm_area)) + '\n')
if type_ratio == 'level':
for l, gm_wm_list in sorted(csa_gm_wm_by_level.items()):
if str(gm_wm_list) != '[]':
csa_gm_list = []
csa_wm_list = []
for gm, wm in gm_wm_list:
csa_gm_list.append(gm)
csa_wm_list.append(wm)
csa_gm = np.mean(csa_gm_list)
csa_wm = np.mean(csa_wm_list)
file_ratio.write(str(l) + ', ' + str(csa_gm / csa_wm) + '\n')
file_ratio.close()
shutil.copy(fname_ratio, '../../'+self.param_seg.path_results+'/'+fname_ratio)
os.chdir('..')
########################################################################################################################
# ------------------------------------------------------ MAIN ------------------------------------------------------- #
########################################################################################################################
def main(args=None):
    """Entry point for the gray-matter segmentation CLI.

    Parses command-line arguments into the Param* objects, runs the
    segmentation, optionally generates a QC report, and prints the timing.

    :param args: argument list; defaults to sys.argv[1:]
    """
    if args is None:
        args = sys.argv[1:]
    # create param objects
    param_seg = ParamSeg()
    param_data = ParamData()
    param_model = ParamModel()
    param = Param()
    # get parser
    parser = get_parser()
    arguments = parser.parse(args)
    # set param arguments as inputted by user
    param_seg.fname_im = arguments["-i"]
    param_seg.fname_im_original = arguments["-i"]
    param_seg.fname_seg = arguments["-s"]
    if '-vertfile' in arguments:
        param_seg.fname_level = arguments['-vertfile']
    if '-denoising' in arguments:
        param_data.denoising = bool(int(arguments['-denoising']))
    if '-normalization' in arguments:
        param_data.normalization = arguments['-normalization']
    if '-p' in arguments:
        param_data.register_param = arguments['-p']
    if '-w-levels' in arguments:
        param_seg.weight_level = arguments['-w-levels']
    if '-w-coordi' in arguments:
        param_seg.weight_coord = arguments['-w-coordi']
    if '-thr-sim' in arguments:
        param_seg.thr_similarity = arguments['-thr-sim']
    if '-model' in arguments:
        param_model.path_model_to_load = os.path.abspath(arguments['-model'])
    if '-res-type' in arguments:
        param_seg.type_seg= arguments['-res-type']
    if '-ratio' in arguments:
        param_seg.ratio = arguments['-ratio']
    if '-ref' in arguments:
        param_seg.fname_manual_gmseg = arguments['-ref']
    if '-ofolder' in arguments:
        param_seg.path_results= arguments['-ofolder']
    if '-qc' in arguments:
        param_seg.qc = bool(int(arguments['-qc']))
    if '-r' in arguments:
        param.rm_tmp= bool(int(arguments['-r']))
    if '-v' in arguments:
        param.verbose= arguments['-v']
    # fall back to "no level information" when the vertebral file is missing
    if not os.path.isfile(param_seg.fname_level):
        param_seg.fname_level = None
    # parse parameters
    # TODO refactor
    fname_in = param_seg.fname_im_original
    # run the segmentation and time it
    seg_gm = SegmentGM(param_seg=param_seg, param_data=param_data, param_model=param_model, param=param)
    start = time.time()
    seg_gm.segment()
    end = time.time()
    t = end - start
    # Decode the parameters of -param-qc, verification done here because if name of param-qc changes, easier to change here
    qcParams = None
    if '-param-qc' in arguments:
        qcParams = msct_qc.Qc_Params(arguments['-param-qc'])
    # Need to verify in the case that "generate" arg is provided and means false else we will generate qc
    if qcParams is None or qcParams.generate_report is True:
        printv("\nPreparing QC Report...\n")
        # There is no way to get the name easily, this is why it is hard-coded...
        # TODO: find a way to get the name
        output_filename = param_seg.fname_im_original.split(".")[0]+"_gmseg.nii.gz"
        # Qc_Report generates and contains the useful infos for qc generation
        qcReport = msct_qc.Qc_Report("sct_segment_graymatter", qcParams, sys.argv[1:], parser.usage.description)
        @msct_qc.Qc(qcReport, action_list=[msct_qc.Qc.sequential_seg, msct_qc.Qc.colorbar])
        def grayseg_qc(sct_slice, nb_column, thr = 0.5):
            """
            :param sct_slice: slice provider (axial view of input + result)
            :param nb_column: number of mosaic columns
            :param thr: threshold to apply to the segmentation
            :return: (image mosaic, thresholded segmentation mosaic)
            """
            # Chosen axis to generate the mosaic image
            img, seg = sct_slice.mosaic(nb_column=nb_column)
            seg[seg < thr] = 0
            return img, seg
        # the wrapped function
        grayseg_qc( msct_qc.axial(fname_in, output_filename),qcReport.qc_params.nb_column, qcReport.qc_params.threshold)
    printv('Done in ' + str(int(round(t / 60))) + ' min, ' + str(round(t % 60,1)) + ' sec', param.verbose, 'info')
if __name__ == "__main__":
    main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""0-1 Knapsack Problem
Given weights and values of n "non-splittable" items, put these items in a knapsack of
capacity to get the maximum total value in the knapsack.
"""
def knapsack01_recur(val, wt, wt_cap, n):
    """0-1 Knapsack by naive recursion over item index n (last item first).

    Time complexity: O(2^n), where n is the number of items.
    Space complexity: O(n) recursion depth.
    """
    if n < 0 or wt_cap == 0:
        # No items left, or no remaining capacity: nothing more can be packed.
        return 0
    if wt[n] > wt_cap:
        # Item n does not fit: it can only be excluded.
        return knapsack01_recur(val, wt, wt_cap, n - 1)
    # Otherwise take the better of including or excluding item n.
    take = val[n] + knapsack01_recur(val, wt, wt_cap - wt[n], n - 1)
    skip = knapsack01_recur(val, wt, wt_cap, n - 1)
    return take if take > skip else skip
def _knapsack01_memo(val, wt, wt_cap, M, n):
if M[n][wt_cap]:
return M[n][wt_cap]
if n < 0 or wt_cap == 0:
return 0
elif wt[n] > wt_cap:
memo = _knapsack01_memo(val, wt, wt_cap, M, n - 1)
else:
val_in = val[n] + _knapsack01_memo(val, wt, wt_cap - wt[n], M, n - 1)
val_ex = _knapsack01_memo(val, wt, wt_cap, M, n - 1)
memo = max(val_in, val_ex)
M[n][wt_cap] = memo
return memo
def knapsack01_memo(val, wt, wt_cap, n):
    """0-1 Knapsack via top-down dynamic programming with memoization.

    Time complexity: O(nC), where n is the number of items and C the
    weight capacity. Space complexity: O(nC).
    """
    # Memo table with rows 0..n and columns 0..wt_cap; column 0 (zero
    # capacity) is pre-filled with 0, everything else starts unknown.
    M = [[0] + [None] * wt_cap for _ in range(n + 1)]
    return _knapsack01_memo(val, wt, wt_cap, M, n)
def knapsack_dp(val, wt, wt_cap):
    """0-1 Knapsack by bottom-up dynamic programming.

    Returns (best_value, table) where table[i][c] is the best value
    achievable with items 0..i at capacity c.
    Time complexity: O(nC); space complexity: O(nC).
    """
    table = [[None] * (wt_cap + 1) for _ in range(len(wt))]
    # Zero capacity always yields zero value.
    for row in table:
        row[0] = 0
    # Base row: only item 0 is available.
    for cap in range(1, wt_cap + 1):
        table[0][cap] = val[0] if wt[0] <= cap else 0
    # Fill the remaining rows from the row above.
    for i in range(1, len(wt)):
        for cap in range(1, wt_cap + 1):
            without_i = table[i - 1][cap]
            if wt[i] > cap:
                table[i][cap] = without_i
            else:
                with_i = val[i] + table[i - 1][cap - wt[i]]
                table[i][cap] = max(without_i, with_i)
    return table[-1][-1], table
def item_list(M, wt, wt_cap):
    """Recover the chosen-item indicator vector from a filled DP table M."""
    chosen = [0] * len(wt)
    remaining = wt_cap
    for i in reversed(range(len(wt))):
        if i > 0:
            # Item i was taken iff adding it improved on the row above.
            if M[i][remaining] > M[i - 1][remaining]:
                chosen[i] = 1
                remaining -= wt[i]
        elif M[0][remaining] != 0:
            # Item 0 was taken iff any value remains at the final capacity.
            chosen[0] = 1
    return chosen
def main():
    """Demo: run all three knapsack solvers on one instance and time them."""
    import time
    # Example instance: five items, capacity 10; the optimum value is 17.
    val = [6, 3, 5, 4, 6]
    wt = [2, 5, 4, 2, 3]
    wt_cap = 10
    # Solvers take the index of the last item, not the item count.
    n = len(wt) - 1
    # Ans: 17
    start_time = time.time()
    print(knapsack01_recur(val, wt, wt_cap, n))
    print('Time by recursion: {}'.format(time.time() - start_time))
    start_time = time.time()
    print(knapsack01_memo(val, wt, wt_cap, n))
    print('Time by memo: {}'.format(time.time() - start_time))
    start_time = time.time()
    max_val, M = knapsack_dp(val, wt, wt_cap)
    print(max_val)
    print('Time by DP: {}'.format(time.time() - start_time))
    # Recover which items achieve the optimum from the DP table.
    print('Items: {}'.format(item_list(M, wt, wt_cap)))
if __name__ == '__main__':
    main()
Revise memo approach
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""0-1 Knapsack Problem
Given weights and values of n "non-splittable" items, put these items in a knapsack of
capacity to get the maximum total value in the knapsack.
"""
def knapsack01_recur(val, wt, wt_cap, n):
    """0-1 Knapsack by plain recursion over item index n.

    Time complexity: O(2^n), where n is the number of items.
    Space complexity: O(n).
    """
    if n < 0 or wt_cap == 0:
        # Nothing left to consider, or no capacity left.
        return 0
    # Value when item n is excluded.
    best = knapsack01_recur(val, wt, wt_cap, n - 1)
    if wt[n] <= wt_cap:
        # Item n fits: also try including it and keep the better option.
        best = max(best, val[n] + knapsack01_recur(val, wt, wt_cap - wt[n], n - 1))
    return best
def _knapsack01_memo(val, wt, wt_cap, M, n):
if M[n][wt_cap]:
return M[n][wt_cap]
if n < 0 or wt_cap == 0:
result = 0
elif wt[n] > wt_cap:
result = _knapsack01_memo(val, wt, wt_cap, M, n - 1)
else:
val_in = val[n] + _knapsack01_memo(val, wt, wt_cap - wt[n], M, n - 1)
val_ex = _knapsack01_memo(val, wt, wt_cap, M, n - 1)
result = max(val_in, val_ex)
M[n][wt_cap] = result
return result
def knapsack01_memo(val, wt, wt_cap, n):
    """0-1 Knapsack via top-down dynamic programming with memoization.

    Time complexity: O(nC), where n is the number of items and C the
    weight capacity. Space complexity: O(nC).
    """
    # Rows 0..n, columns 0..wt_cap, all unknown to start with.
    M = [[None for _ in range(wt_cap + 1)] for _ in range(n + 1)]
    # A capacity of zero always yields zero value.
    for row in M:
        row[0] = 0
    return _knapsack01_memo(val, wt, wt_cap, M, n)
def knapsack_dp(val, wt, wt_cap):
    """0-1 Knapsack by bottom-up dynamic programming.

    Returns (best_value, M) where M[i][c] is the best value achievable
    with items 0..i at capacity c.
    Time complexity: O(nC); space complexity: O(nC).
    """
    n_items = len(wt)
    # Base row: only item 0 available; column 0 is zero capacity.
    first = [0] + [val[0] if wt[0] <= c else 0 for c in range(1, wt_cap + 1)]
    M = [first] + [[0] + [None] * wt_cap for _ in range(n_items - 1)]
    for i in range(1, n_items):
        prev = M[i - 1]
        cur = M[i]
        for c in range(1, wt_cap + 1):
            if wt[i] > c:
                # Item i does not fit at capacity c.
                cur[c] = prev[c]
            else:
                cur[c] = max(prev[c], val[i] + prev[c - wt[i]])
    return M[-1][-1], M
def item_list(M, wt, wt_cap):
    """Backtrack through a filled DP table M to flag which items were taken."""
    taken = [0] * len(wt)
    cap = wt_cap
    for i in range(len(wt) - 1, 0, -1):
        # Item i was taken iff adding it improved on the row above.
        if M[i][cap] > M[i - 1][cap]:
            taken[i] = 1
            cap -= wt[i]
    # Item 0 was taken iff any value remains at the leftover capacity.
    if M[0][cap] != 0:
        taken[0] = 1
    return taken
def main():
    """Demo: run all three knapsack solvers on one instance and time them."""
    import time
    # Example instance: five items, capacity 10; the optimum value is 17.
    val = [6, 3, 5, 4, 6]
    wt = [2, 5, 4, 2, 3]
    wt_cap = 10
    # Solvers take the index of the last item, not the item count.
    n = len(wt) - 1
    # Ans: 17
    start_time = time.time()
    print(knapsack01_recur(val, wt, wt_cap, n))
    print('Time by recursion: {}'.format(time.time() - start_time))
    start_time = time.time()
    print(knapsack01_memo(val, wt, wt_cap, n))
    print('Time by memo: {}'.format(time.time() - start_time))
    start_time = time.time()
    max_val, M = knapsack_dp(val, wt, wt_cap)
    print(max_val)
    print('Time by DP: {}'.format(time.time() - start_time))
    # Recover which items achieve the optimum from the DP table.
    print('Items: {}'.format(item_list(M, wt, wt_cap)))
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
"""
A simple sequential echo server: accept one connection at a time, read a
single buffer of data, echo it back, and close the connection.
"""
import socket

BIND_HOST = ''      # all interfaces
BIND_PORT = 8080
BACKLOG = 5         # listen-queue length
BUF_SIZE = 1024     # bytes read per connection

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((BIND_HOST, BIND_PORT))
server.listen(BACKLOG)
while True:
    conn, peer = server.accept()
    payload = conn.recv(BUF_SIZE)
    if payload:
        conn.send(payload)
    conn.close()
Can now send multiple messages.
#!/usr/bin/env python
# Python 2
"""
A simple echo server: accept one connection at a time, read a single
buffer of data, log it, echo it back, and close the connection.
"""
import socket
host = ''        # bind to all interfaces
port = 8080
backlog = 5      # listen-queue length
size = 1024      # receive-buffer size in bytes
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(backlog)
while 1:
    # Handle clients sequentially: one recv/echo per connection.
    client, address = s.accept()
    data = client.recv(size)
    if data:
        print(data)
        client.send(data)
    client.close()
|
# The Baldimtsi-Lysyanskaya Anonymous Credentials Light scheme
# See:
# Baldimtsi, Foteini, and Anna Lysyanskaya. "Anonymous credentials light."
# Proceedings of the 2013 ACM SIGSAC conference on Computer & communications security.
# ACM, 2013.
from petlib.bn import Bn
from petlib.ec import EcGroup, EcPt
from hashlib import sha256
import pytest
def test_protocol():
    """Placeholder for the ACL protocol test; intentionally a no-op."""
    return None
Full PL protocol described
# The Baldimtsi-Lysyanskaya Anonymous Credentials Light scheme
# See:
# Baldimtsi, Foteini, and Anna Lysyanskaya. "Anonymous credentials light."
# Proceedings of the 2013 ACM SIGSAC conference on Computer & communications security.
# ACM, 2013.
from petlib.bn import Bn
from petlib.ec import EcGroup, EcPt
from hashlib import sha256
from binascii import hexlify
import pytest
def test_protocol():
    """Exercise the Baldimtsi-Lysyanskaya ACL blind-signature flow end to end.

    Plays both roles (user and issuer) in one function: commitment, issuer
    preparation, user blinding, challenge, response, and the final
    verification-equation check. Python 2 / petlib code.
    """
    # Parameters of the PL schemes (curve, order, independent generators)
    G = EcGroup(409)
    q = G.order()
    g = G.hash_to_point("g")
    h = G.hash_to_point("h")
    z = G.hash_to_point("z")
    hs = [G.hash_to_point("h%s" % i) for i in range(100)]
    # Inputs from user: attributes (L1, L2) committed with randomness R
    R = q.random()
    L1 = 10
    L2 = 20
    C = R * hs[0] + L1 * hs[1] + L2 * hs[2]
    m = "Hello World!"
    # Inputs from the Issuer: secret key x, public key y
    # TODO: check ZK on C
    x = q.random()
    y = x * g
    # Preparation (issuer): randomize the commitment into z1, z2
    rnd = q.random()
    z1 = C + rnd * g
    z2 = z + (-z1)
    ## Send: (rnd,) to user
    # rnd must be invertible mod q; abort otherwise
    if rnd % q == 0:
        raise
    z1 = C + rnd * g
    # User blinds the issuer values with gamma
    gam = q.random()
    zet = gam * z
    zet1 = gam * z1
    zet2 = zet + (-zet1)
    tau = q.random()
    eta = tau * z
    # Validation: Issuer commits for the sigma protocol
    u, r1p, r2p, cp = [q.random() for _ in range(4)]
    a = u * g
    a1p = r1p * g + cp * z1
    a2p = r2p * h + cp * z2
    ## Send(a, ap = (a1p, a2p))
    # User side: sanity-check the received points, then blind them
    assert G.check_point(a)
    assert G.check_point(a1p)
    assert G.check_point(a2p)
    t1,t2,t3,t4,t5 = [q.random() for _ in range(5)]
    alph = a + t1 * g + t2 * y
    alph1 = gam * a1p + t3 * g + t4 * zet1
    alph2 = gam * a2p + t5 * h + t4 * zet2
    # Make epsilon: hash of blinded transcript and message
    H = [zet, zet1, alph, alph1, alph2, eta]
    Hstr = map(EcPt.export, H) + [m]
    Hhex = "|".join(map(hexlify, Hstr))
    epsilon = Bn.from_binary(sha256(Hhex).digest()) % q
    e = epsilon.mod_sub(t2,q).mod_sub(t4, q)
    ## Send: (e,) to Issuer
    # Issuer splits the challenge and responds
    c = e.mod_sub(cp, q)
    r = u.mod_sub((c * x), q)
    ## Send: (c,r, cp, rp = (r1p, r2p)) to User
    # User unblinds the responses into the final signature components
    ro = r.mod_add(t1,q)
    om = c.mod_add(t2,q)
    ro1p = (gam * r1p + t3) % q
    ro2p = (gam * r2p + t5) % q
    omp = (cp + t4) % q
    mu = (tau - omp * gam) % q
    signature = (m, zet, zet1, zet2, om, omp, ro, ro1p, ro2p)
    # Check verification equation: rehash the reconstructed transcript
    lhs = (om + omp) % q
    rhs_h = [zet, zet1,
             ro * g + om * y,
             ro1p * g + omp * zet1,
             ro2p * h + omp * zet2, ## problem
             mu * z + omp * zet]
    Hstr = map(EcPt.export, rhs_h) + [m]
    Hhex = "|".join(map(hexlify, Hstr))
    rhs = Bn.from_binary(sha256(Hhex).digest()) % q
    # NOTE(review): a pytest test should assert the equality rather than
    # print it; as written a verification failure does not fail the test.
    print rhs == lhs
|
#!/usr/bin/python2
"""
ec2-init.py
---------------------------
Initialize Amazon EC2 Host
Brian Parsons <brian@pmex.com>
Originally Forked from ec2arch by Yejun Yang <yejunx AT gmail DOT com>
https://github.com/yejun/ec2arch/raw/master/ec2
https://aur.archlinux.org/packages.php?ID=40083
Features:
---------
Sets hostname based on instance user-data hostname
Requires:
---------
boto - https://github.com/boto/boto
Changelog:
----------
2012-06-20 - bcp - added bootalert
2012-06-20 - bcp - grabs domain name from user-data and sets DNS for instance ID
2012-09-15 - bcp - added additional grep for hostname and domainname in case both are returned
2012- - bcp - updated for systemd, bash functions moved to single python script
"""
import datetime
import re
import socket
import smtplib
import sys
import subprocess
import urllib
from boto.route53.connection import Route53Connection
from boto.route53.record import ResourceRecordSets
from boto.route53.exception import DNSServerError
from boto.utils import get_instance_metadata, get_instance_userdata
from socket import gethostname
def updatedns(hostname, newip):
    """Create or refresh the Route53 A record mapping *hostname* to *newip*.

    Looks up the hosted zone whose name is a suffix of *hostname*, deletes
    any existing A record with a different IP, and creates a new A record
    with a 60 s TTL. Returns 1 on error, None otherwise. Python 2 / boto.
    """
    try:
        hostname
    except NameError:
        print 'Hostname not specified and not able to detect.'
        return(1)
    # Add trailing dot to hostname if it doesn't have one
    if hostname[-1:] != ".":
        hostname += "."
    print 'Hostname: %s' % hostname
    print 'Current IP: %s' % newip
    # Initialize the connection to AWS Route53
    route53 = Route53Connection()
    # Get the zoneid
    try:
        route53zones = route53.get_all_hosted_zones()
    except DNSServerError, e:
        print 'Connection error to AWS. Check your credentials.'
        print 'Error %s - %s' % (e.code, str(e))
        return(1)
    for zone in route53zones['ListHostedZonesResponse']['HostedZones']:
        if zone['Name'][0:-1] in hostname:
            zoneid = zone['Id'].replace('/hostedzone/', '')
            print 'Found Route53 Zone %s for hostname %s' % (zoneid, hostname)
    # if no zone matched above, zoneid is unbound and this reports the failure
    try:
        zoneid
    except NameError:
        print 'Unable to find Route53 Zone for %s' % hostname
        return(1)
    # Find the old record if it exists
    try:
        sets = route53.get_all_rrsets(zoneid)
    except DNSServerError, e:
        print 'Connection error to AWS.'
        print 'Error %s - %s' % (e.code, str(e))
        return(1)
    for rset in sets:
        if rset.name == hostname and rset.type == 'A':
            curiprecord = rset.resource_records
            if type(curiprecord) in [list, tuple, set]:
                # keep the last listed value as the current IP
                for record in curiprecord:
                    curip = record
            print 'Current DNS IP: %s' % curip
            curttl = rset.ttl
            print 'Current DNS TTL: %s' % curttl
            if curip != newip:
                # Remove the old record
                print 'Removing old record...'
                change1 = ResourceRecordSets(route53, zoneid)
                removeold = change1.add_change("DELETE", hostname, "A", curttl)
                removeold.add_value(curip)
                change1.commit()
            else:
                print 'IPs match, not making any changes in DNS.'
                return
    # curip unbound here means the hostname had no existing A record
    try:
        curip
    except NameError:
        print 'Hostname %s not found in current zone record' % hostname
    # Add the new record
    print 'Adding %s to DNS as %s...' % ( hostname, newip)
    change2 = ResourceRecordSets(route53, zoneid)
    change = change2.add_change("CREATE", hostname, "A", 60)
    change.add_value(newip)
    change2.commit()
# TODO
# Move variables to /etc/systemd/ec2-init.conf
#
mailto = "brian@pmex.com"
mailfrom = "bootalert@brianparsons.net"
# Collect Meta Data
inst_data = get_instance_metadata()
INSTANCETYPE=inst_data["instance-type"]
INSTANCEID=inst_data["instance-id"]
PUBLICIP=inst_data["public-ipv4"]
PUBLICKEYS=inst_data["public-keys"]
AVAILABILITYZONE=inst_data["placement"]["availability-zone"]
now = datetime.datetime.now()
user_data = get_instance_userdata(sep='|')
try:
hostname = user_data['hostname']
except NameError:
hostname = gethostname()
# set hostname in /etc/hostname
hostfile = open('/etc/hostname', 'w')
hostfile.write(hostname)
hostfile.write('\n')
hostfile.close()
# set hostname with the system
subcmd = "hostname " + hostname
subprocess.call(subcmd,shell=True)
# TODO
# set root key if it doesn't exist
# if file not exist /root/.ssh/authorized_keys
# loop through public-keys, save to file
#
updatedns(hostname, PUBLICIP)
messageheader = "From: EC2-Init <" + mailfrom + ">\n"
messageheader += "To: " + mailto + "\n"
messageheader += "Subject: " + hostname + "\n\n"
message = messageheader + hostname + " booted " + now.strftime("%a %b %d %H:%M:%S %Z %Y") + ". A " + INSTANCETYPE + " in " + AVAILABILITYZONE + " with IP: " + PUBLICIP + ".\n\n"
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(mailfrom, mailto, message)
print "Successfully sent boot alert email"
except SMTPException:
print "Error: unable to send boot alert email"
root key processing for SYSSCR-3
#!/usr/bin/python2
"""
ec2-init.py
---------------------------
Initialize Amazon EC2 Host
Brian Parsons <brian@pmex.com>
Features:
---------
Sets hostname based on instance user-data hostname
Sends email with hostname, instance type, and IP address
Will update DNS in Route53 if boto finds credentials or has IAM role and zone file is found
Requires:
---------
boto - https://github.com/boto/boto
Changelog:
----------
2012-06-20 - bcp - added bootalert
2012-06-20 - bcp - grabs domain name from user-data and sets DNS for instance ID
2012-09-15 - bcp - added additional grep for hostname and domainname in case both are returned
2012-12-14 - bcp - updated for systemd, bash functions moved to single python script
"""
import datetime
import re
import socket
import smtplib
import sys
import subprocess
import urllib
from boto.route53.connection import Route53Connection
from boto.route53.record import ResourceRecordSets
from boto.route53.exception import DNSServerError
from boto.utils import get_instance_metadata, get_instance_userdata
from socket import gethostname
########
##
## updatedns - Updates DNS for given hostname to newip
##
#
def updatedns(hostname, newip):
    """Create or refresh the Route53 A record mapping *hostname* to *newip*.

    Looks up the hosted zone whose name is a suffix of *hostname*, deletes
    any existing A record with a different IP, and creates a new A record
    with a 60 s TTL. Returns 1 on error, None otherwise. Python 2 / boto.
    """
    try:
        hostname
    except NameError:
        print 'Hostname not specified and not able to detect.'
        return(1)
    # Add trailing dot to hostname if it doesn't have one
    if hostname[-1:] != ".":
        hostname += "."
    print 'Hostname: %s' % hostname
    print 'Current IP: %s' % newip
    # Initialize the connection to AWS Route53
    route53 = Route53Connection()
    # Get the zoneid
    try:
        route53zones = route53.get_all_hosted_zones()
    except DNSServerError, e:
        print 'Connection error to AWS. Check your credentials.'
        print 'Error %s - %s' % (e.code, str(e))
        return(1)
    for zone in route53zones['ListHostedZonesResponse']['HostedZones']:
        if zone['Name'][0:-1] in hostname:
            zoneid = zone['Id'].replace('/hostedzone/', '')
            print 'Found Route53 Zone %s for hostname %s' % (zoneid, hostname)
    # if no zone matched above, zoneid is unbound and this reports the failure
    try:
        zoneid
    except NameError:
        print 'Unable to find Route53 Zone for %s' % hostname
        return(1)
    # Find the old record if it exists
    try:
        sets = route53.get_all_rrsets(zoneid)
    except DNSServerError, e:
        print 'Connection error to AWS.'
        print 'Error %s - %s' % (e.code, str(e))
        return(1)
    for rset in sets:
        if rset.name == hostname and rset.type == 'A':
            curiprecord = rset.resource_records
            if type(curiprecord) in [list, tuple, set]:
                # keep the last listed value as the current IP
                for record in curiprecord:
                    curip = record
            print 'Current DNS IP: %s' % curip
            curttl = rset.ttl
            print 'Current DNS TTL: %s' % curttl
            if curip != newip:
                # Remove the old record
                print 'Removing old record...'
                change1 = ResourceRecordSets(route53, zoneid)
                removeold = change1.add_change("DELETE", hostname, "A", curttl)
                removeold.add_value(curip)
                change1.commit()
            else:
                print 'IPs match, not making any changes in DNS.'
                return
    # curip unbound here means the hostname had no existing A record
    try:
        curip
    except NameError:
        print 'Hostname %s not found in current zone record' % hostname
    # Add the new record
    print 'Adding %s to DNS as %s...' % ( hostname, newip)
    change2 = ResourceRecordSets(route53, zoneid)
    change = change2.add_change("CREATE", hostname, "A", 60)
    change.add_value(newip)
    change2.commit()
# TODO
# Move variables to /etc/systemd/ec2-init.conf
#
mailto = "brian@pmex.com"
mailfrom = "bootalert@brianparsons.net"
# Collect Meta Data
inst_data = get_instance_metadata()
INSTANCETYPE=inst_data["instance-type"]
INSTANCEID=inst_data["instance-id"]
PUBLICIP=inst_data["public-ipv4"]
PUBLICKEYS=inst_data["public-keys"]
AVAILABILITYZONE=inst_data["placement"]["availability-zone"]
now = datetime.datetime.now()
user_data = get_instance_userdata(sep='|')
try:
hostname = user_data['hostname']
except NameError:
hostname = gethostname()
# set hostname in /etc/hostname
try:
with open('/etc/hostname', 'w') as hostfile:
hostfile.write(hostname)
hostfile.write('\n')
hostfile.close()
except IOError as e:
print('Could not open /etc/hostname for writing' + e)
# set hostname with the system
subcmd = "hostname " + hostname
subprocess.call(subcmd,shell=True)
# save public key to authorized_keys file
if type(PUBLICKEYS.items()) in [list, tuple, set]:
try:
currentkeys = open('/root/.ssh/authorized_keys').read()
except IOError as e:
currentkeys = ""
try:
with open('/root/.ssh/authorized_keys', 'w') as authkeyfile:
for key in PUBLICKEYS.items():
if not record[1][0] in currentkeys:
authkeyfile.write(record[1][0])
authkeyfile.write('\n')
authkeyfile.close()
except IOError as e:
print 'Could not open authorized_keys file for writing!' + e
# update dns
updatedns(hostname, PUBLICIP)
# compose boot email
messageheader = "From: EC2-Init <" + mailfrom + ">\n"
messageheader += "To: " + mailto + "\n"
messageheader += "Subject: " + hostname + "\n\n"
message = messageheader + hostname + " booted " + now.strftime("%a %b %d %H:%M:%S %Z %Y") + ". A " + INSTANCETYPE + " in " + AVAILABILITYZONE + " with IP: " + PUBLICIP + ".\n\n"
# send boot email
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(mailfrom, mailto, message)
except smtplib.SMTPException:
print("Error: unable to send boot alert email")
|
#!/usr/bin/env python
#
# This program returns the gray matter segmentation given anatomical, spinal cord segmentation and t2star images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Sara Dupont
# Modified: 2015-05-20
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sct_utils as sct
import os
import time
import sys
import getopt
from msct_parser import *
from msct_image import Image, get_dimension
import random
from msct_multiatlas_seg import Model, SegmentationParam, GMsegSupervisedMethod
from msct_gmseg_utils import *
from sct_image import set_orientation, get_orientation, orientation,pad_image
import shutil
class Preprocessing:
    """Pre-process the target image for gray-matter segmentation.

    Copies the inputs into *tmp_dir*, then (inside that directory) resamples
    the axial resolution to 0.3x0.3 mm, optionally denoises, pads, reorients
    to RPI and crops a square box around the spinal cord. Optionally prepares
    a vertebral-level file (from a given file, or computed from t2 data).
    """

    def __init__(self, target_fname, sc_seg_fname, tmp_dir='', t2_data=None, level_fname=None, denoising=True):
        # initiate the file names and copy the files into the temporary directory
        self.original_target = 'target.nii.gz'
        self.original_sc_seg = 'target_sc_seg.nii.gz'
        self.resample_to = 0.3
        if level_fname is not None:
            # an explicit level file overrides any t2 data
            t2_data = None
            level_fname_nii = check_file_to_niigz(level_fname)
            if level_fname_nii:
                level_path, level_file_name, level_ext = sct.extract_fname(level_fname_nii)
                sct.run('cp ' + level_fname_nii + ' ' + tmp_dir + '/' + level_file_name + level_ext)
            else:
                # NOTE(review): if the conversion failed, level_file_name/ext
                # stay None and the "elif level_fname is not None" branch at
                # the bottom would fail; presumably check_file_to_niigz is
                # only falsy on fatal error -- TODO confirm.
                level_path = level_file_name = level_ext = None
        if t2_data is not None:
            self.t2 = 't2.nii.gz'
            self.t2_seg = 't2_seg.nii.gz'
            self.t2_landmarks = 't2_landmarks.nii.gz'
        else:
            self.t2 = self.t2_seg = self.t2_landmarks = None
        sct.run('cp ' + target_fname + ' ' + tmp_dir + '/' + self.original_target)
        sct.run('cp ' + sc_seg_fname + ' ' + tmp_dir + '/' + self.original_sc_seg)
        if t2_data is not None:
            sct.run('cp ' + t2_data[0] + ' ' + tmp_dir + '/' + self.t2)
            sct.run('cp ' + t2_data[1] + ' ' + tmp_dir + '/' + self.t2_seg)
            sct.run('cp ' + t2_data[2] + ' ' + tmp_dir + '/' + self.t2_landmarks)
        # preprocessing is done inside the temporary directory
        os.chdir(tmp_dir)
        im_target = Image(self.original_target)
        im_sc_seg = Image(self.original_sc_seg)
        self.original_header = im_target.hdr
        self.original_orientation = im_target.orientation
        # axis indices of the R-L, A-P and I-S directions in the original orientation
        index_x = self.original_orientation.find('R') if 'R' in self.original_orientation else self.original_orientation.find('L')
        index_y = self.original_orientation.find('P') if 'P' in self.original_orientation else self.original_orientation.find('A')
        index_z = self.original_orientation.find('I') if 'I' in self.original_orientation else self.original_orientation.find('S')
        # resampling of the images to the working axial resolution
        nx, ny, nz, nt, px, py, pz, pt = im_target.dim
        pix_dim = [px, py, pz]
        self.original_px = pix_dim[index_x]
        self.original_py = pix_dim[index_y]
        if round(self.original_px, 2) != self.resample_to or round(self.original_py, 2) != self.resample_to:
            self.t2star = resample_image(self.original_target, npx=self.resample_to, npy=self.resample_to)
            self.sc_seg = resample_image(self.original_sc_seg, binary=True, npx=self.resample_to, npy=self.resample_to)
        else:
            # fix: when the image is already at the target resolution,
            # t2star/sc_seg were never set, crashing the Image() call below
            self.t2star = self.original_target
            self.sc_seg = self.original_sc_seg
        # denoising (optional)
        im_target = Image(self.t2star)
        if denoising:
            from sct_maths import denoise_ornlm
            im_target.data = denoise_ornlm(im_target.data)
            im_target.save()
            self.t2star = im_target.file_name + im_target.ext
        # crop box of 22.5 mm converted to pixels at the working resolution
        box_size = int(22.5/self.resample_to)
        # Pad in case the spinal cord is too close to the edges
        # (// keeps the original Python 2 integer-division behaviour on Python 3)
        pad_size = box_size//2 + 2
        self.pad = [str(pad_size)]*3
        self.pad[index_z] = str(0)
        t2star_pad = sct.add_suffix(self.t2star, '_pad')
        sc_seg_pad = sct.add_suffix(self.sc_seg, '_pad')
        sct.run('sct_image -i '+self.t2star+' -pad '+self.pad[0]+','+self.pad[1]+','+self.pad[2]+' -o '+t2star_pad)
        sct.run('sct_image -i '+self.sc_seg+' -pad '+self.pad[0]+','+self.pad[1]+','+self.pad[2]+' -o '+sc_seg_pad)
        self.t2star = t2star_pad
        self.sc_seg = sc_seg_pad
        # put data in RPI
        t2star_rpi = sct.add_suffix(self.t2star, '_RPI')
        sc_seg_rpi = sct.add_suffix(self.sc_seg, '_RPI')
        sct.run('sct_image -i '+self.t2star+' -setorient RPI -o '+t2star_rpi)
        sct.run('sct_image -i '+self.sc_seg+' -setorient RPI -o '+sc_seg_rpi)
        self.t2star = t2star_rpi
        self.sc_seg = sc_seg_rpi
        # crop a square box around the cord
        self.square_mask, self.processed_target = crop_t2_star(self.t2star, self.sc_seg, box_size=box_size)
        # vertebral-level information (optional)
        self.level_fname = None
        if t2_data is not None:
            self.level_fname = compute_level_file(self.t2star, self.sc_seg, self.t2, self.t2_seg, self.t2_landmarks)
        elif level_fname is not None:
            self.level_fname = level_file_name + level_ext
            level_orientation = get_orientation(self.level_fname, filename=True)
            if level_orientation != 'IRP':
                self.level_fname = set_orientation(self.level_fname, 'IRP', filename=True)
        os.chdir('..')
class FullGmSegmentation:
    def __init__(self, target_fname, sc_seg_fname, t2_data, level_fname, ref_gm_seg=None, model=None, compute_ratio=False, param=None):
        """Build (or reuse) the appearance model, run the whole segmentation
        pipeline in a temporary directory, generate the output files, print
        the fslview command to inspect the result, and optionally save a QC
        image and clean up the temporary folder.

        :param target_fname: target image file name
        :param sc_seg_fname: spinal-cord segmentation file name
        :param t2_data: optional (t2, t2_seg, t2_landmarks) tuple for level computation
        :param level_fname: optional vertebral-level file
        :param ref_gm_seg: optional reference GM segmentation for validation
        :param model: pre-built appearance model (built here if None)
        :param compute_ratio: whether to compute the GM/WM CSA ratio
        :param param: SegmentationParam-like options object
        """
        before = time.time()
        self.param = param
        sct.printv('\nBuilding the appearance model...', verbose=self.param.verbose, type='normal')
        if model is None:
            self.model = Model(model_param=self.param, k=0.8)
        else:
            self.model = model
        sct.printv('\n--> OK !', verbose=self.param.verbose, type='normal')
        self.target_fname = check_file_to_niigz(target_fname)
        self.sc_seg_fname = check_file_to_niigz(sc_seg_fname)
        self.t2_data = t2_data
        if level_fname is not None:
            self.level_fname = check_file_to_niigz(level_fname)
        else:
            self.level_fname = level_fname
        self.ref_gm_seg_fname = ref_gm_seg
        # unique temporary working directory (timestamp + random suffix)
        self.tmp_dir = 'tmp_' + sct.extract_fname(self.target_fname)[1] + '_' + time.strftime("%y%m%d%H%M%S")+ '_'+str(random.randint(1, 1000000))+'/'
        sct.run('mkdir ' + self.tmp_dir)
        self.gm_seg = None
        self.res_names = {}
        self.dice_name = None
        self.hausdorff_name = None
        self.segmentation_pipeline()
        # Generate output files:
        for res_fname in self.res_names.values():
            sct.generate_output_file(self.tmp_dir+res_fname, self.param.output_path+res_fname)
        if self.ref_gm_seg_fname is not None:
            sct.generate_output_file(self.tmp_dir+self.dice_name, self.param.output_path+self.dice_name)
            sct.generate_output_file(self.tmp_dir+self.hausdorff_name, self.param.output_path+self.hausdorff_name)
        if compute_ratio:
            # NOTE(review): self.ratio_name is only assigned inside
            # segmentation_pipeline when the ratio branch runs -- TODO confirm
            sct.generate_output_file(self.tmp_dir+self.ratio_name, self.param.output_path+self.ratio_name)
        after = time.time()
        sct.printv('Done! (in ' + str(after-before) + ' sec) \nTo see the result, type :')
        # display colors / window bounds depend on binary vs probabilistic output
        if self.param.res_type == 'binary':
            wm_col = 'Red'
            gm_col = 'Blue'
            b = '0,1'
        else:
            wm_col = 'Blue-Lightblue'
            gm_col = 'Red-Yellow'
            b = '0.3,1'
        sct.printv('fslview ' + self.target_fname + ' '+self.param.output_path+self.res_names['wm_seg']+' -l '+wm_col+' -t 0.4 -b '+b+' '+self.param.output_path+self.res_names['gm_seg']+' -l '+gm_col+' -t 0.4 -b '+b+' &', param.verbose, 'info')
        if self.param.qc:
            # output QC image
            im = Image(self.target_fname)
            im_gmseg = Image(self.param.output_path+self.res_names['gm_seg'])
            im.save_quality_control(plane='axial', n_slices=5, seg=im_gmseg, thr=float(b.split(',')[0]), cmap_col='red-yellow', path_output=self.param.output_path)
        if self.param.remove_tmp:
            sct.printv('Remove temporary folder ...', self.param.verbose, 'normal')
            sct.run('rm -rf '+self.tmp_dir)
# ------------------------------------------------------------------------------------------------------------------
def segmentation_pipeline(self):
    # Run the complete segmentation chain inside the temporary folder:
    # target pre-processing, supervised gray matter segmentation,
    # post-processing, and (optionally) validation and GM/WM ratio.
    sct.printv('\nDoing target pre-processing ...', verbose=self.param.verbose, type='normal')
    self.preprocessed = Preprocessing(self.target_fname, self.sc_seg_fname, tmp_dir=self.tmp_dir, t2_data=self.t2_data, level_fname=self.level_fname, denoising=self.param.target_denoising)
    os.chdir(self.tmp_dir)
    # Use the vertebral level information produced by pre-processing, if any
    if self.preprocessed.level_fname is not None:
        self.level_to_use = self.preprocessed.level_fname
    else:
        self.level_to_use = None
    sct.printv('\nDoing target gray matter segmentation ...', verbose=self.param.verbose, type='normal')
    self.gm_seg = GMsegSupervisedMethod(self.preprocessed.processed_target, self.level_to_use, self.model, gm_seg_param=self.param)
    sct.printv('\nDoing result post-processing ...', verbose=self.param.verbose, type='normal')
    self.post_processing()
    if self.ref_gm_seg_fname is not None:
        # copy the reference GM segmentation into the tmp dir (paths below are relative to it)
        os.chdir('..')
        ref_gmseg = 'ref_gmseg.nii.gz'
        sct.run('cp ' + self.ref_gm_seg_fname + ' ' + self.tmp_dir + '/' + ref_gmseg)
        os.chdir(self.tmp_dir)
        sct.printv('Computing Dice coefficient and Hausdorff distance ...', verbose=self.param.verbose, type='normal')
        self.dice_name, self.hausdorff_name = self.validation(ref_gmseg)
    # NOTE(review): 'compute_ratio' is read from the global scope (set in
    # __main__), not from a parameter or attribute -- confirm this is intended.
    if compute_ratio:
        sct.printv('\nComputing ratio GM/WM ...', verbose=self.param.verbose, type='normal')
        self.ratio_name = self.compute_ratio(type=compute_ratio)
    os.chdir('..')
# ------------------------------------------------------------------------------------------------------------------
def post_processing(self):
    """
    Bring the segmentation results back into the original image space.

    For each result image (WM, GM and corrected WM segmentation): undo the
    square crop, restore the original orientation, remove the padding added
    during pre-processing, and resample to the original in-plane resolution.
    Fills self.res_names with the final file names.
    """
    square_mask = Image(self.preprocessed.square_mask)
    tmp_res_names = []
    for res_im in [self.gm_seg.res_wm_seg, self.gm_seg.res_gm_seg, self.gm_seg.corrected_wm_seg]:
        # undo the square crop around the spinal cord
        res_im_original_space = inverse_square_crop(res_im, square_mask)
        res_im_original_space.save()
        # restore the original orientation
        res_im_original_space = set_orientation(res_im_original_space, self.preprocessed.original_orientation)
        res_im_original_space.save()
        res_fname_original_space = res_im_original_space.file_name
        ext = res_im_original_space.ext
        # crop from the same pad size
        output_crop = res_fname_original_space+'_crop'
        sct.run('sct_crop_image -i '+res_fname_original_space+ext+' -dim 0,1,2 -start '+self.preprocessed.pad[0]+','+self.preprocessed.pad[1]+','+self.preprocessed.pad[2]+' -end -'+self.preprocessed.pad[0]+',-'+self.preprocessed.pad[1]+',-'+self.preprocessed.pad[2]+' -o '+output_crop+ext)
        res_fname_original_space = output_crop
        target_path, target_name, target_ext = sct.extract_fname(self.target_fname)
        res_name = target_name + res_im.file_name[len(sct.extract_fname(self.preprocessed.processed_target)[1]):] + '.nii.gz'
        # FIX: renamed local 'bin' -> 'binarize' to stop shadowing the builtin
        binarize = self.param.res_type == 'binary'
        old_res_name = resample_image(res_fname_original_space+ext, npx=self.preprocessed.original_px, npy=self.preprocessed.original_py, binary=binarize)
        if self.param.res_type == 'prob':
            # remove near-zero probabilities introduced by the resampling
            sct.run('sct_maths -i ' + old_res_name + ' -thr 0.05 -o ' + old_res_name)
        sct.run('cp ' + old_res_name + ' '+res_name)
        tmp_res_names.append(res_name)
    # same order as the loop over result images above
    self.res_names['wm_seg'] = tmp_res_names[0]
    self.res_names['gm_seg'] = tmp_res_names[1]
    self.res_names['corrected_wm_seg'] = tmp_res_names[2]
# ------------------------------------------------------------------------------------------------------------------
def compute_ratio(self, type='slice'):
    """
    Compute the gray matter / white matter cross-sectional-area ratio.

    :param type: 'slice' for a per-slice ratio, 'level' for a per-vertebral-level
                 ratio, or a level range such as 'C2:C5' for one ratio averaged
                 across those levels. (Parameter name 'type' kept for backward
                 compatibility with keyword callers, although it shadows the builtin.)
    :return: file name of the text file containing the ratio(s), written one
             directory above the 'ratio/' working folder.
    """
    from numpy import mean, nonzero
    from math import isnan
    ratio_dir = 'ratio/'
    sct.run('mkdir '+ratio_dir)
    # FIX: compare string values with '!=' instead of identity ('is not'),
    # which only worked by accident of CPython string interning
    if type != 'slice':
        assert self.preprocessed.level_fname is not None, 'No vertebral level information, you cannot compute GM/WM ratio per vertebral level.'
        # dominant vertebral level per slice (0 when the slice has no label)
        levels = [int(round(mean(dat[nonzero(dat)]), 0)) if not isnan(mean(dat[nonzero(dat)])) else 0 for dat in Image(self.preprocessed.level_fname).data]
        csa_gm_wm_by_level = {}
        for l in levels:
            csa_gm_wm_by_level[l] = []
    gm_seg = 'res_gmseg.nii.gz'
    wm_seg = 'res_wmseg.nii.gz'
    sct.run('cp '+self.res_names['gm_seg']+' '+ratio_dir+gm_seg)
    sct.run('cp '+self.res_names['corrected_wm_seg']+' '+ratio_dir+wm_seg)
    # go to ratio folder
    os.chdir(ratio_dir)
    # per-slice cross-sectional areas of the GM and WM segmentations
    sct.run('sct_process_segmentation -i '+gm_seg+' -p csa -o gm_csa ', error_exit='warning')
    sct.run('mv csa.txt gm_csa.txt')
    sct.run('sct_process_segmentation -i '+wm_seg+' -p csa -o wm_csa ', error_exit='warning')
    sct.run('mv csa.txt wm_csa.txt')
    gm_csa = open('gm_csa.txt', 'r')
    wm_csa = open('wm_csa.txt', 'r')
    ratio_fname = 'ratio.txt'
    ratio = open('../'+ratio_fname, 'w')
    gm_lines = gm_csa.readlines()
    wm_lines = wm_csa.readlines()
    gm_csa.close()
    wm_csa.close()
    ratio.write(type+' , ratio GM/WM \n')
    for gm_line, wm_line in zip(gm_lines, wm_lines):
        # each CSA line is 'slice_index,area'
        i, gm_area = gm_line.split(',')
        j, wm_area = wm_line.split(',')
        assert i == j
        if type != 'slice':
            csa_gm_wm_by_level[levels[int(i)]].append((float(gm_area), float(wm_area)))
        else:
            ratio.write(i+','+str(float(gm_area)/float(wm_area))+'\n')
    if type == 'level':
        for l, gm_wm_list in sorted(csa_gm_wm_by_level.items()):
            csa_gm_list = []
            csa_wm_list = []
            for gm, wm in gm_wm_list:
                csa_gm_list.append(gm)
                csa_wm_list.append(wm)
            csa_gm = mean(csa_gm_list)
            csa_wm = mean(csa_wm_list)
            ratio.write(str(l)+','+str(csa_gm/csa_wm)+'\n')
    elif type != 'slice':
        # 'type' holds a vertebral level range such as 'C2:C5'
        li, lf = type.split(':')
        level_str_to_int = {'C1': 1, 'C2': 2, 'C3': 3, 'C4': 4, 'C5': 5, 'C6': 6, 'C7': 7, 'T1': 8, 'T2': 9}
        li = level_str_to_int[li]
        lf = level_str_to_int[lf]
        csa_gm_list = []
        csa_wm_list = []
        for l in range(li, lf+1):
            gm_wm_list = csa_gm_wm_by_level[l]
            for gm, wm in gm_wm_list:
                csa_gm_list.append(gm)
                csa_wm_list.append(wm)
        csa_gm = mean(csa_gm_list)
        csa_wm = mean(csa_wm_list)
        ratio.write(type+','+str(csa_gm/csa_wm)+'\n')
    ratio.close()
    os.chdir('..')
    return ratio_fname
# ------------------------------------------------------------------------------------------------------------------
def validation(self, ref_gmseg):
    # Compare the GM/WM segmentation results against a reference GM
    # segmentation: computes per-slice Dice coefficients (GM and WM) and the
    # Hausdorff distance (GM), writing both reports one directory above the
    # 'validation/' working folder. Returns (dice_file_name, hausdorff_file_name).
    ext = '.nii.gz'
    validation_dir = 'validation/'
    sct.run('mkdir ' + validation_dir)
    gm_seg = 'res_gmseg.nii.gz'
    wm_seg = 'res_wmseg.nii.gz'
    # Copy images to the validation folder
    sct.run('cp '+ref_gmseg+' '+validation_dir+ref_gmseg)
    sct.run('cp '+self.preprocessed.original_sc_seg+' '+validation_dir+self.preprocessed.original_sc_seg)
    sct.run('cp '+self.res_names['gm_seg']+' '+validation_dir+gm_seg)
    sct.run('cp '+self.res_names['wm_seg']+' '+validation_dir+wm_seg)
    # go to validation folder
    os.chdir(validation_dir)
    # get reference WM segmentation from SC segmentation and reference GM segmentation
    ref_wmseg = 'ref_wmseg.nii.gz'
    sct.run('sct_maths -i '+self.preprocessed.original_sc_seg+' -sub '+ref_gmseg+' -o '+ref_wmseg)
    # Binarize results if it was probabilistic results
    # (WM threshold 0.4999 so voxels at exactly 0.5 fall on the WM side)
    if self.param.res_type == 'prob':
        sct.run('sct_maths -i '+gm_seg+' -thr 0.5 -o '+gm_seg)
        sct.run('sct_maths -i '+wm_seg+' -thr 0.4999 -o '+wm_seg)
        sct.run('sct_maths -i '+gm_seg+' -bin -o '+gm_seg)
        sct.run('sct_maths -i '+wm_seg+' -bin -o '+wm_seg)
    # Compute Dice coefficient
    try:
        status_gm, output_gm = sct.run('sct_dice_coefficient -i '+ref_gmseg+' -d '+gm_seg+' -2d-slices 2', error_exit='warning', raise_exception=True)
    except Exception:
        # put the result and the reference in the same space using a registration with ANTs with no iteration:
        corrected_ref_gmseg = sct.extract_fname(ref_gmseg)[1]+'_in_res_space'+ext
        sct.run('isct_antsRegistration -d 3 -t Translation[0] -m MI['+gm_seg+','+ref_gmseg+',1,16] -o [reg_ref_to_res,'+corrected_ref_gmseg+'] -n BSpline[3] -c 0 -f 1 -s 0')
        sct.run('sct_maths -i '+corrected_ref_gmseg+' -thr 0.1 -o '+corrected_ref_gmseg)
        sct.run('sct_maths -i '+corrected_ref_gmseg+' -bin -o '+corrected_ref_gmseg)
        status_gm, output_gm = sct.run('sct_dice_coefficient -i '+corrected_ref_gmseg+' -d '+gm_seg+' -2d-slices 2', error_exit='warning')
    try:
        status_wm, output_wm = sct.run('sct_dice_coefficient -i '+ref_wmseg+' -d '+wm_seg+' -2d-slices 2', error_exit='warning', raise_exception=True)
    except Exception:
        # put the result and the reference in the same space using a registration with ANTs with no iteration:
        corrected_ref_wmseg = sct.extract_fname(ref_wmseg)[1]+'_in_res_space'+ext
        sct.run('isct_antsRegistration -d 3 -t Translation[0] -m MI['+wm_seg+','+ref_wmseg+',1,16] -o [reg_ref_to_res,'+corrected_ref_wmseg+'] -n BSpline[3] -c 0 -f 1 -s 0')
        sct.run('sct_maths -i '+corrected_ref_wmseg+' -thr 0.1 -o '+corrected_ref_wmseg)
        sct.run('sct_maths -i '+corrected_ref_wmseg+' -bin -o '+corrected_ref_wmseg)
        status_wm, output_wm = sct.run('sct_dice_coefficient -i '+corrected_ref_wmseg+' -d '+wm_seg+' -2d-slices 2', error_exit='warning')
    # write the Dice report one directory above the validation folder
    dice_name = 'dice_' + sct.extract_fname(self.target_fname)[1] + '_' + self.param.res_type + '.txt'
    dice_fic = open('../'+dice_name, 'w')
    if self.param.res_type == 'prob':
        dice_fic.write('WARNING : the probabilistic segmentations were binarized with a threshold at 0.5 to compute the dice coefficient \n')
    dice_fic.write('\n--------------------------------------------------------------\n'
                   'Dice coefficient on the Gray Matter segmentation:\n')
    dice_fic.write(output_gm)
    dice_fic.write('\n\n--------------------------------------------------------------\n'
                   'Dice coefficient on the White Matter segmentation:\n')
    dice_fic.write(output_wm)
    dice_fic.close()
    # Compute Hausdorff distance
    hd_name = 'hd_' + sct.extract_fname(self.target_fname)[1] + '_' + self.param.res_type + '.txt'
    sct.run('sct_compute_hausdorff_distance -i '+gm_seg+' -d '+ref_gmseg+' -thinning 1 -o '+hd_name+' -v '+str(self.param.verbose))
    sct.run('mv ./' + hd_name + ' ../')
    os.chdir('..')
    return dice_name, hd_name
########################################################################################################################
# ------------------------------------------------------ MAIN ------------------------------------------------------- #
########################################################################################################################
def get_parser():
    """Build and return the command-line parser for the gray matter segmentation script."""
    # Initialize the parser
    parser = Parser(__file__)
    parser.usage.set_description('Segmentation of the white/gray matter on a T2star or MT image\n'
                                 'Multi-Atlas based method: the model containing a template of the white/gray matter segmentation along the cervical spinal cord, and a PCA space to describe the variability of intensity in that template is provided in the toolbox. ')
    parser.add_option(name="-i",
                      type_value="file",
                      description="Target image to segment",
                      mandatory=True,
                      example='t2star.nii.gz')
    parser.add_option(name="-s",
                      type_value="file",
                      description="Spinal cord segmentation of the target",
                      mandatory=True,
                      example='sc_seg.nii.gz')
    parser.usage.addSection('STRONGLY RECOMMENDED ARGUMENTS\n'
                            'Choose one of them')
    # FIX: added missing line break between the two concatenated description strings
    parser.add_option(name="-vert",
                      type_value="file",
                      description="Image containing level labels for the target\n"
                                  "If -l is used, no need to provide t2 data",
                      mandatory=False,
                      example='MNI-Poly-AMU_level_IRP.nii.gz')
    parser.add_option(name="-l",
                      type_value=None,
                      description="Image containing level labels for the target\n"
                                  "If -l is used, no need to provide t2 data",
                      mandatory=False,
                      deprecated_by='-vert')
    parser.add_option(name="-t2",
                      type_value=[[','], 'file'],
                      description="T2 data associated to the input image : used to register the template on the T2star and get the vertebral levels\n"
                                  "In this order, without whitespace : t2_image,t2_sc_segmentation,t2_landmarks\n(see: http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/)",
                      mandatory=False,
                      default_value=None,
                      example='t2.nii.gz,t2_seg.nii.gz,landmarks.nii.gz')
    parser.usage.addSection('SEGMENTATION OPTIONS')
    parser.add_option(name="-use-levels",
                      type_value='multiple_choice',
                      description="Use the level information for the model or not",
                      mandatory=False,
                      default_value=1,
                      example=['0', '1'])
    parser.add_option(name="-weight",
                      type_value='float',
                      description="weight parameter on the level differences to compute the similarities (beta)",
                      mandatory=False,
                      default_value=2.5,
                      example=2.0)
    # FIX: typos in help text ('Adaptative', 'coputed', missing closing parenthesis)
    parser.add_option(name="-denoising",
                      type_value='multiple_choice',
                      description="1: Adaptive denoising from F. Coupe algorithm, 0: no. WARNING: It affects the model you should use (if denoising is applied to the target, the model should have been computed with denoising too)",
                      mandatory=False,
                      default_value=1,
                      example=['0', '1'])
    parser.add_option(name="-normalize",
                      type_value='multiple_choice',
                      description="Normalization of the target image's intensity using median intensity values of the WM and the GM, recommended with MT images or other types of contrast than T2*",
                      mandatory=False,
                      default_value=1,
                      example=['0', '1'])
    parser.add_option(name="-medians",
                      type_value=[[','], 'float'],
                      description="Median intensity values in the target white matter and gray matter (separated by a comma without white space)\n"
                                  "If not specified, the mean intensity values of the target WM and GM are estimated automatically using the dictionary average segmentation by level.\n"
                                  "Only if the -normalize flag is used",
                      mandatory=False,
                      default_value=None,
                      example=["450,540"])
    parser.add_option(name="-model",
                      type_value="folder",
                      description="Path to the model data",
                      mandatory=False,
                      example='/home/jdoe/gm_seg_model_data/')
    # FIX: typo 'OUTPUT OTIONS' -> 'OUTPUT OPTIONS'
    parser.usage.addSection('OUTPUT OPTIONS')
    parser.add_option(name="-res-type",
                      type_value='multiple_choice',
                      description="Type of result segmentation : binary or probabilistic",
                      mandatory=False,
                      default_value='prob',
                      example=['binary', 'prob'])
    parser.add_option(name="-ratio",
                      type_value='multiple_choice',
                      description="Compute GM/WM ratio by slice or by vertebral level (average across levels)",
                      mandatory=False,
                      default_value='0',
                      example=['0', 'slice', 'level'])
    parser.add_option(name="-ratio-level",
                      type_value='str',
                      description="Compute GM/WM ratio across several vertebral levels.",
                      mandatory=False,
                      default_value='0',
                      example='C2:C4')
    parser.add_option(name="-ofolder",
                      type_value="folder_creation",
                      description="Output folder",
                      mandatory=False,
                      default_value='./',
                      example='gm_segmentation_results/')
    # FIX: typo "Hausdoorff's" -> "Hausdorff's"
    parser.add_option(name="-ref",
                      type_value="file",
                      description="Reference segmentation of the gray matter for segmentation validation (outputs Dice coefficient and Hausdorff's distance)",
                      mandatory=False,
                      example='manual_gm_seg.nii.gz')
    parser.usage.addSection('MISC')
    parser.add_option(name='-qc',
                      type_value='multiple_choice',
                      description='Output images for quality control.',
                      mandatory=False,
                      example=['0', '1'],
                      default_value='1')
    parser.add_option(name="-r",
                      type_value="multiple_choice",
                      description='Remove temporary files.',
                      mandatory=False,
                      default_value='1',
                      example=['0', '1'])
    # FIX: typo 'expended' -> 'extended'
    parser.add_option(name="-v",
                      type_value='multiple_choice',
                      description="verbose: 0 = nothing, 1 = classic, 2 = extended",
                      mandatory=False,
                      example=['0', '1', '2'],
                      default_value='1')
    return parser
if __name__ == "__main__":
param = SegmentationParam()
input_target_fname = None
input_sc_seg_fname = None
input_t2_data = None
input_level_fname = None
input_ref_gm_seg = None
compute_ratio = False
if param.debug:
print '\n*** WARNING: DEBUG MODE ON ***\n'
fname_input = param.path_model + "/errsm_34.nii.gz"
fname_input = param.path_model + "/errsm_34_seg_in.nii.gz"
else:
param_default = SegmentationParam()
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
input_target_fname = arguments["-i"]
input_sc_seg_fname = arguments["-s"]
if "-model" in arguments:
param.path_model = arguments["-model"]
param.todo_model = 'load'
param.output_path = sct.slash_at_the_end(arguments["-ofolder"], slash=1)
if "-t2" in arguments:
input_t2_data = arguments["-t2"]
if "-vert" in arguments:
input_level_fname = arguments["-vert"]
if "-use-levels" in arguments:
param.use_levels = bool(int(arguments["-use-levels"]))
if "-weight" in arguments:
param.weight_gamma = arguments["-weight"]
if "-denoising" in arguments:
param.target_denoising = bool(int(arguments["-denoising"]))
if "-normalize" in arguments:
param.target_normalization = bool(int(arguments["-normalize"]))
if "-means" in arguments:
param.target_means = arguments["-means"]
if "-ratio" in arguments:
if arguments["-ratio"] == '0':
compute_ratio = False
else:
compute_ratio = arguments["-ratio"]
if "-ratio-level" in arguments:
if arguments["-ratio-level"] == '0':
compute_ratio = False
else:
if ':' in arguments["-ratio-level"]:
compute_ratio = arguments["-ratio-level"]
else:
sct.printv('WARNING: -ratio-level function should be used with a range of vertebral levels (for ex: "C2:C5"). Ignoring option.', 1, 'warning')
if "-res-type" in arguments:
param.res_type = arguments["-res-type"]
if "-ref" in arguments:
input_ref_gm_seg = arguments["-ref"]
param.verbose = int(arguments["-v"])
param.qc = int(arguments["-qc"])
param.remove_tmp = int(arguments["-r"])
if input_level_fname is None and input_t2_data is None:
param.use_levels = False
param.weight_gamma = 0
gmsegfull = FullGmSegmentation(input_target_fname, input_sc_seg_fname, input_t2_data, input_level_fname, ref_gm_seg=input_ref_gm_seg, compute_ratio=compute_ratio, param=param)
REF: cleaner code for preprocessing
Former-commit-id: d68e727a1af2b66ccfe7092ec166183c6f143811
Former-commit-id: 26ad6e6008a8391e24f9bd1081d9e84612d9df4c
#!/usr/bin/env python
#
# This program returns the gray matter segmentation given anatomical, spinal cord segmentation and t2star images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Sara Dupont
# Modified: 2015-05-20
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sct_utils as sct
import os
import time
import sys
import getopt
from msct_parser import *
from msct_image import Image, get_dimension
import random
from msct_multiatlas_seg import Model, SegmentationParam, GMsegSupervisedMethod
from msct_gmseg_utils import *
from sct_image import set_orientation, get_orientation, orientation,pad_image
import shutil
class Preprocessing:
    """
    Pre-processing of the target image for gray matter segmentation.

    Copies the inputs into a temporary folder, then (in process()) resamples
    to 0.3x0.3 mm in-plane, optionally denoises, pads, reorients to RPI and
    crops a square box around the spinal cord.
    """
    def __init__(self, target_fname, sc_seg_fname, tmp_dir='', t2_data=None, level_fname=None, denoising=True):
        # initiate the file names and copy the files into the temporary directory
        self.original_target = 'target.nii.gz'
        self.original_sc_seg = 'target_sc_seg.nii.gz'
        self.resample_to = 0.3
        self.tmp_dir = tmp_dir
        self.denoising = denoising
        if level_fname is not None:
            # an explicit level file takes precedence over the t2 data
            t2_data = None
            level_fname_nii = check_file_to_niigz(level_fname)
            if level_fname_nii:
                path_level, file_level, ext_level = sct.extract_fname(level_fname_nii)
                self.fname_level = file_level + ext_level
                sct.run('cp ' + level_fname_nii + ' ' + tmp_dir + '/' + self.fname_level)
            else:
                # FIX: previously left self.fname_level undefined when the
                # level file could not be converted to nii.gz
                self.fname_level = None
        else:
            self.fname_level = None
        if t2_data is not None:
            self.t2 = 't2.nii.gz'
            self.t2_seg = 't2_seg.nii.gz'
            self.t2_landmarks = 't2_landmarks.nii.gz'
        else:
            self.t2 = self.t2_seg = self.t2_landmarks = None
        # processes:
        self.copy_to_tmp(target_fname=target_fname, sc_seg_fname=sc_seg_fname, t2_data=t2_data)

    def copy_to_tmp(self, target_fname, sc_seg_fname, t2_data=None):
        """Copy the input images into the temporary directory."""
        sct.run('cp ' + target_fname + ' ' + self.tmp_dir + '/' + self.original_target)
        sct.run('cp ' + sc_seg_fname + ' ' + self.tmp_dir + '/' + self.original_sc_seg)
        if self.t2 is not None:
            # t2_data is (t2_image, t2_sc_segmentation, t2_landmarks)
            sct.run('cp ' + t2_data[0] + ' ' + self.tmp_dir + '/' + self.t2)
            sct.run('cp ' + t2_data[1] + ' ' + self.tmp_dir + '/' + self.t2_seg)
            sct.run('cp ' + t2_data[2] + ' ' + self.tmp_dir + '/' + self.t2_landmarks)

    def process(self):
        """Run the pre-processing chain inside the temporary directory."""
        # preprocessing
        os.chdir(self.tmp_dir)
        im_target = Image(self.original_target)
        self.original_header = im_target.hdr
        self.original_orientation = im_target.orientation
        # axis indices in the original orientation (x: R/L, y: P/A, z: I/S)
        index_x = self.original_orientation.find('R') if 'R' in self.original_orientation else self.original_orientation.find('L')
        index_y = self.original_orientation.find('P') if 'P' in self.original_orientation else self.original_orientation.find('A')
        index_z = self.original_orientation.find('I') if 'I' in self.original_orientation else self.original_orientation.find('S')
        # resampling of the images
        nx, ny, nz, nt, px, py, pz, pt = im_target.dim
        pix_dim = [px, py, pz]
        self.original_px = pix_dim[index_x]
        self.original_py = pix_dim[index_y]
        if round(self.original_px, 2) != self.resample_to or round(self.original_py, 2) != self.resample_to:
            self.t2star = resample_image(self.original_target, npx=self.resample_to, npy=self.resample_to)
            self.sc_seg = resample_image(self.original_sc_seg, binary=True, npx=self.resample_to, npy=self.resample_to)
        else:
            # FIX: previously left self.t2star/self.sc_seg undefined when the
            # image was already at the target resolution (crash below)
            self.t2star = self.original_target
            self.sc_seg = self.original_sc_seg
        # denoising (optional)
        im_target = Image(self.t2star)
        if self.denoising:
            from sct_maths import denoise_ornlm
            im_target.data = denoise_ornlm(im_target.data)
            im_target.save()
            self.t2star = im_target.file_name + im_target.ext
        # square box of 22.5 mm expressed in voxels
        box_size = int(22.5/self.resample_to)
        # Pad in case the spinal cord is too close to the edges
        pad_size = box_size/2 + 2  # Python 2 integer division
        self.pad = [str(pad_size)]*3
        self.pad[index_z] = str(0)  # no padding along the slice axis
        t2star_pad = sct.add_suffix(self.t2star, '_pad')
        sc_seg_pad = sct.add_suffix(self.sc_seg, '_pad')
        sct.run('sct_image -i '+self.t2star+' -pad '+self.pad[0]+','+self.pad[1]+','+self.pad[2]+' -o '+t2star_pad)
        sct.run('sct_image -i '+self.sc_seg+' -pad '+self.pad[0]+','+self.pad[1]+','+self.pad[2]+' -o '+sc_seg_pad)
        self.t2star = t2star_pad
        self.sc_seg = sc_seg_pad
        # put data in RPI
        t2star_rpi = sct.add_suffix(self.t2star, '_RPI')
        sc_seg_rpi = sct.add_suffix(self.sc_seg, '_RPI')
        sct.run('sct_image -i '+self.t2star+' -setorient RPI -o '+t2star_rpi)
        sct.run('sct_image -i '+self.sc_seg+' -setorient RPI -o '+sc_seg_rpi)
        self.t2star = t2star_rpi
        self.sc_seg = sc_seg_rpi
        # crop a square box around the spinal cord
        self.square_mask, self.processed_target = crop_t2_star(self.t2star, self.sc_seg, box_size=box_size)
        if self.t2 is not None:
            # compute vertebral levels from the t2 data
            self.fname_level = compute_level_file(self.t2star, self.sc_seg, self.t2, self.t2_seg, self.t2_landmarks)
        elif self.fname_level is not None:
            # reorient the provided level file to IRP if needed
            level_orientation = get_orientation(self.fname_level, filename=True)
            if level_orientation != 'IRP':
                self.fname_level = set_orientation(self.fname_level, 'IRP', filename=True)
        os.chdir('..')
class FullGmSegmentation:
    """
    Complete gray matter segmentation pipeline.

    Pre-processes the target image, runs the multi-atlas supervised
    segmentation, post-processes the results back into the original space,
    and optionally validates them against a reference segmentation
    (Dice / Hausdorff) and computes the GM/WM ratio. Final files are copied
    to self.param.output_path.
    """
    def __init__(self, target_fname, sc_seg_fname, t2_data, level_fname, ref_gm_seg=None, model=None, compute_ratio=False, param=None):
        before = time.time()
        self.param = param
        sct.printv('\nBuilding the appearance model...', verbose=self.param.verbose, type='normal')
        if model is None:
            self.model = Model(model_param=self.param, k=0.8)
        else:
            self.model = model
        sct.printv('\n--> OK !', verbose=self.param.verbose, type='normal')
        self.target_fname = check_file_to_niigz(target_fname)
        self.sc_seg_fname = check_file_to_niigz(sc_seg_fname)
        self.t2_data = t2_data
        if level_fname is not None:
            self.level_fname = check_file_to_niigz(level_fname)
        else:
            self.level_fname = level_fname
        self.ref_gm_seg_fname = ref_gm_seg
        # FIX: store the ratio option on the instance so the pipeline does not
        # depend on a global 'compute_ratio' defined in __main__
        self.compute_ratio = compute_ratio
        # unique temporary folder (timestamp + random suffix)
        self.tmp_dir = 'tmp_' + sct.extract_fname(self.target_fname)[1] + '_' + time.strftime("%y%m%d%H%M%S")+ '_'+str(random.randint(1, 1000000))+'/'
        sct.run('mkdir ' + self.tmp_dir)
        self.gm_seg = None
        self.res_names = {}
        self.dice_name = None
        self.hausdorff_name = None
        self.segmentation_pipeline()
        # Generate output files:
        for res_fname in self.res_names.values():
            sct.generate_output_file(self.tmp_dir+res_fname, self.param.output_path+res_fname)
        if self.ref_gm_seg_fname is not None:
            sct.generate_output_file(self.tmp_dir+self.dice_name, self.param.output_path+self.dice_name)
            sct.generate_output_file(self.tmp_dir+self.hausdorff_name, self.param.output_path+self.hausdorff_name)
        if self.compute_ratio:
            sct.generate_output_file(self.tmp_dir+self.ratio_name, self.param.output_path+self.ratio_name)
        after = time.time()
        sct.printv('Done! (in ' + str(after-before) + ' sec) \nTo see the result, type :')
        # display parameters for the suggested fslview command
        if self.param.res_type == 'binary':
            wm_col = 'Red'
            gm_col = 'Blue'
            b = '0,1'
        else:
            wm_col = 'Blue-Lightblue'
            gm_col = 'Red-Yellow'
            b = '0.3,1'
        # FIX: use self.param.verbose instead of the global 'param'
        sct.printv('fslview ' + self.target_fname + ' '+self.param.output_path+self.res_names['wm_seg']+' -l '+wm_col+' -t 0.4 -b '+b+' '+self.param.output_path+self.res_names['gm_seg']+' -l '+gm_col+' -t 0.4 -b '+b+' &', self.param.verbose, 'info')
        if self.param.qc:
            # output QC image
            im = Image(self.target_fname)
            im_gmseg = Image(self.param.output_path+self.res_names['gm_seg'])
            im.save_quality_control(plane='axial', n_slices=5, seg=im_gmseg, thr=float(b.split(',')[0]), cmap_col='red-yellow', path_output=self.param.output_path)
        if self.param.remove_tmp:
            sct.printv('Remove temporary folder ...', self.param.verbose, 'normal')
            sct.run('rm -rf '+self.tmp_dir)

    # ------------------------------------------------------------------------------------------------------------------
    def segmentation_pipeline(self):
        """Run pre-processing, segmentation, post-processing and optional validation/ratio."""
        sct.printv('\nDoing target pre-processing ...', verbose=self.param.verbose, type='normal')
        self.preprocessed = Preprocessing(self.target_fname, self.sc_seg_fname, tmp_dir=self.tmp_dir, t2_data=self.t2_data, level_fname=self.level_fname, denoising=self.param.target_denoising)
        self.preprocessed.process()
        os.chdir(self.tmp_dir)
        # use the vertebral level information produced by pre-processing, if any
        if self.preprocessed.fname_level is not None:
            self.level_to_use = self.preprocessed.fname_level
        else:
            self.level_to_use = None
        sct.printv('\nDoing target gray matter segmentation ...', verbose=self.param.verbose, type='normal')
        self.gm_seg = GMsegSupervisedMethod(self.preprocessed.processed_target, self.level_to_use, self.model, gm_seg_param=self.param)
        sct.printv('\nDoing result post-processing ...', verbose=self.param.verbose, type='normal')
        self.post_processing()
        if self.ref_gm_seg_fname is not None:
            # copy the reference GM segmentation into the tmp dir (paths below are relative to it)
            os.chdir('..')
            ref_gmseg = 'ref_gmseg.nii.gz'
            sct.run('cp ' + self.ref_gm_seg_fname + ' ' + self.tmp_dir + '/' + ref_gmseg)
            os.chdir(self.tmp_dir)
            sct.printv('Computing Dice coefficient and Hausdorff distance ...', verbose=self.param.verbose, type='normal')
            self.dice_name, self.hausdorff_name = self.validation(ref_gmseg)
        # FIX: read the instance attribute instead of the global 'compute_ratio'
        if self.compute_ratio:
            sct.printv('\nComputing ratio GM/WM ...', verbose=self.param.verbose, type='normal')
            self.ratio_name = self.compute_ratio_files()

    def compute_ratio_files(self):
        """Thin wrapper keeping compute_ratio() callable with its historical keyword."""
        ratio_name = self.compute_ratio(type=self.compute_ratio_arg())
        os.chdir('..')
        return ratio_name

    def compute_ratio_arg(self):
        """Return the ratio mode ('slice', 'level' or a level range such as 'C2:C5')."""
        return self.compute_ratio

    # ------------------------------------------------------------------------------------------------------------------
    def post_processing(self):
        """
        Bring the segmentation results back into the original image space:
        undo the square crop, restore the original orientation, remove the
        padding and resample to the original in-plane resolution.
        Fills self.res_names with the final file names.
        """
        square_mask = Image(self.preprocessed.square_mask)
        tmp_res_names = []
        for res_im in [self.gm_seg.res_wm_seg, self.gm_seg.res_gm_seg, self.gm_seg.corrected_wm_seg]:
            # undo the square crop around the spinal cord
            res_im_original_space = inverse_square_crop(res_im, square_mask)
            res_im_original_space.save()
            # restore the original orientation
            res_im_original_space = set_orientation(res_im_original_space, self.preprocessed.original_orientation)
            res_im_original_space.save()
            res_fname_original_space = res_im_original_space.file_name
            ext = res_im_original_space.ext
            # crop from the same pad size
            output_crop = res_fname_original_space+'_crop'
            sct.run('sct_crop_image -i '+res_fname_original_space+ext+' -dim 0,1,2 -start '+self.preprocessed.pad[0]+','+self.preprocessed.pad[1]+','+self.preprocessed.pad[2]+' -end -'+self.preprocessed.pad[0]+',-'+self.preprocessed.pad[1]+',-'+self.preprocessed.pad[2]+' -o '+output_crop+ext)
            res_fname_original_space = output_crop
            target_path, target_name, target_ext = sct.extract_fname(self.target_fname)
            res_name = target_name + res_im.file_name[len(sct.extract_fname(self.preprocessed.processed_target)[1]):] + '.nii.gz'
            # FIX: renamed local 'bin' -> 'binarize' to stop shadowing the builtin
            binarize = self.param.res_type == 'binary'
            old_res_name = resample_image(res_fname_original_space+ext, npx=self.preprocessed.original_px, npy=self.preprocessed.original_py, binary=binarize)
            if self.param.res_type == 'prob':
                # remove near-zero probabilities introduced by the resampling
                sct.run('sct_maths -i ' + old_res_name + ' -thr 0.05 -o ' + old_res_name)
            sct.run('cp ' + old_res_name + ' '+res_name)
            tmp_res_names.append(res_name)
        # same order as the loop over result images above
        self.res_names['wm_seg'] = tmp_res_names[0]
        self.res_names['gm_seg'] = tmp_res_names[1]
        self.res_names['corrected_wm_seg'] = tmp_res_names[2]

    # ------------------------------------------------------------------------------------------------------------------
    def compute_ratio(self, type='slice'):
        """
        Compute the gray matter / white matter cross-sectional-area ratio.

        :param type: 'slice' for a per-slice ratio, 'level' for a per-level
                     ratio, or a level range such as 'C2:C5' for one averaged
                     ratio. ('type' kept as parameter name for compatibility.)
        :return: file name of the text file containing the ratio(s)
        """
        from numpy import mean, nonzero
        from math import isnan
        ratio_dir = 'ratio/'
        sct.run('mkdir '+ratio_dir)
        # FIX: compare string values with '!=' instead of identity ('is not')
        if type != 'slice':
            assert self.preprocessed.fname_level is not None, 'No vertebral level information, you cannot compute GM/WM ratio per vertebral level.'
            # dominant vertebral level per slice (0 when the slice has no label)
            levels = [int(round(mean(dat[nonzero(dat)]), 0)) if not isnan(mean(dat[nonzero(dat)])) else 0 for dat in Image(self.preprocessed.fname_level).data]
            csa_gm_wm_by_level = {}
            for l in levels:
                csa_gm_wm_by_level[l] = []
        gm_seg = 'res_gmseg.nii.gz'
        wm_seg = 'res_wmseg.nii.gz'
        sct.run('cp '+self.res_names['gm_seg']+' '+ratio_dir+gm_seg)
        sct.run('cp '+self.res_names['corrected_wm_seg']+' '+ratio_dir+wm_seg)
        # go to ratio folder
        os.chdir(ratio_dir)
        # per-slice cross-sectional areas of the GM and WM segmentations
        sct.run('sct_process_segmentation -i '+gm_seg+' -p csa -o gm_csa ', error_exit='warning')
        sct.run('mv csa.txt gm_csa.txt')
        sct.run('sct_process_segmentation -i '+wm_seg+' -p csa -o wm_csa ', error_exit='warning')
        sct.run('mv csa.txt wm_csa.txt')
        gm_csa = open('gm_csa.txt', 'r')
        wm_csa = open('wm_csa.txt', 'r')
        ratio_fname = 'ratio.txt'
        ratio = open('../'+ratio_fname, 'w')
        gm_lines = gm_csa.readlines()
        wm_lines = wm_csa.readlines()
        gm_csa.close()
        wm_csa.close()
        ratio.write(type+' , ratio GM/WM \n')
        for gm_line, wm_line in zip(gm_lines, wm_lines):
            # each CSA line is 'slice_index,area'
            i, gm_area = gm_line.split(',')
            j, wm_area = wm_line.split(',')
            assert i == j
            if type != 'slice':
                csa_gm_wm_by_level[levels[int(i)]].append((float(gm_area), float(wm_area)))
            else:
                ratio.write(i+','+str(float(gm_area)/float(wm_area))+'\n')
        if type == 'level':
            for l, gm_wm_list in sorted(csa_gm_wm_by_level.items()):
                csa_gm_list = []
                csa_wm_list = []
                for gm, wm in gm_wm_list:
                    csa_gm_list.append(gm)
                    csa_wm_list.append(wm)
                csa_gm = mean(csa_gm_list)
                csa_wm = mean(csa_wm_list)
                ratio.write(str(l)+','+str(csa_gm/csa_wm)+'\n')
        elif type != 'slice':
            # 'type' holds a vertebral level range such as 'C2:C5'
            li, lf = type.split(':')
            level_str_to_int = {'C1': 1, 'C2': 2, 'C3': 3, 'C4': 4, 'C5': 5, 'C6': 6, 'C7': 7, 'T1': 8, 'T2': 9}
            li = level_str_to_int[li]
            lf = level_str_to_int[lf]
            csa_gm_list = []
            csa_wm_list = []
            for l in range(li, lf+1):
                gm_wm_list = csa_gm_wm_by_level[l]
                for gm, wm in gm_wm_list:
                    csa_gm_list.append(gm)
                    csa_wm_list.append(wm)
            csa_gm = mean(csa_gm_list)
            csa_wm = mean(csa_wm_list)
            ratio.write(type+','+str(csa_gm/csa_wm)+'\n')
        ratio.close()
        os.chdir('..')
        return ratio_fname

    # ------------------------------------------------------------------------------------------------------------------
    def validation(self, ref_gmseg):
        """
        Compare the GM/WM results against a reference GM segmentation.

        Computes per-slice Dice coefficients (GM and WM) and the Hausdorff
        distance (GM); reports are written one directory above 'validation/'.

        :param ref_gmseg: file name of the reference GM segmentation
        :return: (dice_file_name, hausdorff_file_name)
        """
        ext = '.nii.gz'
        validation_dir = 'validation/'
        sct.run('mkdir ' + validation_dir)
        gm_seg = 'res_gmseg.nii.gz'
        wm_seg = 'res_wmseg.nii.gz'
        # Copy images to the validation folder
        sct.run('cp '+ref_gmseg+' '+validation_dir+ref_gmseg)
        sct.run('cp '+self.preprocessed.original_sc_seg+' '+validation_dir+self.preprocessed.original_sc_seg)
        sct.run('cp '+self.res_names['gm_seg']+' '+validation_dir+gm_seg)
        sct.run('cp '+self.res_names['wm_seg']+' '+validation_dir+wm_seg)
        # go to validation folder
        os.chdir(validation_dir)
        # get reference WM segmentation from SC segmentation and reference GM segmentation
        ref_wmseg = 'ref_wmseg.nii.gz'
        sct.run('sct_maths -i '+self.preprocessed.original_sc_seg+' -sub '+ref_gmseg+' -o '+ref_wmseg)
        # Binarize results if it was probabilistic results
        # (WM threshold 0.4999 so voxels at exactly 0.5 fall on the WM side)
        if self.param.res_type == 'prob':
            sct.run('sct_maths -i '+gm_seg+' -thr 0.5 -o '+gm_seg)
            sct.run('sct_maths -i '+wm_seg+' -thr 0.4999 -o '+wm_seg)
            sct.run('sct_maths -i '+gm_seg+' -bin -o '+gm_seg)
            sct.run('sct_maths -i '+wm_seg+' -bin -o '+wm_seg)
        # Compute Dice coefficient
        try:
            status_gm, output_gm = sct.run('sct_dice_coefficient -i '+ref_gmseg+' -d '+gm_seg+' -2d-slices 2', error_exit='warning', raise_exception=True)
        except Exception:
            # put the result and the reference in the same space using a registration with ANTs with no iteration:
            corrected_ref_gmseg = sct.extract_fname(ref_gmseg)[1]+'_in_res_space'+ext
            sct.run('isct_antsRegistration -d 3 -t Translation[0] -m MI['+gm_seg+','+ref_gmseg+',1,16] -o [reg_ref_to_res,'+corrected_ref_gmseg+'] -n BSpline[3] -c 0 -f 1 -s 0')
            sct.run('sct_maths -i '+corrected_ref_gmseg+' -thr 0.1 -o '+corrected_ref_gmseg)
            sct.run('sct_maths -i '+corrected_ref_gmseg+' -bin -o '+corrected_ref_gmseg)
            status_gm, output_gm = sct.run('sct_dice_coefficient -i '+corrected_ref_gmseg+' -d '+gm_seg+' -2d-slices 2', error_exit='warning')
        try:
            status_wm, output_wm = sct.run('sct_dice_coefficient -i '+ref_wmseg+' -d '+wm_seg+' -2d-slices 2', error_exit='warning', raise_exception=True)
        except Exception:
            # put the result and the reference in the same space using a registration with ANTs with no iteration:
            corrected_ref_wmseg = sct.extract_fname(ref_wmseg)[1]+'_in_res_space'+ext
            sct.run('isct_antsRegistration -d 3 -t Translation[0] -m MI['+wm_seg+','+ref_wmseg+',1,16] -o [reg_ref_to_res,'+corrected_ref_wmseg+'] -n BSpline[3] -c 0 -f 1 -s 0')
            sct.run('sct_maths -i '+corrected_ref_wmseg+' -thr 0.1 -o '+corrected_ref_wmseg)
            sct.run('sct_maths -i '+corrected_ref_wmseg+' -bin -o '+corrected_ref_wmseg)
            status_wm, output_wm = sct.run('sct_dice_coefficient -i '+corrected_ref_wmseg+' -d '+wm_seg+' -2d-slices 2', error_exit='warning')
        # write the Dice report one directory above the validation folder
        dice_name = 'dice_' + sct.extract_fname(self.target_fname)[1] + '_' + self.param.res_type + '.txt'
        dice_fic = open('../'+dice_name, 'w')
        if self.param.res_type == 'prob':
            dice_fic.write('WARNING : the probabilistic segmentations were binarized with a threshold at 0.5 to compute the dice coefficient \n')
        dice_fic.write('\n--------------------------------------------------------------\n'
                       'Dice coefficient on the Gray Matter segmentation:\n')
        dice_fic.write(output_gm)
        dice_fic.write('\n\n--------------------------------------------------------------\n'
                       'Dice coefficient on the White Matter segmentation:\n')
        dice_fic.write(output_wm)
        dice_fic.close()
        # Compute Hausdorff distance
        hd_name = 'hd_' + sct.extract_fname(self.target_fname)[1] + '_' + self.param.res_type + '.txt'
        sct.run('sct_compute_hausdorff_distance -i '+gm_seg+' -d '+ref_gmseg+' -thinning 1 -o '+hd_name+' -v '+str(self.param.verbose))
        sct.run('mv ./' + hd_name + ' ../')
        os.chdir('..')
        return dice_name, hd_name
########################################################################################################################
# ------------------------------------------------------ MAIN ------------------------------------------------------- #
########################################################################################################################
def get_parser():
    """Build and return the command-line parser for the gray/white matter
    segmentation script (multi-atlas based method).

    Returns:
        Parser: configured parser; call parser.parse(sys.argv[1:]) to get
        the arguments dictionary keyed by option name (e.g. "-i").
    """
    # Initialize the parser
    parser = Parser(__file__)
    parser.usage.set_description('Segmentation of the white/gray matter on a T2star or MT image\n'
                                 'Multi-Atlas based method: the model containing a template of the white/gray matter segmentation along the cervical spinal cord, and a PCA space to describe the variability of intensity in that template is provided in the toolbox. ')
    parser.add_option(name="-i",
                      type_value="file",
                      description="Target image to segment",
                      mandatory=True,
                      example='t2star.nii.gz')
    parser.add_option(name="-s",
                      type_value="file",
                      description="Spinal cord segmentation of the target",
                      mandatory=True,
                      example='sc_seg.nii.gz')
    parser.usage.addSection('STRONGLY RECOMMENDED ARGUMENTS\n'
                            'Choose one of them')
    parser.add_option(name="-vert",
                      type_value="file",
                      # Fixed: description was copy-pasted from the deprecated
                      # -l option and referred to "-l"; also restored the
                      # missing line break between the two sentences.
                      description="Image containing level labels for the target\n"
                                  "If -vert is used, no need to provide t2 data",
                      mandatory=False,
                      example='MNI-Poly-AMU_level_IRP.nii.gz')
    parser.add_option(name="-l",
                      type_value=None,
                      description="Image containing level labels for the target\n"
                                  "If -l is used, no need to provide t2 data",
                      mandatory=False,
                      deprecated_by='-vert')
    parser.add_option(name="-t2",
                      type_value=[[','], 'file'],
                      description="T2 data associated to the input image : used to register the template on the T2star and get the vertebral levels\n"
                                  "In this order, without whitespace : t2_image,t2_sc_segmentation,t2_landmarks\n(see: http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/)",
                      mandatory=False,
                      default_value=None,
                      example='t2.nii.gz,t2_seg.nii.gz,landmarks.nii.gz')
    parser.usage.addSection('SEGMENTATION OPTIONS')
    parser.add_option(name="-use-levels",
                      type_value='multiple_choice',
                      description="Use the level information for the model or not",
                      mandatory=False,
                      default_value=1,
                      example=['0', '1'])
    parser.add_option(name="-weight",
                      type_value='float',
                      description="weight parameter on the level differences to compute the similarities (beta)",
                      mandatory=False,
                      default_value=2.5,
                      example=2.0)
    parser.add_option(name="-denoising",
                      type_value='multiple_choice',
                      # Fixed typos ("Adaptative", "coputed") and the truncated
                      # sentence / unclosed parenthesis in the original text.
                      description="1: Adaptive denoising from F. Coupe algorithm, 0: no denoising.\nWARNING: It affects the model you should use (if denoising is applied to the target, the model should have been computed with denoising too)",
                      mandatory=False,
                      default_value=1,
                      example=['0', '1'])
    parser.add_option(name="-normalize",
                      type_value='multiple_choice',
                      description="Normalization of the target image's intensity using median intensity values of the WM and the GM, recommended with MT images or other types of contrast than T2*",
                      mandatory=False,
                      default_value=1,
                      example=['0', '1'])
    parser.add_option(name="-medians",
                      type_value=[[','], 'float'],
                      description="Median intensity values in the target white matter and gray matter (separated by a comma without white space)\n"
                                  "If not specified, the mean intensity values of the target WM and GM are estimated automatically using the dictionary average segmentation by level.\n"
                                  "Only if the -normalize flag is used",
                      mandatory=False,
                      default_value=None,
                      example=["450,540"])
    parser.add_option(name="-model",
                      type_value="folder",
                      description="Path to the model data",
                      mandatory=False,
                      example='/home/jdoe/gm_seg_model_data/')
    # Fixed typo: "OTIONS" -> "OPTIONS".
    parser.usage.addSection('OUTPUT OPTIONS')
    parser.add_option(name="-res-type",
                      type_value='multiple_choice',
                      description="Type of result segmentation : binary or probabilistic",
                      mandatory=False,
                      default_value='prob',
                      example=['binary', 'prob'])
    parser.add_option(name="-ratio",
                      type_value='multiple_choice',
                      description="Compute GM/WM ratio by slice or by vertebral level (average across levels)",
                      mandatory=False,
                      default_value='0',
                      example=['0', 'slice', 'level'])
    parser.add_option(name="-ratio-level",
                      type_value='str',
                      description="Compute GM/WM ratio across several vertebral levels.",
                      mandatory=False,
                      default_value='0',
                      example='C2:C4')
    parser.add_option(name="-ofolder",
                      type_value="folder_creation",
                      description="Output folder",
                      mandatory=False,
                      default_value='./',
                      example='gm_segmentation_results/')
    parser.add_option(name="-ref",
                      type_value="file",
                      # Fixed typo: "Hausdoorff's" -> "Hausdorff's".
                      description="Reference segmentation of the gray matter for segmentation validation (outputs Dice coefficient and Hausdorff's distance)",
                      mandatory=False,
                      example='manual_gm_seg.nii.gz')
    parser.usage.addSection('MISC')
    parser.add_option(name='-qc',
                      type_value='multiple_choice',
                      description='Output images for quality control.',
                      mandatory=False,
                      example=['0', '1'],
                      default_value='1')
    parser.add_option(name="-r",
                      type_value="multiple_choice",
                      description='Remove temporary files.',
                      mandatory=False,
                      default_value='1',
                      example=['0', '1'])
    parser.add_option(name="-v",
                      type_value='multiple_choice',
                      description="verbose: 0 = nothing, 1 = classic, 2 = extended",
                      mandatory=False,
                      example=['0', '1', '2'],
                      default_value='1')
    return parser
if __name__ == "__main__":
param = SegmentationParam()
input_target_fname = None
input_sc_seg_fname = None
input_t2_data = None
input_level_fname = None
input_ref_gm_seg = None
compute_ratio = False
if param.debug:
print '\n*** WARNING: DEBUG MODE ON ***\n'
fname_input = param.path_model + "/errsm_34.nii.gz"
fname_input = param.path_model + "/errsm_34_seg_in.nii.gz"
else:
param_default = SegmentationParam()
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
input_target_fname = arguments["-i"]
input_sc_seg_fname = arguments["-s"]
if "-model" in arguments:
param.path_model = arguments["-model"]
param.todo_model = 'load'
param.output_path = sct.slash_at_the_end(arguments["-ofolder"], slash=1)
if "-t2" in arguments:
input_t2_data = arguments["-t2"]
if "-vert" in arguments:
input_level_fname = arguments["-vert"]
if "-use-levels" in arguments:
param.use_levels = bool(int(arguments["-use-levels"]))
if "-weight" in arguments:
param.weight_gamma = arguments["-weight"]
if "-denoising" in arguments:
param.target_denoising = bool(int(arguments["-denoising"]))
if "-normalize" in arguments:
param.target_normalization = bool(int(arguments["-normalize"]))
if "-means" in arguments:
param.target_means = arguments["-means"]
if "-ratio" in arguments:
if arguments["-ratio"] == '0':
compute_ratio = False
else:
compute_ratio = arguments["-ratio"]
if "-ratio-level" in arguments:
if arguments["-ratio-level"] == '0':
compute_ratio = False
else:
if ':' in arguments["-ratio-level"]:
compute_ratio = arguments["-ratio-level"]
else:
sct.printv('WARNING: -ratio-level function should be used with a range of vertebral levels (for ex: "C2:C5"). Ignoring option.', 1, 'warning')
if "-res-type" in arguments:
param.res_type = arguments["-res-type"]
if "-ref" in arguments:
input_ref_gm_seg = arguments["-ref"]
param.verbose = int(arguments["-v"])
param.qc = int(arguments["-qc"])
param.remove_tmp = int(arguments["-r"])
if input_level_fname is None and input_t2_data is None:
param.use_levels = False
param.weight_gamma = 0
gmsegfull = FullGmSegmentation(input_target_fname, input_sc_seg_fname, input_t2_data, input_level_fname, ref_gm_seg=input_ref_gm_seg, compute_ratio=compute_ratio, param=param)
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio Access Control Config. """
__revision__ = \
"$Id$"
# pylint: disable-msg=C0301
from invenio.config import CFG_SITE_NAME, CFG_SITE_URL, CFG_SITE_LANG, \
CFG_SITE_SECURE_URL, CFG_SITE_SUPPORT_EMAIL, CFG_CERN_SITE
import cPickle
from zlib import compress
from invenio.messages import gettext_set_language
class InvenioWebAccessFireroleError(Exception):
    """Exception raised to signal that a problem is FireRole-specific."""
# VALUES TO BE EXPORTED
# CURRENTLY USED BY THE FILES access_control_engine.py access_control_admin.py webaccessadmin_lib.py

# name of the role giving superadmin rights
SUPERADMINROLE = 'superadmin'
# name of the webaccess webadmin role
WEBACCESSADMINROLE = 'webaccessadmin'
# name of the action allowing roles to access the web administrator interface
WEBACCESSACTION = 'cfgwebaccess'
# name of the action allowing roles to view restricted collections
# (fixed: comment was copy-pasted from WEBACCESSACTION above)
VIEWRESTRCOLL = 'viewrestrcoll'
# name of the action allowing roles to delegate the rights to other roles
# ex: libraryadmin to delegate libraryworker
DELEGATEADDUSERROLE = 'accdelegaterole'
# max number of users to display in the drop down selects
MAXSELECTUSERS = 25
# max number of users to display in a page (mainly for user area)
MAXPAGEUSERS = 25
# Serialized compiled default role definition (pickled, zlib-compressed).
# Note: these were bare string statements before, not docstrings — module
# constants cannot carry docstrings, so they are plain comments now.
CFG_ACC_EMPTY_ROLE_DEFINITION_SER=compress(cPickle.dumps((False, False, ()), -1))
# Source of the default role definition
CFG_ACC_EMPTY_ROLE_DEFINITION_SRC='deny any'
# List of tags containing (multiple) emails of users who should be authorized
# to access the corresponding record regardless of collection restrictions.
if CFG_CERN_SITE:
    CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS=['859__f', '270__m']
else:
    CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS=['8560_f']

# Use external source for access control?
# At least one system must be added.
# Advised not to change the name, since it is used to identify the account.
# Format is: {System name: (System class, Default True/False)}; at least one
# must be the default.
CFG_EXTERNAL_AUTHENTICATION = {"Local": (None, True)}

# Variables to set to the SSO Authentication name if using SSO
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None

if CFG_CERN_SITE:
    if False: #FIXME set this to True when we deploy SSO at CERN
        import external_authentication_sso as ea_sso
        CFG_EXTERNAL_AUTH_USING_SSO = "CERN"
        # Link to reach in order to log out from SSO
        CFG_EXTERNAL_AUTH_LOGOUT_SSO = 'https://login.cern.ch/adfs/ls/?wa=wsignout1.0'
        CFG_EXTERNAL_AUTHENTICATION = {CFG_EXTERNAL_AUTH_USING_SSO : (ea_sso.ExternalAuthSSO(), True)}
    else:
        import external_authentication_cern as ea_cern
        CFG_EXTERNAL_AUTHENTICATION = {"Local": (None, False), \
                                       "CERN": (ea_cern.ExternalAuthCern(), True)}
# default data for the add_default_settings function
# Note: by default the definition is set to deny any. This won't be a problem
# because userids directly connected with roles will still be allowed.

# roles:
#   (name, description, firerole definition)
DEF_ROLES = ((SUPERADMINROLE, 'superuser with all rights', 'deny any'),
             (WEBACCESSADMINROLE, 'WebAccess administrator', 'deny any'),
             ('anyuser', 'Any user', 'allow any'),
             ('basketusers', 'Users who can use baskets', 'allow any'),
             ('loanusers', 'Users who can use loans', 'allow any'),
             ('groupusers', 'Users who can use groups', 'allow any'),
             ('alertusers', 'Users who can use alerts', 'allow any'),
             ('messageusers', 'Users who can use messages', 'allow any'),
             ('holdingsusers', 'Users who can view holdings', 'allow any'),
             ('statisticsusers', 'Users who can view statistics', 'allow any'))

# Demo site roles
DEF_DEMO_ROLES = (('photocurator', 'Photo collection curator', 'deny any'),
                  ('thesesviewer', 'Theses viewer', 'allow group "Theses viewers"\nallow apache_group "theses"'),
                  ('thesescurator', 'Theses collection curator', 'deny any'),
                  ('bookcurator', 'Book collection curator', 'deny any'),
                  ('restrictedpicturesviewer', 'Restricted pictures viewer', 'deny any'),
                  ('curator', 'Curator', 'deny any'),
                  ('basketusers', 'User who can use baskets', 'deny email "hyde@cds.cern.ch"\nallow any'))

# Demo site user/role assignments: (email, role name)
DEF_DEMO_USER_ROLES = (('jekyll@cds.cern.ch', 'thesesviewer'),
                       ('dorian.gray@cds.cern.ch', 'bookcurator'),
                       ('balthasar.montague@cds.cern.ch', 'curator'),
                       ('romeo.montague@cds.cern.ch', 'restrictedpicturesviewer'),
                       ('romeo.montague@cds.cern.ch', 'thesescurator'),
                       ('juliet.capulet@cds.cern.ch', 'restrictedpicturesviewer'),
                       ('juliet.capulet@cds.cern.ch', 'photocurator'))

# users
# list of e-mail addresses
DEF_USERS = []
# actions:
#   (name, description, allowedkeywords, optional)
DEF_ACTIONS = (
               ('cfgwebsearch', 'configure WebSearch', '', 'no'),
               ('cfgbibformat', 'configure BibFormat', '', 'no'),
               ('cfgwebsubmit', 'configure WebSubmit', '', 'no'),
               ('cfgbibrank', 'configure BibRank', '', 'no'),
               ('cfgwebcomment', 'configure WebComment', '', 'no'),
               ('cfgbibharvest', 'configure BibHarvest', '', 'no'),
               ('cfgoairepository', 'configure OAI Repository', '', 'no'),
               ('cfgbibindex', 'configure BibIndex', '', 'no'),
               ('runbibindex', 'run BibIndex', '', 'no'),
               ('runbibupload', 'run BibUpload', '', 'no'),
               ('runwebcoll', 'run webcoll', 'collection', 'yes'),
               ('runbibformat', 'run BibFormat', 'format', 'yes'),
               ('runbibclassify', 'run BibClassify', 'taxonomy', 'yes'),
               ('runbibtaskex', 'run BibTaskEx example', '', 'no'),
               ('runbibrank', 'run BibRank', '', 'no'),
               ('runoaiharvest', 'run oaiharvest task', '', 'no'),
               ('runoaiarchive', 'run oaiarchive task', '', 'no'),
               ('runbibedit', 'run BibEdit', 'collection', 'yes'),
               ('runwebstatadmin', 'run WebStadAdmin', '', 'no'),
               ('runinveniogc', 'run InvenioGC', '', 'no'),
               ('referee', 'referee document type doctype/category categ', 'doctype,categ', 'yes'),
               ('submit', 'use webSubmit', 'doctype,act', 'yes'),
               ('viewrestrdoc', 'view restricted document', 'status', 'no'),
               (WEBACCESSACTION, 'configure WebAccess', '', 'no'),
               (DELEGATEADDUSERROLE, 'delegate subroles inside WebAccess', 'role', 'no'),
               (VIEWRESTRCOLL, 'view restricted collection', 'collection', 'no'),
               ('cfgwebjournal', 'configure WebJournal', 'name', 'yes'),
               ('viewcomment', 'view comments', 'collection', 'no'),
               ('sendcomment', 'send comments', 'collection', 'no'),
               ('attachcommentfile', 'attach files to comments', 'collection', 'no'),
               ('cfgbibexport', 'configure BibExport', '', 'no'),
               ('runbibexport', 'run BibExport', '', 'no'),
               ('fulltext', 'administrate Fulltext', '', 'no'),
               ('usebaskets', 'use baskets', '', 'no'),
               ('useloans', 'use loans', '', 'no'),
               ('usegroups', 'use groups', '', 'no'),
               ('usealerts', 'use alerts', '', 'no'),
               ('usemessages', 'use messages', '', 'no'),
               ('viewholdings', 'view holdings', 'collection', 'yes'),
               ('viewstatistics', 'view statistics', 'collection', 'yes')
              )
# Default authorizations:
#   (role, action, arglistid, optional, arguments)
DEF_AUTHS = (('basketusers', 'usebaskets', -1, 0, {}),
             ('loanusers', 'useloans', -1, 0, {}),
             ('groupusers', 'usegroups', -1, 0, {}),
             ('alertusers', 'usealerts', -1, 0, {}),
             ('messageusers', 'usemessages', -1, 0, {}),
             ('holdingsusers', 'viewholdings', -1, 1, {}),
             ('statisticsusers', 'viewstatistics', -1, 1, {}))

# Demo site authorizations:
#   (role, action, arglistid, optional, arguments)
DEF_DEMO_AUTHS = (
                  ('photocurator', 'runwebcoll', -1, 0, {'collection': 'Pictures'}),
                  ('restrictedpicturesviewer', 'viewrestrdoc', -1, 0, {'status': 'restricted_picture'}),
                  ('thesesviewer', VIEWRESTRCOLL, -1, 0, {'collection': 'Theses'}),
                  ('bookcurator', 'referee', -1, 0, {'doctype': 'DEMOBOO', 'categ': '*'}),
                  ('curator', 'runbibedit', -1, 1, {}),
                  ('thesescurator', 'runbibedit', -1, 0, {'collection': 'Theses'}),
                  ('thesescurator', VIEWRESTRCOLL, -1, 0, {'collection': 'Theses'}),
                  ('photocurator', 'runbibedit', -1, 0, {'collection': 'Pictures'}),
                  ('bookcurator', 'runbibedit', -1, 0, {'collection': 'Books'})
                 )
_ = gettext_set_language(CFG_SITE_LANG)
# Activities (i.e. actions) for which it exist an administrative web interface.
CFG_ACC_ACTIVITIES_URLS = {
'runbibedit' : (_("Run BibEdit"), "%s/record/edit/?ln=%%s" % CFG_SITE_URL),
'cfgbibformat' : (_("Configure BibFormat"), "%s/admin/bibformat/bibformatadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibharvest' : (_("Configure BibHarvest"), "%s/admin/bibharvest/bibharvestadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoairepository' : (_("Configure OAI Repository"), "%s/admin/bibharvest/oaiarchiveadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibindex' : (_("Configure BibIndex"), "%s/admin/bibindex/bibindexadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibrank' : (_("Configure BibRank"), "%s/admin/bibrank/bibrankadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebaccess' : (_("Configure WebAccess"), "%s/admin/webaccess/webaccessadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
}
CFG_WEBACCESS_MSGS = {
0: 'Try to <a href="%s/youraccount/login?referer=%%s">login</a> with another account.' % (CFG_SITE_SECURE_URL),
1: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
2: '<br />If you have any questions, please write to <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
3: 'Guest users are not allowed, please <a href="%s/youraccount/login">login</a>.' % CFG_SITE_SECURE_URL,
4: 'The site is temporarily closed for maintenance. Please come back soon.',
5: 'Authorization failure',
6: '%s temporarily closed' % CFG_SITE_NAME,
7: 'This functionality is temporarily closed due to server maintenance. Please use only the search engine in the meantime.',
8: 'Functionality temporarily closed'
}
CFG_WEBACCESS_WARNING_MSGS = {
0: 'Authorization granted',
1: 'Error(1): You are not authorized to perform this action.',
2: 'Error(2): You are not authorized to perform any action.',
3: 'Error(3): The action %s does not exist.',
4: 'Error(4): Unexpected error occurred.',
5: 'Error(5): Missing mandatory keyword argument(s) for this action.',
6: 'Error(6): Guest accounts are not authorized to perform this action.',
7: 'Error(7): Not enough arguments, user ID and action name required.',
8: 'Error(8): Incorrect keyword argument(s) for this action.',
9: """Error(9): Account '%s' is not yet activated.""",
10: """Error(10): You were not authorized by the authentication method '%s'.""",
11: """Error(11): The selected login method '%s' is not the default method for this account, please try another one.""",
12: """Error(12): Selected login method '%s' does not exist.""",
13: """Error(13): Could not register '%s' account.""",
14: """Error(14): Could not login using '%s', because this user is unknown.""",
15: """Error(15): Could not login using your '%s' account, because you have introduced a wrong password.""",
16: """Error(16): External authentication troubles using '%s' (maybe temporary network problems).""",
17: """Error(17): You have not yet confirmed the email address for the '%s' authentication method.""",
18: """Error(18): The administrator has not yet activated your account for the '%s' authentication method.""",
19: """Error(19): The site is having troubles in sending you an email for confirming your email address. The error has been logged and will be taken care of as soon as possible."""
}
WebSubmit: more support for FCKeditor attachments
* Parses the input fields for links to files uploaded via FCKeditor,
attach these files to the record via FFT, and replaces the links
with the newly created local URLs (/recid/files/...).
* New 'attachsubmissionfile' action for allowing upload of files
with FCKeditor during submission.
 * Moved the FCKConfig.EditorAreaCSS and FCKConfig.EnterMode variables to
   the config file so that they can be customized if needed.
* Use FCKeditor version 2.6.4.
* Uploaded FCKeditor files go to /var/tmp/ instead of /var/data/, so
that they can be collected via FFT.
* Get temporary files uploaded by FCKeditor from
CFG_SITE_URL/submit/getattachedfile/ instead of
CFG_SITE_URL/submit/getfile/
* Added Move_FCKeditor_Files_to_Storage.py function.
* Fixed docstrings.
* Try to create icon only if user is authorized to upload files.
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio Access Control Config. """
__revision__ = \
"$Id$"
# pylint: disable-msg=C0301
from invenio.config import CFG_SITE_NAME, CFG_SITE_URL, CFG_SITE_LANG, \
CFG_SITE_SECURE_URL, CFG_SITE_SUPPORT_EMAIL, CFG_CERN_SITE
import cPickle
from zlib import compress
from invenio.messages import gettext_set_language
class InvenioWebAccessFireroleError(Exception):
    """Exception raised to signal that a problem is FireRole-specific."""
# VALUES TO BE EXPORTED
# CURRENTLY USED BY THE FILES access_control_engine.py access_control_admin.py webaccessadmin_lib.py

# name of the role giving superadmin rights
SUPERADMINROLE = 'superadmin'
# name of the webaccess webadmin role
WEBACCESSADMINROLE = 'webaccessadmin'
# name of the action allowing roles to access the web administrator interface
WEBACCESSACTION = 'cfgwebaccess'
# name of the action allowing roles to view restricted collections
# (fixed: comment was copy-pasted from WEBACCESSACTION above)
VIEWRESTRCOLL = 'viewrestrcoll'
# name of the action allowing roles to delegate the rights to other roles
# ex: libraryadmin to delegate libraryworker
DELEGATEADDUSERROLE = 'accdelegaterole'
# max number of users to display in the drop down selects
MAXSELECTUSERS = 25
# max number of users to display in a page (mainly for user area)
MAXPAGEUSERS = 25
# Serialized compiled default role definition (pickled, zlib-compressed).
# Note: these were bare string statements before, not docstrings — module
# constants cannot carry docstrings, so they are plain comments now.
CFG_ACC_EMPTY_ROLE_DEFINITION_SER=compress(cPickle.dumps((False, False, ()), -1))
# Source of the default role definition
CFG_ACC_EMPTY_ROLE_DEFINITION_SRC='deny any'
# List of tags containing (multiple) emails of users who should be authorized
# to access the corresponding record regardless of collection restrictions.
if CFG_CERN_SITE:
    CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS=['859__f', '270__m']
else:
    CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS=['8560_f']

# Use external source for access control?
# At least one system must be added.
# Advised not to change the name, since it is used to identify the account.
# Format is: {System name: (System class, Default True/False)}; at least one
# must be the default.
CFG_EXTERNAL_AUTHENTICATION = {"Local": (None, True)}

# Variables to set to the SSO Authentication name if using SSO
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None

if CFG_CERN_SITE:
    if False: #FIXME set this to True when we deploy SSO at CERN
        import external_authentication_sso as ea_sso
        CFG_EXTERNAL_AUTH_USING_SSO = "CERN"
        # Link to reach in order to log out from SSO
        CFG_EXTERNAL_AUTH_LOGOUT_SSO = 'https://login.cern.ch/adfs/ls/?wa=wsignout1.0'
        CFG_EXTERNAL_AUTHENTICATION = {CFG_EXTERNAL_AUTH_USING_SSO : (ea_sso.ExternalAuthSSO(), True)}
    else:
        import external_authentication_cern as ea_cern
        CFG_EXTERNAL_AUTHENTICATION = {"Local": (None, False), \
                                       "CERN": (ea_cern.ExternalAuthCern(), True)}
# default data for the add_default_settings function
# Note: by default the definition is set to deny any. This won't be a problem
# because userids directly connected with roles will still be allowed.

# roles:
#   (name, description, firerole definition)
DEF_ROLES = ((SUPERADMINROLE, 'superuser with all rights', 'deny any'),
             (WEBACCESSADMINROLE, 'WebAccess administrator', 'deny any'),
             ('anyuser', 'Any user', 'allow any'),
             ('basketusers', 'Users who can use baskets', 'allow any'),
             ('loanusers', 'Users who can use loans', 'allow any'),
             ('groupusers', 'Users who can use groups', 'allow any'),
             ('alertusers', 'Users who can use alerts', 'allow any'),
             ('messageusers', 'Users who can use messages', 'allow any'),
             ('holdingsusers', 'Users who can view holdings', 'allow any'),
             ('statisticsusers', 'Users who can view statistics', 'allow any'))

# Demo site roles
DEF_DEMO_ROLES = (('photocurator', 'Photo collection curator', 'deny any'),
                  ('thesesviewer', 'Theses viewer', 'allow group "Theses viewers"\nallow apache_group "theses"'),
                  ('thesescurator', 'Theses collection curator', 'deny any'),
                  ('bookcurator', 'Book collection curator', 'deny any'),
                  ('restrictedpicturesviewer', 'Restricted pictures viewer', 'deny any'),
                  ('curator', 'Curator', 'deny any'),
                  ('basketusers', 'User who can use baskets', 'deny email "hyde@cds.cern.ch"\nallow any'))

# Demo site user/role assignments: (email, role name)
DEF_DEMO_USER_ROLES = (('jekyll@cds.cern.ch', 'thesesviewer'),
                       ('dorian.gray@cds.cern.ch', 'bookcurator'),
                       ('balthasar.montague@cds.cern.ch', 'curator'),
                       ('romeo.montague@cds.cern.ch', 'restrictedpicturesviewer'),
                       ('romeo.montague@cds.cern.ch', 'thesescurator'),
                       ('juliet.capulet@cds.cern.ch', 'restrictedpicturesviewer'),
                       ('juliet.capulet@cds.cern.ch', 'photocurator'))

# users
# list of e-mail addresses
DEF_USERS = []
# actions:
#   (name, description, allowedkeywords, optional)
DEF_ACTIONS = (
               ('cfgwebsearch', 'configure WebSearch', '', 'no'),
               ('cfgbibformat', 'configure BibFormat', '', 'no'),
               ('cfgwebsubmit', 'configure WebSubmit', '', 'no'),
               ('cfgbibrank', 'configure BibRank', '', 'no'),
               ('cfgwebcomment', 'configure WebComment', '', 'no'),
               ('cfgbibharvest', 'configure BibHarvest', '', 'no'),
               ('cfgoairepository', 'configure OAI Repository', '', 'no'),
               ('cfgbibindex', 'configure BibIndex', '', 'no'),
               ('runbibindex', 'run BibIndex', '', 'no'),
               ('runbibupload', 'run BibUpload', '', 'no'),
               ('runwebcoll', 'run webcoll', 'collection', 'yes'),
               ('runbibformat', 'run BibFormat', 'format', 'yes'),
               ('runbibclassify', 'run BibClassify', 'taxonomy', 'yes'),
               ('runbibtaskex', 'run BibTaskEx example', '', 'no'),
               ('runbibrank', 'run BibRank', '', 'no'),
               ('runoaiharvest', 'run oaiharvest task', '', 'no'),
               ('runoaiarchive', 'run oaiarchive task', '', 'no'),
               ('runbibedit', 'run BibEdit', 'collection', 'yes'),
               ('runwebstatadmin', 'run WebStadAdmin', '', 'no'),
               ('runinveniogc', 'run InvenioGC', '', 'no'),
               ('referee', 'referee document type doctype/category categ', 'doctype,categ', 'yes'),
               ('submit', 'use webSubmit', 'doctype,act', 'yes'),
               ('viewrestrdoc', 'view restricted document', 'status', 'no'),
               (WEBACCESSACTION, 'configure WebAccess', '', 'no'),
               (DELEGATEADDUSERROLE, 'delegate subroles inside WebAccess', 'role', 'no'),
               (VIEWRESTRCOLL, 'view restricted collection', 'collection', 'no'),
               ('cfgwebjournal', 'configure WebJournal', 'name', 'yes'),
               ('viewcomment', 'view comments', 'collection', 'no'),
               ('sendcomment', 'send comments', 'collection', 'no'),
               ('attachcommentfile', 'attach files to comments', 'collection', 'no'),
               ('attachsubmissionfile', 'upload files to drop box during submission', '', 'no'),
               ('cfgbibexport', 'configure BibExport', '', 'no'),
               ('runbibexport', 'run BibExport', '', 'no'),
               ('fulltext', 'administrate Fulltext', '', 'no'),
               ('usebaskets', 'use baskets', '', 'no'),
               ('useloans', 'use loans', '', 'no'),
               ('usegroups', 'use groups', '', 'no'),
               ('usealerts', 'use alerts', '', 'no'),
               ('usemessages', 'use messages', '', 'no'),
               ('viewholdings', 'view holdings', 'collection', 'yes'),
               ('viewstatistics', 'view statistics', 'collection', 'yes')
              )
# Default authorizations:
#   (role, action, arglistid, optional, arguments)
DEF_AUTHS = (('basketusers', 'usebaskets', -1, 0, {}),
             ('loanusers', 'useloans', -1, 0, {}),
             ('groupusers', 'usegroups', -1, 0, {}),
             ('alertusers', 'usealerts', -1, 0, {}),
             ('messageusers', 'usemessages', -1, 0, {}),
             ('holdingsusers', 'viewholdings', -1, 1, {}),
             ('statisticsusers', 'viewstatistics', -1, 1, {}))

# Demo site authorizations:
#   (role, action, arglistid, optional, arguments)
DEF_DEMO_AUTHS = (
                  ('photocurator', 'runwebcoll', -1, 0, {'collection': 'Pictures'}),
                  ('restrictedpicturesviewer', 'viewrestrdoc', -1, 0, {'status': 'restricted_picture'}),
                  ('thesesviewer', VIEWRESTRCOLL, -1, 0, {'collection': 'Theses'}),
                  ('bookcurator', 'referee', -1, 0, {'doctype': 'DEMOBOO', 'categ': '*'}),
                  ('curator', 'runbibedit', -1, 1, {}),
                  ('thesescurator', 'runbibedit', -1, 0, {'collection': 'Theses'}),
                  ('thesescurator', VIEWRESTRCOLL, -1, 0, {'collection': 'Theses'}),
                  ('photocurator', 'runbibedit', -1, 0, {'collection': 'Pictures'}),
                  ('bookcurator', 'runbibedit', -1, 0, {'collection': 'Books'})
                 )
_ = gettext_set_language(CFG_SITE_LANG)
# Activities (i.e. actions) for which it exist an administrative web interface.
CFG_ACC_ACTIVITIES_URLS = {
'runbibedit' : (_("Run BibEdit"), "%s/record/edit/?ln=%%s" % CFG_SITE_URL),
'cfgbibformat' : (_("Configure BibFormat"), "%s/admin/bibformat/bibformatadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibharvest' : (_("Configure BibHarvest"), "%s/admin/bibharvest/bibharvestadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoairepository' : (_("Configure OAI Repository"), "%s/admin/bibharvest/oaiarchiveadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibindex' : (_("Configure BibIndex"), "%s/admin/bibindex/bibindexadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibrank' : (_("Configure BibRank"), "%s/admin/bibrank/bibrankadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebaccess' : (_("Configure WebAccess"), "%s/admin/webaccess/webaccessadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
}
CFG_WEBACCESS_MSGS = {
0: 'Try to <a href="%s/youraccount/login?referer=%%s">login</a> with another account.' % (CFG_SITE_SECURE_URL),
1: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
2: '<br />If you have any questions, please write to <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
3: 'Guest users are not allowed, please <a href="%s/youraccount/login">login</a>.' % CFG_SITE_SECURE_URL,
4: 'The site is temporarily closed for maintenance. Please come back soon.',
5: 'Authorization failure',
6: '%s temporarily closed' % CFG_SITE_NAME,
7: 'This functionality is temporarily closed due to server maintenance. Please use only the search engine in the meantime.',
8: 'Functionality temporarily closed'
}
CFG_WEBACCESS_WARNING_MSGS = {
0: 'Authorization granted',
1: 'Error(1): You are not authorized to perform this action.',
2: 'Error(2): You are not authorized to perform any action.',
3: 'Error(3): The action %s does not exist.',
4: 'Error(4): Unexpected error occurred.',
5: 'Error(5): Missing mandatory keyword argument(s) for this action.',
6: 'Error(6): Guest accounts are not authorized to perform this action.',
7: 'Error(7): Not enough arguments, user ID and action name required.',
8: 'Error(8): Incorrect keyword argument(s) for this action.',
9: """Error(9): Account '%s' is not yet activated.""",
10: """Error(10): You were not authorized by the authentication method '%s'.""",
11: """Error(11): The selected login method '%s' is not the default method for this account, please try another one.""",
12: """Error(12): Selected login method '%s' does not exist.""",
13: """Error(13): Could not register '%s' account.""",
14: """Error(14): Could not login using '%s', because this user is unknown.""",
15: """Error(15): Could not login using your '%s' account, because you have introduced a wrong password.""",
16: """Error(16): External authentication troubles using '%s' (maybe temporary network problems).""",
17: """Error(17): You have not yet confirmed the email address for the '%s' authentication method.""",
18: """Error(18): The administrator has not yet activated your account for the '%s' authentication method.""",
19: """Error(19): The site is having troubles in sending you an email for confirming your email address. The error has been logged and will be taken care of as soon as possible."""
}
|
import cPickle
from collections import OrderedDict
import json
from flask import Flask, request
import sys
from cnn_text_trainer.rw.datasets import clean_str
__author__ = 'devashish.shankar'
# General refactoring, comments, etc.
app = Flask(__name__)
# Secret key enables Flask session/cookie signing.
# TODO(review): confirm sessions are actually used by this server; if so, do
# not keep the secret hard-coded in source.
app.config['SECRET_KEY'] = 'F34TF$($e34D'; #Required for flask server TODO check
@app.route('/healthcheck')
def healthcheck():
    """Liveness probe: always responds with an empty JSON object."""
    return json.dumps(dict())
@app.route('/')
def home():
#The tweet to classify
try:
tweet=request.args['text'].lower()
except Exception as e:
print "Error processing request. Improper format of request.args['text'] might be causing an issue. Returning empty array"
print "request.args['text'] = ",request.args['text']
return json.dumps({})
#The path to file containing the model
model=str(request.args['model'])
#Should the tweet be preprocessed
preprocess=str(request.args['preprocess']).lower()
#Lazily load the model
if model not in models:
print "Model not in memory: ",model
print "Loading model"
models[model]=cPickle.load(open(model,"rb"))
if(load_word_vecs):
print "Adding wordvecs"
models[model].add_global_word_vecs(wordvecs)
print "Done"
if preprocess == "True":
tweet = clean_str(tweet)
[y_pred,prob_pred] = models[model].classify([{'text':tweet}])
labels = models[model].labels
label_to_prob={}
for i in range(len(labels)):
label_to_prob[labels[i]]=prob_pred[0][i]
return json.dumps(label_to_prob)
import logging
# Log only in production mode.  When Flask's debug mode is off, attach a
# StreamHandler (stderr) at INFO level so application logs remain visible.
if not app.debug:
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    app.logger.addHandler(stream_handler)
class LimitedSizeDict(OrderedDict):
    """An :class:`OrderedDict` holding at most ``size_limit`` entries.

    Behaves as a FIFO cache: once the limit is exceeded, the oldest entries
    (in insertion order) are evicted.  ``size_limit`` is read from the
    keyword arguments; ``None`` (the default) means unbounded.
    """

    def __init__(self, *args, **kwds):
        self.size_limit = kwds.pop("size_limit", None)
        OrderedDict.__init__(self, *args, **kwds)
        self._check_size_limit()

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key, value)
        self._check_size_limit()

    def _check_size_limit(self):
        # No limit configured: nothing to evict.
        if self.size_limit is None:
            return
        # Drop oldest entries until we are back under the cap.
        while len(self) > self.size_limit:
            self.popitem(last=False)
# In-memory FIFO cache of loaded models, keyed by model file path; at most 10
# models are kept alive at once and they are loaded lazily on first request.
models = LimitedSizeDict(size_limit=10)
# In-memory dictionary which will load all the word vectors lazily.
wordvecs = {}
# Whether to load word vectors alongside each model; off by default and
# overridden from the command line in the __main__ block below.
load_word_vecs = False
if __name__ == "__main__":
    # Command line: server.py <port> <debug true/false> <load_word_vecs true/false>
    if len(sys.argv) < 4:
        print "Usage: server.py"
        print "\t<port number to deploy the app>"
        print "\t<enable flask debug mode (true/false). >"
        print "\t<load word vectors in memory (true/false). This will give accuracy gains, but will have a lot of memory pressure. If false, words not encountered during training are skipped while predicting >"
        exit(0)
    port = int(sys.argv[1])
    debug = sys.argv[2].lower() == "true"
    load_word_vecs = sys.argv[3].lower() == "true"
    # Run the app: listen on all interfaces, threaded so requests don't block.
    app.run(debug=debug, host='0.0.0.0', port=port, threaded=True)
Change `load_word_vecs` to default to true.
import cPickle
from collections import OrderedDict
import json
from flask import Flask, request
import sys
from cnn_text_trainer.rw.datasets import clean_str
__author__ = 'devashish.shankar'
# General refactoring, comments, etc.
app = Flask(__name__)
# Secret key enables Flask session/cookie signing.
# TODO(review): confirm sessions are actually used by this server; if so, do
# not keep the secret hard-coded in source.
app.config['SECRET_KEY'] = 'F34TF$($e34D'; #Required for flask server TODO check
@app.route('/healthcheck')
def healthcheck():
    """Liveness probe: always responds with an empty JSON object."""
    return json.dumps(dict())
@app.route('/')
def home():
#The tweet to classify
try:
tweet=request.args['text'].lower()
except Exception as e:
print "Error processing request. Improper format of request.args['text'] might be causing an issue. Returning empty array"
print "request.args['text'] = ",request.args['text']
return json.dumps({})
#The path to file containing the model
model=str(request.args['model'])
#Should the tweet be preprocessed
preprocess=str(request.args['preprocess']).lower()
#Lazily load the model
if model not in models:
print "Model not in memory: ",model
print "Loading model"
models[model]=cPickle.load(open(model,"rb"))
if(load_word_vecs):
print "Adding wordvecs"
models[model].add_global_word_vecs(wordvecs)
print "Done"
if preprocess == "True":
tweet = clean_str(tweet)
[y_pred,prob_pred] = models[model].classify([{'text':tweet}])
labels = models[model].labels
label_to_prob={}
for i in range(len(labels)):
label_to_prob[labels[i]]=prob_pred[0][i]
return json.dumps(label_to_prob)
import logging
# Log only in production mode.  When Flask's debug mode is off, attach a
# StreamHandler (stderr) at INFO level so application logs remain visible.
if not app.debug:
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    app.logger.addHandler(stream_handler)
class LimitedSizeDict(OrderedDict):
    """A bounded :class:`OrderedDict` acting as a FIFO cache.

    Keeps at most ``size_limit`` entries; when the cap is exceeded the
    entries inserted earliest are discarded first.  A ``size_limit`` of
    ``None`` (the default) disables eviction entirely.
    """

    def __init__(self, *args, **kwds):
        self.size_limit = kwds.pop("size_limit", None)
        OrderedDict.__init__(self, *args, **kwds)
        self._check_size_limit()

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key, value)
        self._check_size_limit()

    def _check_size_limit(self):
        # Unbounded cache: nothing to do.
        if self.size_limit is None:
            return
        # Evict in insertion order until the size constraint holds again.
        while len(self) > self.size_limit:
            self.popitem(last=False)
# In-memory FIFO cache of loaded models, keyed by model file path; at most 10
# models are kept alive at once and they are loaded lazily on first request.
models = LimitedSizeDict(size_limit=10)
# In-memory dictionary which will load all the word vectors lazily.
wordvecs = {}
# Load word vectors alongside each model by default; may be overridden by the
# third command line argument in the __main__ block below.
load_word_vecs = True
if __name__ == "__main__":
    # Command line: server.py <port> <debug true/false> <load_word_vecs true/false>
    if len(sys.argv) < 4:
        print "Usage: server.py"
        print "\t<port number to deploy the app>"
        print "\t<enable flask debug mode (true/false). >"
        print "\t<load word vectors in memory (true/false). This will give accuracy gains, but will have a lot of memory pressure. If false, words not encountered during training are skipped while predicting >"
        exit(0)
    port = int(sys.argv[1])
    debug = sys.argv[2].lower() == "true"
    load_word_vecs = sys.argv[3].lower() == "true"
    # Run the app: listen on all interfaces, threaded so requests don't block.
    app.run(debug=debug, host='0.0.0.0', port=port, threaded=True)
|
import re
import sys
import igraph
from termcolor import colored
import time
from . import tuples, pw, homopolymeric, ProgressIndicator, lib, ffi
class OverlapGraph(object):
    """Wraps an :class:`igraph.Graph` object with additional methods to build
    and process an overlap graph.

    Attributes:
        iG (igraph.Graph): The graph object.

    Args:
        G (Optional[igraph.Graph]): The graph object to initialize with; no
            processing is done and if the object is ``None`` a new directed
            graph is instantiated.
    """
    def __init__(self, G=None):
        self.iG = G if G else igraph.Graph(directed=True)
        assert(isinstance(self.iG, igraph.Graph))
        # Colors used when plotting to highlight vertices/edges of interest:
        self.v_highlight = '#b5ffb5'
        self.e_highlight = '#00b400'

    def _endpoint_names(self, eid):
        """Internal helper: igraph is not too happy when we munge vertex IDs
        since it keeps renumbering them according to its memory allocation
        scheme. Instead convert everything to "name"s which are the original
        sequence IDs.

        See: https://lists.nongnu.org/archive/html/igraph-help/2010-03/msg00078.html
        """
        # Accept either an igraph.Edge object or a raw integer edge id.
        if isinstance(eid, igraph.Edge):
            eid = eid.index
        uid, vid = self.iG.es[eid].tuple
        return self.iG.vs[uid]['name'], self.iG.vs[vid]['name']

    def _vids_to_names(self, vids):
        # Translate a list of igraph vertex ids into their stable 'name's.
        return [self.iG.vs[vid]['name'] for vid in vids]

    def eid_to_str(self, eid, maxlen=50):
        """Prepares an edge for pretty printing. Truncates and pads the end
        point labels (``name`` is used as label) to ensure they both have
        length ``maxlen``.
        """
        u, v = self._endpoint_names(eid)
        u, v = u[:maxlen].ljust(maxlen), v[:maxlen].rjust(maxlen)
        w = self.iG.es[eid]['weight']
        w = ('+--[%.2f]-->' % w).ljust(20)
        return '%s %s %s\n' % (u, w, v)

    def break_cycles(self, method='ip'):
        """Removes a
        `feedback arc set <https://en.wikipedia.org/wiki/Feedback_arc_set>`__
        from the graph. Depending on the ``method`` the result may not be
        optimal.

        Keyword Args:
            method (str): The FAS discovery algorithm; passed to
                :func:`igraph.Graph.feedback_arc_set`. Default uses an
                integer programming formulation which is guaranteed to be
                optimal but is slow on large graphs. The alternative is
                ``eades`` which uses a suboptimal `heuristic
                <http://www.sciencedirect.com/science/article/pii/002001909390079O>`__.
        """
        # Already acyclic: nothing to remove.
        if self.iG.is_dag():
            return
        rm = self.iG.feedback_arc_set(
            weights=self.iG.es['weight'], method=method
        )
        # Every removed edge is logged to stderr before deletion.
        for e in rm:
            sys.stderr.write('removed edge: %s' % self.eid_to_str(e))
        self.iG.delete_edges(rm)

    def longest_path(self, exclude=[], equal_weights=False):
        """Finds the heaviest path (and potentially the longest path in the
        sense of number of edges) of the graph, excluding vertices whose name
        is included in ``exclude``. This, naturally requires that the graph is
        acyclic. Assuming the graph is a DAG, we can find the longest path in
        two steps:

        - Find a topological ordering of the graph in :math:`O(|V|+|E|)` time,
        - Find a heaviest path using the sorting in :math:`O(|V|)` time.

        Keyword Arguments:
            exclude (Optional[List[str]]): A list of vertex names to be
                excluded from the graph when finding the longest path. This is
                only of use to :func:`all_longest_paths`.
            equal_weights (Optional[bool]): If truthy, all edges are considered
                equal in which sense the solution is the literal longest path.

        Returns:
            list[str]: A list of vertex names in order of appearance in the
                longest path.
        """
        # NOTE: the mutable default `exclude=[]` is safe here because this
        # method only reads from it and never mutates it.
        def weight_of_edge(u, v):
            if equal_weights:
                return 1
            else:
                return self.iG.es['weight'][self.iG.get_eid(u, v)]

        sorting = self._vids_to_names(self.iG.topological_sorting())
        sorting = [v for v in sorting if v not in exclude]
        # longest paths ending at each vertex keyed by vertex. Each entry is a
        # tuple of (<weight, from>) where `from` is any of the predecessors
        # giving the maximum weight.
        longest_paths = {}
        for v in sorting:
            if v in exclude:
                continue
            incoming = self._vids_to_names(self.iG.predecessors(v))
            incoming = [x for x in incoming if x not in exclude]
            if not incoming:
                # A source vertex (no usable predecessor) starts its own path.
                longest_paths[v] = (0, None)
            else:
                w = lambda x: longest_paths[x][0] + weight_of_edge(x, v)
                cands = [(w(u), u) for u in incoming]
                # Keep the predecessor giving the maximum accumulated weight.
                longest_paths[v] = sorted(
                    cands, key=lambda x: x[0], reverse=True
                )[0]
        if not longest_paths:
            return []
        # Find the terminal vertex of the longest path:
        end = sorted(
            longest_paths.items(), key=lambda x: x[1][0], reverse=True
        )[0][0]
        path = []
        # Trace back the entire path:
        while end and longest_paths:
            path = [end] + path
            end = longest_paths.pop(end)[1]
        # Don't report trivial paths:
        return path if len(path) > 1 else []

    def all_longest_paths(self, equal_weights=False):
        """Repeatedly finds the longest path in the graph while excluding
        vertices that are already included in a path. See :func:`longest_path`.
        All keyword arguments are passed as-is to :func:`longest_path`.

        Returns:
            List[List[str]]: A list of paths, each a list of vertex names in
                order of appearance in the path.
        """
        # NOTE(review): `equal_weights` is accepted but not forwarded to
        # `longest_path` below — looks like an oversight; confirm intent.
        paths = []
        exclude = []
        while True:
            path = self.longest_path(exclude=exclude)
            if not path:
                break
            paths += [path]
            exclude += path
        return paths

    def layout(self, full=False, equal_weights=False):
        """Finds the heaviest path (or potentially the longest path) of the
        directed graph and creates a new :class:`OverlapGraph` containing only
        this layout path. Optionally, we can demand that ALL longest paths of
        the graph are reported (to ensure all vertices are included in some
        sub-layout), see :func:`all_longest_paths`.

        Keyword Args:
            full (bool): If truthy, an effort is made to add other paths to
                cover all vertices of the graph.
            equal_weights (Optional[bool]): see :func:`longest_path`.

        Returns:
            assembly.OverlapGraph: A linear subgraph (the heaviest path).

        Raises:
            AssertionError: If the graph is not acyclic.
        """
        assert(self.iG.is_dag())
        if full:
            paths = self.all_longest_paths(equal_weights=equal_weights)
        else:
            paths = [self.longest_path(equal_weights=equal_weights)]
        eids = []
        # NOTE: entries of each path are vertex *names*, not igraph ids.
        for path in paths:
            for idx in range(1, len(path)):
                eids += [self.iG.get_eid(path[idx-1], path[idx])]
        return OverlapGraph(self.iG.subgraph_edges(eids))

    def draw(self, fname, **kw):
        """Draws the graph and potentially highlights provided paths.

        Keyword Arguments:
            highlight_paths ([List[List[str]]]): A list of paths to be
                highlighted. All edges of the path and the starting vertex
                are highlighted green.
            edge_color ([List|str]): Passed to :func:`igraph.Graph.plot`.
                Default is all black unless paths to be highlighted are
                specified. If provided, overrides path highlighting.
            vertex_color ([List|str]): Passed to :func:`igraph.Graph.plot`.
                Default is all white unless paths to be highlighted are
                specified in which case starting vertices are green.
            edge_width ([List|float]): Passed to :func:`igraph.Graph.plot`.
                Default is 10 for edges in highlighted path and 1 otherwise.
            edge_arrow_width ([List|float]): Passed to
                :func:`igraph.Graph.plot`. Default is 3 for highlighted edges
                and 1 otherwise.
            edge_curved (float): Passed to :func:`igraph.Graph.plot`. Default
                is 0.1.
        """
        highlight_paths = kw.get('highlight_paths', [])

        def e_in_path(eid):
            # True iff the edge joins two consecutive vertices of any
            # highlighted path.
            u, v = self._endpoint_names(eid)
            return any([
                u in p and v in p and
                p.index(u) == p.index(v) - 1 for p in highlight_paths
            ])

        v_start_path = lambda v: any([p[0] == v for p in highlight_paths])
        # Sugiyama works on non-DAG graphs as well
        n = len(self.iG.vs)
        layout_kw = {'maxiter': n * 20, 'weights': None}
        if 'weight' in self.iG.es.attributes():
            layout_kw['weights'] = 'weight'
        plot_kw = {
            'layout': self.iG.layout_sugiyama(**layout_kw),
            'bbox': (n*150, n*150),
            'vertex_size': 150,
            'vertex_label': [x.replace(' ', '\n') for x in self.iG.vs['name']],
            'vertex_label_size': 18,
            'vertex_color': kw.get('vertex_color', [self.v_highlight if v_start_path(v) else 'white' for v in self.iG.vs['name']]),
            'edge_width': kw.get('edge_width', [10 if e_in_path(e) else 1 for e in self.iG.es]),
            'edge_arrow_width': kw.get('edge_arrow_width', [3 if e_in_path(e) else 1 for e in self.iG.es]),
            'edge_color': kw.get('edge_color', [self.e_highlight if e_in_path(e) else 'black' for e in self.iG.es]),
            'edge_curved': kw.get('edge_curved', 0.1),
            'margin': 200,
        }
        igraph.plot(self.iG, fname, **plot_kw)

    def diff_text(self, OG, f=sys.stdout, summary_only=True, weights_from='theirs'):
        """Prints a diff-style comparison of our :attr:`iG` against another
        given :class:`OverlapGraph` and writes the output to the given file
        handle. Missing edges are printed in red with a leading '-' and added
        edges are printed in green with a leading '+'.

        Args:
            OG (OverlapGraph): The "to" directed graph ("from" is us).

        Keyword Args:
            f (Optional[file]): Open file handle to which output is written;
                default is ``sys.stdout``.
            summary_only (Optional[bool]): Only show a summary of changes and
                not edge-by-edge diff; default is True.
            weights_from (Optional[str]): Which graph's edge weights should be
                used for common edges, either 'ours' or 'theirs'; default is
                'theirs'.
        """
        sE1 = set([self._endpoint_names(e) for e in self.iG.es])
        sE2 = set([OG._endpoint_names(e) for e in OG.iG.es])
        assert(weights_from in ['ours', 'theirs'])

        def _edge_str(endpoints):
            # Common edges honor `weights_from`; one-sided edges come from
            # whichever graph contains them.
            if endpoints in sE1 and endpoints in sE2:
                if weights_from == 'ours':
                    return self.eid_to_str(self.iG.get_eid(*endpoints))
                else:
                    return OG.eid_to_str(OG.iG.get_eid(*endpoints))
            if endpoints in sE1:
                return self.eid_to_str(self.iG.get_eid(*endpoints))
            elif endpoints in sE2:
                return OG.eid_to_str(OG.iG.get_eid(*endpoints))
            else:
                raise RuntimeError("This should not have happened")

        missing, added, both = sE1 - sE2, sE2 - sE1, sE1.intersection(sE2)
        # NOTE(review): percentages divide by len(sE1); this raises
        # ZeroDivisionError when our graph has no edges — confirm callers
        # never pass an edgeless graph.
        missing_pctg = len(missing)*100.0/len(sE1)
        added_pctg = len(added)*100.0/len(sE1)
        f.write(
            'G1 (%d edges) --> G2 (%d edges): %%%.2f lost, %%%.2f added\n' %
            (len(sE1), len(sE2), missing_pctg, added_pctg)
        )
        if summary_only:
            return
        diff = [('-', edge) for edge in missing] + \
            [('+', edge) for edge in added] + [(None, edge) for edge in both]
        # NOTE: sorted(cmp=...) and the builtin cmp() are Python 2 only.
        for edge in sorted(diff, cmp=lambda x, y: cmp(x[1], y[1])):
            color = None
            prefix = ' ' if edge[0] is None else edge[0]
            line = '%s %s' % (prefix, _edge_str(edge[1]))
            if edge[0] == '-':
                color = 'red'
            elif edge[0] == '+':
                color = 'green'
            # Only colorize when writing to an interactive terminal.
            if color and f.isatty():
                f.write(colored(line, color=color))
            else:
                f.write(line)

    def diff_draw(self, OG, fname, figsize=None):
        """Draws the difference between our :attr:`iG` against another
        given :class:`OverlapGraph`. Shared edges are in black, missing edges
        (from ours to ``OG``) are in red and added edges are in green.

        Args:
            OG (OverlapGraph): The "to" directed graph ("from" is us).
            fname (string): Path to which plot is saved, passed as is to
                :func:`draw`.
        """
        # NOTE(review): `e_to_names` and the `figsize` parameter are unused
        # in this method.
        e_to_names = lambda G, e: (G.vs[e[0]]['name'], G.vs[e[1]]['name'])
        sE1 = set([self._endpoint_names(e) for e in self.iG.es])
        sE2 = set([OG._endpoint_names(e) for e in OG.iG.es])
        # Build a union graph containing all vertices and edges of both.
        G = OverlapGraph()
        G.iG.add_vertices(list(
            set(self.iG.vs['name']).union(set(OG.iG.vs['name']))
        ))
        G.iG.add_edges(list(sE1.union(sE2)))
        both, missing, added = sE1.intersection(sE2), sE1 - sE2, sE2 - sE1
        edge_color = []
        for e in G.iG.es:
            e = G._endpoint_names(e)
            if e in both:
                edge_color += ['black']
            elif e in missing:
                edge_color += ['red']
            elif e in added:
                edge_color += ['green']
        # Vertices with no incoming edge (sources) get the highlight color.
        vertex_color = ['white' if v.degree(mode=igraph.IN) else self.v_highlight for v in G.iG.vs]
        G.draw(fname, edge_color=edge_color, vertex_color=vertex_color,
               edge_width=5, edge_arrow_width=3, edge_curved=0.01)

    def save(self, fname):
        """Saves the graph in GML format

        Args:
            fname (str): path to GML file.
        """
        self.iG.write_gml(fname)
class OverlapBuilder(object):
    """Provided a :class:`align.tuples.Index` builds an overlap graph of all
    the sequences. All sequences must have already been indexed in the
    tuples database. For example::

        B = tuples.TuplesDB('path/to/file.db', alphabet=seq.Alphabet("ACGT"))
        I = tuples.Index(B, wordlen=10)
        C = align.AlignParams(
            ... # snip
        )
        G = assembly.OverlapBuilder(I, C).build()
        G.save(path)

    All arguments to the constructor become class attributes with the same
    name.

    Attributes:
        index (tuples.Index): A tuples index that responds to
            :func:`align.tuples.Index.seeds`.
        align_params (pw.AlignParams): The alignment parameters for the
            rolling alignment.
        hp_condenser (homopolymeric.HpCondenser): If specified,
            all alignments will be performed in condensed alphabet.
            Consequently, all other arguments are interpreted in the condensed
            alphabet.
        drop_threshold (float): What constitutes a drop in the
            score from one window to the next, default is 0. This means that
            if the overall score does not strictly increase (or the score of
            the new window is not positive) we drop the seed.
        window (int): The size of the rolling window.
        max_succ_drops (int): Maximum number of "drops" until the
            segment is dropped, default is 3.
    """
    def __init__(self, index, align_params, **kwargs):
        self.hp_condenser = kwargs.get('hp_condenser', None)
        if self.hp_condenser:
            assert(isinstance(self.hp_condenser, homopolymeric.HpCondenser))
        self.index, self.align_params = index, align_params
        self.window = kwargs.get('window', 20)
        self.drop_threshold = kwargs.get('drop_threshold', 0)
        # min_overlap_score falls back to drop_threshold when not given.
        self.min_overlap_score = kwargs.get('min_overlap_score', self.drop_threshold)
        self.max_succ_drops = kwargs.get('max_succ_drops', 3)

    def build(self, profile=False):
        """Builds a weighted, directed graph by using tuple methods. The
        process has 2 steps:

        * Find all seeds using :func:`align.tuples.Index.seeds`,
        * Extend all seeds to suffix-prefix segments using :func:`extend`.

        The resulting graph may not necessarily be acyclic. For further
        processing (e.g to find the layout) we need to ensure the overlap
        graph is acyclic. For this, see :func:`OverlapGraph.break_cycles`.

        Keyword Args:
            profile (Optional[bool]): If truthy, instead of reporting
                percentage progress time consumption is reported at *every*
                step (for every pair of sequences). This generates *a lot* of
                output.

        Returns:
            assembly.OverlapGraph: The overlap graph, potentially containing
                cycles.
        """
        # Vertex names, edge endpoint pairs, and edge weights accumulated
        # over all sequence pairs:
        vs = set()
        es, ws = [], []
        seqinfo = self.index.tuplesdb.seqinfo()
        seqids = seqinfo.keys()
        msg = 'Extending seeds on potentially homologous sequences'
        num_pairs = self.index.num_potential_homolog_pairs()
        indicator = ProgressIndicator(msg, num_pairs, percentage=False)
        progress_cnt = 0  # NOTE(review): unused
        if not profile:
            indicator.start()
        for S_id in seqids:
            for T_id in self.index.potential_homologs(S_id):
                S_info, T_info = seqinfo[S_id], seqinfo[T_id]
                S_min_idx, T_min_idx = S_info['start'], T_info['start']
                S_max_idx = S_info['start'] + S_info['length']
                T_max_idx = T_info['start'] + T_info['length']
                # Vertex names carry the original name, coordinates, and id.
                S_name = '%s %d-%d #%d' \
                    % (S_info['name'], S_min_idx, S_max_idx, S_id)
                T_name = '%s %d-%d #%d' \
                    % (T_info['name'], T_min_idx, T_max_idx, T_id)
                if profile:
                    sys.stderr.write('"%s" and "%s": ' % (S_name, T_name))
                else:
                    indicator.progress()
                vs = vs.union([S_name, T_name])
                # do they have any seeds in common?
                _t_seeds = time.time()
                seeds = self.index.seeds(S_id, T_id)
                if profile:
                    _t_seeds = 1000 * (time.time() - _t_seeds)
                    sys.stderr.write(
                        'found %d seeds (%.0f ms)' % (len(seeds), _t_seeds)
                    )
                    if not seeds:
                        sys.stderr.write('.\n')
                if not seeds:
                    continue
                # are the seeds part of an overlap?
                S = self.index.tuplesdb.loadseq(S_id)
                T = self.index.tuplesdb.loadseq(T_id)
                # Calculate the score of each seed
                for seed in seeds:
                    seed.tx.score = self.align_params.score(
                        S, T, seed.tx.opseq,
                        S_min_idx=seed.tx.S_idx, T_min_idx=seed.tx.T_idx
                    )
                # Optionally translate seeds and sequences into the condensed
                # (homopolymeric) alphabet before extension.
                if self.hp_condenser:
                    seeds = [self.hp_condenser.condense_seed(S, T, seed) for seed in seeds]
                    S = self.hp_condenser.condense_sequence(S)
                    T = self.hp_condenser.condense_sequence(T)
                _t_extend = time.time()
                overlap = self.extend(S, T, seeds)
                if profile:
                    _t_extend = 1000 * (time.time() - _t_extend)
                    sys.stderr.write(
                        ' overlaps (%.0f ms): %s\n'
                        % (_t_extend, '+' if overlap else '-')
                    )
                if not overlap:
                    continue
                S_len = lib.tx_seq_len(overlap.tx.c_obj, 'S')
                T_len = lib.tx_seq_len(overlap.tx.c_obj, 'T')
                # A suffix-prefix overlap must start at index 0 of at least
                # one of the two sequences:
                assert(overlap.tx.T_idx * overlap.tx.S_idx == 0)
                if abs(overlap.tx.S_idx - overlap.tx.T_idx) < self.window or \
                        abs(overlap.tx.S_idx + S_len - (overlap.tx.T_idx + T_len)) < self.window:
                    # end points are too close, ignore
                    continue
                # Orient the edge from the sequence whose suffix matches the
                # other's prefix; when both start at 0 the shorter contains.
                if overlap.tx.S_idx == 0 and overlap.tx.T_idx == 0:
                    if S_len < T_len:
                        es += [(S_name, T_name)]
                    elif S_len > T_len:
                        es += [(T_name, S_name)]
                elif overlap.tx.T_idx == 0:
                    es += [(S_name, T_name)]
                elif overlap.tx.S_idx == 0:
                    es += [(T_name, S_name)]
                else:
                    raise RuntimeError("This should not have happened")
                ws += [overlap.tx.score]
        if profile:
            sys.stderr.write('\n')
        else:
            indicator.finish()
        # Assemble the igraph object from the collected vertices and edges.
        G = OverlapGraph()
        G.iG.add_vertices(list(vs))
        es = [(G.iG.vs.find(name=u), G.iG.vs.find(name=v)) for u, v in es]
        G.iG.add_edges(es)
        G.iG.es['weight'] = ws
        return G

    def extend(self, S, T, segments):
        """Wraps :c:func:`extend()`: given two sequences and a number of
        matching segments returns the first fully extended segment.

        Args:
            S (seq.Sequence): The "from" sequence.
            T (seq.Sequence): The "to" sequence.
            segments (List[tuples.Segment]): The starting segments. If called
                from :func:`build`, these are seeds but no assumption is made.

        Returns:
            tuples.Segment: A segment corresponding to an overlap alignment.
        """
        # Marshal the segments into a C array for the cffi call.
        segs = ffi.new('segment* []', [seg.c_obj for seg in segments])
        res = lib.extend(segs, len(segs),
            S.c_idxseq, T.c_idxseq, len(S), len(T), self.align_params.c_obj,
            self.window, self.max_succ_drops, self.drop_threshold,
            self.min_overlap_score)
        # NULL from the C layer means no segment could be fully extended.
        return tuples.Segment(c_obj=res) if res != ffi.NULL else None
Introduce `min_margin` (instead of using the window size).
import re
import sys
import igraph
from termcolor import colored
import time
from . import tuples, pw, homopolymeric, ProgressIndicator, lib, ffi
class OverlapGraph(object):
"""Wraps an :class:`igraph.Graph` object with additional methods to build
and process an overlap graph.
Attributes:
iG (igraph.Graph): The graph object.
Args:
G (Optional[igraph.Graph]): The graph object to initialize with; no
processing is done and if the object is ``None`` a new directed
graph is instantiated.
"""
def __init__(self, G=None):
self.iG = G if G else igraph.Graph(directed=True)
assert(isinstance(self.iG, igraph.Graph))
self.v_highlight = '#b5ffb5'
self.e_highlight = '#00b400'
def _endpoint_names(self, eid):
"""Internal helper: igraph is not too happy when we munge vertex IDs
since it keeps renumbering them according to its memory allocation
scheme. Instead convert everything to "name"s which are the original
sequence IDs.
See: https://lists.nongnu.org/archive/html/igraph-help/2010-03/msg00078.html
"""
if isinstance(eid, igraph.Edge):
eid = eid.index
uid, vid = self.iG.es[eid].tuple
return self.iG.vs[uid]['name'], self.iG.vs[vid]['name']
def _vids_to_names(self, vids):
return [self.iG.vs[vid]['name'] for vid in vids]
def eid_to_str(self, eid, maxlen=50):
"""Prepares an edge for pretty printing. Truncates and paths the end
point labels (``name`` is used as label) to ensure they both have
length ``maxlen``.
"""
u, v = self._endpoint_names(eid)
u, v = u[:maxlen].ljust(maxlen), v[:maxlen].rjust(maxlen)
w = self.iG.es[eid]['weight']
w = ('+--[%.2f]-->' % w).ljust(20)
return '%s %s %s\n' % (u, w, v)
def break_cycles(self, method='ip'):
"""Removes a
`feedback arc set <https://en.wikipedia.org/wiki/Feedback_arc_set>`__
from the graph. Depending on the ``method`` the result may not be
optimal.
Keyword Args:
method (str): The FAS discovery algorithm; passed to
:func:`igraph.Graph.feedback_arc_set`. Default uses an
integer programming formulation which is guaranteed to be
optimal but is slow on large graphs. The alternative is
``eades`` which uses a suboptimal `heuristic
<http://www.sciencedirect.com/science/article/pii/002001909390079O>`__.
"""
if self.iG.is_dag():
return
rm = self.iG.feedback_arc_set(
weights=self.iG.es['weight'], method=method
)
for e in rm:
sys.stderr.write('removed edge: %s' % self.eid_to_str(e))
self.iG.delete_edges(rm)
def longest_path(self, exclude=[], equal_weights=False):
"""Finds the heaviest path (and potantially the longest path in the
sense of number of edges) of the graph, excluding vertices whose name
is included in ``exclude``. This, naturally requires that the graph is
acyclic. Assuming the graph is a DAG, we can find the longest path in
two steps:
- Find a topological ordering of the graph in :math:`O(|V|+|E|)` time,
- Find a heaviest path using the sorting in :math:`O(|V|)` time.
Keyword Arguments:
exclude (Optional[List[str]]): A list of vertex names to be
excluded from the graph when finding the longest path. This is
only of use to :func:`all_longest_paths`.
equal_weights (Optional[bool]): If truthy, all edges are considered
equal in which sense the solution is the literal longest path.
Returns:
list[str]: A list of vertex names in order of appearance in the
longest path.
"""
def weight_of_edge(u, v):
if equal_weights:
return 1
else:
return self.iG.es['weight'][self.iG.get_eid(u, v)]
sorting = self._vids_to_names(self.iG.topological_sorting())
sorting = [v for v in sorting if v not in exclude]
# longest paths ending at each vertex keyed by vertex. Each entry is a
# tuple of (<weight, from>) where `from` is any of the predecessors
# giving the maximum weight.
longest_paths = {}
for v in sorting:
if v in exclude:
continue
incoming = self._vids_to_names(self.iG.predecessors(v))
incoming = [x for x in incoming if x not in exclude]
if not incoming:
longest_paths[v] = (0, None)
else:
w = lambda x: longest_paths[x][0] + weight_of_edge(x, v)
cands = [(w(u), u) for u in incoming]
longest_paths[v] = sorted(
cands, key=lambda x: x[0], reverse=True
)[0]
if not longest_paths:
return []
# Find the terminal vertex of the longest path:
end = sorted(
longest_paths.items(), key=lambda x: x[1][0], reverse=True
)[0][0]
path = []
# Trace back the entire path:
while end and longest_paths:
path = [end] + path
end = longest_paths.pop(end)[1]
# Don't report trivial paths:
return path if len(path) > 1 else []
def all_longest_paths(self, equal_weights=False):
"""Repeatedly finds the longest path in the graph while excluding
vertices that are already included in a path. See :func:`longest_path`.
All keyword arguments are passed as-is to :func:`longest_path`.
Returns:
List[List[str]]: A list of paths, each a list of vertex names in
order of appearance in the path.
"""
paths = []
exclude = []
while True:
path = self.longest_path(exclude=exclude)
if not path:
break
paths += [path]
exclude += path
return paths
def layout(self, full=False, equal_weights=False):
"""Finds the heaviest path (or potentially the longest path) of the
directed graph and creates a new :class:`OverlapGraph` containing only
this layout path. Optionally, we can demand that ALL longest paths of
the graph are reported (to ensure all vertices are included in some
sub-layout), see :func:`all_longest_paths`.
Keyword Args:
full (bool): If truthy, an effort is made to add other paths to
cover all vertices of the graph.
equal_weights (Optional[bool]): see :func:`longest_path`.
Returns:
assembly.OverlapGraph: A linear subgraph (the heaviest path).
Raises:
AssertionError: If the graph is not acyclic.
"""
assert(self.iG.is_dag())
if full:
paths = self.all_longest_paths(equal_weights=equal_weights)
else:
paths = [self.longest_path(equal_weights=equal_weights)]
eids = []
for path in paths:
for idx in range(1, len(path)):
eids += [self.iG.get_eid(path[idx-1], path[idx])]
return OverlapGraph(self.iG.subgraph_edges(eids))
# the paths are names not ids
def draw(self, fname, **kw):
"""Draws the graph and potentially highlights provided paths.
Keyword Arguments:
highlight_paths ([List[List[str]]]): A list of paths to be
highlighted. All edges of the path and the starting vertex
are highlighted green.
edge_color ([List|str]): Passed to :func:`igraph.Graph.plot`.
Default is all black unless paths to be highlighted are
specified. If provided, overrides path highlighting.
vertex_color ([List|str]): Passed to :func:`igraph.Graph.plot`.
Default is all white unless paths to be highlighted are
specified in which case starting vertices are green.
edge_width ([List|float]): Passed to :func:`igraph.Graph.plot`.
Default is 10 for edges in highlighted path and 1 otherwise.
edge_arrow_widge ([List|float]): Passed to
:func:`igraph.Graph.plot`. Default is 3 for highlighted edges
and 1 otherwise.
edge_curvred (float): Passed to :func:`igraph.Graph.plot`. Default
is 0.1.
"""
highlight_paths = kw.get('highlight_paths', [])
def e_in_path(eid):
u, v = self._endpoint_names(eid)
return any([
u in p and v in p and
p.index(u) == p.index(v) - 1 for p in highlight_paths
])
v_start_path = lambda v: any([p[0] == v for p in highlight_paths])
# Sugiyama works on non-DAG graphs as well
n = len(self.iG.vs)
layout_kw = {'maxiter': n * 20, 'weights': None}
if 'weight' in self.iG.es.attributes():
layout_kw['weights'] = 'weight'
plot_kw = {
'layout': self.iG.layout_sugiyama(**layout_kw),
'bbox': (n*150, n*150),
'vertex_size': 150,
'vertex_label': [x.replace(' ', '\n') for x in self.iG.vs['name']],
'vertex_label_size': 18,
'vertex_color': kw.get('vertex_color', [self.v_highlight if v_start_path(v) else 'white' for v in self.iG.vs['name']]),
'edge_width': kw.get('edge_width', [10 if e_in_path(e) else 1 for e in self.iG.es]),
'edge_arrow_width': kw.get('edge_arrow_width', [3 if e_in_path(e) else 1 for e in self.iG.es]),
'edge_color': kw.get('edge_color', [self.e_highlight if e_in_path(e) else 'black' for e in self.iG.es]),
'edge_curved': kw.get('edge_curved', 0.1),
'margin': 200,
}
igraph.plot(self.iG, fname, **plot_kw)
def diff_text(self, OG, f=sys.stdout, summary_only=True, weights_from='theirs'):
"""Prints a diff-style comparison of our :attr:`iG` against another
given :class:`OverlapGraph` and writes the output to the given file
handle. Missing edges are printed in red with a leading '-' and added
edges are printed in green with a leading '+'.
Args:
OG (OverlapGraph): The "to" directed graph ("from" is us).
Keyword Args:
f (Optional[file]): Open file handle to which output is written;
default is ``sys.stdout``.
summary_only (Optional[bool]): Only show a summary of changes and
not edge-by-edge diff; default is True.
weights_from (Optional[str]): Which graph's edge weights should be
used for common edges, either 'ours' or 'theirs'; default is
'theirs'.
"""
sE1 = set([self._endpoint_names(e) for e in self.iG.es])
sE2 = set([OG._endpoint_names(e) for e in OG.iG.es])
assert(weights_from in ['ours', 'theirs'])
def _edge_str(endpoints):
if endpoints in sE1 and endpoints in sE2:
if weights_from == 'ours':
return self.eid_to_str(self.iG.get_eid(*endpoints))
else:
return OG.eid_to_str(OG.iG.get_eid(*endpoints))
if endpoints in sE1:
return self.eid_to_str(self.iG.get_eid(*endpoints))
elif endpoints in sE2:
return OG.eid_to_str(OG.iG.get_eid(*endpoints))
else:
raise RuntimeError("This should not have happened")
missing, added, both = sE1 - sE2, sE2 - sE1, sE1.intersection(sE2)
missing_pctg = len(missing)*100.0/len(sE1)
added_pctg = len(added)*100.0/len(sE1)
f.write(
'G1 (%d edges) --> G2 (%d edges): %%%.2f lost, %%%.2f added\n' %
(len(sE1), len(sE2), missing_pctg, added_pctg)
)
if summary_only:
return
diff = [('-', edge) for edge in missing] + \
[('+', edge) for edge in added] + [(None, edge) for edge in both]
for edge in sorted(diff, cmp=lambda x, y: cmp(x[1], y[1])):
color = None
prefix = ' ' if edge[0] is None else edge[0]
line = '%s %s' % (prefix, _edge_str(edge[1]))
if edge[0] == '-':
color = 'red'
elif edge[0] == '+':
color = 'green'
if color and f.isatty():
f.write(colored(line, color=color))
else:
f.write(line)
    def diff_draw(self, OG, fname, figsize=None):
        """Draws the difference between our :attr:`iG` against another
        given :class:`OverlapGraph`. Shared edges are in black, missing edges
        (from ours to ``OG``) are in red and added edges are in green.

        Args:
            OG (OverlapGraph): The "to" directed graph ("from" is us).
            fname (string): Path to which plot is saved, passed as is to
                :func:`draw`.

        Keyword Args:
            figsize: Unused in this body -- presumably kept for interface
                compatibility; TODO confirm and remove.
        """
        # NOTE(review): e_to_names is defined but never used below;
        # _endpoint_names is used instead.
        e_to_names = lambda G, e: (G.vs[e[0]]['name'], G.vs[e[1]]['name'])
        # Edge sets of both graphs as (from-name, to-name) tuples so they
        # can be compared across the two graphs.
        sE1 = set([self._endpoint_names(e) for e in self.iG.es])
        sE2 = set([OG._endpoint_names(e) for e in OG.iG.es])
        # Build a fresh graph over the union of vertices and edges.
        G = OverlapGraph()
        G.iG.add_vertices(list(
            set(self.iG.vs['name']).union(set(OG.iG.vs['name']))
        ))
        G.iG.add_edges(list(sE1.union(sE2)))
        both, missing, added = sE1.intersection(sE2), sE1 - sE2, sE2 - sE1
        # Color each union edge according to which side of the diff it is on.
        edge_color = []
        for e in G.iG.es:
            e = G._endpoint_names(e)
            if e in both:
                edge_color += ['black']
            elif e in missing:
                edge_color += ['red']
            elif e in added:
                edge_color += ['green']
        # Vertices with no incoming edge get the highlight color.
        vertex_color = ['white' if v.degree(mode=igraph.IN) else self.v_highlight for v in G.iG.vs]
        G.draw(fname, edge_color=edge_color, vertex_color=vertex_color,
               edge_width=5, edge_arrow_width=3, edge_curved=0.01)
def save(self, fname):
"""Saves the graph in GML format
Args:
fname (str): path to GML file.
"""
self.iG.write_gml(fname)
class OverlapBuilder(object):
    """Provided a :class:`align.tuples.Index` builds an overlap graph of all
    the sequences. All sequences must have already been indexed in the
    tuples database. For example::

        B = tuples.TuplesDB('path/to/file.db', alphabet=seq.Alphabet("ACGT"))
        I = tuples.Index(B, wordlen=10)
        C = align.AlignParams(
            ... # snip
        )
        G = assembly.OverlapBuilder(I, C).build()
        G.save(path)

    All arguments to the constructor become class attributes with the same
    name.

    Attributes:
        index (tuples.Index): A tuples index that responds to
            :func:`align.tuples.Index.seeds`.
        align_params (pw.AlignParams): The alignment parameters for the
            rolling alignment.
        hp_condenser (homopolymeric.HpCondenser): If specified,
            all alignments will be performed in condensed alphabet.
            Consequently, all other arguments are interpretted in the
            condensed alphabet.
        drop_threshold (float): What constitutes a drop in the
            score from one window to the next, default is 0. This means that
            if the overall score does not strictly increase (or the score of
            the new window is not positive) we drop the seed.
        window (int): The size of the rolling window.
        max_succ_drops (int): Maximum number of "drops" until the
            segment is dropped, default is 3.
        min_overlap_score (float): Minimum score a fully extended segment
            must reach for :c:func:`extend()` to report it as an overlap;
            default is ``drop_threshold``.
        min_margin (int): Minimum distance required between the end points
            of the two sequences on each side of an overlap; overlaps whose
            end points are closer than this are ignored. Default is
            ``window``.
    """
    def __init__(self, index, align_params, **kwargs):
        self.hp_condenser = kwargs.get('hp_condenser', None)
        if self.hp_condenser:
            assert(isinstance(self.hp_condenser, homopolymeric.HpCondenser))
        self.index, self.align_params = index, align_params
        self.window = kwargs.get('window', 20)
        self.drop_threshold = kwargs.get('drop_threshold', 0)
        # By default an overlap merely has to avoid being dropped.
        self.min_overlap_score = kwargs.get('min_overlap_score', self.drop_threshold)
        self.min_margin = kwargs.get('min_margin', self.window)
        self.max_succ_drops = kwargs.get('max_succ_drops', 3)
    def build(self, profile=False):
        """Builds a weighted, directed graph by using tuple methods. The
        process has 2 steps:

        * Find all seeds using :func:`align.tuples.Index.seeds`,
        * Extend all seeds to suffix-prefix segments using :func:`extend`.

        The resulting graph may not necessarily be acyclic. For further
        processing (e.g to find the layout) we need to ensure the overlap
        graph is acyclic. For this, see :func:`OverlapGraph.break_cycles`.

        Keyword Args:
            profile (Optional[bool]): If truthy, instead of reporting
                percentage progress time consumption is reported at *every*
                step (for every pair of sequences). This generates *a lot* of
                output.

        Returns:
            assembly.OverlapGraph: The overlap graph, potentially containing
                cycles.
        """
        vs = set()
        # es and ws are kept in lockstep: ws[i] is the weight of es[i].
        es, ws = [], []
        seqinfo = self.index.tuplesdb.seqinfo()
        seqids = seqinfo.keys()
        msg = 'Extending seeds on potentially homologous sequences'
        num_pairs = self.index.num_potential_homolog_pairs()
        indicator = ProgressIndicator(msg, num_pairs, percentage=False)
        if not profile:
            indicator.start()
        for S_id in seqids:
            for T_id in self.index.potential_homologs(S_id):
                S_info, T_info = seqinfo[S_id], seqinfo[T_id]
                S_min_idx, T_min_idx = S_info['start'], T_info['start']
                S_max_idx = S_info['start'] + S_info['length']
                T_max_idx = T_info['start'] + T_info['length']
                S_name = '%s %d-%d #%d' \
                    % (S_info['name'], S_min_idx, S_max_idx, S_id)
                T_name = '%s %d-%d #%d' \
                    % (T_info['name'], T_min_idx, T_max_idx, T_id)
                if profile:
                    sys.stderr.write('"%s" and "%s": ' % (S_name, T_name))
                else:
                    indicator.progress()
                vs = vs.union([S_name, T_name])
                # do they have any seeds in common?
                _t_seeds = time.time()
                seeds = self.index.seeds(S_id, T_id)
                if profile:
                    _t_seeds = 1000 * (time.time() - _t_seeds)
                    sys.stderr.write(
                        'found %d seeds (%.0f ms)' % (len(seeds), _t_seeds)
                    )
                    if not seeds:
                        sys.stderr.write('.\n')
                if not seeds:
                    continue
                # are the seeds part of an overlap?
                S = self.index.tuplesdb.loadseq(S_id)
                T = self.index.tuplesdb.loadseq(T_id)
                # Calculate the score of each seed
                for seed in seeds:
                    seed.tx.score = self.align_params.score(
                        S, T, seed.tx.opseq,
                        S_min_idx=seed.tx.S_idx, T_min_idx=seed.tx.T_idx
                    )
                if self.hp_condenser:
                    # Condense sequences only after seeds were scored in the
                    # original alphabet.
                    seeds = [self.hp_condenser.condense_seed(S, T, seed) for seed in seeds]
                    S = self.hp_condenser.condense_sequence(S)
                    T = self.hp_condenser.condense_sequence(T)
                _t_extend = time.time()
                overlap = self.extend(S, T, seeds)
                if profile:
                    _t_extend = 1000 * (time.time() - _t_extend)
                    sys.stderr.write(
                        ' overlaps (%.0f ms): %s\n'
                        % (_t_extend, '+' if overlap else '-')
                    )
                if not overlap:
                    continue
                S_len = lib.tx_seq_len(overlap.tx.c_obj, 'S')
                T_len = lib.tx_seq_len(overlap.tx.c_obj, 'T')
                # An overlap alignment must start at position 0 of at least
                # one of the two sequences.
                assert(overlap.tx.T_idx * overlap.tx.S_idx == 0)
                lmargin = abs(overlap.tx.S_idx - overlap.tx.T_idx)
                rmargin = abs(overlap.tx.S_idx + S_len - (overlap.tx.T_idx + T_len))
                if lmargin < self.min_margin or rmargin < self.min_margin:
                    # end points are too close, ignore
                    continue
                edge = None
                if overlap.tx.S_idx == 0 and overlap.tx.T_idx == 0:
                    if S_len < T_len:
                        edge = (S_name, T_name)
                    elif S_len > T_len:
                        edge = (T_name, S_name)
                    # S_len == T_len: full mutual overlap, no direction can
                    # be assigned so no edge is recorded.
                elif overlap.tx.T_idx == 0:
                    edge = (S_name, T_name)
                elif overlap.tx.S_idx == 0:
                    edge = (T_name, S_name)
                else:
                    raise RuntimeError("This should not have happened")
                # BUGFIX: the weight used to be appended unconditionally even
                # when the equal-length case above added no edge, which
                # shifted every subsequent weight in ``ws`` out of alignment
                # with ``es``.
                if edge is not None:
                    es += [edge]
                    ws += [overlap.tx.score]
        if profile:
            sys.stderr.write('\n')
        else:
            indicator.finish()
        G = OverlapGraph()
        G.iG.add_vertices(list(vs))
        es = [(G.iG.vs.find(name=u), G.iG.vs.find(name=v)) for u, v in es]
        G.iG.add_edges(es)
        G.iG.es['weight'] = ws
        return G
    def extend(self, S, T, segments):
        """Wraps :c:func:`extend()`: given two sequences and a number of
        matching segments returns the first fully extended segment.

        Args:
            S (seq.Sequence): The "from" sequence.
            T (seq.Sequence): The "to" sequence.
            segments (List[tuples.Segment]): The starting segments. If called
                from :func:`build`, these are seeds but no assumption is made.

        Returns:
            tuples.Segment: A segment corresponding to an overlap alignment,
                or ``None`` if no segment extends to a full overlap.
        """
        segs = ffi.new('segment* []', [seg.c_obj for seg in segments])
        res = lib.extend(segs, len(segs),
            S.c_idxseq, T.c_idxseq, len(S), len(T), self.align_params.c_obj,
            self.window, self.max_succ_drops, self.drop_threshold,
            self.min_overlap_score)
        return tuples.Segment(c_obj=res) if res != ffi.NULL else None
|
#!/usr/bin/env python
# This tools generates local_settings_generated.py using the template
from __future__ import print_function
import sys, os, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import six
from zerver.lib.utils import generate_random_token
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
'initial_password_salt']
def generate_camo_config_file(camo_key):
    """Write the Camo daemon's config file, embedding the given key."""
    contents = """ENABLED=yes
PORT=9292
CAMO_KEY=%s
""" % (camo_key,)
    with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
        camo_file.write(contents)
    print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
    """Return a 50-character Django SECRET_KEY.

    Secret key generation taken from Django's startproject.py.
    """
    allowed_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, allowed_chars)
def get_old_conf(output_filename):
    """Read previously-generated secrets from ``output_filename``.

    Returns a dict containing only the secrets actually present in the file
    (empty if the file does not exist).  Absent secrets used to be returned
    as ``None``, which made the caller's ``old_conf.get(name, default)``
    yield ``None`` instead of a freshly generated default and write literal
    ``name = None`` lines into the new secrets file.
    """
    if not os.path.exists(output_filename):
        return {}
    secrets_file = six.moves.configparser.RawConfigParser()
    secrets_file.read(output_filename)
    def get_secret(key):
        if secrets_file.has_option('secrets', key):
            return secrets_file.get('secrets', key)
        return None
    fields = AUTOGENERATED_SETTINGS + ['secret_key', 'camo_key']
    # Drop missing secrets entirely so callers' .get() defaults apply.
    return {name: get_secret(name) for name in fields
            if get_secret(name) is not None}
def generate_secrets(development=False):
    """Generate (or preserve) all Zulip secrets and write the secrets file.

    Values found in an existing secrets file are kept; only missing secrets
    are freshly generated.  In production mode the Camo config file is also
    (re)written with the camo key.
    """
    if development:
        OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
    else:
        OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
    lines = ['[secrets]\n']
    def config_line(var, value):
        return "%s = %s\n" % (var, value)
    old_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
    for name in AUTOGENERATED_SETTINGS:
        lines.append(config_line(name, old_conf.get(name, generate_random_token(64))))
    secret_key = old_conf.get('secret_key', generate_django_secretkey())
    lines.append(config_line('secret_key', secret_key))
    camo_key = old_conf.get('camo_key', get_random_string(64))
    lines.append(config_line('camo_key', camo_key))
    if not development:
        # Write the Camo config file directly
        generate_camo_config_file(camo_key)
    # ``with`` guarantees the handle is closed even if the write raises;
    # the original bare open()/close() leaked it on error.
    with open(OUTPUT_SETTINGS_FILENAME, 'w') as out:
        out.write("".join(lines))
    print("Generated %s with auto-generated secrets!" % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
    # Pass -d/--development to write zproject/dev-secrets.conf instead of
    # the production secrets path.
    args = sys.argv[1:]
    development = bool(args) and args[0] in ('-d', '--development')
    generate_secrets(development)
generate_secrets: Silence mypy error with configparser.
See https://github.com/python/typeshed/issues/307.
#!/usr/bin/env python
# This tools generates local_settings_generated.py using the template
from __future__ import print_function
import sys, os, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import six
from zerver.lib.utils import generate_random_token
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
'initial_password_salt']
def generate_camo_config_file(camo_key):
    """Write the Camo daemon's config file, embedding the given key."""
    camo_config = """ENABLED=yes
PORT=9292
CAMO_KEY=%s
""" % (camo_key,)
    with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
        camo_file.write(camo_config)
    print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
    """Return a 50-character Django SECRET_KEY."""
    # Secret key generation taken from Django's startproject.py
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, chars)
def get_old_conf(output_filename):
    """Read previously-generated secrets from ``output_filename``.

    Returns a dict containing only the secrets actually present in the file
    (empty if the file does not exist).  Absent secrets used to be returned
    as ``None``, which made the caller's ``old_conf.get(name, default)``
    yield ``None`` instead of a freshly generated default and write literal
    ``name = None`` lines into the new secrets file.
    """
    if not os.path.exists(output_filename):
        return {}
    secrets_file = six.moves.configparser.RawConfigParser() # type: ignore # https://github.com/python/typeshed/issues/307
    secrets_file.read(output_filename)
    def get_secret(key):
        if secrets_file.has_option('secrets', key):
            return secrets_file.get('secrets', key)
        return None
    fields = AUTOGENERATED_SETTINGS + ['secret_key', 'camo_key']
    # Drop missing secrets entirely so callers' .get() defaults apply.
    return {name: get_secret(name) for name in fields
            if get_secret(name) is not None}
def generate_secrets(development=False):
    """Generate (or preserve) all Zulip secrets and write the secrets file.

    Values found in an existing secrets file are kept; only missing secrets
    are freshly generated.  In production mode the Camo config file is also
    (re)written with the camo key.
    """
    if development:
        OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
    else:
        OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
    lines = ['[secrets]\n']
    def config_line(var, value):
        return "%s = %s\n" % (var, value)
    old_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
    for name in AUTOGENERATED_SETTINGS:
        lines.append(config_line(name, old_conf.get(name, generate_random_token(64))))
    secret_key = old_conf.get('secret_key', generate_django_secretkey())
    lines.append(config_line('secret_key', secret_key))
    camo_key = old_conf.get('camo_key', get_random_string(64))
    lines.append(config_line('camo_key', camo_key))
    if not development:
        # Write the Camo config file directly
        generate_camo_config_file(camo_key)
    # ``with`` guarantees the handle is closed even if the write raises;
    # the original bare open()/close() leaked it on error.
    with open(OUTPUT_SETTINGS_FILENAME, 'w') as out:
        out.write("".join(lines))
    print("Generated %s with auto-generated secrets!" % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
    development = False
    extra_args = sys.argv[1:]
    # -d/--development writes zproject/dev-secrets.conf instead of the
    # production secrets path.
    if len(extra_args) and extra_args[0] in ('-d', '--development'):
        development = True
    generate_secrets(development)
|
import socket, time, threading, sys, signal, errno
from threading import Thread
# The listening port is required as the single command-line argument.
if (len(sys.argv) < 2):
    print "Server usage: python server.py PORT"
    sys.exit(0)
MIN_THREADS = 4 # Minimum number of workers at start and at any point
MAX_THREADS = 32 # Maximum number of workers
TOLERANCE = 4 # Minimum difference before resizing the pool, to prevent constant resizing (inertia)
# Wire-protocol line prefixes shared between client and server.
J_MSG = "JOIN_CHATROOM: "
L_MSG = "LEAVE_CHATROOM: "
IP_MSG = "CLIENT_IP: "
P_MSG = "PORT: "
JID_MSG = "JOIN_ID: "
NAME_MSG = "CLIENT_NAME: "
DIS_MSG = "DISCONNECT: "
CHAT_MSG = "CHAT: "
MSG = "MESSAGE: "
PORT = int(sys.argv[1])
class Room():
    """One chatroom: pending broadcasts plus current membership."""
    def __init__(self):
        # Pending broadcasts, each [CLIENT_NAME, MESSAGE, set of join IDs
        # that have not yet received it].
        self.messages = []
        # Join IDs of the clients currently in the room.
        self.clients = []
class ChatState():
    """Shared chat bookkeeping: ID/ref counters and the room tables."""
    def __init__(self):
        # Monotonic counters handing out join IDs and room references.
        self.idCounter = 0
        self.refCounter = 0
        # Room name -> room reference.
        self.roomRefs = {}
        # Room reference -> Room instance.
        self.rooms = {}
class Pool():
    """Worker-thread pool owning the client queue and shared chat state.

    The pool elastically resizes between MIN_THREADS and MAX_THREADS based
    on the number of queued clients vs. workers.
    """
    def __init__(self):
        # lockClients guards self.clients and pool resizing; lockState
        # guards the shared ChatState.
        self.lockClients = threading.Lock()
        self.lockState = threading.Lock()
        self.clients = []
        self.workers = []
        self.state = ChatState()
        self.threadCounter = 0
        self.killRequested = False
        for counter in range(MIN_THREADS):
            self.workers.append(Worker(self, self.threadCounter))
            self.workers[counter].start()
            self.threadCounter += 1
    def killWorker(self, worker):
        """Mark one idle worker for death; True if it was marked.

        Only meaningful while a shrink is in progress (assignClient sets
        self.maxKill / self.killedSoFar beforehand).
        """
        # Never shrink below the minimum pool size.
        if (len(self.workers) - self.killedSoFar) <= MIN_THREADS:
            return False
        if self.killedSoFar >= self.maxKill:
            return False
        # Only idle workers (no client attached) can be retired safely.
        if worker.conn is None:
            worker.useless = True # This thread will eventually die now
            self.killedSoFar += 1
            return True
        return False
    def assignClient(self, conn):
        """Queue a new client connection and resize the pool if needed."""
        # Non-blocking so workers can poll recv() while checking kill flags.
        conn.setblocking(0)
        self.lockClients.acquire()
        self.clients.append(conn)
        # Maybe our workers pool needs to be resized, and we need the lock to do so
        difference = len(self.clients) - len(self.workers)
        if abs(difference) > TOLERANCE:
            if difference > 0:
                # Spawn workers
                for counter in range(difference):
                    if len(self.workers) >= MAX_THREADS:
                        break
                    self.workers.append(Worker(self, self.threadCounter))
                    self.workers[-1].start()
                    self.threadCounter += 1
            else:
                # Kill workers
                self.maxKill = abs(difference)
                self.killedSoFar = 0
                self.workers = [w for w in self.workers if not self.killWorker(w)]
        self.lockClients.release()
    def kill(self):
        """Request a cooperative shutdown; worker loops poll this flag."""
        self.killRequested = True
class Server(Thread):
    """Accept loop: hands each incoming TCP connection to the worker pool."""
    def __init__(self, pool):
        Thread.__init__(self)
        self.daemon = True # This thread may die while waiting for a client
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the server restart without waiting on TIME_WAIT.
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind(("0.0.0.0", PORT))
        self.pool = pool
    def run(self):
        while True:
            # At most 4 queued clients
            self.server.listen(4)
            (conn, (ip,port)) = self.server.accept()
            # If the server is already overloaded, reject this client
            if len(self.pool.clients) > MAX_THREADS:
                print "Burnout! Server rejected client"
                conn.close()
            else:
                print "Server received client connection and added it to queue"
                self.pool.assignClient(conn)
class Worker(Thread):
    """Serves one queued client at a time over the chat protocol, polling
    for room broadcasts between socket reads."""
    def __init__(self, pool, id):
        Thread.__init__(self)
        self.pool = pool
        # Socket of the client currently being served; None while idle.
        self.conn = None
        self.id = id
        # Set by Pool.killWorker; makes this thread exit once idle.
        self.useless = False
        # (roomRef, clientId) for every room our current client has joined.
        self.myRooms = []
    def constructReply(self, data):
        """Build the HELO response echoing ``data`` plus server identity."""
        reply = "HELO {0}\nIP:{1}\nPort:{2}\nStudentID:{3}\n".format(data, socket.gethostbyname(socket.gethostname()), PORT, 16336617)
        return reply
    def constructJoinReply(self, roomName, roomRef, clientId):
        """Build the JOINED_CHATROOM acknowledgement."""
        reply = ("JOINED_CHATROOM: {0}\n"
                 "SERVER_IP: {1}\n"
                 "PORT: {2}\n"
                 "ROOM_REF: {3}\n"
                 "JOIN_ID: {4}\n"
                 ).format(roomName, socket.gethostbyname(socket.gethostname()), PORT, roomRef, clientId)
        return reply
    def constructLeaveReply(self, roomRef, clientId):
        """Build the LEFT_CHATROOM acknowledgement."""
        reply = ("LEFT_CHATROOM: {0}\n"
                 "JOIN_ID: {1}\n"
                 ).format(roomRef, clientId)
        return reply
    def constructMessage(self, roomRef, clientName, message):
        """Build a CHAT broadcast line for delivery to a client."""
        reply = ("CHAT: {0}\n"
                 "CLIENT_NAME: {1}\n"
                 "MESSAGE: {2}\n"
                 ).format(roomRef, clientName, message)
        return reply
    def sendClient(self, content):
        """Send ``content`` to our client, retrying until it goes through,
        the connection resets, or this worker is asked to die."""
        while not (self.pool.killRequested or self.useless):
            try:
                self.conn.send(content)
                print "Thread {0} sent this to client: {1}".format(self.id, content)
                break
            except socket.error as e:
                if e.errno == errno.ECONNRESET:
                    break
    def handleResponse(self, data):
        """Dispatch one protocol message; returns True when the client is
        finished (disconnect or service kill) and the socket should close.

        Unrecognized messages fall through and return None (falsy), which
        keeps the connection open.
        """
        # Thread pool protocol
        if data == "KILL_SERVICE\n":
            self.pool.kill()
            return True
        elif data.startswith("HELO "):
            self.sendClient(self.constructReply(data[5:].rstrip()))
            return False
        # Chat protocol
        elif data.startswith(J_MSG):
            roomName = data.splitlines()[0][len(J_MSG):]
            clientName = data.splitlines()[3][len(NAME_MSG):]
            # Get client ID, room ref, broadcast and append client to users
            self.pool.lockState.acquire()
            clientId = self.pool.state.idCounter
            self.pool.state.idCounter += 1
            # Create the room on first join.
            if roomName in self.pool.state.roomRefs:
                roomRef = self.pool.state.roomRefs[roomName]
            else:
                roomRef = self.pool.state.refCounter
                self.pool.state.roomRefs[roomName] = roomRef
                self.pool.state.rooms[roomRef] = Room()
                self.pool.state.refCounter += 1
            room = self.pool.state.rooms[roomRef]
            if (len(room.clients) > 0):
                joinMessage = "{0} has joined the chatroom".format(clientName)
                room.messages.append([clientName, joinMessage, set(room.clients)])
            # NOTE(review): the joining client is appended *after* the join
            # broadcast is queued, so they never receive their own join
            # message.
            room.clients.append(clientId)
            self.pool.lockState.release()
            self.myRooms.append((roomRef, clientId))
            self.sendClient(self.constructJoinReply(roomName, roomRef, clientId))
            return False
        elif data.startswith(L_MSG):
            roomRef = int(data.splitlines()[0][len(L_MSG):])
            clientId = int(data.splitlines()[1][len(JID_MSG):])
            clientName = data.splitlines()[2][len(NAME_MSG):]
            # Discard any messages left for us, and leave chatroom
            if (roomRef, clientId) in self.myRooms:
                self.pool.lockState.acquire()
                room = self.pool.state.rooms[roomRef]
                for index in range(len(room.messages)):
                    if clientId in room.messages[index][2]:
                        room.messages[index][2].remove(clientId)
                room.messages[:] = [m for m in room.messages if m[2]]
                room.clients.remove(clientId)
                # Tell everyone still in the room that we left.
                if (len(room.clients) > 0):
                    leaveMessage = "{0} has left the chatroom".format(clientName)
                    room.messages.append([clientName, leaveMessage, set(room.clients)])
                self.pool.lockState.release()
                self.myRooms.remove((roomRef, clientId))
            self.sendClient(self.constructLeaveReply(roomRef, clientId))
            return False
        elif data.startswith(CHAT_MSG):
            roomRef = int(data.splitlines()[0][len(CHAT_MSG):])
            clientId = int(data.splitlines()[1][len(JID_MSG):])
            clientName = data.splitlines()[2][len(NAME_MSG):]
            message = data.splitlines()[3][len(MSG):]
            # Append message so that all threads can read it (including this one)
            self.pool.lockState.acquire()
            room = self.pool.state.rooms[roomRef]
            if (len(room.clients) > 0):
                room.messages.append([clientName, message, set(room.clients)])
            self.pool.lockState.release()
            return False
        elif data.startswith(DIS_MSG):
            clientName = data.splitlines()[2][len(NAME_MSG):]
            # Discard any messages left for us, and leave all chatrooms
            for t in self.myRooms:
                roomRef = t[0]
                clientId = t[1]
                self.pool.lockState.acquire()
                room = self.pool.state.rooms[roomRef]
                for index in range(len(room.messages)):
                    if clientId in room.messages[index][2]:
                        room.messages[index][2].remove(clientId)
                room.messages[:] = [m for m in room.messages if m[2]]
                room.clients.remove(clientId)
                if (len(room.clients) > 0):
                    discMessage = "{0} was disconnected".format(clientName)
                    room.messages.append([clientName, discMessage, set(room.clients)])
                self.pool.lockState.release()
            self.myRooms = []
            return True
    def readMessages(self):
        """Deliver queued room messages addressed to our client, then drop
        messages that have reached all their recipients."""
        self.pool.lockState.acquire()
        for t in self.myRooms:
            roomRef = t[0]
            clientId = t[1]
            room = self.pool.state.rooms[roomRef]
            for index in range(len(room.messages)):
                if clientId in room.messages[index][2]:
                    room.messages[index][2].remove(clientId)
                    self.sendClient(self.constructMessage(roomRef, room.messages[index][0], room.messages[index][1]))
            # Garbage-collect fully-delivered messages in place.
            room.messages[:] = [m for m in room.messages if m[2]]
        self.pool.lockState.release()
    def run(self):
        """Main loop: fetch a client from the pool queue, then alternate
        between delivering broadcasts and reading protocol messages."""
        while not (self.pool.killRequested or self.useless):
            # Try to get a client
            self.pool.lockClients.acquire()
            if (len(self.pool.clients) > 0 and not (self.pool.killRequested or self.useless)):
                self.conn = self.pool.clients.pop(0)
            self.pool.lockClients.release()
            # If we didn't get a client, try again
            if self.conn is None:
                continue
            print "Thread {0} fetched a client".format(self.id)
            # Serve client
            while not (self.pool.killRequested or self.useless):
                self.readMessages()
                try:
                    # Non-blocking socket: recv raises socket.error when no
                    # data is available, which keeps this loop polling.
                    data = self.conn.recv(2048).replace("\\n", '\n')
                    print "Thread {0} received data {1}".format(self.id, data.rstrip())
                    if data == "":
                        break
                    if self.handleResponse(data):
                        break
                except socket.error as e2:
                    if e2.errno == errno.ECONNRESET:
                        break
            print "Thread {0} closing client socket".format(self.id)
            self.conn.close()
            self.conn = None
        print "Thread {0} dying".format(self.id)
print "--- Preparing thread pool..."
workerPool = Pool()
print "--- Creating CTRL-C signal handler..."
# Translate CTRL-C into a cooperative shutdown of all worker threads.
def signalHandler(signal, frame):
    print "Server received CTRL-C, nuking all threads"
    workerPool.kill()
signal.signal(signal.SIGINT, signalHandler)
print "--- TCP server starting..."
serverThread = Server(workerPool)
serverThread.start()
print "--- Server is ready!"
# Busy-wait until a kill is requested, then join every worker before exit.
while True:
    if workerPool.killRequested:
        for worker in workerPool.workers:
            worker.join()
        break
Change joining order so that the client can see their own join message
import socket, time, threading, sys, signal, errno
from threading import Thread
# The listening port is required as the single command-line argument.
if (len(sys.argv) < 2):
    print "Server usage: python server.py PORT"
    sys.exit(0)
MIN_THREADS = 4 # Minimum number of workers at start and at any point
MAX_THREADS = 32 # Maximum number of workers
TOLERANCE = 4 # Minimum difference before resizing the pool, to prevent constant resizing (inertia)
# Wire-protocol line prefixes shared between client and server.
J_MSG = "JOIN_CHATROOM: "
L_MSG = "LEAVE_CHATROOM: "
IP_MSG = "CLIENT_IP: "
P_MSG = "PORT: "
JID_MSG = "JOIN_ID: "
NAME_MSG = "CLIENT_NAME: "
DIS_MSG = "DISCONNECT: "
CHAT_MSG = "CHAT: "
MSG = "MESSAGE: "
PORT = int(sys.argv[1])
class Room():
    """One chatroom: pending broadcasts plus current membership."""
    def __init__(self):
        # This will contain [CLIENT_NAME, MESSAGE, set(ID)]
        self.messages = []
        # Join IDs of the clients currently in the room.
        self.clients = []
class ChatState():
    """Shared chat bookkeeping: ID/ref counters and the room tables."""
    def __init__(self):
        # Monotonic counters handing out join IDs and room references.
        self.idCounter = 0
        self.refCounter = 0
        # Associating a name with a ref
        self.roomRefs = {}
        # Associating a ref with a Room object
        self.rooms = {}
class Pool():
    """Worker-thread pool owning the client queue and shared chat state.

    The pool elastically resizes between MIN_THREADS and MAX_THREADS based
    on the number of queued clients vs. workers.
    """
    def __init__(self):
        # lockClients guards self.clients and pool resizing; lockState
        # guards the shared ChatState.
        self.lockClients = threading.Lock()
        self.lockState = threading.Lock()
        self.clients = []
        self.workers = []
        self.state = ChatState()
        self.threadCounter = 0
        self.killRequested = False
        for counter in range(MIN_THREADS):
            self.workers.append(Worker(self, self.threadCounter))
            self.workers[counter].start()
            self.threadCounter += 1
    def killWorker(self, worker):
        """Mark one idle worker for death; True if it was marked.

        Only meaningful while a shrink is in progress (assignClient sets
        self.maxKill / self.killedSoFar beforehand).
        """
        # Never shrink below the minimum pool size.
        if (len(self.workers) - self.killedSoFar) <= MIN_THREADS:
            return False
        if self.killedSoFar >= self.maxKill:
            return False
        # Only idle workers (no client attached) can be retired safely.
        if worker.conn is None:
            worker.useless = True # This thread will eventually die now
            self.killedSoFar += 1
            return True
        return False
    def assignClient(self, conn):
        """Queue a new client connection and resize the pool if needed."""
        # Non-blocking so workers can poll recv() while checking kill flags.
        conn.setblocking(0)
        self.lockClients.acquire()
        self.clients.append(conn)
        # Maybe our workers pool needs to be resized, and we need the lock to do so
        difference = len(self.clients) - len(self.workers)
        if abs(difference) > TOLERANCE:
            if difference > 0:
                # Spawn workers
                for counter in range(difference):
                    if len(self.workers) >= MAX_THREADS:
                        break
                    self.workers.append(Worker(self, self.threadCounter))
                    self.workers[-1].start()
                    self.threadCounter += 1
            else:
                # Kill workers
                self.maxKill = abs(difference)
                self.killedSoFar = 0
                self.workers = [w for w in self.workers if not self.killWorker(w)]
        self.lockClients.release()
    def kill(self):
        """Request a cooperative shutdown; worker loops poll this flag."""
        self.killRequested = True
class Server(Thread):
    """Accept loop: hands each incoming TCP connection to the worker pool."""
    def __init__(self, pool):
        Thread.__init__(self)
        self.daemon = True # This thread may die while waiting for a client
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the server restart without waiting on TIME_WAIT.
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind(("0.0.0.0", PORT))
        self.pool = pool
    def run(self):
        while True:
            # At most 4 queued clients
            self.server.listen(4)
            (conn, (ip,port)) = self.server.accept()
            # If the server is already overloaded, reject this client
            if len(self.pool.clients) > MAX_THREADS:
                print "Burnout! Server rejected client"
                conn.close()
            else:
                print "Server received client connection and added it to queue"
                self.pool.assignClient(conn)
class Worker(Thread):
    """Serves one queued client at a time over the chat protocol, polling
    for room broadcasts between socket reads."""
    def __init__(self, pool, id):
        Thread.__init__(self)
        self.pool = pool
        # Socket of the client currently being served; None while idle.
        self.conn = None
        self.id = id
        # Set by Pool.killWorker; makes this thread exit once idle.
        self.useless = False
        # (roomRef, clientId) for every room our current client has joined.
        self.myRooms = []
    def constructReply(self, data):
        """Build the HELO response echoing ``data`` plus server identity."""
        reply = "HELO {0}\nIP:{1}\nPort:{2}\nStudentID:{3}\n".format(data, socket.gethostbyname(socket.gethostname()), PORT, 16336617)
        return reply
    def constructJoinReply(self, roomName, roomRef, clientId):
        """Build the JOINED_CHATROOM acknowledgement."""
        reply = ("JOINED_CHATROOM: {0}\n"
                 "SERVER_IP: {1}\n"
                 "PORT: {2}\n"
                 "ROOM_REF: {3}\n"
                 "JOIN_ID: {4}\n"
                 ).format(roomName, socket.gethostbyname(socket.gethostname()), PORT, roomRef, clientId)
        return reply
    def constructLeaveReply(self, roomRef, clientId):
        """Build the LEFT_CHATROOM acknowledgement."""
        reply = ("LEFT_CHATROOM: {0}\n"
                 "JOIN_ID: {1}\n"
                 ).format(roomRef, clientId)
        return reply
    def constructMessage(self, roomRef, clientName, message):
        """Build a CHAT broadcast line for delivery to a client."""
        reply = ("CHAT: {0}\n"
                 "CLIENT_NAME: {1}\n"
                 "MESSAGE: {2}\n"
                 ).format(roomRef, clientName, message)
        return reply
    def sendClient(self, content):
        """Send ``content`` to our client, retrying until it goes through,
        the connection resets, or this worker is asked to die."""
        while not (self.pool.killRequested or self.useless):
            try:
                self.conn.send(content)
                print "Thread {0} sent this to client: {1}".format(self.id, content)
                break
            except socket.error as e:
                if e.errno == errno.ECONNRESET:
                    break
    def handleResponse(self, data):
        """Dispatch one protocol message; returns True when the client is
        finished (disconnect or service kill) and the socket should close.

        Unrecognized messages fall through and return None (falsy), which
        keeps the connection open.
        """
        # Thread pool protocol
        if data == "KILL_SERVICE\n":
            self.pool.kill()
            return True
        elif data.startswith("HELO "):
            self.sendClient(self.constructReply(data[5:].rstrip()))
            return False
        # Chat protocol
        elif data.startswith(J_MSG):
            roomName = data.splitlines()[0][len(J_MSG):]
            clientName = data.splitlines()[3][len(NAME_MSG):]
            # Get client ID, room ref, broadcast and append client to users
            self.pool.lockState.acquire()
            clientId = self.pool.state.idCounter
            self.pool.state.idCounter += 1
            # Create the room on first join.
            if roomName in self.pool.state.roomRefs:
                roomRef = self.pool.state.roomRefs[roomName]
            else:
                roomRef = self.pool.state.refCounter
                self.pool.state.roomRefs[roomName] = roomRef
                self.pool.state.rooms[roomRef] = Room()
                self.pool.state.refCounter += 1
            room = self.pool.state.rooms[roomRef]
            # The joining client is added *before* the broadcast is queued,
            # so the recipient set includes them and they also receive their
            # own join message.
            room.clients.append(clientId)
            if (len(room.clients) > 0):
                joinMessage = "{0} has joined the chatroom".format(clientName)
                room.messages.append([clientName, joinMessage, set(room.clients)])
            self.pool.lockState.release()
            self.myRooms.append((roomRef, clientId))
            self.sendClient(self.constructJoinReply(roomName, roomRef, clientId))
            return False
        elif data.startswith(L_MSG):
            roomRef = int(data.splitlines()[0][len(L_MSG):])
            clientId = int(data.splitlines()[1][len(JID_MSG):])
            clientName = data.splitlines()[2][len(NAME_MSG):]
            # Discard any messages left for us, and leave chatroom
            if (roomRef, clientId) in self.myRooms:
                self.pool.lockState.acquire()
                room = self.pool.state.rooms[roomRef]
                for index in range(len(room.messages)):
                    if clientId in room.messages[index][2]:
                        room.messages[index][2].remove(clientId)
                room.messages[:] = [m for m in room.messages if m[2]]
                room.clients.remove(clientId)
                # Tell everyone still in the room that we left.
                if (len(room.clients) > 0):
                    leaveMessage = "{0} has left the chatroom".format(clientName)
                    room.messages.append([clientName, leaveMessage, set(room.clients)])
                self.pool.lockState.release()
                self.myRooms.remove((roomRef, clientId))
            self.sendClient(self.constructLeaveReply(roomRef, clientId))
            return False
        elif data.startswith(CHAT_MSG):
            roomRef = int(data.splitlines()[0][len(CHAT_MSG):])
            clientId = int(data.splitlines()[1][len(JID_MSG):])
            clientName = data.splitlines()[2][len(NAME_MSG):]
            message = data.splitlines()[3][len(MSG):]
            # Append message so that all threads can read it (including this one)
            self.pool.lockState.acquire()
            room = self.pool.state.rooms[roomRef]
            if (len(room.clients) > 0):
                room.messages.append([clientName, message, set(room.clients)])
            self.pool.lockState.release()
            return False
        elif data.startswith(DIS_MSG):
            clientName = data.splitlines()[2][len(NAME_MSG):]
            # Discard any messages left for us, and leave all chatrooms
            for t in self.myRooms:
                roomRef = t[0]
                clientId = t[1]
                self.pool.lockState.acquire()
                room = self.pool.state.rooms[roomRef]
                for index in range(len(room.messages)):
                    if clientId in room.messages[index][2]:
                        room.messages[index][2].remove(clientId)
                room.messages[:] = [m for m in room.messages if m[2]]
                room.clients.remove(clientId)
                if (len(room.clients) > 0):
                    discMessage = "{0} was disconnected".format(clientName)
                    room.messages.append([clientName, discMessage, set(room.clients)])
                self.pool.lockState.release()
            self.myRooms = []
            return True
    def readMessages(self):
        """Deliver queued room messages addressed to our client, then drop
        messages that have reached all their recipients."""
        self.pool.lockState.acquire()
        for t in self.myRooms:
            roomRef = t[0]
            clientId = t[1]
            room = self.pool.state.rooms[roomRef]
            for index in range(len(room.messages)):
                if clientId in room.messages[index][2]:
                    room.messages[index][2].remove(clientId)
                    self.sendClient(self.constructMessage(roomRef, room.messages[index][0], room.messages[index][1]))
            # Garbage-collect fully-delivered messages in place.
            room.messages[:] = [m for m in room.messages if m[2]]
        self.pool.lockState.release()
    def run(self):
        """Main loop: fetch a client from the pool queue, then alternate
        between delivering broadcasts and reading protocol messages."""
        while not (self.pool.killRequested or self.useless):
            # Try to get a client
            self.pool.lockClients.acquire()
            if (len(self.pool.clients) > 0 and not (self.pool.killRequested or self.useless)):
                self.conn = self.pool.clients.pop(0)
            self.pool.lockClients.release()
            # If we didn't get a client, try again
            if self.conn is None:
                continue
            print "Thread {0} fetched a client".format(self.id)
            # Serve client
            while not (self.pool.killRequested or self.useless):
                self.readMessages()
                try:
                    # Non-blocking socket: recv raises socket.error when no
                    # data is available, which keeps this loop polling.
                    data = self.conn.recv(2048).replace("\\n", '\n')
                    print "Thread {0} received data {1}".format(self.id, data.rstrip())
                    if data == "":
                        break
                    if self.handleResponse(data):
                        break
                except socket.error as e2:
                    if e2.errno == errno.ECONNRESET:
                        break
            print "Thread {0} closing client socket".format(self.id)
            self.conn.close()
            self.conn = None
        print "Thread {0} dying".format(self.id)
print "--- Preparing thread pool..."
workerPool = Pool()
print "--- Creating CTRL-C signal handler..."
# Translate CTRL-C into a cooperative shutdown of all worker threads.
def signalHandler(signal, frame):
    print "Server received CTRL-C, nuking all threads"
    workerPool.kill()
signal.signal(signal.SIGINT, signalHandler)
print "--- TCP server starting..."
serverThread = Server(workerPool)
serverThread.start()
print "--- Server is ready!"
# Busy-wait until a kill is requested, then join every worker before exit.
while True:
    if workerPool.killRequested:
        for worker in workerPool.workers:
            worker.join()
        break
|
import numpy as np
from sklearn.utils.validation import check_array, check_random_state
def pairwise_transform(X, Y):
    """Form comparable pairs from interval-annotated entries.

    Two instances are comparable when their intervals do not overlap.
    For every comparable pair (a, b) with a's interval entirely below
    b's, one row ``X[a] - X[b]`` is emitted, together with the index
    pair ``(a, b)``.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The feature representation of the instances.
    Y : array-like, shape (n_samples, 2)
        The lower and upper bounds of the interval of each instance.

    Returns
    -------
    X_pairwise : ndarray, shape (n_pairs, n_features)
        Feature differences of the comparable pairs.
    pair_idx : ndarray, shape (n_pairs, 2)
        For each row, the (lower-interval, higher-interval) sample indices.

    Raises
    ------
    ValueError
        If ``Y`` does not have exactly two columns, or no pair can be formed.
    """
    # accept_sparse=False (not the long-deprecated ``None``): this dense
    # implementation cannot handle sparse input.
    X = check_array(X, accept_sparse=False)
    Y = check_array(Y, accept_sparse=False)
    if Y.shape[1] != 2:
        raise ValueError("Y must have two columns, representing the lower "
                         "and upper bound of the interval for each entry.")
    n_samples = X.shape[0]
    idx = np.arange(n_samples)
    chunks = []
    chunk_idx = []
    for k, (x, (y_min, y_max)) in enumerate(zip(X, Y)):
        # only entries after k: earlier pairs were formed in previous turns
        X_rest, Y_rest = X[1 + k:], Y[1 + k:]
        idx_rest = idx[1 + k:]
        before = Y_rest[:, 1] < y_min   # interval strictly below k's
        after = Y_rest[:, 0] > y_max    # interval strictly above k's
        if np.any(before):
            chunks.append(X_rest[before] - x)
            chunk_idx.append(np.array([(i, k) for i in idx_rest[before]]))
        if np.any(after):
            chunks.append(x - X_rest[after])
            chunk_idx.append(np.array([(k, i) for i in idx_rest[after]]))
    if not chunks:
        raise ValueError("Empty slice: no pairs can be formed.")
    # np.row_stack was removed in NumPy 2.0; np.vstack is its exact equivalent.
    return np.vstack(chunks), np.vstack(chunk_idx)
def flip_pairs(X_pairwise, random_state=None):
    """Randomly negate half of the pairwise difference vectors.

    Parameters
    ----------
    X_pairwise : ndarray, shape (n_pairs, n_features)
        Pairwise difference vectors, e.g. from ``pairwise_transform``.
    random_state : int, RandomState instance or None, optional
        Controls which rows are flipped.

    Returns
    -------
    X_flipped : ndarray, shape (n_pairs, n_features)
        Copy of ``X_pairwise`` with the selected rows negated.
    y : ndarray, shape (n_pairs,)
        +1 for untouched rows, -1 for flipped rows.
    """
    rng = check_random_state(random_state)
    n_pairs = X_pairwise.shape[0]
    y = np.ones(n_pairs)
    # n_pairs // 2: explicit integer division -- ``n_pairs / 2`` is a float
    # under Python 3 and RandomState.choice rejects a non-integer size.
    flip = rng.choice(range(n_pairs), size=n_pairs // 2, replace=False)
    y[flip] = -1
    X_flipped = X_pairwise * y[:, np.newaxis]
    return X_flipped, y
if __name__ == '__main__':
    # Smoke test: six scalar "features" 0..5 with hand-picked interval labels.
    X = np.arange(6)[:, np.newaxis]
    Y = [[4, 7], [1, 3], [2, 4], [8, 15], [5, 6], [1, 2]]
    # Build all comparable pairs, then randomly flip half of them.
    X_pw, res = pairwise_transform(X, Y)
    X_pw, y_pw = flip_pairs(X_pw, random_state=0)
    # Print each difference vector, its +/-1 label, and its index pair.
    for x, y, i in zip(X_pw, y_pw, res):
        print x, y, i
# Second revision of the module below: support sparse matrices.
import numpy as np
import scipy.sparse as sp
from sklearn.utils.validation import check_array, check_random_state
def _safe_sparse_add_row(X, row):
"""In-place add row to matrix, supporting sparse matrices."""
if sp.issparse(X):
for k in range(X.shape[0]):
X[k] = X[k] + row # worth cythonizing?
else:
X += row
return X
def pairwise_transform(X, Y):
    """Form comparable pairs from interval-annotated entries.

    Two instances are comparable when their intervals do not overlap.
    For every comparable pair (a, b) with a's interval entirely below
    b's, one row ``X[a] - X[b]`` is emitted. Sparse ``X`` is supported.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The feature representation of the instances.
    Y : array-like, shape (n_samples, 2)
        The lower and upper bounds of the interval of each instance.

    Returns
    -------
    X_pairwise : ndarray or sparse matrix, shape (n_pairs, n_features)
        Feature differences of the comparable pairs; sparse iff ``X`` is.

    Raises
    ------
    ValueError
        If ``Y`` does not have exactly two columns, or no pair can be formed.
    """
    X = check_array(X, accept_sparse='csr')
    # accept_sparse=False (not the long-deprecated ``None``): the interval
    # bounds must be dense.
    Y = check_array(Y, accept_sparse=False)
    if Y.shape[1] != 2:
        raise ValueError("Y must have two columns, representing the lower "
                         "and upper bound of the interval for each entry.")
    chunks = []
    for k, (x, (y_min, y_max)) in enumerate(zip(X, Y)):
        # only entries after k: earlier pairs were formed in previous turns
        X_rest, Y_rest = X[1 + k:], Y[1 + k:]
        before = Y_rest[:, 1] < y_min   # interval strictly below k's
        after = Y_rest[:, 0] > y_max    # interval strictly above k's
        if np.any(before):
            # copy first: _safe_sparse_add_row mutates its argument
            X_bef = X_rest[before].copy()
            chunks.append(_safe_sparse_add_row(X_bef, -x))
        if np.any(after):
            X_aft = X_rest[after].copy()
            chunks.append(-_safe_sparse_add_row(X_aft, -x))
    if not chunks:
        raise ValueError("Empty slice: no pairs can be formed.")
    # stack with the library matching the input kind
    return sp.vstack(chunks) if sp.issparse(X) else np.vstack(chunks)
def flip_pairs(X_pairwise, random_state=None):
    """Randomly negate half of the pairwise difference vectors.

    Parameters
    ----------
    X_pairwise : ndarray or sparse matrix, shape (n_pairs, n_features)
        Pairwise difference vectors, e.g. from ``pairwise_transform``.
    random_state : int, RandomState instance or None, optional
        Controls which rows are flipped.

    Returns
    -------
    X_flipped : ndarray or sparse matrix, shape (n_pairs, n_features)
        Copy of ``X_pairwise`` with the selected rows negated;
        sparse iff the input is.
    y : ndarray, shape (n_pairs,)
        +1 for untouched rows, -1 for flipped rows.
    """
    rng = check_random_state(random_state)
    n_pairs = X_pairwise.shape[0]
    y = np.ones(n_pairs)
    # n_pairs // 2: explicit integer division -- ``n_pairs / 2`` is a float
    # under Python 3 and RandomState.choice rejects a non-integer size.
    flip = rng.choice(range(n_pairs), size=n_pairs // 2, replace=False)
    y[flip] = -1
    if sp.issparse(X_pairwise):
        # left-multiplying by a diagonal sign matrix scales each row by +/-1
        X_flipped = sp.diags([y], [0]) * X_pairwise
    else:
        X_flipped = X_pairwise * y[:, np.newaxis]
    return X_flipped, y
if __name__ == '__main__':
    #X = np.arange(6)[:, np.newaxis]
    # local import of scipy.sparse for the script-mode smoke test
    import scipy.sparse as sp
    # random nonnegative matrix (zeros where the draw was negative) and
    # its CSR copy, so the dense and sparse code paths can be compared
    X = np.random.randn(6, 50)
    X[X < 0] = 0
    Xsp = sp.csr_matrix(X)
    Y = [[4, 7], [1, 3], [2, 4], [8, 15], [5, 6], [1, 2]]
    X_pw = pairwise_transform(X, Y)
    X_pw_sp = pairwise_transform(Xsp, Y)
    # both norms should print ~0: dense and sparse paths must agree
    print np.linalg.norm(X_pw - X_pw_sp)
    X_pw, y_pw = flip_pairs(X_pw, random_state=0)
    X_pw_sp, y_pw = flip_pairs(X_pw_sp, random_state=0)
    print np.linalg.norm(X_pw - X_pw_sp)
    #for x, y in zip(X_pw, y_pw):
    # print x, y
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.