text stringlengths 0 1.05M | meta dict |
|---|---|
from dbfUtils import *
from struct import unpack
from math import *
from random import uniform, random
def load_shape(shapefile):
    """Read an ESRI shapefile and return a dict of its records.

    Sets the module-level globals `records` (the open .shp file object,
    consumed and closed later by find_records) and `record_dict`.
    Also loads the companion .dbf attribute table via load_dbf.
    """
    global records
    global record_dict
    record_dict = {}
    records = open(shapefile, mode='rb')
    #unpack the header
    header1 = unpack('>7i', records.read(28))   # big-endian ints: file code, 5 unused, file length (per the ESRI spec)
    header2 = unpack('<2i', records.read(8))    # little-endian: version, shape type
    header3 = unpack('<8d', records.read(64))   # file bounding box / Z / M ranges (unused here)
    #shapefil type (point, line, polygon, etc)
    shape_type = header2[1]
    load_dbf(shapefile)
    find_records(record_dict, shape_type)
    print '\nsuccess!'
    return record_dict
def load_dbf(filename):
    """Load the .dbf attribute table that accompanies the given .shp file.

    Sets the module-level globals `db` (list of rows produced by dbfreader;
    row 0 holds the field names) and `fieldnames`.
    """
    global db
    global fieldnames
    # Swap the 3-character extension for 'dbf' (e.g. foo.shp -> foo.dbf).
    dbf_filename = filename[:-3] + 'dbf'
    with open(dbf_filename, 'rb') as dbf:
        db = list(dbfreader(dbf))
    fieldnames = db[0]
def get_header():
    """Read the next 8-byte record header from the global `records` stream.

    Returns a 2-tuple of big-endian ints — per the ESRI spec these are the
    record number and the record content length.
    """
    return unpack('>2i', records.read(8)) #pertinent info about individual record
def get_point_records(main_dict):
    """Read one point record from the global `records` stream.

    Stores it in main_dict keyed by record number, as a single shape
    (key 1) holding that point's 'x' and 'y'.
    """
    header = get_header()
    # Point record body: two little-endian doubles, x then y.
    point = unpack('<2d', records.read(16))
    shape = {'x': point[0], 'y': point[1]}
    main_dict[header[0]] = {'shapes': {1: shape}}
def get_multi_point_records():
    """Stub: consumes only the 8-byte record header of a multipoint record.

    NOTE(review): the record body is never read and nothing is stored, so
    calling this would desynchronize the stream. find_records never
    dispatches here, so multipoint shapefiles are effectively unsupported.
    """
    record_header = get_header()
def get_polygon_records(main_dict):
    """Read one polyline/polygon record from the global `records` stream.

    Stores it in main_dict keyed by (record number - 1): record bounding
    box, the record's dbf attribute values, and a 'shapes' dict with one
    entry per part (ring) holding its points, bounds, bounding-box area and
    box center. Relies on globals `records` and `db` set by load_shape /
    load_dbf.
    """
    #rec header
    record_header = get_header()
    # Shape type (int), record bbox (4 doubles), then part and point counts.
    rec1dict = unpack('<i4d2I', records.read(44)) #min & max's, then number of parts/points
    num_rec1parts = rec1dict[5]
    num_rec1points = rec1dict[6]
    # Part start indices, then the flat interleaved (x, y, x, y, ...) doubles.
    parts_list = unpack(('<' + str(num_rec1parts) + 'i'), records.read((num_rec1parts*4)))
    points_list = unpack(('<' + str(num_rec1points*2) + 'd'), records.read((num_rec1points*8*2)))
    list_number = record_header[0] - 1
    main_dict[list_number] = {}
    main_dict[list_number]['xmin'] = rec1dict[1]
    main_dict[list_number]['ymin'] = rec1dict[2]
    main_dict[list_number]['xmax'] = rec1dict[3]
    main_dict[list_number]['ymax'] = rec1dict[4]
    # Copy this record's dbf attributes; the +2 row offset presumably skips
    # the fieldname and field-spec rows emitted by dbfreader — TODO confirm.
    for i in range(0, len(db[0])):
        main_dict[list_number][db[0][i]] = str(db[(int(list_number)+2)][i]).strip()
    # De-interleave the flat point tuple into x and y lists.
    full_x = list(points_list[0:][::2])
    full_y = list(points_list[1:][::2])
    main_dict[list_number]['shapes'] = {}
    count = 0
    newstart = 0
    for i in range(0, len(parts_list)):
        main_dict[list_number]['shapes'][count] = {}
        x = []
        y = []
        if len(parts_list) == 1:
            # Single-part record: the whole point list is the shape.
            x = full_x
            y = full_y
        elif i == (len(parts_list)-1):
            # Last part runs from its start index to the end of the points.
            x = full_x[parts_list[i]:]
            y = full_y[parts_list[i]:]
        else:
            # Middle parts run from the previous boundary to the next start.
            x = full_x[newstart:parts_list[(i+1)]]
            y = full_y[newstart:parts_list[(i+1)]]
        xmax, xmin, ymax, ymin = max(x), min(x), max(y), min(y)
        main_dict[list_number]['shapes'][count]['x'] = x
        main_dict[list_number]['shapes'][count]['y'] = y
        main_dict[list_number]['shapes'][count]['xmax'] = xmax
        main_dict[list_number]['shapes'][count]['xmin'] = xmin
        main_dict[list_number]['shapes'][count]['ymax'] = ymax
        main_dict[list_number]['shapes'][count]['ymin'] = ymin
        # Bounding-box area and box center — not true polygon area/centroid.
        main_dict[list_number]['shapes'][count]['area'] = ((xmax-xmin)*(ymax-ymin))
        main_dict[list_number]['shapes'][count]['centerx'] = (xmax-xmin)/2 + xmin
        main_dict[list_number]['shapes'][count]['centery'] = (ymax-ymin)/2 + ymin
        if i < (len(parts_list)-1):
            newstart = int(parts_list[(i+1)])
        count += 1
def find_records(main_dict, shape_type):
    """Read every record from the open global `records` stream into main_dict.

    Dispatches on shape_type: 3/5 (polyline/polygon) or 1 (point). The loop
    ends when the stream is exhausted (unpack raises) and the file is closed.

    Fixes: the original looped forever printing the "unsupported" message for
    any other shape type (nothing inside the try raised, so the bare except
    never fired); now it reports once, closes the file and returns. The bare
    `except:` was also narrowed so KeyboardInterrupt/SystemExit propagate.
    """
    while True:
        try:
            if shape_type == 3 or shape_type == 5:
                get_polygon_records(main_dict)
            elif shape_type == 1:
                get_point_records(main_dict)
            else:
                print("Sorry, I can't handle that type of shapefile. Please use only point, line, or polygon type shapefiles.")
                records.close()
                break
        except Exception:
            # struct.unpack fails once the stream is exhausted; treat any
            # parse failure as end-of-file, mirroring the original behavior.
            records.close()
            break
#This finds the largest polygon within each region. For instance, it will find mainland Alaska and ignore the state's plethora of islands. This is very helpful for things like finding a helpful centroid and finding the poi
def get_largest_region(main_dict):
    """Tag each record's largest polygon as its 'center' shape.

    "Largest" is by bounding-box 'area' (and by 'projectedarea' when
    projected coordinates are present on shape 0 of record 0 — both flags
    are sampled once from record 0, matching the original behavior). The
    winning shape dict is aliased at main_dict[i]['shapes']['center']
    (and ['projectedcenter'] for projected data).

    Improvements: removed two dicts that were built but never used
    (`record_centers`, `projected_center`) and replaced `== True` tests
    with plain boolean checks.
    """
    have_projection = 'projectedx' in main_dict[0]['shapes'][0]
    need_center = 'center' not in main_dict[0]
    for rec in main_dict:
        shapes = main_dict[rec]['shapes']
        if need_center:
            largest = shapes[0]
        if have_projection:
            largest_projected = shapes[0]
        for key in shapes:
            if need_center and shapes[key]['area'] > largest['area']:
                largest = shapes[key]
            if have_projection and shapes[key]['projectedarea'] > largest_projected['projectedarea']:
                largest_projected = shapes[key]
        if need_center:
            shapes['center'] = largest
        if have_projection:
            shapes['projectedcenter'] = largest_projected
def avg_center(main_dict):
    """Store the mean x/y of each record's largest polygon.

    Writes 'avgx'/'avgy' onto the 'center' shape selected by
    get_largest_region.
    """
    get_largest_region(main_dict)
    for rec in main_dict:
        center = main_dict[rec]['shapes']['center']
        center['avgx'] = sum(center['x']) / float(len(center['x']))
        center['avgy'] = sum(center['y']) / float(len(center['y']))
def get_area(x, y):
    """Signed shoelace area of the ring given by parallel x/y lists.

    Assumes a closed ring (first point repeated as the last point, as in
    shapefile rings) — no wrap-around edge is added. Counter-clockwise
    rings give a positive area, clockwise rings a negative one.
    """
    total = 0
    for i in range(1, len(x)):
        total += x[i - 1] * y[i] - x[i] * y[i - 1]
    return total / 2
def centroid(main_dict):
    """Compute the polygon centroid of each record's largest region.

    Uses the standard signed-area centroid formula. Unprojected results go
    to main_dict[i]['centroidx'/'centroidy']; if projected coordinates
    exist, projected centroids go to 'pcentroidx'/'pcentroidy'.
    """
    get_largest_region(main_dict)

    def _save(shape_key, x_key, y_key, out_x, out_y):
        # Centroid of one ring per record, written under out_x/out_y.
        for rec in main_dict:
            xs = main_dict[rec]['shapes'][shape_key][x_key][:]
            ys = main_dict[rec]['shapes'][shape_key][y_key][:]
            area = get_area(xs, ys)
            sum_x = 0
            sum_y = 0
            for k in range(len(xs) - 1):
                cross = xs[k] * ys[k + 1] - xs[k + 1] * ys[k]
                sum_x += (xs[k] + xs[k + 1]) * cross
                sum_y += (ys[k] + ys[k + 1]) * cross
            main_dict[rec][out_x] = sum_x / (6 * area)
            main_dict[rec][out_y] = sum_y / (6 * area)

    if 'centroidx' not in main_dict[0]:
        _save('center', 'x', 'y', 'centroidx', 'centroidy')
    if 'projectedx' in main_dict[0]['shapes'][0]:
        _save('projectedcenter', 'projectedx', 'projectedy', 'pcentroidx', 'pcentroidy')
def point_in_polygon(x_vertices, y_vertices, x, y):
    """Ray-casting point-in-polygon test.

    Walks consecutive vertex pairs (the ring is expected to be closed, with
    its first vertex repeated at the end) and toggles the inside flag each
    time a horizontal ray from (x, y) crosses an edge.
    """
    inside = False
    prev = 0
    for idx in range(1, len(x_vertices)):
        # Edge straddles the horizontal line through y?
        if (y_vertices[idx] >= y) != (y_vertices[prev] > y):
            # x-coordinate where the edge crosses that horizontal line.
            crossing = (x_vertices[prev] - x_vertices[idx]) * (y - y_vertices[idx]) / (y_vertices[prev] - y_vertices[idx]) + x_vertices[idx]
            if x < crossing:
                inside = not inside
        prev = idx
    return inside
def min_distance(listx, listy, x, y):
    """Return the smallest Euclidean distance from (x, y) to any vertex.

    listx/listy are parallel coordinate lists. Improvements over the
    original: the distance to vertex 0 is no longer computed twice, and the
    hand-rolled minimum search is replaced with min() over a generator.
    Note: empty input now raises ValueError (was IndexError).
    """
    return min(sqrt((vx - x) ** 2 + (vy - y) ** 2) for vx, vy in zip(listx, listy))
#calculate pole of inaccessibility
def poi(main_dict, sectors, area_inside=0.99, grid_inside=0.7):
    """Approximate each record's pole of inaccessibility (visual center).

    Lays a sectors x sectors grid over the largest polygon's bounding box,
    keeps the grid-cell centers that fall inside the polygon, and picks the
    one farthest from its nearest polygon vertex. Results are stored on
    main_dict[i] as 'centerx'/'centery' plus the polygon's 'x'/'y' lists.

    NOTE(review): `grid_inside` is accepted but never used — confirm intent.
    """
    get_largest_region(main_dict)
    count = 0
    for i in main_dict:
        # Bounding box and box center of this record's largest polygon.
        xmax = main_dict[i]['shapes']['center']['xmax']
        ymax = main_dict[i]['shapes']['center']['ymax']
        xmin = main_dict[i]['shapes']['center']['xmin']
        ymin = main_dict[i]['shapes']['center']['ymin']
        centerx = main_dict[i]['shapes']['center']['centerx']
        centery = main_dict[i]['shapes']['center']['centery']
        xpoints = main_dict[i]['shapes']['center']['x'][:]
        ypoints = main_dict[i]['shapes']['center']['y'][:]
        sections_x = []
        sections_y = []
        sector_cordinates = []
        # Grid cell size along each axis.
        section_x_pieces = ((xmax-xmin)/sectors)
        section_y_pieces = ((ymax-ymin)/sectors)
        #I'm assuming that if the polygon's area is 99% of the regions estimated area the polygon is very close to a perfect circle or square. In such a case, the poi is assumed to be the bounding box's center point.
        if get_area(xpoints, ypoints) >= (((ymax-ymin)*(xmax-xmin))*area_inside):
            best_center_x = centerx
            best_center_y = centery
            main_dict[i]['centerx'] = best_center_x
            main_dict[i]['centery'] = best_center_y
            main_dict[i]['x'] = xpoints
            main_dict[i]['y'] = ypoints
            # NOTE(review): this `break` aborts the whole loop, skipping all
            # remaining records; the analogous fallback below uses `continue`.
            # Looks unintended — confirm.
            break
        count += 1
        print count
        # Grid line positions (upper edges) along each axis.
        for j in range(1, (sectors+1)):
            sections_x.append(section_x_pieces*j + xmin)
            sections_y.append(section_y_pieces*j + ymin)
        #This creates the grid, with (x, y) coordinates for all vertices
        for j in range(0, len(sections_x)):
            for k in range(0, len(sections_y)):
                temp_list = []
                temp_list.append(sections_x[j])
                temp_list.append(sections_y[k])
                sector_cordinates.append(temp_list)
        # NOTE(review): the next two lists are built but never used.
        sector_quadrants_x = [ [] for k in range(sectors**2) ]
        sector_quadrants_y = [ [] for k in range(sectors**2) ]
        quadrent_list = []
        minimum_distance = []
        for j in range(0, len(sector_cordinates)):
            # `above_*` is this cell's upper grid line; `below_*` recomputes
            # the previous grid line's position, giving the cell's lower edge.
            above_x = sector_cordinates[j][0]
            above_y = sector_cordinates[j][1]
            below_x = ((((sector_cordinates[j][0] - xmin)/(xmax-xmin))*sectors)-1)*((xmax-xmin)/sectors)+xmin
            below_y = ((((sector_cordinates[j][1] - ymin)/(ymax-ymin))*sectors)-1)*((ymax-ymin)/sectors)+ymin
            center_x = ((above_x - below_x)/2) + below_x
            center_y = ((above_y - below_y)/2) + below_y
            length = len(xpoints)  # NOTE(review): unused
            inside = point_in_polygon(xpoints, ypoints, center_x, center_y)
            if inside == True:
                # Keep candidate cell centers and their distance to the
                # nearest polygon vertex.
                minimum_distance.append(min_distance(xpoints, ypoints, center_x, center_y))
                temp = []
                tempx = center_x
                tempy = center_y
                temp.append(tempx)
                temp.append(tempy)
                quadrent_list.append(temp)
        if len(quadrent_list) == 0:
            # No cell center fell inside the polygon; fall back to the
            # bounding-box center for this record.
            main_dict[i]['centerx'] = centerx
            main_dict[i]['centery'] = centery
            main_dict[i]['x'] = xpoints
            main_dict[i]['y'] = ypoints
            continue
        best_quadrant_list = []
        best = max(minimum_distance)
        for j in range(0, len(minimum_distance)):
            if best == minimum_distance[j]:
                best_quadrant_list.append(quadrent_list[j])
        print len(best_quadrant_list)
        # Ties are broken arbitrarily by taking the first candidate.
        best_center_x = best_quadrant_list[0][0]
        best_center_y = best_quadrant_list[0][1]
        main_dict[i]['centerx'] = best_center_x
        main_dict[i]['centery'] = best_center_y
        main_dict[i]['x'] = xpoints
        main_dict[i]['y'] = ypoints
def draw_border(x, y, proj='albers', originx=0, originy=0, botlat=20, toplat=50, fill_the_lat=False, fill_the_long=False, fill_with_color='grey'):
    """Plot (and optionally fill) one polyline after projecting it.

    x, y -- sequences of longitudes/latitudes in degrees.
    proj -- projection name; an unrecognized name plots the raw x/y.
    originx/originy, botlat/toplat -- parameters forwarded to projections
        that accept them.
    fill_the_lat/fill_the_long -- if either is True the shape is filled.
    Draws onto the current matplotlib pyplot figure.

    Improvement: the original repeated the same transform-and-plot loop
    nine times, once per projection; collapsed to a dispatch table while
    preserving each branch's quirks (mercator's out-of-range filtering,
    bonne/moll plotting the outline before filling).
    """
    import matplotlib.pyplot as plt
    # Per-projection point transforms; keyword plumbing matches the
    # original call sites exactly.
    projectors = {
        'albers': lambda a, b: albers(a, b, origin_x=originx, origin_y=originy, bot_lat=botlat, top_lat=toplat),
        'kav': kav,
        'aitoff': aitoff,
        'hammer': hammer,
        'winkel': winkel,
        'azi': azi,
        'mercator': lambda a, b: mercator(a, b, origin_x=originx, origin_y=originy),
        'bonne': lambda a, b: bonne(a, b, bot_lat=botlat, origin_x=originx),
        'moll': lambda a, b: moll(a, b, origin_x=originx),
    }
    do_fill = fill_the_lat == True or fill_the_long == True
    if proj not in projectors:
        # Unknown projection: draw the raw coordinates.
        if do_fill:
            plt.fill(x, y, color=fill_with_color)
        plt.plot(x, y, color='black')
        return
    transform = projectors[proj]
    tempx = []
    tempy = []
    for i in range(0, len(x)):
        point = transform(x[i], y[i])
        # mercator returns 0 for latitudes outside (-90, 90); skip those.
        if proj == 'mercator':
            if point == 0 or len(point) != 2:
                continue
        tempx.append(point[0])
        tempy.append(point[1])
    if proj == 'bonne' or proj == 'moll':
        # These two historically plot the outline before filling.
        plt.plot(tempx, tempy, color='black')
        if do_fill:
            plt.fill(tempx, tempy, color=fill_with_color)
    else:
        if do_fill:
            plt.fill(tempx, tempy, color=fill_with_color)
        plt.plot(tempx, tempy, color='black')
#creates a grid. changing long_lines and lat_lines shrinks the grid toward the center. 20 lines means grid lines from -100 through 100. 18 means grid lines from -90 through 90, and so on.
#requires the matplotlib library. To show the grid, import matplotlib.pyplot as plt and use the plt.show() command
def grid(projection='albers', origin_x=0, origin_y=0, bot_lat=20, top_lat=50, long_lines=36, lat_lines=20, fill_lat=False, fill_long=False, fillcolor='grey'):
    """Draw a graticule (grid of longitude/latitude lines) via draw_border.

    long_lines/lat_lines control extent at 10-degree spacing: 36 longitude
    lines span -180..180, 20 latitude lines span -100..100 (see the module
    comment above).

    NOTE(review): the parameter name `projection` shadows the module-level
    projection() function inside this body (it is only used as a string).
    """
    x_max = long_lines*10/2
    x_min = long_lines*10/2*-1
    y_max = lat_lines*10/2
    y_min = lat_lines*10/2*-1
    x_lines = []        # one constant-longitude polyline per meridian
    y_lines = []        # shared latitude samples, 1-degree steps
    x_interval = []     # meridian longitudes at 10-degree spacing
    y_interval = []     # parallel latitudes at 10-degree spacing
    #longitude
    for i in range(x_min, x_max+1):
        if i%10 == 0:
            x_interval.append(i)
    for i in x_interval:
        new_list = []
        x_lines.append(new_list)
    for i in range(y_min, y_max+1):
        y_lines.append(i)
    # Each meridian repeats its longitude once per latitude sample.
    for i in range(0, len(x_lines)):
        for j in y_lines:
            x_lines[i].append(x_interval[i])
    if fill_lat == True:
        if projection == 'azi':
            # Special case: only the azimuthal projection fills meridians.
            for i in range(0, len(x_interval)):
                draw_border(x_lines[i], y_lines, botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=True, fill_the_long=True, fill_with_color=fillcolor)
        else:
            for i in range(0, len(x_interval)):
                draw_border(x_lines[i], y_lines, botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=False, fill_the_long=False, fill_with_color=fillcolor)
    elif fill_long == True:
        for i in range(0, len(x_interval)):
            draw_border(x_lines[i], y_lines, botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=False, fill_the_long=True, fill_with_color=fillcolor)
    else:
        for i in range(0, len(x_interval)):
            draw_border(x_lines[i], y_lines, botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=False, fill_the_long=False, fill_with_color=fillcolor)
    #latitude
    x_lines_lat = []
    y_lines_lat = []
    for i in range(y_min, y_max+1):
        if i%10 == 0:
            y_interval.append(i)
    for i in y_interval:
        new_list = []
        y_lines_lat.append(new_list)
    for i in range(x_min, x_max+1):
        x_lines_lat.append(i)
    # Each parallel repeats its latitude once per longitude sample.
    for i in range(0, len(y_lines_lat)):
        for j in x_lines_lat:
            y_lines_lat[i].append(y_interval[i])
    if fill_lat == True:
        for i in range(0, len(y_interval)):
            draw_border(x_lines_lat, y_lines_lat[i], botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=True, fill_the_long=False, fill_with_color=fillcolor)
    elif fill_long == True:
        # NOTE(review): this branch passes both fill flags as False, making
        # it identical to the else branch — looks unintended; confirm.
        for i in range(0, len(y_interval)):
            draw_border(x_lines_lat, y_lines_lat[i], botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=False, fill_the_long=False, fill_with_color=fillcolor)
    else:
        for i in range(0, len(y_interval)):
            draw_border(x_lines_lat, y_lines_lat[i], botlat=bot_lat, originx=origin_x, proj=projection, fill_the_lat=False, fill_the_long=False, fill_with_color=fillcolor)
def projection(main_dict, proj='kav', origin_x=0, origin_y=0, bot_lat=20, top_lat=50):
    """Project every shape's lon/lat coordinates and cache the results.

    For each shape dict, adds 'projectedx'/'projectedy' point lists plus
    the projected bounding box, bounding-box area and box center under
    'projected*' keys. Unknown `proj` names are a no-op.

    Improvements: the original repeated the same three-level loop ten
    times, once per projection; collapsed to a dispatch table. Also fixes a
    NameError — the 'orthoequatorial' branch called an undefined `ortho()`
    instead of orthoequatorial() defined below.
    """
    def _store(shape, tempx, tempy):
        # Cache projected points, bounds, box area and box center.
        xmax, xmin, ymax, ymin = max(tempx), min(tempx), max(tempy), min(tempy)
        shape['projectedx'] = tempx
        shape['projectedy'] = tempy
        shape['projectedxmax'] = xmax
        shape['projectedxmin'] = xmin
        shape['projectedymax'] = ymax
        shape['projectedymin'] = ymin
        shape['projectedarea'] = ((xmax - xmin) * (ymax - ymin))
        shape['projectedcenterx'] = (xmax - xmin) / 2 + xmin
        shape['projectedcentery'] = (ymax - ymin) / 2 + ymin

    # Keyword plumbing matches the original per-projection call sites.
    projectors = {
        'albers': lambda a, b: albers(a, b, origin_x=origin_x, origin_y=origin_y, bot_lat=bot_lat, top_lat=top_lat),
        'kav': kav,
        'aitoff': aitoff,
        'hammer': hammer,
        'winkel': winkel,
        'azi': azi,
        'mercator': lambda a, b: mercator(a, b, origin_x=origin_x, origin_y=origin_y),
        'bonne': lambda a, b: bonne(a, b, bot_lat, origin_x),
        # BUG FIX: the original called undefined `ortho(...)` here, raising
        # NameError for proj='orthoequatorial'; the intended target is
        # orthoequatorial() (its third return element, c, is simply unused).
        'orthoequatorial': lambda a, b: orthoequatorial(a, b, origin_y=origin_y, origin_x=origin_x),
        'moll': lambda a, b: moll(a, b, origin_x=origin_x),
    }
    if proj not in projectors:
        return
    transform = projectors[proj]
    for i in main_dict:
        for j in main_dict[i]['shapes']:
            shape = main_dict[i]['shapes'][j]
            tempx = []
            tempy = []
            for k in range(0, len(shape['x'])):
                point = transform(shape['x'][k], shape['y'][k])
                # mercator returns 0 for latitudes outside (-90, 90).
                if proj == 'mercator' and (point == 0 or len(point) != 2):
                    continue
                tempx.append(point[0])
                tempy.append(point[1])
            if proj == 'mercator' and len(tempx) == 0:
                # Every point was out of range; nothing to store.
                continue
            _store(shape, tempx, tempy)
def radian(point):
    """Convert an angle in degrees to radians."""
    converted = point * pi / 180
    return converted
def degree(point):
    """Convert an angle in radians to degrees."""
    converted = point * 180 / pi
    return converted
def albers(feedx, feedy, origin_x=0, origin_y=0, bot_lat=20, top_lat=50):
    """Albers equal-area conic projection of one lon/lat point (degrees in).

    origin_x/origin_y set the projection origin; bot_lat/top_lat are the
    standard parallels. Returns [x, y] rescaled through degree() for
    plotting, matching the other projections in this module.
    """
    ref_lat = radian(origin_y)
    parallel_1 = radian(bot_lat)
    parallel_2 = radian(top_lat)
    n = (sin(parallel_1) + sin(parallel_2)) / 2
    C = cos(parallel_1) ** 2 + 2 * n * sin(parallel_1)
    rho_0 = ((C - 2 * n * sin(ref_lat)) ** 0.5) / n
    theta = radian(n * (feedx - origin_x))
    rho = ((C - 2 * n * sin(radian(feedy))) ** 0.5) / n
    x = rho * sin(theta)
    y = rho_0 - rho * cos(theta)
    return [degree(x), degree(y)]
def bonne(feedx, feedy, bot_lat=40, origin_x=0):
    """Bonne projection of one lon/lat point (degrees in).

    bot_lat is the standard parallel, origin_x the central meridian.
    Returns [x, y] rescaled through degree() for plotting.
    """
    lon = radian(feedx)
    lat = radian(feedy)
    std_parallel = radian(bot_lat)
    central_meridian = radian(origin_x)
    rho = 1 / tan(std_parallel) + (std_parallel - lat)
    E = (lon - central_meridian) * (cos(lat)) / rho
    x = rho * sin(E)
    y = 1 / tan(std_parallel) - rho * cos(E)
    return [degree(x), degree(y)]
def orthoequatorial(feedx, feedy, origin_y=40, origin_x=0):
    """Simplified orthographic projection of one lon/lat point (degrees in).

    Returns [x, y, c]: x/y rescaled through degree() for plotting, and c,
    the cosine of the angular distance from the projection center
    (origin_x, origin_y). y uses the simplified form sin(lat) rather than
    the full orthographic expression.
    """
    lon = radian(feedx)
    lat = radian(feedy)
    center_lat = radian(origin_y)
    center_lon = radian(origin_x)
    c = sin(center_lat) * sin(lat) + cos(center_lat) * cos(lat) * cos(lon - center_lon)
    x = cos(lat) * sin(lon - center_lon)
    y = sin(lat)
    return [degree(x), degree(y), c]
#kavraskiy
def kav(feedx, feedy):
    """Kavrayskiy VII projection of one lon/lat point (degrees in).

    x = (3*lambda / (2*pi)) * sqrt(pi^2/3 - phi^2), y = phi (radians),
    both rescaled through degree() for plotting.
    """
    temp_list = []
    lambda_x = feedx
    phi_y = feedy
    # BUG FIX: the original computed 3*lambda/2*pi — i.e. *multiplied* by pi
    # due to operator precedence — instead of dividing by (2*pi), stretching
    # x by a factor of pi**2 relative to the Kavrayskiy VII formula.
    x = (3*radian(lambda_x)/(2*pi))*sqrt(((pi**2)/3)-(radian(phi_y)**2))
    y = radian(phi_y)
    temp_list.append(degree(x))
    temp_list.append(degree(y))
    return temp_list
def aitoff(feedx, feedy):
    """Aitoff projection of one lon/lat point (degrees in).

    Returns [x, y] rescaled through degree() for plotting.
    """
    lon = radian(feedx)
    lat = radian(feedy)
    # Angular distance from the center; degenerates to 0 at (0, 0),
    # where the alpha/sin(alpha) factor is taken as 0.
    alpha = acos(cos(lat)*cos(lon*0.5))
    factor = 0 if alpha == 0 else alpha/sin(alpha)
    x = 2*factor*cos(lat)*sin(lon*0.5)
    y = factor*sin(lat)
    return [degree(x), degree(y)]
def hammer(feedx, feedy):
    """Hammer projection of one lon/lat point (degrees in).

    Returns [x, y] rescaled through degree() for plotting.
    """
    lon = radian(feedx)
    lat = radian(feedy)
    # Shared scaling term of the Hammer equations.
    scale = sqrt(2/(1 + cos(lat)*cos(lon/2)))
    x = 2*scale*cos(lat)*sin(lon/2)
    y = scale*sin(lat)
    return [degree(x), degree(y)]
def winkel(feedx, feedy):
    """Winkel tripel projection of one lon/lat point (degrees in).

    Mean of the equirectangular projection (standard parallel
    phi_0 = acos(2/pi)) and the Aitoff projection; [x, y] are rescaled
    through degree() for plotting.
    """
    temp_list = []
    lambda_x = radian(feedx)
    phi_y = radian(feedy)
    phi_0 = acos(2/pi)
    # BUG FIX: alpha is the Aitoff angular-distance term and must use the
    # point's own latitude; the original used cos(phi_0) here, distorting
    # every point relative to the standard Winkel tripel formula.
    alpha = acos(cos(phi_y)*cos(lambda_x/2))
    if alpha == 0:
        # Degenerate center point: sinc factor treated as 0, as before.
        w = 0
    else:
        w = 1/sin(alpha)
    x = 0.5*(lambda_x*cos(phi_0) + 2*w*alpha*cos(phi_y)*sin(lambda_x/2))
    y = 0.5*(phi_y + w*alpha*sin(phi_y))
    temp_list.append(degree(x))
    temp_list.append(degree(y))
    return temp_list
#azimuthal
def azi(feedx, feedy):
    """Azimuthal (Lambert equal-area, equatorial aspect) projection.

    Takes one lon/lat point in degrees; returns [x, y] rescaled through
    degree() for plotting.
    """
    lon = radian(feedx)
    lat = radian(feedy)
    # Shared scaling term of both equations.
    k = sqrt(2/(1 + cos(lat)*cos(lon)))
    x = k*cos(lat)*sin(lon)
    y = k*sin(lat)
    return [degree(x), degree(y)]
def mercator(feedx, feedy, origin_x, origin_y):
    """Mercator projection of one lon/lat point (degrees in).

    Returns [x, y] (rescaled through degree() for plotting) for latitudes
    strictly inside (-90, 90), and 0 for out-of-range latitudes — callers
    test for that sentinel and skip the point. origin_y is accepted but
    unused, kept for call-site compatibility.
    """
    if -90 < feedy < 90:
        x = pi*(feedx - origin_x)/180
        y = log(tan(radian(45 + feedy*0.5)))
        return [degree(x), degree(y)]
    elif feedy < 90:
        return 0
    elif feedy > -90:
        return 0
#Mollweide projection
def moll(feedx, feedy, origin_x=0):
    """Mollweide projection of one lon/lat point (degrees in).

    The auxiliary angle theta is found by Newton iteration on
    theta + sin(theta) = pi*sin(lat); the loop keeps the original's
    asymmetric convergence bounds. Returns [x, y] rescaled through
    degree() for plotting.
    """
    lon = radian(feedx)
    lat = radian(feedy)
    central_meridian = radian(origin_x)
    theta = lat
    while True:
        step = (theta + sin(theta) - pi*sin(lat))/(1 + cos(theta))
        theta = theta - step
        if -0.0000000001 < step < 0.00000001:
            break
    theta = theta/2
    x = (8**0.5)/pi*(lon - central_meridian)*cos(theta)
    y = (2**0.5)*sin(theta)
    return [degree(x), degree(y)]
| {
"repo_name": "stahlba2/Python-Shapefile-Reader",
"path": "shpread.py",
"copies": "1",
"size": "26026",
"license": "mit",
"hash": -6025971606697499000,
"line_mean": 32.0698856417,
"line_max": 222,
"alpha_frac": 0.6272957811,
"autogenerated": false,
"ratio": 2.5051496775435558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8404735798373506,
"avg_score": 0.04554193205400975,
"num_lines": 787
} |
__author__ = 'brunocatao'
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django.views.generic.simple import direct_to_template
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from portal.files.models import UploadedFile
from portal.files.forms import UploadedFileForm
from portal.utils import get_class, get_mime_type
from filetransfers.api import prepare_upload, serve_file
@csrf_protect
@login_required
def upload(request, class_name, id):
    """Attach an uploaded file to object `id` of the model named `class_name`.

    GET renders the upload form; POST validates it, links the new
    UploadedFile to the target object through the contenttypes framework,
    calls the object's notify_upload hook, and redirects to the object's
    page. An invalid POST falls through and re-renders with the bound form.
    """
    form = None
    instance = get_class(class_name).objects.get(pk=id)
    view_url = reverse('portal.files.views.upload', args=[class_name, id, ])
    if request.method == 'POST':
        form = UploadedFileForm(request.POST, request.FILES)
        if form.is_valid():
            file = form.save(commit=False)  # NOTE: shadows the builtin `file`
            # Generic relation: record the target's content type and pk.
            file.content_type = ContentType.objects.get_for_model(instance)
            file.object_pk = force_unicode(instance._get_pk_val())
            file.user = request.user
            file.save()
            instance.notify_upload(request.user, file)
            return HttpResponseRedirect(instance.get_absolute_url())
    else:
        form = UploadedFileForm()
    # filetransfers hook: some backends post to a storage-specific URL.
    upload_url, upload_data = prepare_upload(request, view_url)
    ctx = {
        'form': form,
        'upload_url': upload_url,
        'upload_data': upload_data,
    }
    return direct_to_template(request, 'files/form.html', ctx)
def download(request, id):
    """Serve UploadedFile `id` as an attachment, counting the download."""
    target = get_object_or_404(UploadedFile, pk=id)
    # Bump the download counter before serving.
    target.downloads += 1
    target.save()
    filename = target.file.name
    return serve_file(request, target.file, save_as=True, content_type=get_mime_type(filename))
def view(request, id, filename):
    """Alias for download(); `filename` is ignored here (presumably only
    present to make the URL human-readable — confirm against the URLconf)."""
    return download(request, id)
def delete(request, id, next_url):
    """Delete UploadedFile `id` and redirect to `next_url`.

    NOTE(review): no ownership/permission check and works on GET — confirm
    this is protected elsewhere (URLconf decorators, middleware).
    """
    target = get_object_or_404(UploadedFile, pk=id)
    target.delete()
    return HttpResponseRedirect(next_url)
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/files/views.py",
"copies": "1",
"size": "2219",
"license": "bsd-3-clause",
"hash": -2240706787319636700,
"line_mean": 31.6470588235,
"line_max": 102,
"alpha_frac": 0.7016674178,
"autogenerated": false,
"ratio": 3.754653130287648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49563205480876477,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brunocatao'
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from portal.messages.models import Message, Attachment
class MessageForm(forms.Form):
    """Form for composing a Message aimed at an arbitrary model instance.

    The target object travels through hidden content_type/object_pk fields
    (contenttypes-style generic relation), pre-filled from the
    `target_object` given to __init__.
    """
    content_type = forms.CharField(widget=forms.HiddenInput)
    object_pk = forms.CharField(widget=forms.HiddenInput)
    subject = forms.CharField(label=_("Subject"), required=True, max_length=100, widget=forms.TextInput(attrs={'class':'validate[required] text-input'}))
    text = forms.CharField(label=_("Message"), required=True, widget=forms.Textarea(attrs={'class':'validate[required] text-input'}))
    def __init__(self, target_object, data=None, initial=None):
        """Bind the form to `target_object`, seeding the hidden fields."""
        self.target_object = target_object
        if initial is None:
            initial = {}
        initial.update(self.generate_data())
        super(MessageForm, self).__init__(data=data, initial=initial)
    def generate_data(self):
        """Initial values that identify the target object."""
        data_dict = {
            'content_type' : str(self.target_object._meta),
            'object_pk' : str(self.target_object._get_pk_val()),
        }
        return data_dict
    def get_message_object(self):
        """Build (but do not save) a Message from the validated data."""
        if not self.is_valid():
            raise ValueError("get_message_object may only be called on valid forms")
        message = Message(**self.get_message_create_data())
        return message
    def get_message_create_data(self):
        """Keyword arguments for the Message constructor."""
        return dict(
            content_type = ContentType.objects.get_for_model(self.target_object),
            object_pk = force_unicode(self.target_object._get_pk_val()),
            subject = self.cleaned_data["subject"],
            text = self.cleaned_data["text"],
        )
class AttachmentForm(forms.ModelForm):
    """ModelForm exposing only the ``file`` field of Attachment."""
    class Meta:
        model = Attachment
        fields = ['file']
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/messages/forms.py",
"copies": "1",
"size": "1919",
"license": "bsd-3-clause",
"hash": 212428114262167700,
"line_mean": 39,
"line_max": 159,
"alpha_frac": 0.6399166232,
"autogenerated": false,
"ratio": 4.006263048016701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5146179671216702,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brunocatao'
from django import forms
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from portal.models import UserInfo
class RegisterUserForm(forms.Form):
    """Signup form: the email doubles as the username; passwords must match."""
    email = forms.EmailField(label=_('Email'), required=True, max_length=100)
    password = forms.CharField(label=_('Password'), widget=forms.PasswordInput, required=True, min_length=5, max_length=100)
    password_confirm = forms.CharField(
        label=_('Password Confirmation'),
        widget=forms.PasswordInput(attrs={'class':'validate[required,equals[id_password]] text-input'}),
        required=True, min_length=5, max_length=100)
    def clean_email(self):
        """Reject an email that is already registered as a username."""
        email = self.cleaned_data['email']
        if User.objects.filter(username=email).exists():
            raise forms.ValidationError(_('A user with this email already exists.'))
        return email
    def clean(self):
        """Cross-field check: both password entries must agree."""
        cleaned_data = self.cleaned_data
        password = cleaned_data.get('password')
        p_confirm = cleaned_data.get('password_confirm')
        if password != p_confirm:
            msg = _("The two password fields didn't match.")
            # Attach the error to the confirmation field, not the whole form.
            self._errors['password_confirm'] = self.error_class([msg])
        return cleaned_data
class UserInfoForm(forms.ModelForm):
    """Profile-editing form; system-managed UserInfo fields are excluded."""
    class Meta:
        model = UserInfo
        exclude = ('user', 'email', 'index', 'show_help_text', 'is_teacher', 'schedule_cache')
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/accounts/forms.py",
"copies": "1",
"size": "1471",
"license": "bsd-3-clause",
"hash": -3688378505504465000,
"line_mean": 35.8,
"line_max": 132,
"alpha_frac": 0.6498980286,
"autogenerated": false,
"ratio": 4.0974930362116995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004320313422536511,
"num_lines": 40
} |
__author__ = 'brunocatao'
from django import forms
from django.utils.translation import ugettext as _
from portal.models import Institution
from portal.constants import STATES_CHOICES
class InstitutionForm(forms.ModelForm):
    """Institution create/edit form with explicit per-component address fields.

    Note: 'address' is excluded on the model side but redeclared above as a
    plain form field, so the declared field wins.
    """
    name = forms.CharField(label=_('Name'), required=True, max_length=100)
    acronym = forms.CharField(label=_('Acronym'), required=False, max_length=100)
    # Address fields, maybe there's a better way to do this
    address = forms.CharField(label=_('Address'), required=True, max_length=200)
    number = forms.CharField(label=_('Number'), required=True, max_length=10)
    neighborhood = forms.CharField(label=_('Neighborhood'), required=True, max_length=100)
    city = forms.CharField(label=_('City'), required=True, max_length=100)
    province = forms.ChoiceField(label=_('State or Province'), required=True, choices=STATES_CHOICES)
    description = forms.CharField(label=_('Description'), widget=forms.Textarea)
    homepage = forms.URLField(label=_('Homepage'), required=False)
    feed_url = forms.CharField(label=_('News Feed URL'), required=False, max_length=512)
    twitter_id = forms.CharField(label=_('Twitter ID'), required=False, max_length=100)
    class Meta:
        model = Institution
        exclude = ('picture', 'slug', 'address', 'index', 'messages_cache',
                   'updates_cache', 'teachers_cache', 'students_cache')
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/institutions/forms.py",
"copies": "1",
"size": "1436",
"license": "bsd-3-clause",
"hash": -1120634822723354100,
"line_mean": 50.3214285714,
"line_max": 105,
"alpha_frac": 0.680362117,
"autogenerated": false,
"ratio": 3.881081081081081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506144319808108,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brunocatao'
from django.test import TestCase
from google.appengine.ext import db
from google.appengine.api import images
import logging
from portal.models import Picture
class PictureTestCase(TestCase):
    """Exercises Picture persistence and thumbnail creation/caching."""
    PICTURE_FILE_NAME = '/Users/brunocatao/Pictures/foto.jpg'
    def setUp(self):
        logging.info('reading the image file ...')
        self.picture_file = open(PictureTestCase.PICTURE_FILE_NAME).read()
    def _stored_picture(self):
        # Helper: build and save a Picture backed by the fixture image bytes.
        stored = Picture()
        stored.picture = db.Blob(self.picture_file)
        stored.filename = PictureTestCase.PICTURE_FILE_NAME
        stored.save()
        return stored
    def testCreatePicture(self):
        logging.info('creating a picture ...')
        picture = self._stored_picture()
        self.assertEquals(193, picture.width)
        self.assertEquals(237, picture.height)
        self.assertEquals('jpg', picture.format)
    def testCreateThumbnail(self):
        thumb = Picture.create_thumbnail(self._stored_picture(), 32, 32)
        self.assertTrue(thumb is not None)
    def testGetThumbnail(self):
        picture = self._stored_picture()
        pre_thumb = Picture.create_thumbnail(picture, 32, 32)
        self.assertTrue(pre_thumb is not None)
        thumb = Picture.get_thumbnail(picture, 32, 32)
        self.assertTrue(thumb is not None)
        # Asking again at the same size must reuse the cached thumbnail.
        self.assertEquals(pre_thumb.id, thumb.id)
        self.assertTrue(thumb.width <= 32)
        self.assertTrue(thumb.height <= 32)
        self.assertEquals('jpg', thumb.format)
        for size in (64, 128):
            # A different size must yield a different thumbnail.
            thumb = Picture.get_thumbnail(picture, size, size)
            self.assertTrue(thumb is not None)
            self.assertTrue(thumb.id != pre_thumb.id)
# Load the tests from portal.accounts module
from portal.accounts.tests import RegisterUserTestCase
# Load the tests from portal.album module
from portal.album.tests import AlbumTestCase
# Load the tests from portal.polls module
from portal.polls.tests import PollTestCase | {
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/tests.py",
"copies": "1",
"size": "2328",
"license": "bsd-3-clause",
"hash": -5928493114983545000,
"line_mean": 34.2878787879,
"line_max": 88,
"alpha_frac": 0.6842783505,
"autogenerated": false,
"ratio": 3.8543046357615895,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032337573100572835,
"num_lines": 66
} |
__author__ = 'brunocatao'
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
class UploadedFileManager(models.Manager):
    def for_model(self, model):
        """
        QuerySet of uploaded files attached to *model* (an instance or a class).
        """
        content_type = ContentType.objects.get_for_model(model)
        matches = self.get_query_set().filter(content_type=content_type)
        if isinstance(model, models.Model):
            # An instance additionally pins the owning object's primary key.
            matches = matches.filter(object_pk=force_unicode(model._get_pk_val()))
        return matches
class UploadedFile(models.Model):
    """A file attached to an arbitrary model instance via a generic foreign key."""
    file = models.FileField(_('File'), blank=False, upload_to='uploads/%Y/%m/%d/%H/%M/%S/')
    description = models.CharField(_('Description'), blank=False, max_length=100)
    date_published = models.DateTimeField(default=datetime.datetime.now)
    # date_modified is stamped by the fill_date_modified pre_save handler below.
    date_modified = models.DateTimeField(blank=True, null=True)
    # Download counter; presumably incremented by view code — not visible in this module.
    downloads = models.IntegerField(default=0)
    user = models.ForeignKey(User)
    # Generic relation: (content_type, object_pk) identify the owning object.
    content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
    object_pk = models.CharField(_('object ID'), max_length=100)
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    objects = UploadedFileManager()
def fill_date_modified(sender, instance, **kw):
    # pre_save hook: stamp the modification time on every save.
    instance.date_modified = datetime.datetime.now()
models.signals.pre_save.connect(fill_date_modified, sender=UploadedFile) | {
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/files/models.py",
"copies": "1",
"size": "1737",
"license": "bsd-3-clause",
"hash": 3967593584850374700,
"line_mean": 43.5641025641,
"line_max": 130,
"alpha_frac": 0.7023603915,
"autogenerated": false,
"ratio": 3.868596881959911,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0070542876887379055,
"num_lines": 39
} |
__author__ = 'brunocatao'
import datetime
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
import portal.models
class UpdateManager(models.Manager):
    def for_model(self, model):
        """
        QuerySet of updates attached to *model* (an instance or a class).
        """
        content_type = ContentType.objects.get_for_model(model)
        matches = self.get_query_set().filter(content_type=content_type)
        if isinstance(model, models.Model):
            # An instance additionally pins the target object's primary key.
            matches = matches.filter(object_pk=force_unicode(model._get_pk_val()))
        return matches
class Update(models.Model):
    """A short activity-feed entry attached to any model via a generic foreign key."""
    text = models.CharField(blank=False, max_length=100)
    link = models.CharField(blank=False, max_length=512)
    # Defaults to now; also backfilled by the fill_date_published pre_save hook below.
    date_published = models.DateTimeField(default=datetime.datetime.now)
    author = models.ForeignKey(User, blank=False)
    # Generic relation: (content_type, object_pk) identify the target object.
    content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
    object_pk = models.CharField(_('object ID'), max_length=100)
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    objects = UpdateManager()
    @classmethod
    def createUpdate(cls, author, text, link, instance):
        """Create an Update attached to *instance*, save it, and return it."""
        update = Update(author=author, text=text, link=link)
        update.content_type = ContentType.objects.get_for_model(instance)
        update.object_pk = force_unicode(instance._get_pk_val())
        update.save()
        return update
def fill_date_published(sender, instance, **kw):
    # pre_save hook: only stamp the publication time when it was not already set.
    if not instance.date_published:
        instance.date_published = datetime.datetime.now()
models.signals.pre_save.connect(fill_date_published, sender=Update)
def _clear_member_caches(obj):
    # Helper: drop the cached updates of an object's teachers and students.
    # get_teachers()/get_students() may return a false value, so guard first
    # (this mirrors the original inline checks).
    if obj.get_teachers():
        for teacher in obj.get_teachers():
            teacher.updates_cache = None
            teacher.save()
    if obj.get_students():
        for student in obj.get_students():
            student.updates_cache = None
            student.save()

def invalidate_cache(sender, instance, **kw):
    """pre_save hook: drop every updates_cache affected by this Update.

    The affected set depends on the update's target: an institution
    invalidates its members and every course/discipline below it; a course
    invalidates its institution, members, and disciplines; a discipline
    invalidates its course and institution (also recording an
    InstitutionUpdateCache row).  The repeated teacher/student loops of the
    original are factored into _clear_member_caches.
    """
    target = instance.content_type.get_object_for_this_type(pk=instance.object_pk)
    target.updates_cache = None
    target.save()
    if instance.content_type.model == 'institution':
        _clear_member_caches(target)
        for course in target.course_set.all():
            course.updates_cache = None
            course.save()
            _clear_member_caches(course)
            for discipline in course.discipline_set.all():
                discipline.updates_cache = None
                discipline.save()
                _clear_member_caches(discipline)
    elif instance.content_type.model == 'course':
        target.institution.updates_cache = None
        target.institution.save()
        _clear_member_caches(target)
        for discipline in target.discipline_set.all():
            discipline.updates_cache = None
            discipline.save()
            _clear_member_caches(discipline)
    elif instance.content_type.model == 'discipline':
        institution = target.course.institution
        institution.updates_cache = None
        institution.save()
        portal.models.InstitutionUpdateCache(text=instance.text, link=instance.link,
                                             date_published=instance.date_published,
                                             author=instance.author,
                                             institution=institution).save()
        target.course.updates_cache = None
        target.course.save()
        _clear_member_caches(target)
    else:
        # Unknown target type: nothing further to cascade.
        target.updates_cache = None
models.signals.pre_save.connect(invalidate_cache, sender=Update) | {
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/updates/models.py",
"copies": "1",
"size": "5156",
"license": "bsd-3-clause",
"hash": 1362164682577250300,
"line_mean": 36.6423357664,
"line_max": 130,
"alpha_frac": 0.5812645462,
"autogenerated": false,
"ratio": 4.174898785425102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5256163331625102,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brunocatao'
import random
import datetime
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from portal.models import Picture, Institution
class AlbumManager(models.Manager):
    def for_model(self, model):
        """
        QuerySet of albums attached to *model* (an instance or a class).
        """
        content_type = ContentType.objects.get_for_model(model)
        matches = self.get_query_set().filter(content_type=content_type)
        if isinstance(model, models.Model):
            # An instance additionally pins the owning object's primary key.
            matches = matches.filter(object_pk=force_unicode(model._get_pk_val()))
        return matches
'''
Albums are a group of pictures. They have a title, a date when it was published
and they can be associated with any kind of entities.
'''
class Album(models.Model):
    """A titled group of pictures, attachable to any entity via a generic foreign key."""
    title = models.CharField(_('Name'), blank=False, max_length=100)
    date_published = models.DateTimeField(default=datetime.datetime.now)
    # date_modified is stamped by the fill_date_modified pre_save handler below.
    date_modified = models.DateTimeField(blank=True, null=True)
    content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
    object_pk = models.CharField(_('object ID'), max_length=100)
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    objects = AlbumManager()
    def add_picture(self, picture):
        """Attach *picture* to this album through a RelPictureAlbum row."""
        rpa = RelPictureAlbum(album=self, picture=picture)
        rpa.save()
    def _cover(self):
        """Return the cover: the picture flagged is_cover, else a random one, else None."""
        if self.picture_album_set.count() == 0:
            return None
        if self.picture_album_set.filter(is_cover=True).exists():
            rpa = self.picture_album_set.get(is_cover=True)
            return rpa.picture
        # No explicit cover: pick one of the album's pictures at random.
        pics = list(self.picture_album_set.all())
        random.shuffle(pics)
        return pics[0].picture
    cover = property(_cover)
    @classmethod
    def create_album(cls, title, instance):
        """Create an album attached to *instance*, save it, and return it."""
        album = Album(title=title)
        album.content_type = ContentType.objects.get_for_model(instance)
        album.object_pk = force_unicode(instance._get_pk_val())
        album.save()
        return album
def fill_date_modified(sender, instance, **kw):
    # pre_save hook: stamp the modification time on every save.
    instance.date_modified = datetime.datetime.now()
models.signals.pre_save.connect(fill_date_modified, sender=Album)
class RelPictureAlbum(models.Model):
    """Membership row linking a Picture to an Album, with per-album metadata."""
    picture = models.ForeignKey(Picture, blank=False, related_name='picture_album_set')
    album = models.ForeignKey(Album, blank=False, related_name='picture_album_set')
    description = models.CharField(_('Description'), blank=True, null=True, max_length=100)
    # At most one cover per album; enforced by the just_one_cover pre_save hook below.
    is_cover = models.BooleanField(default=False)
    date_published = models.DateTimeField(default=datetime.datetime.now)
    date_modified = models.DateTimeField(blank=True, null=True)
def just_one_cover(sender, instance, **kw):
    # pre_save hook: a picture flagged as cover demotes any other cover in the album.
    if instance.is_cover:
        # Ensure that no other picture is also tagged as the album's cover.
        for pic in instance.album.picture_album_set.filter(is_cover=True):
            pic.is_cover=False
            pic.save()
models.signals.pre_save.connect(just_one_cover, sender=RelPictureAlbum)
models.signals.pre_save.connect(fill_date_modified, sender=RelPictureAlbum) | {
"repo_name": "brunogamacatao/portalsaladeaula",
"path": "portal/album/models.py",
"copies": "1",
"size": "3361",
"license": "bsd-3-clause",
"hash": 5031773764010853000,
"line_mean": 39.5060240964,
"line_max": 130,
"alpha_frac": 0.6902707528,
"autogenerated": false,
"ratio": 3.742761692650334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9893728223143937,
"avg_score": 0.007860844461279439,
"num_lines": 83
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
__all__ = [
"call_external",
]
import Queue as queue
import multiprocessing
import cargo
class CallProcess(multiprocessing.Process):
    """Run a callable in a child process and report (result, CPU seconds) to the master."""
    def __init__(self, method, to_master):
        # Bug fix: the base Process was never initialized, so start()/is_alive()
        # would fail with AttributeError.
        multiprocessing.Process.__init__(self)
        self._method = method
        self._to_master = to_master
    def run(self):
        """Invoke the callable and publish its result plus our CPU usage."""
        # Bug fix: "resource" was referenced without ever being imported.
        import resource
        result = self._method()
        rutime = resource.getrusage(resource.RUSAGE_SELF).ru_utime
        self._to_master.put((result, rutime))
class TrackProcess(multiprocessing.Process):
    """Watch another process's CPU usage and notify the master once it passes a limit."""
    def __init__(self, tracked, limit, to_master):
        # Bug fixes: the base Process was never initialized, and the original
        # stored the undefined name "tracked_pid" instead of the parameter.
        multiprocessing.Process.__init__(self)
        self._tracked = tracked
        self._limit = limit
        self._to_master = to_master
    def run(self):
        """Poll the tracked pid until its CPU time reaches the limit, then report."""
        # Bug fix: "time" and "resource" were referenced without being imported.
        import time
        import resource
        while cargo.get_pid_utime(self._tracked) < self._limit:
            time.sleep(1.0)
        rutime = resource.getrusage(resource.RUSAGE_SELF).ru_utime
        # Bug fix: the original put the undefined name "result"; a tracker has
        # no call result, so report None alongside our own CPU usage.
        self._to_master.put((None, rutime))
def call_external(method, cpu_seconds = None):
    """Run *method* in a subprocess, optionally bounded by a CPU-seconds budget.

    Returns the (result, cpu_time) pair reported by the child, or None when the
    budget expires first.  Child processes and the queue are always cleaned up.
    """
    # Bug fixes vs. the original: the local variable "queue" shadowed the Queue
    # module (making "except queue.Empty" unreachable); the undefined names
    # ExternalCall / child / timeout are replaced with the worker/tracker
    # classes defined above and the cpu_seconds parameter.
    to_master = None
    call_child = None
    track_child = None
    try:
        to_master = multiprocessing.Queue()
        call_child = CallProcess(method, to_master)
        call_child.start()
        if cpu_seconds is not None:
            # Only track CPU usage when a budget was actually requested.
            track_child = TrackProcess(call_child.pid, cpu_seconds, to_master)
            track_child.start()
        try:
            return to_master.get(timeout = cpu_seconds)
        except queue.Empty:
            return None
    finally:
        if to_master is not None:
            to_master.close()
            to_master.join_thread()
        for child in (call_child, track_child):
            if child is not None and child.is_alive():
                child.terminate()
                child.join()
| {
"repo_name": "borg-project/cargo",
"path": "src/python/cargo/concurrent.py",
"copies": "1",
"size": "1539",
"license": "mit",
"hash": -3794571331515227000,
"line_mean": 23.8225806452,
"line_max": 66,
"alpha_frac": 0.5964912281,
"autogenerated": false,
"ratio": 3.7813267813267815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48778180094267815,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
from __future__ import absolute_import
import os
import sys
import time
import zlib
import socket
import signal
import random
import traceback
import subprocess
import collections
import multiprocessing
import cPickle as pickle
import numpy
import cargo
logger = cargo.get_logger(__name__, level = "INFO")
def send_pyobj_gz(zmq_socket, message):
    """Pickle *message*, zlib-compress it (fast level 1), and send it on the socket."""
    payload = zlib.compress(pickle.dumps(message), 1)
    zmq_socket.send(payload)
def recv_pyobj_gz(zmq_socket):
    """Receive one message and return the decompressed, unpickled object."""
    return pickle.loads(zlib.decompress(zmq_socket.recv()))
class Message(object):
    """Base class for worker-to-manager messages; records the sender's identity."""
    def __init__(self, sender):
        # Capture where the message originated for human-readable summaries.
        self.sender = sender
        self.host = socket.gethostname()
        self.pid = os.getpid()
    def make_summary(self, text):
        """Render a one-line, human-readable summary ending with *text*."""
        return "worker {0} (pid {1} on {2}) {3}".format(self.sender, self.pid, self.host, text)
class ApplyMessage(Message):
    """Sent when a worker asks the manager for a unit of work."""
    def get_summary(self):
        return self.make_summary("requested a job")
class ErrorMessage(Message):
    """Reports an exception raised while a worker ran a task."""
    def __init__(self, sender, key, description):
        Message.__init__(self, sender)
        self.key = key
        self.description = description
    def get_summary(self):
        # Only the final traceback line is short enough for a one-line summary.
        last_line = self.description.splitlines()[-1]
        return self.make_summary("encountered an error ({0})".format(last_line))
class InterruptedMessage(Message):
    """Reports that a worker was interrupted while running a task."""
    def __init__(self, sender, key):
        Message.__init__(self, sender)
        self.key = key
    def get_summary(self):
        return self.make_summary("was interrupted")
class DoneMessage(Message):
    """Reports the successful completion of a task, carrying its result."""
    def __init__(self, sender, key, result):
        Message.__init__(self, sender)
        self.key = key
        self.result = result
    def get_summary(self):
        return self.make_summary("finished job {0}".format(self.key))
class Task(object):
    """One unit of distributable work: a callable plus its arguments."""
    def __init__(self, call, args = [], kwargs = {}):
        # NOTE(review): mutable defaults are shared across instances; they are
        # never mutated here, but callers should not rely on that.
        self.call = call
        self.args = args
        self.kwargs = kwargs
        # Identity-based key; unique per live Task object.
        self.key = id(self)
    def __hash__(self):
        return hash(self.key)
    def __call__(self):
        return self.call(*self.args, **self.kwargs)
    @staticmethod
    def from_request(request):
        """Build a task, if necessary.

        Accepts an existing Task, a mapping of constructor kwargs, or a
        positional-argument sequence.
        """
        if isinstance(request, Task):
            return request
        # Python 3 moved the ABCs to collections.abc; fall back for Python 2.
        elif isinstance(request, getattr(collections, "abc", collections).Mapping):
            # Bug fix: the original passed the undefined name "mapping".
            return Task(**request)
        else:
            return Task(*request)
class TaskState(object):
    """Progress bookkeeping for a single task."""
    def __init__(self, task):
        self.task = task
        self.done = False
        # Set of WorkerState objects currently assigned to this task.
        self.working = set()
    def score(self):
        """Return an urgency sort key: lower sorts first; a random tail breaks ties."""
        tiebreak = random.random()
        if self.done:
            # Finished tasks sort after everything else.
            return (sys.maxint, sys.maxint, tiebreak)
        if not self.working:
            # Unassigned tasks are the most urgent.
            return (0, 0, tiebreak)
        return (
            len(self.working),
            max(wstate.timestamp for wstate in self.working),
            tiebreak,
            )
class WorkerState(object):
    """Bookkeeping for one known worker process."""
    def __init__(self, condor_id):
        self.condor_id = condor_id
        self.assigned = None
        self.timestamp = None
    def set_done(self):
        """Record completion of the assigned task; return whether it was already done."""
        self.assigned.working.remove(self)
        previously_done = self.assigned.done
        self.assigned.done = True
        self.assigned = None
        return previously_done
    def set_assigned(self, tstate):
        """Change worker state in response to assignment."""
        self.disassociate()
        self.assigned = tstate
        self.timestamp = time.time()
        self.assigned.working.add(self)
    def set_interruption(self):
        """Change worker state in response to interruption."""
        self.disassociate()
    def set_error(self):
        """Change worker state in response to error."""
        self.disassociate()
    def disassociate(self):
        """Disassociate from the current job."""
        if self.assigned is None:
            return
        self.assigned.working.remove(self)
        self.assigned = None
class ManagerCore(object):
    """Maintain the task queue and worker assignments."""
    def __init__(self, task_list):
        """Initialize."""
        # tstates: task key -> TaskState; wstates: worker id -> WorkerState.
        self.tstates = dict((t.key, TaskState(t)) for t in task_list)
        self.wstates = {}
    def handle(self, message):
        """Manage workers and tasks.

        Returns (next_task_or_None, completed_or_None), where completed is a
        (task, result) pair when this message finished a not-yet-done task.
        """
        logger.info(
            "[%s/%i] %s",
            str(self.done_count()).rjust(len(str(len(self.tstates))), "0"),
            len(self.tstates),
            message.get_summary(),
            )
        sender = self.wstates.get(message.sender)
        if sender is None:
            # First contact from this worker; start tracking it.
            sender = WorkerState(message.sender)
            self.wstates[sender.condor_id] = sender
        if isinstance(message, ApplyMessage):
            # task request
            sender.disassociate()
            sender.set_assigned(self.next_task())
            return (sender.assigned.task, None)
        elif isinstance(message, DoneMessage):
            # task result
            finished = sender.assigned
            was_done = sender.set_done()
            assert finished.task.key == message.key
            selected = self.next_task()
            if selected is None:
                selected_task = None
            else:
                selected_task = selected.task
                sender.set_assigned(selected)
            if was_done:
                # Duplicate completion: the first result was already delivered.
                return (selected_task, None)
            else:
                return (selected_task, (finished.task, message.result))
        elif isinstance(message, InterruptedMessage):
            # worker interruption
            sender.set_interruption()
            return (None, None)
        elif isinstance(message, ErrorMessage):
            # worker exception
            sender.set_error()
            return (None, None)
        else:
            raise TypeError("unrecognized message type")
    def next_task(self):
        """Select the next task on which to work (None when everything is done)."""
        tstate = min(self.tstates.itervalues(), key = TaskState.score)
        if tstate.done:
            return None
        else:
            return tstate
    def done_count(self):
        """Return the number of completed tasks."""
        return sum(1 for t in self.tstates.itervalues() if t.done)
    def unfinished_count(self):
        """Return the number of unfinished tasks."""
        return sum(1 for t in self.tstates.itervalues() if not t.done)
class RemoteManager(object):
    """Manage remotely-distributed work over a zeromq REP socket."""
    def __init__(self, task_list, handler, rep_socket):
        """Initialize."""
        self.handler = handler
        self.rep_socket = rep_socket
        self.core = ManagerCore(task_list)
    def manage(self):
        """Manage workers and tasks until every task is finished."""
        import zmq
        poller = zmq.Poller()
        poller.register(self.rep_socket, zmq.POLLIN)
        while self.core.unfinished_count() > 0:
            events = dict(poller.poll())
            assert events.get(self.rep_socket) == zmq.POLLIN
            # REQ/REP protocol: one compressed request in, one response out.
            message = recv_pyobj_gz(self.rep_socket)
            (response, completed) = self.core.handle(message)
            send_pyobj_gz(self.rep_socket, response)
            if completed is not None:
                self.handler(*completed)
    @staticmethod
    def distribute(tasks, workers = 8, handler = lambda _, x: x):
        """Distribute computation to remote workers submitted via condor."""
        import zmq
        logger.info("distributing %i tasks to %i workers", len(tasks), workers)
        # prepare zeromq
        context = zmq.Context()
        rep_socket = context.socket(zmq.REP)
        rep_port = rep_socket.bind_to_random_port("tcp://*")
        logger.debug("listening on port %i", rep_port)
        # launch condor jobs
        cluster = cargo.submit_condor_workers(workers, "tcp://%s:%i" % (socket.getfqdn(), rep_port))
        try:
            try:
                return RemoteManager(tasks, handler, rep_socket).manage()
            except KeyboardInterrupt:
                # work around bizarre pyzmq SIGINT behavior
                raise
        finally:
            # clean up condor jobs
            cargo.condor_rm(cluster)
            logger.info("removed condor jobs")
            # clean up zeromq
            rep_socket.close()
            context.term()
            logger.info("terminated zeromq context")
class LocalWorkerProcess(multiprocessing.Process):
    """Work in a subprocess, exchanging tasks/results over a pair of queues."""
    def __init__(self, stm_queue):
        """Initialize.

        stm_queue is shared (subprocess-to-master); mts_queue is private to
        this worker (master-to-subprocess).
        """
        multiprocessing.Process.__init__(self)
        self.stm_queue = stm_queue
        self.mts_queue = multiprocessing.Queue()
    def run(self):
        """Work: request tasks, run them, and report outcomes until told to stop."""
        class DeathRequestedError(Exception):
            pass
        try:
            # SIGUSR1 from the manager means "terminate now".
            def handle_sigusr1(number, frame):
                raise DeathRequestedError()
            signal.signal(signal.SIGUSR1, handle_sigusr1)
            logger.info("subprocess running")
            task = None
            while True:
                # get an assignment
                if task is None:
                    self.stm_queue.put(ApplyMessage(os.getpid()))
                    task = self.mts_queue.get()
                    if task is None:
                        logger.info("received null assignment; terminating")
                        return None
                # complete the assignment
                try:
                    # Seed the PRNGs deterministically from the task key.
                    seed = abs(hash(task.key))
                    logger.info("setting PRNG seed to %s", seed)
                    numpy.random.seed(seed)
                    random.seed(numpy.random.randint(2**32))
                    logger.info("starting work on task %s", task.key)
                    result = task()
                except KeyboardInterrupt, error:
                    logger.warning("interruption during task %s", task.key)
                    self.stm_queue.put(InterruptedMessage(os.getpid(), task.key))
                    self.mts_queue.get()
                    break
                except DeathRequestedError:
                    logger.warning("death requested; terminating")
                    break
                except BaseException, error:
                    description = traceback.format_exc(error)
                    logger.warning("error during task %s:\n%s", task.key, description)
                    self.stm_queue.put(ErrorMessage(os.getpid(), task.key, description))
                    self.mts_queue.get()
                    break
                else:
                    logger.info("finished task %s", task.key)
                    self.stm_queue.put(DoneMessage(os.getpid(), task.key, result))
                    task = self.mts_queue.get()
        except DeathRequestedError:
            pass
class LocalManager(object):
    """Manage locally-distributed work across worker subprocesses."""
    def __init__(self, stm_queue, task_list, processes, handler):
        """Initialize."""
        self.stm_queue = stm_queue
        self.core = ManagerCore(task_list)
        self.processes = processes
        self.handler = handler
    def manage(self):
        """Manage workers and tasks until every task is finished."""
        # Route responses back to the worker that sent each message, by pid.
        process_index = dict((process.pid, process) for process in self.processes)
        while self.core.unfinished_count() > 0:
            message = self.stm_queue.get()
            (response, completed) = self.core.handle(message)
            process_index[message.sender].mts_queue.put(response)
            if completed is not None:
                self.handler(*completed)
    @staticmethod
    def distribute(tasks, workers = 8, handler = lambda _, x: x):
        """Distribute computation to local worker subprocesses."""
        logger.info("distributing %i tasks to %i workers", len(tasks), workers)
        stm_queue = multiprocessing.Queue()
        processes = [LocalWorkerProcess(stm_queue) for _ in xrange(workers)]
        for process in processes:
            process.start()
        try:
            return LocalManager(stm_queue, tasks, processes, handler).manage()
        finally:
            # SIGUSR1 asks each worker to shut down (see LocalWorkerProcess.run).
            for process in processes:
                os.kill(process.pid, signal.SIGUSR1)
            logger.info("cleaned up child processes")
def do_or_distribute(requests, workers, handler = lambda _, x: x, local = False):
    """Run tasks via local subprocesses, remote workers, or inline in this process."""
    tasks = map(Task.from_request, requests)
    if workers <= 0:
        # No workers requested: execute everything serially, right here.
        while tasks:
            task = tasks.pop()
            handler(task, task())
    elif local:
        return LocalManager.distribute(tasks, workers, handler)
    else:
        return RemoteManager.distribute(tasks, workers, handler)
| {
"repo_name": "borg-project/cargo",
"path": "src/python/cargo/labor2.py",
"copies": "1",
"size": "13097",
"license": "mit",
"hash": 181364940394797380,
"line_mean": 26.3423799582,
"line_max": 100,
"alpha_frac": 0.5674581965,
"autogenerated": false,
"ratio": 4.331018518518518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003971384906445632,
"num_lines": 479
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
from __future__ import absolute_import
import plac
import os.path
import imp
import uuid
import borg.log
logger = borg.log.get_logger(__name__, default_level = "INFO")
named_domains = {}
def do(*args, **kwargs):
    """Forward to condor.do; imported lazily so condor is only required when used."""
    import condor
    return condor.do(*args, **kwargs)
def named_domain(domain_class):
    """Class decorator: instantiate the domain and register it under its `name`."""
    instance = domain_class()
    named_domains[domain_class.name] = instance
    return domain_class
def get_domain(name):
    """Return the registered domain instance called *name* (KeyError when absent)."""
    return named_domains[name]
def make_solvers(class_, suite_path, commands):
    """Instantiate one solver per named command, rooted at the suite file's directory."""
    suite_root = os.path.abspath(os.path.dirname(suite_path))
    return dict((name, class_(suite_root, command)) for (name, command) in commands.items())
def make_solvers_full(class_, suite_path, arg_lists):
    """Like make_solvers, but each value is a full positional-argument list."""
    suite_root = os.path.abspath(os.path.dirname(suite_path))
    return dict((name, class_(suite_root, *arguments)) for (name, arguments) in arg_lists.items())
def load_solvers(path):
    """Load a suite of solvers.

    Imports *path* under a unique module name so repeated loads never collide.
    """
    logger.info("loading solver suite from %s", path)
    return imp.load_source("borg.suite_{0}".format(uuid.uuid4().hex), path)
class Suite(object):
    """Suite of subsolvers: a domain paired with a named solver collection."""
    def __init__(self, domain = None, solvers = None):
        """Initialize."""
        self.domain = domain
        self.solvers = {} if solvers is None else solvers
    def integrate(self, other):
        """Integrate another suite into this one.

        Raises ValueError when the two suites belong to different domains.
        """
        if self.domain is None:
            self.domain = other.domain
        elif self.domain is not other.domain:
            # Bug fix: "ArgumentError" is not a defined name (it would have
            # raised NameError); use a real exception type instead.
            raise ValueError("solver suite domains do not match")
        self.solvers.update(other.solvers)
    @staticmethod
    def integrated(*suites):
        """Merge multiple subsolver suites."""
        merged = Suite()
        for suite in suites:
            merged.integrate(suite)
        return merged
    @staticmethod
    def load_integrated(*paths):
        """Load and merge multiple subsolver suites."""
        return Suite.integrated(*map(load_solvers, paths))
def script(main):
    """Call a script main function.

    Enables borg's default logging, then dispatches CLI arguments via plac.
    """
    borg.enable_default_logging()
    plac.call(main)
annotations = plac.annotations
from . import defaults
from . import util
from borg.log import *
from . import models
from . import planners
from . import expenses
from . import solver_io
from . import storage
from . import fake
from . import unix
from . import bregman
from . import regression
from . import portfolios
from . import domains
from . import experiments
from . import log
from borg.expenses import *
from borg.storage import (
RunData,
TrainingData,
)
| {
"repo_name": "borg-project/borg",
"path": "borg/__init__.py",
"copies": "1",
"size": "2668",
"license": "mit",
"hash": -6010911084151923000,
"line_mean": 21.8034188034,
"line_max": 75,
"alpha_frac": 0.6604197901,
"autogenerated": false,
"ratio": 3.6348773841961854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4795297174296186,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import contextlib
import borg
from . import instance
from . import solvers
from . import features
from . import test
logger = borg.get_logger(__name__, default_level = "INFO")
class MAX_SAT_Task(object):
    """A MAX-SAT instance identified only by its file path."""
    def __init__(self, path):
        self.path = path
    def clean(self):
        """Nothing to release: path-backed tasks hold no temporary state."""
        pass
@borg.named_domain
class MAX_SAT_Domain(object):
    """The MAX-SAT problem domain: tasks, features, and answer reporting."""
    name = "max-sat"
    extensions = [".cnf", ".wcnf"]
    @property
    def solvers(self):
        # Solver registry lives in the sibling solvers module.
        return solvers.named
    @contextlib.contextmanager
    def task_from_path(self, task_path):
        """Yield a task for *task_path*, cleaning it up afterwards."""
        task = MAX_SAT_Task(task_path)
        try:
            yield task
        except:
            raise
        finally:
            task.clean()
    def compute_features(self, task):
        """Compute static features of the instance file."""
        return features.get_features_for(task.path)
    def is_final(self, task, answer):
        """Is the answer definitive for the task?"""
        return answer is not None
    def show_answer(self, task, answer):
        """Print the answer in MAX-SAT evaluation output format (o/s/v lines)."""
        if answer is None:
            print "s UNKNOWN"
        else:
            # answer is a (description, certificate, optimum) triple.
            (description, certificate, optimum) = answer
            if optimum is not None:
                print "o {0}".format(optimum)
            print "s {0}".format(description)
            if certificate is not None:
                print "v", " ".join(certificate)
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/max_sat/__init__.py",
"copies": "1",
"size": "1359",
"license": "mit",
"hash": -3087217322101146600,
"line_mean": 21.2786885246,
"line_max": 58,
"alpha_frac": 0.5783664459,
"autogenerated": false,
"ratio": 3.7960893854748603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.487445583137486,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import contextlib
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "DETAIL")
class FakeSolverProcess(object):
    """Provide a solver interface to stored run data."""
    def __init__(self, run):
        """Initialize."""
        self._run = run
        # CPU seconds "consumed" so far while replaying the recorded run.
        self._elapsed = 0.0
        self._terminated = False
    def run_then_stop(self, budget):
        """Unpause the solver for the specified duration, then terminate it."""
        try:
            return self.run_then_pause(budget)
        finally:
            self.stop()
    def run_then_pause(self, budget):
        """Unpause the solver for the specified duration.

        Charges the accountant for CPU time and returns the recorded success
        value once the replayed run's cost is reached, None otherwise.
        """
        assert not self._terminated
        position = self._elapsed + budget
        logger.detail(
            "moving %s run to %.0f of %.0f (from %.0f)",
            self._run.solver,
            position,
            self._run.cost,
            self._elapsed,
            )
        if position >= self._run.cost:
            # The recorded run finishes within this budget: charge only the
            # remaining cost and report the recorded outcome.
            borg.get_accountant().charge_cpu(self._run.cost - self._elapsed)
            self._elapsed = self._run.cost
            self._terminated = True
            return self._run.success
        else:
            borg.get_accountant().charge_cpu(budget)
            self._elapsed = position
            return None
    def stop(self):
        """Terminate the solver."""
        # NOTE(review): _position is never read anywhere in this class; this
        # looks like it was meant to be self._terminated = True — confirm.
        self._position = None
    @property
    def name(self):
        """Name of the running solver."""
        return self._run.solver
    @property
    def elapsed(self):
        """Number of CPU seconds used by this process (or processes)."""
        return self._elapsed
    @property
    def terminated(self):
        """Has this process terminated?"""
        return self._terminated
class FakeSolverFactory(object):
    """Builds fake solver processes that replay recorded runs of one solver."""
    def __init__(self, solver_name, runs_data):
        self._solver_name = solver_name
        self._runs_data = runs_data
    def start(self, task):
        """Return a fake solver process."""
        all_runs = self._runs_data.run_lists[task]
        our_runs = [run for run in all_runs if run.solver == self._solver_name]
        if len(our_runs) == 0:
            raise Exception("no runs of solver \"{0}\" are recorded".format(self._solver_name))
        # Replay one of the recorded runs, chosen uniformly at random.
        chosen = our_runs[numpy.random.randint(len(our_runs))]
        return FakeSolverProcess(chosen)
class FakeDomain(object):
    """Domain facade backed by recorded run data instead of real instances."""
    name = "fake"
    def __init__(self, suite):
        """Initialize."""
        self._suite = suite
    @contextlib.contextmanager
    def task_from_path(self, task_path):
        # The path itself is the task: nothing is loaded or cleaned up.
        yield task_path
    def compute_features(self, instance):
        """Return static features of an instance."""
        # get features with cost; charge the accountant for the recorded CPU cost
        with_cost = self._suite.run_data.get_feature_vector(instance)
        borg.get_accountant().charge_cpu(with_cost["cpu_cost"])
        # return the features, minus the bookkeeping cpu_cost entry
        features = dict(with_cost.iteritems())
        del features["cpu_cost"]
        return (features.keys(), features.values())
    def is_final(self, task, answer):
        """Does this answer imply success?"""
        return bool(answer)
    @property
    def extensions(self):
        """Not applicable."""
        raise NotImplementedError()
class FakeSuite(object):
    """Mimic a solver suite using simulated solvers."""
    def __init__(self, run_data):
        """Build the fake domain and one fake solver factory per recorded solver."""
        self.run_data = run_data
        self.domain = FakeDomain(self)
        factories = {}
        for solver_name in self.run_data.solver_names:
            factories[solver_name] = FakeSolverFactory(solver_name, self.run_data)
        self.solvers = factories
| {
"repo_name": "borg-project/borg",
"path": "borg/fake.py",
"copies": "1",
"size": "3592",
"license": "mit",
"hash": 306313750521106100,
"line_mean": 23.9444444444,
"line_max": 105,
"alpha_frac": 0.5815701559,
"autogenerated": false,
"ratio": 4.091116173120729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5172686329020729,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import cStringIO as StringIO
import nose
import nose.tools
import borg
# OPB test fixture: purely linear pseudo-Boolean constraints, with comment
# lines ("* ..."), both "=" and ">=" relations, and signed integer terms.
input_text_plain = \
"""* foo!@#$
-1 x1 +23 x2 = +0;
-1 x1 +23 x2 >= -0;
-1 x1 +23 x2 >= -1;
* foo!@#$
+1 x1 >= 42 ;
* foo!@#$
* foo!@#$
"""
# OPB test fixture: nonlinear terms (products such as "x1 x2") and negated
# literals ("~x1"), exercising the nonlinear parsing path.
input_text_nlc = \
"""* foo!@#$
-1 x1 x2 +23 x2 = +0;
-1 x1 +23 x2 ~x1 >= -0;
-1 x1 +23 x2 >= -1;
* foo!@#$
+1 ~x1 >= 42 ;
* foo!@#$
* foo!@#$
"""
def test_parse_pbs_lin():
    """Parse a purely linear OPB instance without an objective."""
    opb_file = StringIO.StringIO(input_text_plain)
    parsed = borg.domains.pb.opb.parse_opb_file(opb_file)
    expected = [
        ([(-1, [1]), (23, [2])], "=", 0),
        ([(-1, [1]), (23, [2])], ">=", 0),
        ([(-1, [1]), (23, [2])], ">=", -1),
        ([(1, [1])], ">=", 42),
        ]
    nose.tools.assert_true(parsed.objective is None)
    nose.tools.assert_equal(len(parsed.constraints), 4)
    nose.tools.assert_equal(parsed.constraints, expected)
def test_parse_pbo_lin():
    """Parse a linear OPB instance that includes an objective line."""
    text = "*zorro!\nmin: -40 x1 3 x2 \n" + input_text_plain
    parsed = borg.domains.pb.opb.parse_opb_file(StringIO.StringIO(text))
    nose.tools.assert_equal(len(parsed.constraints), 4)
    nose.tools.assert_equal(parsed.objective, [(-40, [1]), (3, [2])])
def test_parse_pbs_nlc():
    """Parse a nonlinear OPB instance without an objective."""
    opb_file = StringIO.StringIO(input_text_nlc)
    parsed = borg.domains.pb.opb.parse_opb_file(opb_file)
    expected = [
        ([(-1, [1, 2]), (23, [2])], "=", 0),
        ([(-1, [1]), (23, [2, -1])], ">=", 0),
        ([(-1, [1]), (23, [2])], ">=", -1),
        ([(1, [-1])], ">=", 42),
        ]
    nose.tools.assert_true(parsed.objective is None)
    nose.tools.assert_equal(len(parsed.constraints), 4)
    nose.tools.assert_equal(parsed.constraints, expected)
def test_parse_pbo_nlc():
    """Parse a nonlinear OPB instance that includes an objective line."""
    text = "*zorro!\nmin: -40 x1 3 x2 ~x3 \n" + input_text_nlc
    parsed = borg.domains.pb.opb.parse_opb_file(StringIO.StringIO(text))
    nose.tools.assert_equal(len(parsed.constraints), 4)
    nose.tools.assert_equal(parsed.objective, [(-40, [1]), (3, [2, -3])])

if __name__ == "__main__":
    nose.main(defaultTest = __name__)
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/pb/test/test_opb.py",
"copies": "1",
"size": "2191",
"license": "mit",
"hash": -5614035782577130000,
"line_mean": 27.8289473684,
"line_max": 75,
"alpha_frac": 0.5600182565,
"autogenerated": false,
"ratio": 2.5655737704918034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36255920269918035,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import csv
import itertools
import numpy
import condor
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def run_experiment(run_data, planner_name, B):
    """Measure portfolio success rate for one planner with B budget bins.

    @param run_data: pre-recorded run data, used both for planning and replay.
    @param planner_name: one of "knapsack", "streeter", or "bellman".
    @param B: number of discrete budget segments given to the planner.
    @return: a [planner_name, B, success_rate] result row.
    @raise ValueError: if planner_name is not recognized.
    """
    if planner_name == "knapsack":
        planner = borg.planners.KnapsackPlanner()
    elif planner_name == "streeter":
        planner = borg.planners.StreeterPlanner()
    elif planner_name == "bellman":
        planner = borg.planners.BellmanPlanner()
    else:
        raise ValueError("unrecognized planner name \"{0}\"".format(planner_name))
    suite = borg.fake.FakeSuite(run_data)
    # NOTE(review): PreplanningPortfolio is called here with run_data and a
    # B= keyword; confirm the installed portfolio class accepts them (the
    # variant in borg/portfolios.py takes (suite, model, planner) only).
    portfolio = borg.portfolios.PreplanningPortfolio(suite, run_data, B = B, planner = planner)
    def yield_rows():
        # NOTE(review): evaluation replays the same instances the plan was
        # built from -- presumably intentional for this experiment.
        for instance_id in run_data.run_lists:
            with suite.domain.task_from_path(instance_id) as instance:
                budget = borg.Cost(cpu_seconds = run_data.common_budget)
                answer = portfolio(instance, suite, budget)
                succeeded = suite.domain.is_final(instance, answer)
                yield 1.0 if succeeded else 0.0
    rate = numpy.mean(list(yield_rows()))
    logger.info("success rate with B = %i is %f", B, rate)
    return [planner_name, B, rate]
@borg.annotations(
    out_path = ("results output path"),
    bundle = ("path to pre-recorded runs"),
    workers = ("submit jobs?", "option", "w", int),
    local = ("workers are local?", "flag"),
    )
def main(out_path, bundle, workers = 0, local = False):
    """Evaluate the mixture model(s) over a range of component counts."""
    def yield_jobs():
        run_data = borg.storage.RunData.from_bundle(bundle)
        planner_names = ["knapsack", "streeter", "bellman"]
        bin_counts = xrange(1, 121)
        replications = xrange(16)
        experiments = itertools.product(planner_names, bin_counts, replications)
        for (planner_name, bin_count, _) in experiments:
            # bellman is only run for small bin counts -- presumably because
            # its planning cost grows too quickly beyond that
            if planner_name != "bellman" or bin_count <= 5:
                yield (run_experiment, [run_data, planner_name, bin_count])
    # collect one CSV row per (planner, B, replication) job
    with open(out_path, "w") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["planner", "bins", "rate"])
        for (_, row) in condor.do(yield_jobs(), workers, local):
            writer.writerow(row)
if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/experiments/solved_vs_b.py",
"copies": "1",
"size": "2358",
"license": "mit",
"hash": 7348667106510926000,
"line_mean": 33.1739130435,
"line_max": 95,
"alpha_frac": 0.6217133164,
"autogenerated": false,
"ratio": 3.3637660485021397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44854793649021396,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import csv
import itertools
import numpy
import sklearn
import condor
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def evaluate_features(model, testing, feature_names):
    """Score a model's RTD predictions when restricted to a feature subset.

    @param model: trained model exposing features, log_weights, and with_new().
    @param testing: test-set run data.
    @param feature_names: names of the features the predictor may use; when
        empty, the model's unconditional mixture weights are used instead.
    @return: rows [model_name, feature_count, score_name, score].
    """
    # use features
    if len(feature_names) > 0:
        # train the prediction method
        feature_mask = numpy.array([f in feature_names for f in testing.common_features])
        masked_model = model.with_new(features = model.features[:, feature_mask])
        regression = borg.regression.NearestRTDRegression(masked_model)
        # then test it
        test_features = testing.filter_features(feature_names).to_features_array()
        weights = regression.predict(None, test_features)
    else:
        # no features: every test instance gets the model's prior weights
        weights = numpy.tile(numpy.exp(model.log_weights), (len(testing), 1))
    # evaluate the model
    logger.info("scoring %s on %i instances", model.name, len(testing))
    log_probabilities = borg.models.run_data_log_probabilities(model, testing, weights)
    return [
        [model.name, len(feature_names), "mean_log_probability", numpy.mean(log_probabilities)],
        [model.name, len(feature_names), "median_log_probability", numpy.median(log_probabilities)],
        ]
@borg.annotations(
    out_path = ("results output path"),
    experiments = ("path to experiments JSON", "positional", None, borg.util.load_json),
    workers = ("submit jobs?", "option", "w", int),
    local = ("workers are local?", "flag"),
    )
def main(out_path, experiments, workers = 0, local = False):
    """Run the specified model evaluations."""
    logger.info("running %i experiments", len(experiments))
    # memoize bundle loading so repeated experiments share one RunData
    get_run_data = borg.util.memoize(borg.storage.RunData.from_bundle)
    def yield_jobs():
        for experiment in experiments:
            logger.info("preparing experiment: %s", experiment)
            run_data = get_run_data(experiment["run_data"])
            validation = sklearn.cross_validation.KFold(len(run_data), 5, indices = False)
            # only the first of the five folds is used
            (train_mask, test_mask) = iter(validation).next()
            training = run_data.masked(train_mask).collect_systematic([2])
            testing = run_data.masked(test_mask).collect_systematic([4])
            # sweep feature-subset sizes in steps of two, with replications
            feature_counts = range(0, len(run_data.common_features) + 1, 2)
            replications = xrange(32)
            parameters = list(itertools.product(feature_counts, replications))
            for model_name in experiment["model_names"]:
                model = borg.experiments.common.train_model(model_name, training)
                model.name = model_name
                for (feature_count, _) in parameters:
                    # each replication draws a fresh random feature subset
                    shuffled_names = sorted(run_data.common_features, key = lambda _: numpy.random.random())
                    selected_names = sorted(shuffled_names[:feature_count])
                    yield (
                        evaluate_features,
                        [
                            model,
                            testing,
                            selected_names,
                            ],
                        )
    with borg.util.openz(out_path, "wb") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["model_name", "features", "score_name", "score"])
        for (_, rows) in condor.do(yield_jobs(), workers, local):
            writer.writerows(rows)
            # flush after every job so partial results survive interruption
            out_file.flush()
if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/experiments/ll_vs_features.py",
"copies": "1",
"size": "3462",
"license": "mit",
"hash": -2297985817542835500,
"line_mean": 37.043956044,
"line_max": 108,
"alpha_frac": 0.6013864818,
"autogenerated": false,
"ratio": 3.934090909090909,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035477390890909,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import csv
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def plan_to_start_end(category, planner_name, solver_names, plan):
    """Yield (category, planner, solver, start, end) rows for one plan.

    Each plan entry (s, d) occupies d + 1 budget bins; the reported end is
    trimmed by a quarter bin so adjacent segments do not visually touch.
    """
    start = 0
    for (solver_index, duration) in plan:
        end = start + duration + 0.75
        yield map(str, [category, planner_name, solver_names[solver_index], start, end])
        start += duration + 1
def plans_to_per_bin(category, planner_name, solver_names, plans, B):
    """Yield one [category, planner, solver, length] row per plan segment.

    The B parameter (total bin count) is currently unused; it is retained
    for interface compatibility with callers.
    """
    for single_plan in plans:
        for (solver_index, duration) in single_plan:
            yield [category, planner_name, solver_names[solver_index], str(duration)]
def run_experiment(planner_name, bundle_path, category, individual):
    """Run a planning experiment.

    @param planner_name: "default", "knapsack", or "streeter".
    @param bundle_path: path to the pre-recorded run-data bundle.
    @param category: label copied into every output row.
    @param individual: plan per-instance (per-bin rows) instead of globally.
    @return: generator of output rows (see plan_to_start_end / plans_to_per_bin).
    @raise ValueError: if planner_name is not recognized.
    """
    logger.info("loading run data from %s", bundle_path)
    run_data = borg.RunData.from_bundle(bundle_path)
    logger.info("computing a plan over %i instances with %s", len(run_data), planner_name)
    if planner_name == "default":
        planner = borg.planners.default
    elif planner_name == "knapsack":
        planner = borg.planners.ReorderingPlanner(borg.planners.KnapsackPlanner())
    elif planner_name == "streeter":
        planner = borg.planners.StreeterPlanner()
    else:
        raise ValueError("unrecognized planner name: {0}".format(planner_name))
    B = 60
    bins = run_data.to_bins_array(run_data.solver_names, B).astype(numpy.double)
    # seed a tiny success count so no solver's curve is entirely hopeless
    bins[..., -2] += 1e-2 # if all else fails...
    # outcome counts -> rates -> log survival curves (1e-8 guards log(0))
    rates = bins / numpy.sum(bins, axis = -1)[..., None]
    log_survival = numpy.log(1.0 + 1e-8 - numpy.cumsum(rates[..., :-1], axis = -1))
    if individual:
        # one plan per instance
        plans = []
        for n in xrange(len(run_data)):
            plans.append(planner.plan(log_survival[n, :, :-1][None, ...]))
        rows = plans_to_per_bin(category, planner_name, run_data.solver_names, plans, B)
    else:
        # one aggregate plan over all instances
        plan = planner.plan(log_survival[..., :-1])
        rows = plan_to_start_end(category, planner_name, run_data.solver_names, plan)
    for row in rows:
        yield row
@borg.annotations(
    out_path = ("plan output path"),
    experiments = ("experiments to run", "positional", None, borg.util.load_json),
    individual = ("make individual plans?", "flag"),
    )
def main(out_path, experiments, individual = False):
    """Compute a plan as specified."""
    with borg.util.openz(out_path, "wb") as out_file:
        out_csv = csv.writer(out_file)
        # header must match the row shape produced by run_experiment
        if individual:
            #out_csv.writerow(["category", "planner", "solver", "bin", "count"])
            out_csv.writerow(["category", "planner", "solver", "length"])
        else:
            out_csv.writerow(["category", "planner", "solver", "start", "end"])
        for experiment in experiments:
            rows = \
                run_experiment(
                    experiment["planner"],
                    experiment["bundle"],
                    experiment["category"],
                    individual,
                    )
            out_csv.writerows(list(rows))
if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/plan.py",
"copies": "1",
"size": "3363",
"license": "mit",
"hash": -2255981617776169500,
"line_mean": 31.0285714286,
"line_max": 90,
"alpha_frac": 0.5783526613,
"autogenerated": false,
"ratio": 3.319842053307009,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4398194714607009,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import itertools
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class RandomPortfolio(object):
    """Portfolio that picks a single solver uniformly at random."""

    def __call__(self, task, suite, budget):
        """Run one randomly chosen solver for the entire budget."""
        available = suite.solvers.values()
        choice = numpy.random.randint(len(available))
        process = available[choice].start(task)
        return process.run_then_stop(budget.cpu_seconds)
class UniformPortfolio(object):
    """Portfolio that runs every solver once."""
    def __call__(self, task, suite, budget):
        """Run the portfolio."""
        # NOTE(review): despite the class docstring, this round-robins the
        # solvers in many small slices (each 1/100th of a per-solver share) --
        # confirm which behavior is intended.
        budget_each = budget.cpu_seconds / (len(suite.solvers) * 100)
        processes = [s.start(task) for s in suite.solvers.values()]
        next_process = itertools.cycle(processes)
        def finished():
            # stop when the remaining budget cannot fund another slice, or
            # when every process has terminated on its own
            return \
                budget.cpu_seconds - sum(p.elapsed for p in processes) < budget_each \
                or all(p.terminated for p in processes)
        while not finished():
            process = next_process.next()
            if not process.terminated:
                answer = process.run_then_pause(budget_each)
                if suite.domain.is_final(task, answer):
                    return answer
        return None
class BaselinePortfolio(object):
    """Portfolio that runs the best train-set solver."""
    def __init__(self, suite, training):
        """Select the solver with the highest mean training success rate."""
        solver_names = list(suite.solvers)
        # single-bin outcome counts -> per-(instance, solver) success rates
        outcome_counts = training.to_bins_array(solver_names, 1).astype(numpy.double)
        success_rates = outcome_counts[..., 0] / numpy.sum(outcome_counts, axis = -1)
        mean_rates = numpy.mean(success_rates, axis = 0)
        # XXX hackishly break ties according to run time
        # (1e-8 penalty keeps the tiebreak far below any real rate difference)
        count = 30
        bins = training.to_bins_array(solver_names, count).astype(numpy.double)
        wbins = numpy.mean(bins[..., :-1] * numpy.arange(count), axis = -1)
        mean_rates -= numpy.mean(wbins, axis = 0) * 1e-8
        self._solver_name = solver_names[numpy.argmax(mean_rates)]
    def __call__(self, task, suite, budget):
        """Run the portfolio."""
        process = suite.solvers[self._solver_name].start(task)
        return process.run_then_stop(budget.cpu_seconds)
class OraclePortfolio(object):
    """Optimal prescient discrete-budget portfolio."""
    def __init__(self, planner = borg.planners.default):
        """Initialize."""
        self._planner = planner
    def __call__(self, task, suite, budget):
        """Run the portfolio, planning from the task's own recorded runs."""
        # grab known run data
        budget_count = 100
        solver_names = sorted(suite.solvers)
        data = suite.run_data.filter(task)
        bins = data.to_bins_array(solver_names, budget_count, budget.cpu_seconds)[0].astype(numpy.double)
        bins[:, -2] += 1e-2 # if all else fails...
        bins[:, -1] += numpy.mean(bins[:, :-1] * numpy.arange(budget_count), axis = -1) * 1e-8 # sooner is better
        # counts -> rates -> log survival curves (1e-64 guards log(0))
        rates = bins / numpy.sum(bins, axis = -1)[..., None]
        log_survival = numpy.log(1.0 + 1e-64 - numpy.cumsum(rates[:, :-1], axis = -1))
        # make a plan
        interval = budget.cpu_seconds / budget_count
        plan = self._planner.plan(log_survival[None, ...])
        #if len(plan) > 1:
            #with borg.util.numpy_printing(precision = 2, suppress = True, linewidth = 200, threshold = 1000000):
                #print solver_names
                #print plan
                #print bins
                #print numpy.exp(log_survival)
        # and follow through
        remaining = budget.cpu_seconds
        for (s, b) in plan:
            # each plan entry (s, b) means: run solver s for b + 1 intervals
            this_budget = (b + 1) * interval
            assert remaining - this_budget > -1e-1
            process = suite.solvers[solver_names[s]].start(task)
            answer = process.run_then_stop(this_budget)
            remaining -= this_budget
            if suite.domain.is_final(task, answer):
                return answer
        return None
class PreplanningPortfolio(object):
    """Preplanning discrete-budget portfolio."""
    def __init__(self, suite, model, planner = borg.planners.default):
        """Compute a single plan up front from the model's survival curves."""
        self._solver_names = sorted(suite.solvers)
        self._model = model
        self._planner = planner
        #self._plan = None
        # plan over all but the final survival bin
        self._plan = self._planner.plan(self._model.log_survival[..., :-1])
        logger.info("preplanned plan: %s", self._plan)
    def __call__(self, task, suite, budget):
        """Run the portfolio, following the precomputed plan."""
        #if self._plan is None:
            # XXX
            #self._plan = self._planner.plan(self._model.log_survival[..., :int((budget.cpu_seconds - 1.0) / self._model.interval) + 1])
            #logger.info("preplanned plan: %s", self._plan)
        remaining = budget.cpu_seconds
        for (s, b) in self._plan:
            # each plan entry (s, b) means: run solver s for b + 1 intervals
            this_budget = (b + 1) * self._model.interval
            assert remaining - this_budget > -1e-1
            process = suite.solvers[self._solver_names[s]].start(task)
            answer = process.run_then_stop(this_budget)
            remaining -= this_budget
            if suite.domain.is_final(task, answer):
                return answer
        return None
class PureModelPortfolio(object):
    """Hybrid mixture-model portfolio."""
    def __init__(self, suite, model, regress = None, planner = borg.planners.default):
        """Initialize.

        @param model: mixture model providing log_survival / log_weights.
        @param regress: optional feature regressor used to re-weight the model
            per task; when None, the model's prior weights are used.
        """
        self._model = model
        self._regress = regress
        self._planner = planner
        self._solver_names = sorted(suite.solvers)
        # hard cap on the number of solver runs attempted per task
        self._runs_limit = 256
    def __call__(self, task, suite, budget):
        """Run the portfolio: replan after each failed run, conditioning on it."""
        with borg.accounting() as accountant:
            # predict RTD weights
            if self._regress is None:
                initial_model = self._model
            else:
                # features must be fed to the regressor in sorted-name order
                (feature_names, feature_values) = suite.domain.compute_features(task)
                feature_dict = dict(zip(feature_names, feature_values))
                feature_values_sorted = [feature_dict[f] for f in sorted(feature_names)]
                (predicted_weights,) = numpy.log(self._regress.predict([task], [feature_values_sorted]))
                initial_model = self._model.with_weights(predicted_weights)
            # compute and execute a solver schedule
            plan = []
            failures = []
            for i in xrange(self._runs_limit):
                elapsed = accountant.total.cpu_seconds
                if budget.cpu_seconds <= elapsed:
                    break
                # replan whenever the previous plan is exhausted, conditioning
                # the model on the failures observed so far
                if len(plan) == 0:
                    model = initial_model.condition(failures)
                    remaining = budget.cpu_seconds - elapsed
                    remaining_b = int(numpy.ceil(remaining / model.interval))
                    plan = \
                        self._planner.plan(
                            model.log_survival[..., :remaining_b],
                            model.log_weights,
                            )
                (s, b) = plan.pop(0)
                # clamp the next run to whatever budget actually remains
                remaining = budget.cpu_seconds - accountant.total.cpu_seconds
                duration = min(remaining, (b + 1) * model.interval)
                process = suite.solvers[self._solver_names[s]].start(task)
                answer = process.run_then_stop(duration)
                if suite.domain.is_final(task, answer):
                    return answer
                else:
                    failures.append((s, b))
        return None
| {
"repo_name": "borg-project/borg",
"path": "borg/portfolios.py",
"copies": "1",
"size": "7542",
"license": "mit",
"hash": 5398657686775485000,
"line_mean": 33.5963302752,
"line_max": 136,
"alpha_frac": 0.5604614161,
"autogenerated": false,
"ratio": 3.9383812010443866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4998842617144386,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import numpy
import sklearn.svm
import sklearn.pipeline
import sklearn.linear_model
import sklearn.decomposition
import sklearn.kernel_approximation
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class MultiClassifier(object):
    """Fit one independent binary classifier per output column."""
    def __init__(self, model_class, **model_kwargs):
        """Store the classifier class and its constructor arguments."""
        self._model_class = model_class
        self._model_kwargs = model_kwargs
    def fit(self, X, Y):
        """Fit one binary model per column of Y; returns self."""
        (N, D) = Y.shape
        (_, self._F) = X.shape
        logger.info("fitting %i models to %i examples", D, N)
        self._models = [None] * D
        for d in xrange(D):
            if d % 250 == 0:
                logger.info("fit %i models so far", d)
            # columns with no positive examples get no model (see below)
            if numpy.any(Y[:, d] > 0):
                model = self._model_class(**self._model_kwargs)
                # NOTE(review): class_weight is passed to fit() -- this matches
                # older scikit-learn APIs; newer versions take it in the
                # estimator constructor instead.
                model.fit(X, Y[:, d], class_weight = {0: 1.0, 1: 10.0})
            else:
                model = None
            self._models[d] = model
        return self
    def predict_log_proba(self, X):
        """Return per-column log-probabilities of the positive class."""
        (M, F) = X.shape
        D = len(self._models)
        z = numpy.empty((M, D))
        for (d, model) in enumerate(self._models):
            if model is None:
                # untrained column: log-probability 0 (i.e., probability 1)
                z[:, d] = 0.0
            else:
                # TODO use predict_log_proba when it stops tossing warnings
                # (1e-64 guards against log(0))
                z[:, d] = numpy.log(model.predict_proba(X)[:, 1] + 1e-64)
        return z
    def get_feature_weights(self):
        """Return the mean absolute coefficient magnitude per feature."""
        coefs_list = []
        for model in self._models:
            if model is None:
                coefs_list.append([0.0] * self._F)
            else:
                assert model.coef_.shape == (1, self._F)
                coefs_list.append(model.coef_[0])
        coefs = numpy.array(coefs_list)
        weights = numpy.mean(numpy.abs(coefs), axis = 0)
        return weights
def mapify_model_survivals(model):
    """Compute per-instance MAP survival functions.

    Groups the model's samples by instance name and, for each instance,
    mixes its samples' log-survival curves using normalized sample weights.

    @return: (unique_names, masks, features, survivals) where masks[n] marks
        the samples belonging to instance n and survivals is in linear
        (not log) space.
    """
    (P, S, D) = model.log_masses.shape
    (_, F) = model.features.shape
    unique_names = numpy.unique(model.names)
    (N,) = unique_names.shape
    masks = numpy.empty((N, P), bool)
    map_survivals = numpy.empty((N, S, D))
    features = numpy.empty((N, F))
    logger.info("computing MAP RTDs over %i samples", P)
    for (n, name) in enumerate(unique_names):
        masks[n] = mask = model.names == name
        # all samples of an instance share its features; take the first
        features[n] = model.features[mask][0]
        log_survivals = model.log_survival[mask]
        log_weights = model.log_weights[mask]
        # normalize the weights in log space, then mix the curves
        log_weights -= numpy.logaddexp.reduce(log_weights)
        map_survivals[n, :, :] = numpy.logaddexp.reduce(log_survivals + log_weights[:, None, None])
    return (unique_names, masks, features, numpy.exp(map_survivals))
class NearestRTDRegression(object):
    """Predict nearest RTDs."""
    def __init__(self, model):
        """Fit a classifier mapping instance features to nearby MAP RTDs."""
        self._model = model
        (names, self._masks, features, survivals) = mapify_model_survivals(model)
        (N, _) = features.shape
        logger.info("computing %i^2 == %i inter-RTD distances", N, N * N)
        distances = borg.bregman.survival_distances_all(survivals)
        nearest = numpy.zeros((N, N), dtype = numpy.intc)
        # each instance's positive labels are its closest quarter of instances,
        # capped at 32 (integer division under Python 2)
        nearest_count = min(32, N / 4)
        for n in xrange(N):
            nearest[n, numpy.argsort(distances[n])[:nearest_count]] = 1
        logger.info("fitting classifier to nearest RTDs")
        classifier = MultiClassifier(sklearn.linear_model.LogisticRegression)
        #classifier = MultiClassifier(sklearn.svm.SVC, scale_C = True, probability = True)
        #classifier = MultiClassifier(sklearn.linear_model.LogisticRegression, penalty = "l1", C = 1e-1)
        #classifier = MultiClassifier(sklearn.linear_model.LogisticRegression, penalty = "l2", C = 1e-2)
        self._regression = \
            sklearn.pipeline.Pipeline([
                #("pca", sklearn.decomposition.PCA(whiten = True)),
                #("kernel", sklearn.kernel_approximation.RBFSampler(n_components = 1000)),
                ("scaler", sklearn.preprocessing.Scaler()),
                ("classifier", classifier),
                ]) \
                .fit(features, nearest)
    def predict(self, tasks, features):
        """Predict RTD probabilities."""
        features = numpy.asarray(features)
        (P,) = self._model.log_weights.shape
        (N, _) = self._masks.shape
        (M, F) = features.shape
        predictions = self._regression.predict_log_proba(features)
        weights = numpy.empty((M, P))
        # start from the model's prior log-weights...
        weights[:, :] = self._model.log_weights[None, :]
        # ...and add each instance's predicted log-probability to the mixture
        # components belonging to that instance
        for n in xrange(N):
            weights[:, self._masks[n]] += predictions[:, n, None]
        weights = numpy.exp(weights)
        # smooth (1e-64) and renormalize into proper probabilities
        weights += 1e-64
        weights /= numpy.sum(weights, axis = -1)[..., None]
        return weights
    @property
    def classifier(self):
        # the final pipeline step is the MultiClassifier itself
        (_, classifier) = self._regression.steps[-1]
        return classifier
| {
"repo_name": "borg-project/borg",
"path": "borg/regression.py",
"copies": "1",
"size": "4929",
"license": "mit",
"hash": 5764227892007821000,
"line_mean": 30.3949044586,
"line_max": 104,
"alpha_frac": 0.5719212822,
"autogenerated": false,
"ratio": 3.6484085862324203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.472032986843242,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import json
import tempfile
import contextlib
import borg
logger = borg.get_logger(__name__)
def parse_clasp_json_output(stdout):
    """Parse clasp's JSON output; return its result string, or None."""
    try:
        decoded = json.loads(stdout)
    except ValueError:
        # not valid JSON; treat as no answer
        return None
    result = decoded["Result"]
    if result == "UNKNOWN":
        return None
    return result
class ClaspSolverFactory(object):
    """Construct a Clasp solver invocation."""

    def __init__(self, root, command):
        """Remember the solver root directory and base command line."""
        self._root = root
        self._command = command

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start clasp on a task, forcing JSON output (--outf=2)."""
        arguments = self._command + ["--outf=2"]
        return \
            borg.solver_io.RunningSolver(
                parse_clasp_json_output,
                arguments,
                self._root,
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                )

    def with_args(self, args):
        """Return a new solver, with extra arguments appended."""
        extended = self._command + list(args)
        return ClaspSolverFactory(self._root, extended)
def parse_clasp_human_output(stdout):
    """Scan clasp's human-readable output for a result line."""
    recognized = ("SATISFIABLE", "UNSATISFIABLE", "OPTIMUM FOUND")
    for line in stdout.splitlines():
        if line in recognized:
            return line
    return None
class ClaspfolioSolverFactory(object):
    """Construct a Claspfolio solver invocation."""
    def __init__(self, root, command, cwd):
        """Remember root, base command, and a working-directory template."""
        self._root = root
        self._command = command
        # cwd is a template; "{root}" is substituted at call time below
        self._cwd = cwd
    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start claspfolio on a task, using human-readable output (--outf=0)."""
        return \
            borg.solver_io.RunningSolver(
                parse_clasp_human_output,
                self._command + ["--outf=0"],
                self._root,
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                cwd = self._cwd.format(root = self._root),
                )
def parse_yuliya_output(stdout):
    """Scan cmodels-style output for a definitive answer line."""
    answers = ("Answer: 1", "No Answer Set")
    for raw_line in stdout.splitlines():
        candidate = raw_line.strip()
        if candidate in answers:
            return candidate
    return None
class YuliyaSolverFactory(object):
    """Construct a cmodels solver invocation."""
    def __init__(self, root, command):
        """Remember the solver root directory and command line."""
        self._root = root
        self._command = command
    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start the solver on a task; output parsed by parse_yuliya_output."""
        return \
            borg.solver_io.RunningSolver(
                parse_yuliya_output,
                self._command,
                self._root,
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                )
class LP2SAT_SolverFactory(object):
    """Construct an ASP-to-SAT solver invocation."""
    def __init__(self, root, sat_factory, domain):
        """Remember the root, the SAT solver factory, and the ASP domain."""
        self._root = root
        self._sat_factory = sat_factory
        self._domain = domain
    def __call__(self, task, stm_queue = None, solver_id = None):
        """Translate the ASP task to CNF (cached on the task) and run SAT.

        Note: stm_queue and solver_id are accepted for interface parity with
        the other factories but are not forwarded here.
        """
        try:
            # reuse a previously generated CNF translation if present
            cnf_path = task.support_paths.get("cnf-g")
            if cnf_path is None:
                (fd, cnf_path) = tempfile.mkstemp(suffix = ".cnf")
                task.support_paths["cnf-g"] = cnf_path
                # NOTE(review): os.fdopen(fd) opens the descriptor read-only
                # by default -- confirm run_lp2sat does not need to write
                # through this file object.
                with contextlib.closing(os.fdopen(fd)) as cnf_file:
                    with open(task.path, "rb") as asp_file:
                        borg.domains.asp.run_lp2sat(self._domain.binaries_path, asp_file, cnf_file)
        except borg.domains.asp.LP2SAT_FailedException:
            # translation failed: degrade to a solver that returns no answer
            return borg.solver_io.EmptySolver(None)
        else:
            with borg.get_domain("sat").task_from_path(cnf_path) as sat_task:
                return self._sat_factory(sat_task)
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/asp/solvers.py",
"copies": "1",
"size": "3716",
"license": "mit",
"hash": 7917138542038157000,
"line_mean": 28.4920634921,
"line_max": 99,
"alpha_frac": 0.5503229279,
"autogenerated": false,
"ratio": 3.8789144050104385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49292373329104383,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import os.path
import csv
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
@borg.annotations(
    bundle_path = ("path to new bundle",),
    root_path = ("instances root directory",),
    runs_extension = ("runs files extension",),
    features_extension = ("features files extension",),
    only_solver = ("only include one solver's runs", "option"),
    )
def main(
    bundle_path,
    root_path,
    runs_extension = ".runs.csv",
    features_extension = ".features.csv",
    only_solver = None,
    ):
    """Bundle together run and feature data."""
    # list relevant files
    runs_paths = map(os.path.abspath, borg.util.files_under(root_path, [runs_extension]))
    features_paths = map(os.path.abspath, borg.util.files_under(root_path, [features_extension]))
    # write the bundle
    os.mkdir(bundle_path)
    # raise the CSV field size cap; individual fields can be very large
    csv.field_size_limit(1000 * 1000 * 1000)
    logger.info("bundling run data from %i files", len(runs_paths))
    with borg.util.openz(os.path.join(bundle_path, "all_runs.csv.gz"), "w") as out_file:
        out_writer = csv.writer(out_file)
        out_writer.writerow(["instance", "solver", "budget", "cost", "succeeded"])
        for runs_path in runs_paths:
            logger.info("reading %s", runs_path)
            # the instance path is the runs path minus its extension
            instance_path = runs_path[:-len(runs_extension)]
            with open(runs_path) as in_file:
                in_reader = csv.reader(in_file)
                column_names = in_reader.next()
                assert column_names[:4] == ["solver", "budget", "cost", "succeeded"]
                for row in in_reader:
                    if only_solver is None or row[0] == only_solver:
                        out_writer.writerow([instance_path] + row[:4])
    logger.info("bundling feature data from %i files", len(features_paths))
    with borg.util.openz(os.path.join(bundle_path, "all_features.csv.gz"), "w") as out_file:
        out_writer = csv.writer(out_file)
        feature_names = None
        for features_path in features_paths:
            logger.info("reading %s", features_path)
            instance_path = features_path[:-len(features_extension)]
            with open(features_path) as in_file:
                in_reader = csv.reader(in_file)
                # take the header from the first file; every subsequent file
                # must agree with it exactly
                if feature_names is None:
                    feature_names = in_reader.next()
                    out_writer.writerow(["instance"] + feature_names)
                else:
                    column_names = in_reader.next()
                    assert feature_names == column_names
                for row in in_reader:
                    out_writer.writerow([instance_path] + row)
if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/bundle_run_data.py",
"copies": "1",
"size": "2756",
"license": "mit",
"hash": -2323594235230234000,
"line_mean": 31.8095238095,
"line_max": 97,
"alpha_frac": 0.5845428157,
"autogenerated": false,
"ratio": 3.7142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9696387310896836,
"avg_score": 0.020488243817775652,
"num_lines": 84
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import os.path
import tempfile
import contextlib
import borg
from . import instance
from . import solvers
from . import features
from . import test
logger = borg.get_logger(__name__, default_level = "INFO")
class PseudoBooleanTask(object):
    """A pseudo-Boolean satisfiability (PB) instance."""
    def __init__(self, path, linearizer_path = None):
        """Load (and, if necessary, linearize) the instance at path.

        @param path: path to the .opb/.pbo instance file.
        @param linearizer_path: external linearizer binary; required when the
            instance header declares nonlinear terms.
        """
        self.path = path
        # temporary files created for this task, keyed by purpose; see clean()
        self.support_paths = {}
        with open(path) as opb_file:
            self.header = instance.parse_opb_file_header(opb_file.readline())
        (self.raw_M, self.raw_N, self.nonlinear) = self.header
        if self.nonlinear:
            assert linearizer_path is not None
            # run the external linearizer and cache its output to a temp file
            (linearized, _) = borg.util.check_call_capturing([linearizer_path, self.path])
            (fd, self.linearized_path) = tempfile.mkstemp(suffix = ".opb")
            self._was_linearized = True
            self.support_paths["linearized"] = self.linearized_path
            with os.fdopen(fd, "w") as linearized_file:
                linearized_file.write(linearized)
            logger.info("wrote linearized instance to %s", self.linearized_path)
        else:
            self.linearized_path = path
            self._was_linearized = False
        with borg.accounting() as accountant:
            with open(self.linearized_path) as opb_file:
                self.opb = instance.parse_opb_file_linear(opb_file)
        logger.info("parsing took %.2f s", accountant.total.cpu_seconds)
    def get_linearized_path(self):
        """Return the path to the linear form of this instance."""
        return self.linearized_path
    def clean(self):
        """Delete any temporary support files created for this task."""
        for path in self.support_paths.values():
            os.unlink(path)
        self.support_paths = {}
@borg.named_domain
class PseudoBooleanSatisfiability(object):
    """Pseudo-Boolean satisfiability domain definition."""
    name = "pb"
    extensions = [".opb", ".pbo"]
    def __init__(self, linearizer_path = None):
        """Optionally remember an absolute path to the linearizer binary."""
        if linearizer_path is None:
            self._linearizer_path = None
        else:
            self._linearizer_path = os.path.abspath(linearizer_path)
    @contextlib.contextmanager
    def task_from_path(self, task_path):
        """Clean up cached task resources on context exit."""
        task = PseudoBooleanTask(task_path, linearizer_path = self._linearizer_path)
        try:
            yield task
        except:
            raise
        finally:
            task.clean()
    def compute_features(self, task):
        """Compute static features of the given task."""
        (names, values) = features.compute_all(task.opb)
        # flag whether the instance had to be linearized (+1.0 / -1.0 encoding)
        names.append("nonlinear")
        values.append(1.0 if task._was_linearized else -1.0)
        return (names, values)
    def is_final(self, task, answer):
        """Is the answer definitive for the task?"""
        if answer is None:
            return False
        else:
            (description, _) = answer
            if task.opb.objective is None:
                # decision instance: either outcome is definitive
                return description in ("SATISFIABLE", "UNSATISFIABLE")
            else:
                # optimization instance: require a proven optimum
                return description in ("OPTIMUM FOUND", "UNSATISFIABLE")
    def show_answer(self, task, answer):
        """Print the answer in "s"/"v" solution-line format."""
        if answer is None:
            print "s UNKNOWN"
        else:
            (description, certificate) = answer
            print "s {0}".format(description)
            if certificate is not None:
                # sort literals by variable number (stripping "-"/"x" prefixes)
                # and print only the original (pre-linearization) variables
                sorted_certificate = sorted(certificate, key = lambda l: int(l[2:] if l[0] == "-" else l[1:]))
                print "v", " ".join(sorted_certificate[:task.raw_N])
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/pb/__init__.py",
"copies": "1",
"size": "3514",
"license": "mit",
"hash": 4327076310073746000,
"line_mean": 28.5294117647,
"line_max": 110,
"alpha_frac": 0.594479226,
"autogenerated": false,
"ratio": 3.811279826464208,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9848180938204504,
"avg_score": 0.01151562285194082,
"num_lines": 119
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import pty
import subprocess
import borg
log = borg.get_logger(__name__)
def _child_preexec(environment):
    """Run in the child code prior to execution."""

    # export the requested variables into the child's environment
    for (name, value) in environment.items():
        os.putenv(name, str(value))

    # detach from the parent's session
    os.setsid()
def spawn_pipe_session(arguments, environment = None, cwd = None):
    """Spawn a subprocess in its own session.

    :param arguments: argv list for the child process
    :param environment: mapping of extra environment variables for the child
        (the original used a mutable ``{}`` default, a shared-state hazard)
    :param cwd: working directory for the child, if any
    :returns: the ``subprocess.Popen`` object, with stdin already closed
    """
    if environment is None:
        environment = {}

    popened = \
        subprocess.Popen(
            arguments,
            close_fds = True,
            stdin = subprocess.PIPE,
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            preexec_fn = lambda: _child_preexec(environment),
            cwd = cwd,
            )

    # we never feed the child input; close stdin so it sees EOF immediately
    popened.stdin.close()

    return popened
def spawn_pty_session(arguments, environment = {}, cwd = None):
    """Spawn a subprocess in its own session, with stdout routed through a pty."""

    # build a pty
    # (the child gets the slave end as stdin/stdout; we read the master end)
    (master_fd, slave_fd) = pty.openpty()

    log.debug("opened pty %s", os.ttyname(slave_fd))

    # launch the subprocess
    try:
        popened = \
            subprocess.Popen(
                arguments,
                close_fds = True,
                stdin = slave_fd,
                stdout = slave_fd,
                stderr = subprocess.PIPE,
                preexec_fn = lambda: _child_preexec(environment),
                cwd = cwd,
                )

        # expose the master end as the process's stdout; fdopen takes
        # ownership of master_fd, so only the slave end is closed here
        popened.stdout = os.fdopen(master_fd)

        os.close(slave_fd)

        return popened
    except:
        # save the original exception before cleanup can clobber it
        raised = borg.util.Raised()

        try:
            # NOTE(review): openpty never returns None fds, and if fdopen
            # succeeded master_fd may be closed twice here -- best-effort
            # cleanup only; errors are printed and ignored below
            if master_fd is not None:
                os.close(master_fd)
            if slave_fd is not None:
                os.close(slave_fd)
        except:
            borg.util.Raised().print_ignored()

        raised.re_raise()
def kill_session(sid, number):
    """
    Send signal C{number} to all processes in session C{sid}.

    Theoretically imperfect, but should be consistently effective---almost
    certainly paranoid overkill---in practice.
    """

    command = ["pkill", "-%i" % number, "-s", "%i" % sid]

    # why do we pkill multiple times? because we're crazy.
    for _ in xrange(2):
        status = subprocess.call(command)

        # pkill exit status 1 just means "no processes matched"
        if status not in (0, 1):
            raise RuntimeError("pkill failure")
| {
"repo_name": "borg-project/borg",
"path": "borg/unix/sessions.py",
"copies": "1",
"size": "2375",
"license": "mit",
"hash": 2704219688012391000,
"line_mean": 25.3888888889,
"line_max": 82,
"alpha_frac": 0.5608421053,
"autogenerated": false,
"ratio": 3.9451827242524917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5006024829552491,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import pty
import sys
import functools
import subprocess
from cargo.log import get_logger
from cargo.unix.proc import ProcessStat
from cargo.errors import Raised
log = get_logger(__name__)
def _child_preexec(environment):
    """
    Run in the child code prior to execution.
    """

    # export the requested variables into the child's environment
    for (name, value) in environment.items():
        os.putenv(name, str(value))

    # detach from the parent's session
    os.setsid()
def spawn_pipe_session(arguments, environment = None):
    """Spawn a subprocess in its own session.

    :param arguments: argv list for the child process
    :param environment: mapping of extra environment variables for the child
        (the original used a mutable ``{}`` default, a shared-state hazard)
    :returns: the ``subprocess.Popen`` object, with stdin already closed
    """
    if environment is None:
        environment = {}

    popened = \
        subprocess.Popen(
            arguments,
            close_fds = True,
            stdin = subprocess.PIPE,
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            preexec_fn = lambda: _child_preexec(environment),
            )

    # we never feed the child input; close stdin so it sees EOF immediately
    popened.stdin.close()

    return popened
def spawn_pty_session(arguments, environment = {}):
    """Spawn a subprocess in its own session, with stdout routed through a pty."""

    # build a pty
    # (the child gets the slave end as stdin/stdout; we read the master end)
    (master_fd, slave_fd) = pty.openpty()

    log.debug("opened pty %s", os.ttyname(slave_fd))

    # launch the subprocess
    try:
        popened = \
            subprocess.Popen(
                arguments,
                close_fds = True,
                stdin = slave_fd,
                stdout = slave_fd,
                stderr = subprocess.PIPE,
                preexec_fn = lambda: _child_preexec(environment),
                )

        # expose the master end as the process's stdout; fdopen takes
        # ownership of master_fd, so only the slave end is closed here
        popened.stdout = os.fdopen(master_fd)

        os.close(slave_fd)

        return popened
    except:
        # save the original exception before cleanup can clobber it
        raised = Raised()

        try:
            # NOTE(review): openpty never returns None fds, and if fdopen
            # succeeded master_fd may be closed twice here -- best-effort
            # cleanup only; errors are printed and ignored below
            if master_fd is not None:
                os.close(master_fd)
            if slave_fd is not None:
                os.close(slave_fd)
        except:
            Raised().print_ignored()

        raised.re_raise()
def kill_session(sid, number):
    """
    Send signal C{number} to all processes in session C{sid}.

    Theoretically imperfect, but should be consistently effective---almost
    certainly paranoid overkill---in practice.
    """

    command = ["pkill", "-%i" % number, "-s", "%i" % sid]

    # why do we pkill multiple times? because we're crazy.
    for _ in xrange(2):
        status = subprocess.call(command)

        # pkill exit status 1 just means "no processes matched"
        if status not in (0, 1):
            raise RuntimeError("pkill failure")
| {
"repo_name": "borg-project/cargo",
"path": "src/python/cargo/unix/sessions.py",
"copies": "1",
"size": "2445",
"license": "mit",
"hash": 8399013479811368000,
"line_mean": 24.7368421053,
"line_max": 82,
"alpha_frac": 0.5676891616,
"autogenerated": false,
"ratio": 3.9563106796116503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502399984121165,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import re
import datetime
# NOTE(review): nothing in this module raises this yet -- presumably intended
# for the ProcessStat parsing path below; confirm before removing.
class ProcFileParseError(RuntimeError):
    """A file in /proc could not be parsed."""
class ProcessStat(object):
    """
    Information about a specific process.

    Merely a crude wrapper around the information in the /proc/<pid>/stat file.
    Read the man pages! Read the kernel source! Nothing in /proc is ever quite
    as it seems.
    """

    # conversion factor from kernel clock ticks (jiffies) to seconds
    __ticks_per_second = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
    # /proc entries whose names are all digits are process directories
    __entry_re = re.compile("\\d+")
    # one regex per /proc/<pid>/stat field, in kernel field order; currently
    # only referenced by the commented-out full-parse path in __init__
    __stat_re_strings = [
        # signedness decisions were made by examining the kernel source, and in some
        # cases (eg pid) don't make much sense---but who are we in userland to judge?
        "(?P<pid>-?\\d+)",      # process pid
        "(?P<name>\\(.+\\))",   # executable name
        "(?P<state>[RSDZTWX])", # process state
        "(?P<ppid>-?\\d+)",     # parent's pid
        "(?P<pgid>-?\\d+)",     # process group id
        "(?P<sid>-?\\d+)",      # session id
        "(?P<tty>-?\\d+)",      # tty number
        "(?P<ttyg>-?\\d+)",     # group id of the process which owns the associated tty
        "(?P<flags>\\d+)",      # kernel flags word (kernel-version-dependent)
        "(?P<min>\\d+)",        # minor faults count
        "(?P<cmin>\\d+)",       # waited-for-children minor faults count
        "(?P<maj>\\d+)",        # major faults count
        "(?P<cmaj>\\d+)",       # waited-for-children major faults count
        "(?P<utime>\\d+)",      # user mode jiffies count
        "(?P<stime>\\d+)",      # kernel mode jiffies count
        "(?P<cutime>-?\\d+)",   # waited-for-children user mode jiffies count
        "(?P<cstime>-?\\d+)",   # waited-for-children kernel mode jiffies count
        "(?P<priority>-?\\d+)", # real-time priority or raw nice value
        "(?P<nice>-?\\d+)",     # signed nice value in [-19, 19]
        "(?P<nthreads>-?\\d+)", # number of threads in the process (replaced removed field)
        "0",                    # removed-field placeholder
        "(?P<start>\\d+)",      # process start time in jiffies
        "(?P<vsize>\\d+)",      # bytes of process virtual memory
        "(?P<rss>-?\\d+)",      # resident set size minus three
        "(?P<rlim>\\d+)",       # rss limit in bytes
        "(?P<pbot>\\d+)",       # program text bottom address
        "(?P<ptop>\\d+)",       # program text top address
        "(?P<stack>\\d+)",      # stack start address
        "(?P<esp>\\d+)",        # stack pointer address
        "(?P<eip>\\d+)",        # instruction pointer address
        "(?P<pending>\\d+)",    # pending signals bitmap
        "(?P<blocked>\\d+)",    # blocked signals bitmap
        "(?P<ignored>\\d+)",    # ignored signals bitmap
        "(?P<caught>\\d+)",     # caught signals bitmap
        "(?P<wchan>\\d+)",      # process wait channel
        "\\d+",                 # zero (in the past, pages swapped)
        "\\d+",                 # zero (in the past, childrens' pages swapped)
        "(?P<dsig>-?\\d+)",     # death signal to parent
        "(?P<cpu>-?\\d+)",      # last CPU of execution
        "(?P<rtprio>\\d+)",     # real-time scheduling priority
        "(?P<policy>\\d+)",     # scheduling policy
        "(?P<blkio>\\d+)",      # clock ticks of block I/O delays
        "(?P<gtime>\\d+)",      # process guest time in clock ticks
        "(?P<cgtime>\\d+)",     # waited-for-children's guest time in clock ticks
        ]
    __stat_res = [re.compile(s) for s in __stat_re_strings]

    def __init__(self, pid):
        """Read and parse /proc/<pid>/stat."""

        with open("/proc/%i/stat" % pid) as file:
            stat = file.read()

        # NOTE(review): whitespace split mis-indexes fields when the
        # executable name (field 2, in parentheses) contains spaces
        strings = stat.split()

        # NOTE(review): only pid, sid, and utime are populated; every other
        # property below will raise KeyError -- the commented-out loop was
        # presumably the full-parse path. Confirm which fields callers use.
        self.__d = {
            "pid"   : strings[0],
            "sid"   : strings[5],
            "utime" : strings[13],
            }

#         for i in fields:
#             m = ProcessStat.__stat_res[i].match(strings[i])

#             self.__d.update(m.groupdict())

    @staticmethod
    def all():
        """
        Iterate over all processes on the system.

        Grabs a list of pids from /proc and iterates over them, skipping any
        processes which have terminated by the time they are reached in the
        iteration. The returned information is therefore not a perfect snapshot
        of system state, but we have no alternative.
        """

        for name in os.listdir("/proc"):
            m = ProcessStat.__entry_re.match(name)

            if m:
                try:
                    yield ProcessStat(int(name))
                except IOError:
                    # the process exited between listdir and open; skip it
                    pass

    @staticmethod
    def in_session(sid):
        """Iterate over all processes in a session."""

        for process in ProcessStat.all():
            if process.sid == sid:
                yield process

    def __ticks_to_timedelta(self, ticks):
        """Convert kernel clock ticks to a Python timedelta value."""

        return datetime.timedelta(seconds = float(ticks) / self.__ticks_per_second)

    # expose the relevant fields
    # NOTE(review): only pid, sid, and the *_time properties backed by utime
    # can succeed given the partial parse in __init__ above
    pid = property(lambda self: int(self.__d["pid"]))
    name = property(lambda self: self.__d["name"])
    state = property(lambda self: self.__d["state"])
    ppid = property(lambda self: int(self.__d["ppid"]))
    pgid = property(lambda self: int(self.__d["pgid"]))
    sid = property(lambda self: int(self.__d["sid"]))
    tty = property(lambda self: int(self.__d["tty"]))
    tty_owner_group = property(lambda self: int(self.__d["ttyg"]))
    flags = property(lambda self: long(self.__d["flags"]))
    minor_faults = property(lambda self: long(self.__d["min"]))
    child_minor_faults = property(lambda self: long(self.__d["cmin"]))
    major_faults = property(lambda self: long(self.__d["maj"]))
    child_major_faults = property(lambda self: long(self.__d["cmaj"]))
    user_time = property(lambda self: self.__ticks_to_timedelta(self.__d["utime"]))
    kernel_time = property(lambda self: self.__ticks_to_timedelta(self.__d["stime"]))
    child_user_time = property(lambda self: self.__ticks_to_timedelta(self.__d["cutime"]))
    child_kernel_time = property(lambda self: self.__ticks_to_timedelta(self.__d["cstime"]))
    # NOTE(review): this "priority" definition is shadowed by the second one
    # below (backed by "rtprio"); one of the two is presumably misnamed
    priority = property(lambda self: int(self.__d["priority"]))
    nice = property(lambda self: int(self.__d["nice"]))
    threads = property(lambda self: int(self.__d["nthreads"]))
    start_time = property(lambda self: self.__ticks_to_timedelta(self.__d["start"]))
    virtual_size = property(lambda self: long(self.__d["vsize"]))
    resident_set_size = property(lambda self: int(self.__d["rss"]))
    resident_set_limit = property(lambda self: long(self.__d["rlim"]))
    text_bottom = property(lambda self: long(self.__d["pbot"]))
    text_top = property(lambda self: long(self.__d["ptop"]))
    stack_start = property(lambda self: long(self.__d["stack"]))
    stack_pointer = property(lambda self: long(self.__d["esp"]))
    instruction_pointer = property(lambda self: long(self.__d["eip"]))
    pending_signals = property(lambda self: long(self.__d["pending"]))
    blocked_signals = property(lambda self: long(self.__d["blocked"]))
    ignored_signals = property(lambda self: long(self.__d["ignored"]))
    caught_signals = property(lambda self: long(self.__d["caught"]))
    wait_channel = property(lambda self: long(self.__d["wchan"]))
    exit_signal = property(lambda self: int(self.__d["dsig"]))
    last_cpu = property(lambda self: int(self.__d["cpu"]))
    # NOTE(review): duplicate name -- this shadows the earlier "priority"
    priority = property(lambda self: long(self.__d["rtprio"]))
    policy = property(lambda self: long(self.__d["policy"]))
    io_delay = property(lambda self: long(self.__d["blkio"]))
    guest_time = property(lambda self: self.__ticks_to_timedelta(self.__d["gtime"]))
    child_guest_time = property(lambda self: self.__ticks_to_timedelta(self.__d["cgtime"]))
def get_pid_utime(pid):
    """Return the user-mode CPU time consumed by process *pid*."""

    stat = ProcessStat(pid)

    return stat.user_time
def get_sid_utime(sid):
    """Return the total user-mode CPU time of all processes in session *sid*.

    A zero timedelta is used as the sum's start value: the default start of 0
    (an int) cannot be added to a timedelta, so the original raised TypeError
    on any non-empty session.
    """

    return sum((p.user_time for p in ProcessStat.in_session(sid)), datetime.timedelta())
| {
"repo_name": "borg-project/borg",
"path": "borg/unix/proc.py",
"copies": "1",
"size": "8351",
"license": "mit",
"hash": -6076569407519470000,
"line_mean": 48.1235294118,
"line_max": 94,
"alpha_frac": 0.5462818824,
"autogenerated": false,
"ratio": 3.612024221453287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9597403372620315,
"avg_score": 0.012180546246594549,
"num_lines": 170
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import re
import tempfile
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def parse_competition(stdout):
    """Parse output from a standard competition solver."""

    status_match = re.search(r"^s +([a-zA-Z ]+) *\r?$", stdout, re.M)

    if status_match is None:
        return None

    answer_type = status_match.group(1).strip().upper()

    if answer_type in ("SATISFIABLE", "OPTIMUM FOUND"):
        # collect the assignment from all "v" lines
        certificate = []

        for value_line in re.findall(r"^v ([ x\-0-9]*) *\r?$", stdout, re.M):
            certificate.extend(value_line.split())

        if not certificate:
            # a satisfiable answer without a certificate is unusable
            return None

        return (answer_type, certificate)
    elif answer_type == "UNSATISFIABLE":
        return (answer_type, None)
    else:
        return None
class PseudoBooleanSolverFactory(object):
    """Build running competition-style PB solvers for a given command."""

    def __init__(self, root, command):
        """Remember the solver root directory and command template."""

        self._root = root
        self._command = command

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start the solver on *task* and return the running-solver handle."""

        solver = \
            borg.solver_io.RunningSolver(
                parse_competition,
                self._command,
                self._root,
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                )

        return solver
class LinearPseudoBooleanSolverFactory(PseudoBooleanSolverFactory):
    """As PseudoBooleanSolverFactory, but runs on the linearized instance."""

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start the solver on the linearized form of *task*."""

        solver = \
            borg.solver_io.RunningSolver(
                parse_competition,
                self._command,
                self._root,
                task.get_linearized_path(),
                stm_queue = stm_queue,
                solver_id = solver_id,
                )

        return solver
def parse_scip(variables, optimization, stdout):
    """Parse output from the SCIP solver(s)."""

    status_match = re.search(r"^SCIP Status *: *problem is solved \[([a-zA-Z ]+)\] *\r?$", stdout, re.M)

    if not status_match:
        return None

    (status,) = status_match.groups()

    if status == "optimal solution found":
        answer_type = "OPTIMUM FOUND" if optimization else "SATISFIABLE"

        # SCIP prints only the variables set to one
        assignment = numpy.zeros(variables, bool)

        for index_string in re.findall(r"^x([0-9]+) *1[ \t]*\(obj:.+\) *\r?$", stdout, re.M):
            assignment[int(index_string) - 1] = True

        certificate = [("" if v else "-") + ("x%i" % (i + 1)) for (i, v) in enumerate(assignment)]

        return (answer_type, certificate)
    elif status == "infeasible":
        return ("UNSATISFIABLE", None)
    else:
        return None
class SCIP_SolverFactory(object):
    """Build running SCIP solvers for a given root directory and command."""

    def __init__(self, root, command):
        """Remember the solver root directory and command template."""

        self._root = root
        self._command = command

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start SCIP on *task*, parsing its output as it runs."""

        def parse(stdout):
            # bind the task so the parser knows variable count and mode
            return parse_scip(task.opb.N, task.opb.objective is not None, stdout)

        solver = \
            borg.solver_io.RunningSolver(
                parse,
                self._command,
                self._root,
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                )

        return solver
def parse_opbdp(variables, optimization, stdout):
    """Parse output from the opbdp solver.

    :param variables: number of variables in the instance
    :param optimization: whether the instance is an optimization instance
    :param stdout: raw solver output text
    :returns: an ``(answer_type, certificate)`` pair, or None if unparsable
    """

    if re.search(r"^Constraint Set is unsatisfiable\r?$", stdout, re.M):
        return ("UNSATISFIABLE", None)
    # the asterisks must be escaped: the original unescaped "******" was an
    # invalid regex ("multiple repeat") and raised re.error at runtime
    elif re.search(r"^Global Minimum: \*\*\*\*\*\* -?[0-9]+ \*\*\*\*\*\*\r?$", stdout, re.M):
        solution_match = re.search(r"^0-1 Variables fixed to 1 :([x0-9 ]*)\r?$", stdout, re.M)

        if solution_match:
            solution = numpy.zeros(variables, bool)

            # the original read from the undefined name "answer_match" here
            (solution_chunk,) = solution_match.groups()

            for part in solution_chunk.split():
                solution[int(part[1:]) - 1] = True

            return (
                "OPTIMUM FOUND" if optimization else "SATISFIABLE",
                [("" if t else "-") + ("x%i" % (v + 1)) for (v, t) in enumerate(solution)],
                )

    return None
class OPBDP_SolverFactory(object):
    """Build running opbdp solvers."""

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Start opbdp on *task*, parsing its output as it runs."""

        def parse(stdout):
            # the original dropped the stdout argument entirely, so every
            # parse attempt raised TypeError; forward it to the parser
            return parse_opbdp(task.opb.N, task.opb.objective is not None, stdout)

        # opbdp needs -n to handle nonlinear instances
        nl_flag = ["-n"] if task.nonlinear else []

        # NOTE(review): unlike the other factories, no root directory is
        # passed before the input path here -- confirm RunningSolver's
        # expected argument order before relying on this factory
        return \
            borg.solver_io.RunningSolver(
                parse,
                ["{root}/opbdp-1.1.3/opbdp", "-s", "-v1"] + nl_flag + ["{task}"],
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                )
def write_minion_from_pb(instance, minion_file):
    """Translate a pseudo-Boolean instance into Minion 3 input format."""

    write = minion_file.write
    has_objective = instance.objective is not None

    write("MINION 3\n")
    write("**VARIABLES**\n")
    write("BOOL b[{0}]\n".format(instance.N))

    if has_objective:
        # bound the cost variable generously in both directions
        write("BOUND c {{{0}..{1}}}\n".format(-2**30, 2**30))

    write("**CONSTRAINTS**\n")

    if has_objective:
        # tie the cost variable to the objective via a geq/leq pair
        weights = ",".join(str(w) for (w, _) in instance.objective)
        variables = ",".join("b[{0}]".format(v) for (_, v) in instance.objective)

        write("weightedsumgeq([{0}],[{1}],c)\n".format(weights, variables))
        write("weightedsumleq([{0}],[{1}],c)\n".format(weights, variables))

    for i in xrange(instance.M):
        # each constraint row lives in CSR slice [row_start, row_end)
        row_start = instance.constraints.indptr[i]
        row_end = instance.constraints.indptr[i + 1]
        total = instance.totals[i]
        weights = ",".join(map(str, instance.constraints.data[row_start:row_end]))
        variables = ",".join(map("b[{0}]".format, instance.constraints.indices[row_start:row_end]))

        write("weightedsumgeq([{0}],[{1}],{2})\n".format(weights, variables, total))

        # relation 1 denotes equality, encoded as geq plus leq
        if instance.relations[i] == 1:
            write("weightedsumleq([{0}],[{1}],{2})\n".format(weights, variables, total))

    write("**SEARCH**\n")

    if has_objective:
        write("MINIMIZING c\n")

    write("PRINT [b]\n")
    write("**EOF**\n")
def parse_minion(instance, stdout):
    """Parse minion output into a (status, certificate) pair, or None."""

    solvable_match = re.search(r"^Problem solvable\?: ([a-z]+)\r?$", stdout, re.M)

    if solvable_match is None:
        return None

    status = solvable_match.group(1)

    if status == "no":
        return ("UNSATISFIABLE", None)
    elif status == "yes":
        answer_type = "SATISFIABLE" if instance.objective is None else "OPTIMUM FOUND"

        # the final "Sol:" line holds the best (or only) printed assignment
        last_solution = re.findall(r"^Sol: ([01 ]+)\r?$", stdout, re.M)[-1]
        bits = [int(s) for s in last_solution.split()]
        certificate = [("" if b == 1 else "-") + ("x%i" % (i + 1)) for (i, b) in enumerate(bits)]

        return (answer_type, certificate)
    else:
        return None
def build_minion_pb_solver(task, stm_queue = None, solver_id = None):
    """Start a minion run on the (cached) minion translation of a PB task."""

    minion_path = task.support_paths.get("minion")

    if minion_path is None:
        # translate the instance once and cache the file path on the task
        (fd, minion_path) = tempfile.mkstemp(suffix = ".minion")

        task.support_paths["minion"] = minion_path

        with os.fdopen(fd, "w") as minion_input_file:
            write_minion_from_pb(task.opb, minion_input_file)

        logger.info("wrote minion input file to %s", minion_path)

    def parse(stdout):
        return parse_minion(task.opb, stdout)

    return \
        borg.solver_io.RunningSolver(
            parse,
            ["{root}/minion-0.12/bin/minion", "-noresume", "{task}"],
            minion_path,
            stm_queue = stm_queue,
            solver_id = solver_id,
            )
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/pb/solvers.py",
"copies": "1",
"size": "7636",
"license": "mit",
"hash": 3490355400830166000,
"line_mean": 32.0562770563,
"line_max": 105,
"alpha_frac": 0.5449188057,
"autogenerated": false,
"ratio": 3.512419503219871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4557338308919871,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import multiprocessing
import os
import signal
import traceback

import condor
logger = condor.log.get_logger(__name__)
class LocalWorkerProcess(multiprocessing.Process):
"""Work in a subprocess."""
def __init__(self, stm_queue):
"""Initialize."""
multiprocessing.Process.__init__(self)
self.stm_queue = stm_queue
self.mts_queue = multiprocessing.Queue()
def run(self):
"""Work."""
class DeathRequestedError(Exception):
pass
try:
def handle_sigusr1(number, frame):
raise DeathRequestedError()
signal.signal(signal.SIGUSR1, handle_sigusr1)
logger.info("subprocess running")
task = None
while True:
# get an assignment
if task is None:
self.stm_queue.put(condor.messages.ApplyMessage(os.getpid()))
task = self.mts_queue.get()
if task is None:
logger.info("received null assignment; terminating")
return None
# complete the assignment
try:
logger.info("starting work on task %s", task.key)
result = task()
except KeyboardInterrupt, error:
logger.warning("interruption during task %s", task.key)
self.stm_queue.put(condor.messages.InterruptedMessage(os.getpid(), task.key))
self.mts_queue.get()
break
except DeathRequestedError:
logger.warning("death requested; terminating")
break
except BaseException, error:
description = traceback.format_exc(error)
logger.warning("error during task %s:\n%s", task.key, description)
self.stm_queue.put(condor.messages.ErrorMessage(os.getpid(), task.key, description))
self.mts_queue.get()
break
else:
logger.info("finished task %s", task.key)
self.stm_queue.put(condor.messages.DoneMessage(os.getpid(), task.key, result))
task = self.mts_queue.get()
except DeathRequestedError:
pass
class ParallelManager(object):
    """Manage locally-distributed work."""

    def __init__(self, tasks, workers):
        """Start *workers* local worker processes to execute *tasks*."""

        self._core = condor.managers.ManagerCore(tasks)

        logger.info("distributing %i tasks to %i workers", len(tasks), workers)

        self._stm_queue = multiprocessing.Queue()
        self._processes = [LocalWorkerProcess(self._stm_queue) for _ in xrange(workers)]

        for worker in self._processes:
            worker.start()

    def manage(self):
        """Yield task results as workers complete them."""

        workers_by_pid = dict((worker.pid, worker) for worker in self._processes)

        while self._core.unfinished_count() > 0:
            message = self._stm_queue.get()

            (response, finished) = self._core.handle(message)

            workers_by_pid[message.sender].mts_queue.put(response)

            if finished is not None:
                yield finished

    def clean(self):
        """Ask every worker process to terminate."""

        for worker in self._processes:
            os.kill(worker.pid, signal.SIGUSR1)
| {
"repo_name": "borg-project/utcondor",
"path": "condor/managers/parallel.py",
"copies": "1",
"size": "3455",
"license": "mit",
"hash": -292769262950733400,
"line_mean": 28.5299145299,
"line_max": 104,
"alpha_frac": 0.5496382055,
"autogenerated": false,
"ratio": 4.5520421607378125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5601680366237813,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os
import socket
import condor
class Message(object):
    """Message from a worker."""

    def __init__(self, sender):
        """Record the sender plus this process's host and pid."""

        self.sender = sender
        self.host = socket.gethostname()
        self.pid = os.getpid()

    def make_summary(self, text):
        """Return *text* prefixed with the sending worker's identity."""

        identity = (self.sender, self.pid, self.host, text)

        return "worker {0} (pid {1} on {2}) {3}".format(*identity)
class ApplyMessage(Message):
    """A worker wants a unit of work."""

    def get_summary(self):
        """Describe this request in human-readable form."""

        return self.make_summary("requested a job")
class ErrorMessage(Message):
    """An error occurred in a task."""

    def __init__(self, sender, key, description):
        """Record the failing task's key and its error description."""

        Message.__init__(self, sender)

        self.key = key
        self.description = description

    def get_summary(self):
        """Summarize, quoting the final line of the error description."""

        last_line = self.description.splitlines()[-1]

        return self.make_summary("encountered an error ({0})".format(last_line))
class InterruptedMessage(Message):
    """A worker was interrupted."""

    def __init__(self, sender, key):
        """Record the key of the interrupted task."""

        Message.__init__(self, sender)

        self.key = key

    def get_summary(self):
        """Describe the interruption in human-readable form."""

        return self.make_summary("was interrupted")
class DoneMessage(Message):
    """A task was completed."""

    def __init__(self, sender, key, result):
        """Record the finished task's key and its result."""

        Message.__init__(self, sender)

        self.key = key
        self.result = result

    def get_summary(self):
        """Describe the completion in human-readable form."""

        return self.make_summary("finished job {0}".format(self.key))
class TaskMessage(Message):
    """A task, with arguments stored appropriately."""

    def __init__(self, sender, task, cache = None):
        """Store the task, interning its argument values in *cache* by id."""

        Message.__init__(self, sender)

        self._call = task.call
        # arguments are referenced by object id so shared values are sent once
        self._arg_ids = [id(value) for value in task.args]
        self._kwarg_ids = dict((name, id(value)) for (name, value) in task.kwargs.items())
        self._key = task.key
        self._cache = {} if cache is None else cache

        for value in task.args:
            self._cache[id(value)] = value

        for value in task.kwargs.values():
            self._cache[id(value)] = value

    def get_task(self):
        """Reconstitute the task from the cached argument values."""

        args = [self._cache[arg_id] for arg_id in self._arg_ids]
        kwargs = dict((name, self._cache[arg_id]) for (name, arg_id) in self._kwarg_ids.items())

        return condor.managers.Task(self._call, args, kwargs, self._key)
| {
"repo_name": "borg-project/utcondor",
"path": "condor/messages.py",
"copies": "1",
"size": "2326",
"license": "mit",
"hash": -1273110466965169000,
"line_mean": 25.7356321839,
"line_max": 95,
"alpha_frac": 0.5915735168,
"autogenerated": false,
"ratio": 3.7335473515248796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.482512086832488,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
@borg.annotations(
suite_path = ("path to the solvers suite", "positional", None, os.path.abspath),
solver_name = ("name of solver to run", "positional"),
instance_path = ("path to problem instance", "positional"),
specifics = ("other instance parameters (ignored)", "positional"),
budget = ("run cutoff in seconds", "positional", None, float),
max_length = ("length cutoff (ignored)", "positional"),
seed = ("solver seed", "positional", None, int),
)
def main(
suite_path,
solver_name,
instance_path,
specifics = None,
budget = 1e6,
max_length = None,
seed = None,
*args
):
"""Make a single solver run for ParamILS."""
suite = borg.load_solvers(suite_path)
if seed is not None:
borg.statistics.set_prng_seeds(seed)
suite = borg.load_solvers(suite_path)
solver = suite.solvers[solver_name].with_args(args)
with suite.domain.task_from_path(instance_path) as task:
with borg.accounting() as accountant:
answer = solver(task)(budget)
succeeded = suite.domain.is_final(task, answer)
print \
"Result for ParamILS: {solved}, {run_time}, {run_length}, {best}, {seed}".format(
solved = "SAT" if succeeded else "TIMEOUT",
run_time = accountant.total.cpu_seconds,
run_length = -1,
best = -1,
seed = seed,
)
if __name__ == "__main__":
borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/run_for_paramils.py",
"copies": "1",
"size": "1609",
"license": "mit",
"hash": -5852073763036611000,
"line_mean": 28.7962962963,
"line_max": 89,
"alpha_frac": 0.6041019267,
"autogenerated": false,
"ratio": 3.4527896995708156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4556891626270816,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import bz2
import sys
import pwd
import gzip
import shutil
import tempfile
import json
import traceback
import contextlib
import subprocess
import numpy
def files_under(path, extensions = None):
    """Iterate over paths in the specified directory tree."""

    # a bare string would silently match per-character; require a collection
    assert not isinstance(extensions, str)

    if os.path.isfile(path):
        candidates = [path]
    else:
        def iter_tree():
            for (directory, _, file_names) in os.walk(path, followlinks = True):
                for file_name in file_names:
                    yield os.path.join(directory, file_name)

        candidates = iter_tree()

    for candidate in candidates:
        if extensions is None or any(candidate.endswith(e) for e in extensions):
            yield candidate
@contextlib.contextmanager
def mkdtemp_scoped(prefix = None):
    """Create, and then delete, a temporary directory."""

    if prefix is None:
        # default to "<username>." so stray directories are attributable
        prefix = "%s." % pwd.getpwuid(os.getuid())[0]

    path = None

    try:
        path = tempfile.mkdtemp(prefix = prefix)

        yield path
    finally:
        if path is not None:
            shutil.rmtree(path, ignore_errors = True)
def openz(path, mode = "rb", closing = True):
    """Open a file, transparently [de]compressing it if a known extension is present.

    :param path: file path; ".bz2", ".gz", and ".xz" extensions are recognized
    :param mode: open mode, passed through to the underlying opener
    :param closing: if True, wrap compressed files in contextlib.closing so
        they can be used in a with statement
    :raises NotImplementedError: for ".xz" files
    """

    (_, extension) = os.path.splitext(path)

    if extension == ".bz2":
        file_ = bz2.BZ2File(path, mode)
    elif extension == ".gz":
        file_ = gzip.GzipFile(path, mode)
    elif extension == ".xz":
        raise NotImplementedError()
    else:
        return open(path, mode)

    if closing:
        return contextlib.closing(file_)

    # the original fell through here and returned None, losing the open file
    return file_
def memoize(call):
"""Automatically memoize a callable."""
results = {}
def wrapper(*args, **kwargs):
key = (tuple(args), tuple(sorted(kwargs.iteritems())))
try:
return results[key]
except KeyError:
results[key] = result = call(*args, **kwargs)
return result
return wrapper
def load_json(path_or_file):
    """Load JSON from a path or file."""

    # anything that isn't a path string is treated as an open file
    if not isinstance(path_or_file, str):
        return json.load(path_or_file)

    with openz(path_or_file, "rb") as json_file:
        return json.load(json_file)
@contextlib.contextmanager
def numpy_printing(**kwargs):
    """Temporarily modify numpy printing options."""

    saved = numpy.get_printoptions()

    numpy.set_printoptions(**kwargs)

    try:
        yield
    finally:
        # always restore the caller's options, even on error
        numpy.set_printoptions(**saved)
@contextlib.contextmanager
def numpy_errors(**kwargs):
    """Temporarily modify numpy error options."""

    saved = numpy.seterr(**kwargs)

    try:
        yield
    finally:
        # always restore the caller's error settings, even on error
        numpy.seterr(**saved)
def seconds(value):
    """Return the equivalent number of seconds, floating-point."""

    total = value.days * 8.64e4
    total += value.seconds
    total += value.microseconds / 1e6

    return total
def call_capturing(arguments, input = None, preexec_fn = None):
    """Spawn a process and return its output and status code.

    :returns: a ``(stdout, stderr, returncode)`` triple
    :raises: whatever the spawn raised; the original silently swallowed the
        exception (its re-raise was commented out) and returned None
    """

    popened = None

    try:
        # launch the subprocess
        popened = \
            subprocess.Popen(
                arguments,
                stdin = subprocess.PIPE,
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE,
                preexec_fn = preexec_fn,
                )

        # wait for its natural death
        (stdout, stderr) = popened.communicate(input)
    except:
        # best-effort cleanup of the child before propagating the error
        if popened is not None and popened.poll() is None:
            try:
                popened.kill()
                popened.wait()
            except OSError:
                pass

        raise
    else:
        return (stdout, stderr, popened.returncode)
def check_call_capturing(arguments, input = None, preexec_fn = None):
    """Spawn a process and return its output; raise on nonzero exit."""

    (stdout, stderr, code) = call_capturing(arguments, input, preexec_fn)

    if code != 0:
        from subprocess import CalledProcessError

        failure = CalledProcessError(code, arguments)

        # attach the captured streams so callers can inspect them
        failure.stdout = stdout
        failure.stderr = stderr

        raise failure

    return (stdout, stderr)
class Raised(object):
    """
    Store the currently-handled exception.

    The current exception must be saved before errors during error handling are
    handled, so that the original exception can be re-raised with its context
    information intact.
    """

    def __init__(self):
        # sys.exc_info(), not traceback.exc_info(): the traceback module has
        # no exc_info attribute, so the original always raised AttributeError
        (self.type, self.value, self.traceback) = sys.exc_info()

    def format(self):
        """Return a list of lines describing the exception."""

        return traceback.format_exception(self.type, self.value, self.traceback)

    def re_raise(self):
        """Re-raise the stored exception."""

        # NOTE(review): raising a tuple does not restore the saved traceback
        # (and is a TypeError on Python 3); the Python-2 three-argument raise
        # statement is presumably the intended form -- confirm the target
        # interpreter before changing this
        raise (self.type, self.value, self.traceback)

    def print_ignored(self, message = "An error was unavoidably ignored:", file_ = sys.stderr):
        """Print an exception-was-ignored message."""

        file_.write("\n%s\n" % message)
        file_.write("".join(self.format()))
        file_.write("\n")
| {
"repo_name": "borg-project/borg",
"path": "borg/util.py",
"copies": "1",
"size": "5302",
"license": "mit",
"hash": -5290050649040553000,
"line_mean": 24.1279620853,
"line_max": 95,
"alpha_frac": 0.5976989815,
"autogenerated": false,
"ratio": 4.094208494208495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5191907475708495,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import cStringIO as StringIO
import nose.tools
import borg
def path_to(name):
    """Return the path of *name* relative to this test module's directory."""

    here = os.path.dirname(__file__)

    return os.path.join(here, name)
def test_cnf_parse_simple():
    """Test simple CNF input."""

    with open(path_to("example.simple.cnf")) as cnf_file:
        parsed = borg.domains.sat.instance.parse_sat_file(cnf_file)

        # four variables, two clauses, in file order
        nose.tools.assert_equal(parsed.N, 4)
        nose.tools.assert_equal(parsed.M, 2)
        nose.tools.assert_equal(parsed.to_clauses(), [[4, -1], [3, 2]])
def test_cnf_write_simple():
"""Test simple CNF output."""
clauses = [[-1, 4], [2, 3]]
cnf_out = borg.domains.sat.instance.SAT_Instance.from_clauses(clauses, 4)
file_out = StringIO.StringIO()
cnf_out.write(file_out)
file_in = StringIO.StringIO(file_out.getvalue())
cnf_in = borg.domains.sat.instance.parse_sat_file(file_in)
print cnf_out.to_clauses()
print file_out.getvalue()
nose.tools.assert_equal(cnf_in.to_clauses(), clauses)
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/sat/test/test_instance.py",
"copies": "1",
"size": "1060",
"license": "mit",
"hash": 2714098951448984600,
"line_mean": 25.5,
"line_max": 77,
"alpha_frac": 0.6509433962,
"autogenerated": false,
"ratio": 3.0547550432276656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42056984394276653,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import copy
import numpy
import condor
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def infer_distributions(run_data, model_name, instance, exclude):
    """Compute model predictions on every instance.

    Trains a model on a copy of `run_data` with runs of solver `exclude`
    removed from `instance`, then returns rows comparing the model's
    predicted per-solver bin distributions on `instance` against the
    observed (filtered) and true (unfiltered) empirical distributions.
    """

    # customize the run data: drop the excluded solver's runs on the
    # target instance only (deep copy so the caller's data is untouched)
    filtered_data = copy.deepcopy(run_data)
    filtered_runs = filter(lambda r: r.solver != exclude, filtered_data.run_lists[instance])

    filtered_data.run_lists[instance] = filtered_runs

    # sample from the model posterior
    model = borg.experiments.common.train_model(model_name, filtered_data, bins = 8)

    # summarize the samples; log_masses unpacks as (components, solvers, bins)
    (M, S, B) = model.log_masses.shape
    # n indexes this instance in the sorted-id order used by to_bins_array
    n = sorted(filtered_data.run_lists).index(instance)
    # normalize empirical counts to distributions; +1e-16 avoids 0/0 for
    # (solver, instance) pairs with no recorded runs
    true = run_data.to_bins_array(run_data.solver_names, B = 8)[n].astype(float)
    true /= numpy.sum(true, axis = -1)[:, None] + 1e-16
    observed = filtered_data.to_bins_array(run_data.solver_names, B = 8)[n].astype(float)
    observed /= numpy.sum(observed, axis = -1)[:, None] + 1e-16
    # mix the per-component masses by their weights for this instance
    mask = model.names == instance
    log_predicted_all = model.log_weights[mask][:, None, None] + model.log_masses[mask, :, :]
    predicted = numpy.sum(numpy.exp(log_predicted_all), axis = 0)
    predicted /= numpy.sum(predicted, axis = -1)[..., None]

    def yield_rows():
        # one row per (source, solver, bin) triple
        for s in xrange(S):
            solver_name = run_data.solver_names[s]

            for b in xrange(B):
                yield (model_name, instance, solver_name, b, predicted[s, b])
                yield ("observed", instance, solver_name, b, observed[s, b])
                yield ("true", instance, solver_name, b, true[s, b])

    return list(yield_rows())
@borg.annotations(
    out_path = ("results output path"),
    bundle = ("path to pre-recorded runs", "positional", None, os.path.abspath),
    experiments = ("path to experiments JSON", "positional", None, borg.util.load_json),
    workers = ("submit jobs?", "option", "w", int),
    local = ("workers are local?", "flag"),
    )
def main(out_path, bundle, experiments, workers = 0, local = False):
    """Write the actual output of multiple models.

    Distributes one infer_distributions job per experiment via condor and
    streams the resulting rows into a CSV file at `out_path`.
    """

    def yield_jobs():
        # run data is loaded once and shared by every job specification
        run_data = borg.storage.RunData.from_bundle(bundle)

        for experiment in experiments:
            yield (
                infer_distributions,
                [
                    run_data,
                    experiment["model_name"],
                    experiment["instance"],
                    experiment["exclude"],
                    ],
                )

    with open(out_path, "w") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["model_name", "instance", "solver", "bin", "probability"])

        # each completed job yields a list of rows
        for (_, row) in condor.do(yield_jobs(), workers, local):
            writer.writerows(row)

if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/experiments/apply_models.py",
"copies": "1",
"size": "2899",
"license": "mit",
"hash": 3924936828884834000,
"line_mean": 32.7093023256,
"line_max": 93,
"alpha_frac": 0.5998620214,
"autogenerated": false,
"ratio": 3.614713216957606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9617429231255727,
"avg_score": 0.01942920142037563,
"num_lines": 86
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import itertools
import collections
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class RunRecord(object):
    """Record of a single solver run: which solver, its budget, its cost,
    and whether it succeeded."""

    def __init__(self, solver, budget, cost, success):
        """Initialize the record from the run's outcome."""

        self.solver = solver
        self.budget = budget
        self.cost = cost
        self.success = success

    def _as_tuple(self):
        # canonical tuple form shared by __str__ and __repr__
        return (self.solver, self.budget, self.cost, self.success)

    def __str__(self):
        return str(self._as_tuple())

    def __repr__(self):
        return repr(self._as_tuple())
class RunData(object):
    """Load and access portfolio training data.

    Maps instance ids to lists of RunRecord objects (`run_lists`) and to
    feature dicts (`feature_vectors`). All runs are expected to share a
    single budget (`common_budget`) and all instances a single feature set
    (`common_features`).
    """

    def __init__(self, solver_names, common_budget = None):
        """Initialize."""

        # solver_names may be None initially (see from_bundle)
        self.solver_names = solver_names
        # instance id -> [RunRecord, ...]
        self.run_lists = {}
        # instance id -> {feature name: value}
        self.feature_vectors = {}
        self.common_budget = common_budget
        self.common_features = None

    def __len__(self):
        """Number of instances for which data are stored."""

        return len(self.run_lists)

    def add_run(self, id_, run):
        """Add a run to these data."""

        runs = self.run_lists.get(id_)

        if runs is None:
            self.run_lists[id_] = [run]
        else:
            runs.append(run)

        # the first run fixes the common budget; later runs must match it
        if self.common_budget is None:
            self.common_budget = run.budget
        else:
            assert run.budget == self.common_budget

    def add_runs(self, pairs):
        """Add runs to these data."""

        for (id_, run) in pairs:
            self.add_run(id_, run)

    def add_feature_vector(self, id_, vector):
        """Add a feature vector to these data."""

        assert id_ not in self.feature_vectors
        assert isinstance(vector, collections.Mapping)

        # "cpu_cost" is bookkeeping, not a feature; exclude it from the
        # common-feature consistency check
        names = [k for k in vector if k != "cpu_cost"]

        if self.common_features is None:
            self.common_features = sorted(names)
        else:
            assert self.common_features == sorted(names)

        self.feature_vectors[id_] = vector

    # NOTE: this method shadows the builtin name "filter"
    def filter(self, *ids):
        """Return a filtered set of run data."""

        data = RunData(self.solver_names)

        for id_ in ids:
            for run in self.run_lists[id_]:
                data.add_run(id_, run)

            data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def filter_features(self, names):
        """Return a set of run data with only the specified features."""

        data = RunData(self.solver_names, self.common_budget)

        # run lists are shared (not copied) with the new object
        data.run_lists = self.run_lists

        for (id_, old_vector) in self.feature_vectors.iteritems():
            new_vector = dict((k, old_vector[k]) for k in names)

            data.add_feature_vector(id_, new_vector)

        return data

    def masked(self, mask):
        """Return a subset of the instances.

        NOTE(review): `self.ids` is `run_lists.keys()`, whose order is
        arbitrary in Python 2; callers pairing `mask` with a particular
        instance order (e.g. sklearn splits) should confirm the intended
        alignment — other methods here canonically use sorted(ids).
        """

        return self.filter(*(id_ for (id_, m) in zip(self.ids, mask) if m))

    def only_successful(self):
        """Return only instances on which some solver succeeded."""

        data = RunData(self.solver_names)

        for (id_, run_list) in self.run_lists.iteritems():
            if any(run.success for run in run_list):
                for run in run_list:
                    data.add_run(id_, run)

                data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def only_nontrivial(self, threshold = 1.0):
        """Return only instances that are not trivially solved, i.e. where
        some run failed or took longer than `threshold` seconds."""

        data = RunData(self.solver_names)

        for (id_, run_list) in self.run_lists.iteritems():
            if any(not run.success or run.cost > threshold for run in run_list):
                for run in run_list:
                    data.add_run(id_, run)

                data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def only_nonempty(self):
        """Return only instances that have at least one recorded run."""

        data = RunData(self.solver_names)

        for (id_, run_list) in self.run_lists.iteritems():
            if len(run_list) > 0:
                for run in run_list:
                    data.add_run(id_, run)

                data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def collect_systematic(self, counts):
        """Get a systematic subset of the data.

        Instances are visited in random order; the cycle of `counts`
        determines how many runs of EVERY solver are kept per instance.
        """

        sampled = RunData(self.solver_names, common_budget = self.common_budget)
        iter_count = itertools.cycle(counts)

        for id_ in sorted(self.ids, key = lambda _: numpy.random.rand()):
            count = next(iter_count)

            for solver in self.solver_names:
                runs = sorted(self.runs_on(id_, solver), key = lambda _: numpy.random.rand())

                # systematic sampling requires enough runs of each solver
                assert len(runs) >= count

                sampled.add_runs((id_, run) for run in runs[:count])

            sampled.add_feature_vector(id_, self.feature_vectors[id_])

        return sampled

    def collect_independent(self, counts):
        """Get independent subsets of the data.

        Unlike collect_systematic, the count cycle advances independently
        per solver, and missing runs are tolerated (truncated slice).
        """

        sampled = RunData(self.solver_names, common_budget = self.common_budget)

        for solver in self.solver_names:
            iter_count = itertools.cycle(counts)

            for id_ in sorted(self.ids, key = lambda _: numpy.random.rand()):
                count = next(iter_count)
                runs = sorted(self.runs_on(id_, solver), key = lambda _: numpy.random.rand())

                sampled.add_runs((id_, run) for run in runs[:count])

                if id_ not in sampled.feature_vectors:
                    sampled.add_feature_vector(id_, self.feature_vectors[id_])

        return sampled

    def runs_on(self, id_, solver):
        """Retrieve runs made by a solver on an instance."""

        for run in self.run_lists[id_]:
            if run.solver == solver:
                yield run

    def get_feature_vector(self, id_):
        """Retrieve features of a task."""

        return self.feature_vectors[id_]

    def get_feature_vectors(self):
        """Retrieve features of all tasks."""

        return self.feature_vectors

    def get_common_budget(self):
        """Retrieve the common run budget, if any.

        Raises if runs with differing budgets have been collected.
        """

        budget = None

        for runs in self.run_lists.values():
            for run in runs:
                if budget is None:
                    budget = run.budget
                elif run.budget != budget:
                    raise Exception("collected runs include multiple run budgets")

        return budget

    def get_run_count(self):
        """Return the number of runs stored."""

        return sum(map(len, self.run_lists.values()))

    def to_features_array(self):
        """Retrieve feature values in an (instances x features) array.

        Rows follow sorted instance-id order; columns follow
        self.common_features order.
        """

        assert set(self.feature_vectors) == set(self.run_lists)

        N = len(self.ids)
        F = len(self.common_features)
        feature_values_NF = numpy.empty((N, F), numpy.double)

        for (n, instance_id) in enumerate(sorted(self.ids)):
            features = self.feature_vectors[instance_id]

            for (f, name) in enumerate(self.common_features):
                feature_values_NF[n, f] = features[name]

        return feature_values_NF

    def to_runs_array(self, solver_names):
        """Return run durations as a partially-filled array.

        Returns (successes_NS, failures_NS, durations_NSR): per-instance,
        per-solver success/failure counts and a NaN-padded array of the
        durations of successful runs. Rows follow sorted instance-id order.
        """

        S = len(solver_names)
        N = len(self.run_lists)

        # accumulate the success and failure counts
        successes_NS = numpy.zeros((N, S), numpy.intc)
        failures_NS = numpy.zeros((N, S), numpy.intc)
        solver_names_S = list(solver_names)
        instance_ids = sorted(self.run_lists)

        for (n, instance_id) in enumerate(instance_ids):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = solver_names_S.index(run.solver)

                if run.success:
                    successes_NS[n, s] += 1
                else:
                    failures_NS[n, s] += 1

        # R is the largest success count over all (instance, solver) pairs
        R = numpy.max(successes_NS)

        # fill in run durations; successes_NS is reused as a write cursor
        durations_NSR = numpy.ones((N, S, R), numpy.double) * numpy.nan

        successes_NS[...] = 0

        for (n, instance_id) in enumerate(instance_ids):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = solver_names_S.index(run.solver)

                if run.success:
                    r = successes_NS[n, s]

                    durations_NSR[n, s, r] = run.cost

                    successes_NS[n, s] = r + 1

        return (successes_NS, failures_NS, durations_NSR)

    def to_times_arrays(self):
        """Return run durations as per-solver arrays.

        Returns (times_arrays, ns_arrays, failures_NS): for each solver,
        the costs of its successful runs and the matching instance indices
        (in sorted instance-id order), plus per-instance failure counts.
        """

        S = len(self.solver_names)
        N = len(self.run_lists)
        times_lists = [[] for _ in xrange(S)]
        ns_lists = [[] for _ in xrange(S)]
        failures_NS = numpy.zeros((N, S), numpy.intc)
        instance_ids = sorted(self.run_lists)

        for (n, instance_id) in enumerate(instance_ids):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = self.solver_names.index(run.solver)

                if run.success:
                    times_lists[s].append(run.cost)
                    ns_lists[s].append(n)
                else:
                    failures_NS[n, s] += 1

        times_arrays = map(numpy.array, times_lists)
        ns_arrays = map(numpy.array, ns_lists)

        return (times_arrays, ns_arrays, failures_NS)

    def to_bins_array(self, solver_names, B, cutoff = None):
        """Return discretized run duration counts.

        Successful runs under `cutoff` are histogrammed into B equal-width
        bins; failures and timeouts go into an extra final bin (index B).
        Rows follow sorted instance-id order.
        """

        if cutoff is None:
            cutoff = self.get_common_budget()

        S = len(solver_names)
        N = len(self.run_lists)
        C = B + 1
        solver_name_index = list(solver_names)
        outcomes_NSC = numpy.zeros((N, S, C), numpy.intc)
        # NOTE(review): integer division if cutoff is an int (Python 2);
        # budgets appear to be floats in practice — confirm
        interval = cutoff / B

        for (n, instance_id) in enumerate(sorted(self.run_lists)):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = solver_name_index.index(run.solver)

                if run.success and run.cost < cutoff:
                    b = int(run.cost / interval)

                    outcomes_NSC[n, s, b] += 1
                else:
                    outcomes_NSC[n, s, B] += 1

        return outcomes_NSC

    @property
    def ids(self):
        """All associated instance ids (unsorted dict-key order)."""

        return self.run_lists.keys()

    @staticmethod
    def from_roots(solver_names, tasks_roots, domain, suffix = ".runs.csv"):
        """Collect run data by scanning for tasks."""

        task_paths = []

        for tasks_root in tasks_roots:
            task_paths.extend(borg.util.files_under(tasks_root, domain.extensions))

        return RunData.from_paths(solver_names, task_paths, domain, suffix)

    @staticmethod
    def from_paths(solver_names, task_paths, domain, suffix = ".runs.csv"):
        """Collect run data from task paths."""

        training = RunData(solver_names)

        for path in task_paths:
            # load run records
            run_data = numpy.recfromcsv(path + suffix, usemask = True)
            rows = run_data.tolist()

            # a single-row CSV parses to a 0-d record; normalize to a list
            if run_data.shape == ():
                rows = [rows]

            for (run_solver, run_budget, run_cost, run_succeeded, run_answer) in rows:
                record = RunRecord(run_solver, run_budget, run_cost, run_succeeded)

                training.add_run(path, record)

            # load feature data
            feature_records = numpy.recfromcsv("{0}.features.csv".format(path))
            feature_dict = dict(zip(feature_records.dtype.names, feature_records.tolist()))

            training.add_feature_vector(path, feature_dict)

        return training

    @staticmethod
    def from_bundle(bundle_path):
        """Collect run data from two CSV files (all_runs / all_features)."""

        run_data = RunData(None)

        # load runs
        runs_csv_path = os.path.join(bundle_path, "all_runs.csv.gz")

        logger.info("reading run data from %s", runs_csv_path)

        solver_names = set()

        with borg.util.openz(runs_csv_path) as csv_file:
            csv_reader = csv.reader(csv_file)
            columns = csv_reader.next()

            # validate the header before trusting the rows
            if columns[:5] != ["instance", "solver", "budget", "cost", "succeeded"]:
                raise Exception("unexpected columns in run data CSV file")

            for (instance, solver, budget_str, cost_str, succeeded_str) in csv_reader:
                run_data.add_run(
                    instance,
                    RunRecord(
                        solver,
                        float(budget_str),
                        float(cost_str),
                        succeeded_str.lower() == "true",
                        ),
                    )

                solver_names.add(solver)

        run_data.solver_names = sorted(solver_names)

        # load features
        features_csv_path = os.path.join(bundle_path, "all_features.csv.gz")

        logger.info("reading feature data from %s", features_csv_path)

        with borg.util.openz(features_csv_path) as csv_file:
            csv_reader = csv.reader(csv_file)

            # an empty features file is tolerated
            try:
                columns = csv_reader.next()
            except StopIteration:
                pass
            else:
                if columns[0] != "instance":
                    raise Exception("unexpected columns in features CSV file")

                for row in csv_reader:
                    feature_dict = dict(zip(columns[1:], map(float, row[1:])))

                    run_data.add_feature_vector(row[0], feature_dict)

        assert set(run_data.run_lists) == set(run_data.feature_vectors)

        return run_data
# backwards-compatible alias: older code refers to this class as TrainingData
TrainingData = RunData
| {
"repo_name": "borg-project/borg",
"path": "borg/storage.py",
"copies": "1",
"size": "13946",
"license": "mit",
"hash": 7673057979373106000,
"line_mean": 29.0560344828,
"line_max": 93,
"alpha_frac": 0.5539222716,
"autogenerated": false,
"ratio": 3.961931818181818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015854089781818,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
@borg.annotations(
    out_root = ("results output path"),
    bundle = ("path to pre-recorded runs", "positional", None, os.path.abspath),
    )
def main(out_root, bundle):
    """Write the latent classes of a matrix model.

    Fits a multinomial-Dirichlet matrix mixture to the bundled run data
    and dumps three CSV files under `out_root`: the normalized latent
    classes, the per-instance responsibilities, and instance categories.

    NOTE(review): the output paths end in ".csv.gz" but are written with
    plain open(), i.e. uncompressed — borg.util.openz may have been
    intended; confirm before relying on the suffix.
    """

    # fit the model
    run_data = borg.storage.RunData.from_bundle(bundle)

    logger.info("fitting matrix mixture model")

    # extract the latent classes
    estimator = borg.models.MulDirMatMixEstimator(K = 16)
    model = estimator(run_data, 10, run_data)
    latent = model.latent_classes

    # normalize each (class, solver) row over bins
    latent /= numpy.sum(latent, axis = -1)[..., None]

    (K, S, D) = latent.shape

    latent_csv_path = os.path.join(out_root, "latent_classes.csv.gz")

    with open(latent_csv_path, "wb") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["k", "solver", "bin", "value"])

        for k in xrange(K):
            for s in xrange(S):
                solver_name = run_data.solver_names[s]

                for d in xrange(D):
                    writer.writerow([k, solver_name, d, latent[k, s, d]])

    # extract the responsibilities (stored as log probabilities)
    (K, N) = model.responsibilities.shape
    responsibilities = numpy.exp(model.responsibilities)
    responsibilities_csv_path = os.path.join(out_root, "responsibilities.csv.gz")

    with open(responsibilities_csv_path, "wb") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["k", "n", "value"])

        for k in xrange(K):
            for n in xrange(N):
                writer.writerow([k, n, responsibilities[k, n]])

    # extract the categories
    responsibilities_csv_path = os.path.join(out_root, "categories.csv.gz")

    with open(responsibilities_csv_path, "wb") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["n", "category"])

        for (n, name) in enumerate(sorted(run_data.ids)):
            # NOTE(review): category = 9th path component of the instance
            # id — assumes a fixed directory layout; confirm
            category = name.split("/")[8]

            writer.writerow([n, category])

if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/experiments/latent_classes.py",
"copies": "1",
"size": "2156",
"license": "mit",
"hash": 6005821073727157000,
"line_mean": 27.7466666667,
"line_max": 81,
"alpha_frac": 0.6108534323,
"autogenerated": false,
"ratio": 3.389937106918239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9414502614170563,
"avg_score": 0.017257585009535125,
"num_lines": 75
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import numpy
import sklearn
import condor
import borg
import borg.experiments.simulate_runs
logger = borg.get_logger(__name__, default_level = "INFO")
def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):
    """Simulate portfolio execution on a train/test split.

    Trains the solver/portfolio built by `maker` on a (possibly
    subsampled) training split and replays it against the test split via
    a FakeSuite; returns a summary row of success counts and times.
    """

    train_data = all_data.masked(train_mask)
    test_data = all_data.masked(test_mask)

    # optionally restrict training to a random subset of instances
    if instances is not None:
        ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]

        train_data = train_data.filter(*ids)

    # subsample runs according to the requested mixture scheme
    if independent:
        train_data = train_data.collect_independent(mixture).only_nonempty()
    else:
        train_data = train_data.collect_systematic(mixture).only_nonempty()

    budget = test_data.common_budget
    #budget = test_data.common_budget / 2 # XXX
    suite = borg.fake.FakeSuite(test_data)

    # the preplanning-dir portfolio takes special model parameters
    if maker.subname == "preplanning-dir":
        model_kwargs = {"K": 64}

        if "set_alpha" in maker.variants:
            model_kwargs["alpha"] = 1e-2
    else:
        model_kwargs = {}

    solver = maker(suite, train_data, model_kwargs = model_kwargs)
    successes = []

    for (i, instance_id) in enumerate(test_data.run_lists):
        logger.info("simulating run %i/%i on %s", i, len(test_data), instance_id)

        with suite.domain.task_from_path(instance_id) as instance:
            with borg.accounting() as accountant:
                answer = solver.start(instance).run_then_stop(budget)

            succeeded = suite.domain.is_final(instance, answer)

        logger.info(
            "%s %s on %s (%.2f CPU s)",
            maker.name,
            "succeeded" if succeeded else "failed",
            # NOTE(review): `instance` is the task object yielded by
            # task_from_path, not a path string; os.path.basename(instance_id)
            # may have been intended — confirm against FakeSuite's task type
            os.path.basename(instance),
            accountant.total.cpu_seconds,
            )

        if succeeded:
            successes.append(accountant.total.cpu_seconds)

    logger.info(
        "%s had %i successes over %i instances",
        maker.name,
        len(successes),
        len(test_data),
        )

    description = "{0} ({1})".format(mixture, "Sep." if independent else "Sys.")

    return (
        description,
        maker.name,
        instances,
        len(successes),
        numpy.mean(successes),
        numpy.median(successes),
        )
@borg.annotations(
    out_path = ("results CSV output path"),
    runs = ("path to JSON runs specification", "positional", None, borg.util.load_json),
    repeats = ("number of times to repeat each run", "option", None, int),
    workers = ("submit jobs?", "option", "w"),
    local = ("workers are local?", "flag"),
    )
def main(out_path, runs, repeats = 128, workers = 0, local = False):
    """Simulate portfolio and solver behavior.

    For each run specification, sweeps over training-set sizes and
    shuffle-split repetitions, dispatching simulate_run jobs via condor
    and streaming summary rows to a compressed CSV at `out_path`.
    """

    logger.info("simulating %i runs", len(runs))

    # memoize so each bundle is loaded from disk only once
    get_run_data = borg.util.memoize(borg.storage.RunData.from_bundle)

    def yield_jobs():
        for run in runs:
            all_data = get_run_data(run["bundle"])
            validation = sklearn.cross_validation.ShuffleSplit(len(all_data), repeats, test_fraction = 0.2, indices = False)

            # "-" means simulate every individual solver instead of a portfolio
            if run["portfolio_name"] == "-":
                makers = map(borg.experiments.simulate_runs.SolverMaker, all_data.solver_names)
            else:
                makers = [borg.experiments.simulate_runs.PortfolioMaker(run["portfolio_name"])]

            max_instances = len(all_data) * 0.8

            for (train_mask, test_mask) in validation:
                # 32 training-set sizes, evenly spaced from 10 to max_instances
                for instances in map(int, map(round, numpy.r_[10.0:max_instances:32j])):
                    for maker in makers:
                        yield (
                            simulate_run,
                            [
                                run,
                                maker,
                                all_data,
                                train_mask,
                                test_mask,
                                instances,
                                run["independent"],
                                run["mixture"],
                                ],
                            )

    with borg.util.openz(out_path, "wb") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["description", "solver", "instances", "successes", "mean_time", "median_time"])

        # flush after each row so partial results survive interruption
        for (_, row) in condor.do(yield_jobs(), workers, local):
            writer.writerow(row)
            out_file.flush()

if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/experiments/simulate_iid.py",
"copies": "1",
"size": "4571",
"license": "mit",
"hash": 6725562996317923000,
"line_mean": 32.6102941176,
"line_max": 124,
"alpha_frac": 0.5491139794,
"autogenerated": false,
"ratio": 3.900170648464164,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4949284627864164,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import uuid
import numpy
import sklearn
import condor
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def evaluate_split(run_data, alpha, split, train_mask, test_mask):
    """Evaluate a model on a train/test split.

    Trains a multinomial model with smoothing `alpha` on a systematic
    subsample (4 runs per solver per instance) of the training split and
    scores it by mean log probability on the test split. Returns a CSV
    row: [alpha, training-set size, split id, score].
    """

    training = run_data.masked(train_mask).collect_systematic([4])
    testing = run_data.masked(test_mask).collect_systematic([4])
    model = borg.models.MulEstimator(alpha = alpha)(training, 10, training)
    score = numpy.mean(borg.models.run_data_log_probabilities(model, testing))

    logger.info(
        "score at alpha = %.2f given %i runs from %i instances: %f",
        alpha,
        training.get_run_count(),
        len(training),
        score,
        )

    return [alpha, len(training), split, score]
@borg.annotations(
    out_path = ("results output path"),
    bundle = ("path to pre-recorded runs", "positional", None, os.path.abspath),
    workers = ("submit jobs?", "option", "w", int),
    local = ("workers are local?", "flag"),
    )
def main(out_path, bundle, workers = 0, local = False):
    """Evaluate the pure multinomial model over a range of smoothing values.

    Sweeps 64 alpha values over a 10-fold cross-validation of the bundled
    run data, dispatching evaluate_split jobs via condor and writing one
    CSV row per (fold, alpha) pair.
    """

    def yield_jobs():
        run_data = borg.storage.RunData.from_bundle(bundle)
        validation = sklearn.cross_validation.KFold(len(run_data), 10, indices = False)

        for (train_mask, test_mask) in validation:
            # a fresh UUID labels all rows belonging to this fold
            split = uuid.uuid4()
            # 64 alphas, evenly spaced in [1e-8, 1e-1]
            alphas = numpy.r_[1e-8:1e-1:64j]

            for alpha in alphas:
                yield (evaluate_split, [run_data, alpha, split, train_mask, test_mask])

    with open(out_path, "w") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["alpha", "instances", "split", "mean_log_probability"])

        # flush after each row so partial results survive interruption
        for (_, row) in condor.do(yield_jobs(), workers, local):
            writer.writerow(row)
            out_file.flush()

if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/experiments/mul_over_alpha.py",
"copies": "1",
"size": "1991",
"license": "mit",
"hash": -2289466634146313500,
"line_mean": 30.6031746032,
"line_max": 87,
"alpha_frac": 0.6228026118,
"autogenerated": false,
"ratio": 3.4686411149825784,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9475174679121805,
"avg_score": 0.023253809532154716,
"num_lines": 63
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import socket
import condor
logger = condor.log.get_logger(__name__, default_level = "INFO")
class DistributedManager(object):
    """Manage remotely-distributed work.

    Hands tasks to condor-submitted workers over a zeromq REP socket and
    yields completed results as they arrive.
    """

    def __init__(self, tasks, workers):
        """Initialize: bind a REP socket and submit condor workers."""

        self._core = condor.managers.ManagerCore(tasks)

        # set up zeromq (imported lazily so the module loads without pyzmq)
        import zmq

        self._context = zmq.Context()
        self._rep_socket = self._context.socket(zmq.REP)
        rep_port = self._rep_socket.bind_to_random_port("tcp://*")

        logger.debug("communicating on port %i", rep_port)

        # launch condor jobs, pointing workers back at this socket
        logger.info("distributing %i tasks to %i workers", len(tasks), workers)

        (root_path, self._cluster) = \
            condor.raw.submit_condor_workers(
                workers,
                "tcp://{0}:{1}".format(socket.getfqdn(), rep_port),
                )

        # on-disk cache for arguments shared with workers
        self._cache = condor.cache.DiskCache(os.path.join(root_path, "cache"))

    def manage(self):
        """Manage workers and tasks.

        Generator: services the REP socket until every task is finished,
        yielding each completed task result. Every received message must
        be answered (REQ/REP lockstep), even with a None reply.
        """

        import zmq

        try:
            poller = zmq.Poller()

            poller.register(self._rep_socket, zmq.POLLIN)

            while self._core.unfinished_count() > 0:
                events = dict(poller.poll())

                assert events.get(self._rep_socket) == zmq.POLLIN

                message = condor.recv_pyobj_compressed(self._rep_socket)
                (next_task, completed) = self._core.handle(message)

                if next_task is None:
                    # no work left for this worker; reply anyway
                    reply_message = None
                else:
                    reply_message = condor.messages.TaskMessage(None, next_task, self._cache.fork())

                condor.send_pyobj_compressed(self._rep_socket, reply_message)

                if completed is not None:
                    yield completed
        except KeyboardInterrupt:
            # work around bizarre pyzmq SIGINT behavior
            raise

    def clean(self):
        """Clean up manager state: vacate workers, tear down zmq, drop cache."""

        #condor.raw.condor_rm(self._cluster)
        #logger.info("removed condor jobs")
        condor.raw.condor_vacate_job(self._cluster)
        #logger.info("held condor jobs")

        self._rep_socket.close()
        self._context.term()

        logger.info("terminated zeromq context")

        self._cache.delete()

        logger.info("cleaned up argument cache")
| {
"repo_name": "borg-project/utcondor",
"path": "condor/managers/distributed.py",
"copies": "1",
"size": "2430",
"license": "mit",
"hash": -8957896402820144000,
"line_mean": 26.3033707865,
"line_max": 100,
"alpha_frac": 0.566255144,
"autogenerated": false,
"ratio": 4.016528925619835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082784069619835,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import sys
import csv
import zlib
import base64
import cPickle as pickle
import numpy
import borg
import borg.distributors
logger = borg.get_logger(__name__, default_level = "INFO")
def run_solver_on(suite_path, solver_name, task_path, budget, store_answers, seed):
    """Run a solver.

    Executed on a (possibly remote) worker: loads the suite, runs the
    named solver on the task within `budget`, and returns the outcome
    tuple (task_path, solver_name, budget, cost, succeeded, answer_or_None).
    """

    # bring back relevant globals (this function may run in a fresh process)
    import os
    import borg

    logger = borg.get_logger(__name__, default_level = "INFO")

    # seed all PRNGs for reproducible runs
    if seed is not None:
        borg.statistics.set_prng_seeds(seed)

    # run the solver
    suite = borg.load_solvers(suite_path)

    with suite.domain.task_from_path(task_path) as task:
        with borg.accounting() as accountant:
            answer = suite.solvers[solver_name](task)(budget)

        succeeded = suite.domain.is_final(task, answer)

    cost = accountant.total.cpu_seconds

    logger.info(
        "%s %s in %.2f (of %.2f) on %s",
        solver_name,
        "succeeded" if succeeded else "failed",
        cost,
        budget,
        os.path.basename(task_path),
        )

    # answers can be large; only ship them back when requested
    if store_answers:
        return (task_path, solver_name, budget, cost, succeeded, answer)
    else:
        return (task_path, solver_name, budget, cost, succeeded, None)
@borg.annotations(
    suite_path = ("path to the solvers suite", "positional", None, os.path.abspath),
    tasks_root = ("path to task files", "positional", None, os.path.abspath),
    budget = ("per-instance budget", "positional", None, float),
    only_missing = ("only make missing runs", "flag"),
    store_answers = ("store answers to instances", "flag"),
    only_solver = ("only make runs of one solver", "option"),
    runs = ("number of runs", "option", "r", int),
    suffix = ("runs file suffix", "option"),
    distributor_name = ("name of task distributor", "option"),
    workers = ("submit jobs?", "option", "w", int),
    )
def main(
    suite_path,
    tasks_root,
    budget,
    only_missing = False,
    store_answers = False,
    only_solver = None,
    runs = 4,
    suffix = ".runs.csv",
    distributor_name = "ipython",
    workers = 0,
    ):
    """Collect solver running-time data.

    Schedules `runs` runs of each solver on every task found under
    `tasks_root`, distributes them, and appends each outcome to a
    per-task CSV file (`<task>` + `suffix`).
    """

    def yield_runs():
        suite = borg.load_solvers(suite_path)

        logger.info("scanning paths under %s", tasks_root)

        paths = list(borg.util.files_under(tasks_root, suite.domain.extensions))

        if not paths:
            raise ValueError("no paths found under specified root")

        if only_solver is None:
            solver_names = suite.solvers.keys()
        else:
            solver_names = [only_solver]

        for path in paths:
            run_data = None

            # existing per-task CSV lets us top up to `runs` runs per solver
            if only_missing and os.path.exists(path + suffix):
                run_data = numpy.recfromcsv(path + suffix, usemask = True)

            for solver_name in solver_names:
                if only_missing and run_data is not None:
                    count = max(0, runs - numpy.sum(run_data.solver == solver_name))
                else:
                    count = runs

                logger.info("scheduling %i run(s) of %s on %s", count, solver_name, os.path.basename(path))

                for _ in xrange(count):
                    # each run gets its own PRNG seed for reproducibility
                    seed = numpy.random.randint(sys.maxint)

                    yield (run_solver_on, [suite_path, solver_name, path, budget, store_answers, seed])

    distributor = borg.distributors.make(
        distributor_name,
        workers=workers)

    for row in distributor.do(yield_runs()):
        # unpack run outcome
        (cnf_path, solver_name, budget, cost, succeeded, answer) = row

        # answers are pickled+compressed+base64ed so they fit in a CSV cell
        if answer is None:
            answer_text = None
        else:
            answer_text = base64.b64encode(zlib.compress(pickle.dumps(answer)))

        # write it to disk, creating the header row on first append
        csv_path = cnf_path + suffix
        existed = os.path.exists(csv_path)

        with open(csv_path, "a") as csv_file:
            writer = csv.writer(csv_file)

            if not existed:
                writer.writerow(["solver", "budget", "cost", "succeeded", "answer"])

            writer.writerow([solver_name, budget, cost, succeeded, answer_text])

if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/run_solvers.py",
"copies": "1",
"size": "4217",
"license": "mit",
"hash": 2119797261524817400,
"line_mean": 29.1214285714,
"line_max": 107,
"alpha_frac": 0.5926013754,
"autogenerated": false,
"ratio": 3.699122807017544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9706654706008588,
"avg_score": 0.017013895281790946,
"num_lines": 140
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import tempfile
import resource
import subprocess
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def normalized_claspre_names(raw_names):
    """Convert names from claspre to "absolute" names.

    Names beginning with an underscore are children of the most recent
    non-underscore name and are prefixed with it; empty names are dropped.
    """

    normalized = []
    parent = None

    for raw_name in raw_names:
        if raw_name.startswith("_"):
            # child entry: qualify it with the current parent name
            assert parent is not None

            normalized.append(parent + raw_name)
        elif raw_name:
            normalized.append(raw_name)

            parent = raw_name

    return normalized
def parse_claspre_value(raw_value):
    """Convert values from claspre to floats.

    The sentinel strings "No", "Yes", and "NA" map to -1.0, 1.0, and 0.0;
    anything else is parsed as a plain float.
    """

    try:
        return {"No": -1.0, "Yes": 1.0, "NA": 0.0}[raw_value]
    except KeyError:
        return float(raw_value)
def get_claspfolio_features_for(asp_path, binaries_path):
    """Invoke claspre to compute features of an ASP instance.

    Returns (names, values): static features plus dynamic features from
    num_restarts restarts, zero-filled when claspre fails or the instance
    is solved during preprocessing. The CPU cost of the child process is
    charged to the active borg accountant.
    """

    previous_utime = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime

    # get feature names (claspre prints dynamic names, then static names)
    claspre_path = os.path.join(binaries_path, "claspfolio-0.8.0-x86-linux/clasp+pre-1.3.4")
    (names_out, _) = borg.util.check_call_capturing([claspre_path, "--list-features"])
    (dynamic_names_out, static_names_out) = names_out.splitlines()
    dynamic_names = normalized_claspre_names(dynamic_names_out.split(","))
    static_names = normalized_claspre_names(static_names_out.split(","))

    # compute feature values
    values_command = [
        claspre_path,
        "--rand-prob=10,30",
        "--search-limit=300,10",
        "--features=C1",
        "--file",
        asp_path,
        ]
    num_restarts = 10

    logger.info("running %s", values_command)

    (values_out, _, _) = borg.util.call_capturing(values_command)

    # one line of dynamic values per restart, then one line of static values
    values_per = [map(parse_claspre_value, l.split(",")) for l in values_out.strip().splitlines()]

    if len(values_per) < num_restarts + 1:
        # claspre failed, or the instance was solved in preprocessing
        if len(values_per) == 0:
            # (claspre died; fake an all-zero static row)
            values_per = [[0.0] * len(static_names)]

        # pad the missing dynamic rows with zeros, keeping the static row last
        missing = (num_restarts - len(values_per) + 1)
        values_per = values_per[:-1] + ([[0.0] * len(dynamic_names)] * missing) + values_per[-1:]
    else:
        assert len(values_per) == num_restarts + 1

    # pull them together: restart-qualified dynamic names, then static names
    names = []
    values = []

    for i in xrange(num_restarts):
        names += ["restart{0}-{1}".format(i, n) for n in dynamic_names]
        values += values_per[i]

    names += static_names
    values += values_per[-1]

    # charge the feature-computation CPU cost to the current accountant
    cost = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime - previous_utime

    borg.get_accountant().charge_cpu(cost)

    logger.info("collected features of %s in %.2fs", asp_path, cost)

    assert len(names) == len(values)

    return (names, values)
def get_lp2sat_features_for(asp_path, binaries_path):
    """Convert to CNF and compute SAT features of an ASP instance."""

    with tempfile.NamedTemporaryFile(prefix = "borg.", suffix = ".cnf") as cnf_file:
        with open(asp_path, "rb") as asp_file:
            try:
                borg.domains.asp.run_lp2sat(binaries_path, asp_file, cnf_file)
            except borg.domains.asp.LP2SAT_FailedException:
                # XXX this workaround is silly; just improve sat.features
                # (substitute a trivial one-clause CNF when conversion fails)
                cnf_file.seek(0)
                cnf_file.truncate(0)
                cnf_file.write("p cnf 1 1\n1 0\n")

        cnf_file.flush()

        return borg.domains.sat.features.get_features_for(cnf_file.name)
def get_features_for(asp_path, binaries_path):
    """Compute features of an ASP instance.

    Currently only claspre features are enabled; the lp2sat-based SAT
    features remain disabled below.
    """

    #(cnf_names, cnf_values) = get_lp2sat_features_for(asp_path, binaries_path)
    (clasp_names, clasp_values) = get_claspfolio_features_for(asp_path, binaries_path)
    #cnf_qnames = map("cnf-{0}".format, cnf_names)
    qualified_names = ["clasp-{0}".format(name) for name in clasp_names]

    #return (cnf_qnames + clasp_qnames, cnf_values + clasp_values)
    return (qualified_names, clasp_values)
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/asp/features.py",
"copies": "1",
"size": "4121",
"license": "mit",
"hash": -2385762256279919000,
"line_mean": 29.984962406,
"line_max": 98,
"alpha_frac": 0.6115020626,
"autogenerated": false,
"ratio": 3.2862838915470496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9347962571300172,
"avg_score": 0.009964676569375316,
"num_lines": 133
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import uuid
import subprocess
import condor
import borg
logger = borg.get_logger(__name__, default_level = "DEBUG")
def ground_instance(asp_path, gringo_path, domain_path, ignore_errors, compat):
    """Ground an ASP instance using Gringo.

    Writes ``<asp_path>.ground.gz`` on success.  On Gringo failure, either
    logs a warning and returns None (when ignore_errors) or raises.
    """
    # assemble the gringo command line, including the optional .verify file
    (base_path, _) = os.path.splitext(asp_path)
    verify_path = "{0}.verify".format(base_path)
    command = [gringo_path, domain_path, asp_path]
    if compat:
        command.append("--compat")
    if os.path.exists(verify_path):
        command.append(verify_path)
        verified = " (and verified)"
    else:
        verified = ""
    logger.debug("running %s", command)
    # ground into a uniquely-named partial file, then compress and rename;
    # the finally block removes any leftovers from failed attempts
    lparse_part_path = "{0}.ground.part.{1}".format(asp_path, uuid.uuid4())
    lparse_gz_path = "{0}.gz".format(lparse_part_path)
    try:
        with open(lparse_part_path, "wb") as part_file:
            with open("/dev/null", "wb") as null_file:
                gringo_status = \
                    subprocess.call(
                        command,
                        stdout = part_file,
                        stderr = null_file,
                        )
        if gringo_status != 0:
            message = "gringo failed to ground {0}".format(asp_path)
            if ignore_errors:
                logger.warning("%s", message)
                return None
            else:
                raise Exception(message)
        logger.info("grounded %s%s", asp_path, verified)
        # compress the grounding
        with open(lparse_part_path) as part_file:
            with borg.util.openz(lparse_gz_path, "wb") as gz_file:
                gz_file.write(part_file.read())
        # and move it into place
        os.rename(lparse_gz_path, "{0}.ground.gz".format(asp_path))
    finally:
        if os.path.exists(lparse_part_path):
            os.unlink(lparse_part_path)
        if os.path.exists(lparse_gz_path):
            os.unlink(lparse_gz_path)
@borg.annotations(
    gringo_path = ("path to Gringo", "positional", None, os.path.abspath),
    domain_path = ("path to the ASP domain file", "positional", None, os.path.abspath),
    root_path = ("instances root directory",),
    ignore_errors = ("ignore Gringo errors", "flag"),
    skip_existing = ("skip already-grounded instances", "flag"),
    compat = ("enable lparse compatibility", "flag"),
    workers = ("number of Condor workers", "option", "w", int),
    )
def main(
    gringo_path,
    domain_path,
    root_path,
    ignore_errors = False,
    skip_existing = False,
    compat = False,
    workers = 0,
    ):
    """Ground a set of ASP instances using Gringo."""
    asp_paths = map(os.path.abspath, borg.util.files_under(root_path, [".asp"]))
    # queue one grounding job per instance, optionally skipping those
    # whose grounded output already exists
    jobs = []
    for asp_path in asp_paths:
        if skip_existing and os.path.exists(asp_path + ".ground.gz"):
            continue
        jobs.append((ground_instance, [asp_path, gringo_path, domain_path, ignore_errors, compat]))
    logger.info("grounding %i instances", len(jobs))
    condor.do_for(jobs, workers)
# command-line entry point: dispatch to main() via borg's CLI wrapper
if __name__ == "__main__":
    borg.script(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/ground.py",
"copies": "1",
"size": "3329",
"license": "mit",
"hash": 6305234570006554000,
"line_mean": 29.5412844037,
"line_max": 96,
"alpha_frac": 0.5719435266,
"autogenerated": false,
"ratio": 3.5264830508474576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45984265774474575,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import uuid
import time
import shutil
import signal
import select
import random
import tempfile
import datetime
import multiprocessing
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def random_seed():
    """Return a random solver seed."""
    # seeds are drawn uniformly from the non-negative 31-bit range
    upper_bound = 2 ** 31
    return numpy.random.randint(0, upper_bound)
def timed_read(fds, timeout = -1):
    """Read from multiple descriptors with an optional timeout.

    Returns, for each descriptor: its available data when readable, the
    empty string on hangup, and None when no event occurred.
    """
    # register every descriptor for readability events, then poll once
    poller = select.poll()
    for descriptor in fds:
        poller.register(descriptor, select.POLLIN)
    events = dict(poller.poll(timeout * 1000))
    def read_one(descriptor):
        flags = events.get(descriptor, 0)
        if flags & select.POLLIN:
            return os.read(descriptor, 65536)
        if flags & select.POLLHUP:
            return ""
        return None
    return map(read_one, fds)
class SolverProcess(multiprocessing.Process):
    """Attempt to solve the task in a subprocess."""
    def __init__(self, parse_output, arguments, stm_queue, mts_queue, solver_id, tmpdir, cwd):
        # parse_output: callable mapping captured stdout to an answer
        # arguments: subsolver command line
        # stm_queue: solver-to-master queue (progress reports and errors)
        # mts_queue: master-to-solver queue (additional CPU budget grants)
        self._parse_output = parse_output
        self._arguments = arguments
        self._stm_queue = stm_queue
        self._mts_queue = mts_queue
        self._solver_id = solver_id
        self._tmpdir = tmpdir
        self._seed = random_seed()
        self._popened = None
        self._cwd = cwd
        if self._cwd is None:
            logger.info("running %s", arguments)
        else:
            logger.info("running %s under %s", arguments, cwd)
        multiprocessing.Process.__init__(self)
    def run(self):
        # reseed PRNGs so forked worker processes get independent streams
        numpy.random.seed(self._seed)
        random.seed(numpy.random.randint(2**31))
        try:
            class DeathRequestedError(Exception):
                pass
            try:
                def handle_sigusr1(number, frame):
                    raise DeathRequestedError()
                try:
                    # SIGUSR1 from the parent requests a clean shutdown
                    signal.signal(signal.SIGUSR1, handle_sigusr1)
                    self.handle_subsolver()
                finally:
                    signal.signal(signal.SIGUSR1, signal.SIG_IGN)
            except DeathRequestedError:
                pass
        except Exception, error:
            # forward the failure to the parent over the queue
            self._stm_queue.put(error)
        except KeyboardInterrupt:
            pass
        finally:
            if self._popened is not None:
                # the subsolver may be SIGSTOPped; SIGCONT it so it can die
                self._popened.kill()
                os.kill(self._popened.pid, signal.SIGCONT)
                self._popened.wait()
            shutil.rmtree(self._tmpdir, ignore_errors = True)
    def handle_subsolver(self):
        # spawn solver
        # run/pause loop: the subsolver runs only while granted budget; when
        # the budget is exhausted it is SIGSTOPped until more budget arrives
        limit = 0.0
        stdout = ""
        expenditure = datetime.timedelta(seconds = limit)
        last_expenditure = expenditure
        last_audit = time.time() - 1.0
        while limit == 0.0 or self._popened is not None:
            if expenditure >= datetime.timedelta(seconds = limit):
                if self._popened is not None:
                    # NOTE(review): local `popened` is relied on to stay in
                    # sync with self._popened here — confirm
                    os.kill(popened.pid, signal.SIGSTOP)
                    run_cost = borg.util.seconds(expenditure - last_expenditure)
                    # False flag: the subsolver paused, did not terminate
                    self._stm_queue.put((self._solver_id, run_cost, None, False))
                # block until the master grants additional budget
                additional = self._mts_queue.get()
                limit += additional
                last_expenditure = expenditure
                if self._popened is None:
                    popened = borg.unix.sessions.spawn_pipe_session(self._arguments, cwd = self._cwd)
                    self._popened = popened
                    descriptors = [popened.stdout.fileno(), popened.stderr.fileno()]
                    accountant = borg.unix.accounting.SessionTimeAccountant(popened.pid)
                else:
                    os.kill(popened.pid, signal.SIGCONT)
            # spend some time waiting for output
            (chunk, _) = timed_read(descriptors, 1.0)
            if time.time() - last_audit > borg.defaults.proc_poll_period:
                accountant.audit()
                expenditure = accountant.total
                last_audit = time.time()
            # check for termination ("" signals hangup; None means no event)
            if chunk == "":
                self._popened = None
            elif chunk is not None:
                stdout += chunk
        # provide the outcome to the central planner
        answer = self._parse_output(stdout)
        run_cost = borg.util.seconds(expenditure - last_expenditure)
        self._stm_queue.put((self._solver_id, run_cost, answer, True))
def prepare(command, root, cnf_path, tmpdir):
    """Expand substitution keywords in a solver command template."""
    substitutions = dict(
        root = root,
        task = cnf_path,
        seed = random_seed(),
        tmpdir = tmpdir,
        )
    # every argument is a str.format template; expand each in turn
    expanded = []
    for template in command:
        expanded.append(template.format(**substitutions))
    return expanded
class RunningSolver(object):
    """In-progress solver process."""
    def __init__(
        self,
        parse,
        command,
        root,
        task_path,
        stm_queue = None,
        solver_id = None,
        cwd = None,
        ):
        """Initialize."""
        # solver-to-master queue; created here unless a shared one is given
        if stm_queue is None:
            self._stm_queue = multiprocessing.Queue()
        else:
            self._stm_queue = stm_queue
        if solver_id is None:
            self._solver_id = uuid.uuid4()
        else:
            self._solver_id = solver_id
        self._mts_queue = multiprocessing.Queue()
        self._tmpdir = tempfile.mkdtemp(prefix = "borg.")
        self._process = \
            SolverProcess(
                parse,
                prepare(command, root, task_path, self._tmpdir),
                self._stm_queue,
                self._mts_queue,
                self._solver_id,
                self._tmpdir,
                cwd,
                )
    def __call__(self, budget):
        """Unpause the solver, block for some limit, and terminate it."""
        self.unpause_for(budget)
        response = self._stm_queue.get()
        # the worker forwards exceptions over the queue; re-raise them here
        if isinstance(response, Exception):
            raise response
        else:
            (solver_id, run_cpu_cost, answer, terminated) = response
            assert solver_id == self._solver_id
        self.stop()
        borg.get_accountant().charge_cpu(run_cpu_cost)
        return answer
    def unpause_for(self, budget):
        """Unpause the solver for the specified duration."""
        # lazily start the worker process on first use
        if not self._process.is_alive():
            self._process.start()
        self._mts_queue.put(budget)
    def stop(self):
        """Terminate the solver."""
        if self._process.is_alive():
            # SIGUSR1 asks SolverProcess.run for a clean shutdown
            os.kill(self._process.pid, signal.SIGUSR1)
            self._process.join()
        shutil.rmtree(self._tmpdir, ignore_errors = True)
class RunningPortfolio(object):
    """Portfolio running on a task."""

    def __init__(self, portfolio, suite, task):
        """Record the portfolio, solver suite, and task to solve."""
        self.portfolio = portfolio
        self.suite = suite
        self.task = task

    def run_then_stop(self, budget):
        """Attempt to solve the associated task."""
        cost = borg.Cost(cpu_seconds = budget)
        return self.portfolio(self.task, self.suite, cost)
class RunningPortfolioFactory(object):
    """Run a portfolio on tasks."""

    def __init__(self, portfolio, suite):
        """Record the portfolio and solver suite to run."""
        self.portfolio = portfolio
        self.suite = suite

    def start(self, task):
        """Return an instance of this portfolio running on the task."""
        return RunningPortfolio(self.portfolio, self.suite, task)
class EmptySolver(object):
    """Solver stub that immediately returns a fixed answer."""

    def __init__(self, answer):
        # the canned answer returned by every __call__
        self._answer = answer

    def __call__(self, budget):
        """Ignore the budget and return the canned answer."""
        return self._answer

    def unpause_for(self, budget):
        """No-op; there is nothing to unpause."""

    def stop(self):
        """No-op; there is nothing to stop."""
| {
"repo_name": "borg-project/borg",
"path": "borg/solver_io.py",
"copies": "1",
"size": "7800",
"license": "mit",
"hash": -7232422010866268000,
"line_mean": 26.4647887324,
"line_max": 101,
"alpha_frac": 0.5553846154,
"autogenerated": false,
"ratio": 4.109589041095891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.516497365649589,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
import condor.work
# command-line entry point
if __name__ == "__main__":
    plac.call(condor.work.main)
import sys
import imp
import traceback
import zmq
import condor
logger = condor.log.get_logger(__name__, default_level = "NOTSET")
def work_once(condor_id, req_socket, task_message):
    """Request and/or complete a single unit of work.

    Returns the next task message (the master's reply to a completed task),
    or None to signal that the worker should terminate.
    """
    # get an assignment
    poller = zmq.Poller()
    poller.register(req_socket, zmq.POLLIN)
    if task_message is None:
        condor.send_pyobj_compressed(
            req_socket,
            condor.messages.ApplyMessage(condor_id),
            )
        # wait up to one minute for the master's reply
        polled = poller.poll(timeout = 60 * 1000)
        if dict(polled).get(req_socket, 0) & zmq.POLLIN:
            task_message = condor.recv_pyobj_compressed(req_socket)
        else:
            task_message = None
    if task_message is None:
        logger.info("received null assignment or timed out; terminating")
        return None
    task = task_message.get_task()
    # complete the assignment
    try:
        logger.info("starting work on task %s", task.key)
        result = task()
    except KeyboardInterrupt, error:
        logger.warning("interruption during task %s", task.key)
        condor.send_pyobj_compressed(
            req_socket,
            condor.messages.InterruptedMessage(condor_id, task.key),
            )
        req_socket.recv()
    except BaseException, error:
        # report the failure (with traceback) rather than crashing silently
        description = traceback.format_exc(error)
        logger.warning("error during task %s:\n%s", task.key, description)
        condor.send_pyobj_compressed(
            req_socket,
            condor.messages.ErrorMessage(condor_id, task.key, description),
            )
        req_socket.recv()
    else:
        logger.info("finished task %s", task.key)
        condor.send_pyobj_compressed(
            req_socket,
            condor.messages.DoneMessage(condor_id, task.key, result),
            )
        # the reply to a completed task is the next assignment
        return condor.recv_pyobj_compressed(req_socket)
    return None
def work_loop(condor_id, req_socket):
    """Repeatedly request and complete units of work.

    Loops until work_once() returns None (null assignment or timeout).
    """
    task_message = None
    while True:
        # the original wrapped this call in `try: ... except Exception:
        # raise`, which is a no-op handler; removed
        task_message = work_once(condor_id, req_socket, task_message)
        if task_message is None:
            break
@plac.annotations(
    req_address = ("zeromq address of master"),
    condor_id = ("condor process specifier"),
    main_path = ("path to module that replaces __main__"),
    )
def main(req_address, condor_id, main_path = None):
    """Do arbitrary distributed work."""
    condor.log.enable_default_logging()
    # replace the __main__ module, if necessary
    # (presumably so objects pickled against the caller's __main__ can be
    # resolved in this worker — confirm against the master's protocol)
    if main_path is not None:
        sys.modules["__old_main__"] = sys.modules["__main__"]
        sys.modules["__main__"] = imp.load_source("__new_main__", main_path)
    # connect to the work server
    logger.info("connecting to %s", req_address)
    context = zmq.Context()
    req_socket = context.socket(zmq.REQ)
    req_socket.setsockopt(zmq.LINGER, 60 * 1000)
    req_socket.connect(req_address)
    # enter the work loop
    try:
        work_loop(condor_id, req_socket)
    finally:
        # always release zeromq resources, even on error
        logger.debug("flushing sockets and terminating zeromq context")
        req_socket.close()
        context.term()
        logger.debug("zeromq cleanup complete")
| {
"repo_name": "borg-project/utcondor",
"path": "condor/work.py",
"copies": "1",
"size": "3413",
"license": "mit",
"hash": 712505623287191200,
"line_mean": 25.0534351145,
"line_max": 77,
"alpha_frac": 0.6120714914,
"autogenerated": false,
"ratio": 3.7505494505494505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48626209419494504,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
import cPickle as pickle
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
@plac.annotations(
    out_path = ("path to store solver"),
    portfolio_name = ("name of the portfolio to train"),
    solvers_path = ("path to the solvers bundle"),
    suffix = ("runs file suffix", "option"),
    tasks_roots = ("paths to training task directories"),
    )
def main(out_path, portfolio_name, solvers_path, suffix = ".runs.csv", *tasks_roots):
    """Train a solver."""
    borg.enable_default_logging()
    # load the solvers bundle and its recorded training runs
    solvers_bundle = borg.load_solvers(solvers_path)
    run_data = borg.storage.TrainingData.from_roots(tasks_roots, solvers_bundle.domain, suffix = suffix)
    # train the requested portfolio (budget parameters are fixed here) # XXX
    trained = borg.portfolios.named[portfolio_name](solvers_bundle, run_data, 100.0, 60)
    logger.info("portfolio training complete")
    # persist the trained portfolio
    with open(out_path, "w") as out_file:
        pickle.dump(trained, out_file, protocol = -1)
    logger.info("portfolio written to %s", out_path)
# command-line entry point
if __name__ == "__main__":
    plac.call(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/train.py",
"copies": "1",
"size": "1151",
"license": "mit",
"hash": 7766794975929679000,
"line_mean": 29.2894736842,
"line_max": 96,
"alpha_frac": 0.6629018245,
"autogenerated": false,
"ratio": 3.2792022792022792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4442104103702279,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
import sys
import logging
import cPickle as pickle
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class CompetitionFormatter(logging.Formatter):
    """A concise log formatter for output during competition."""

    def __init__(self):
        logging.Formatter.__init__(self, "%(levelname)s: %(message)s", "%y%m%d%H%M%S")

    def format(self, record):
        """Format the record, prefixing every output line with "c "."""
        formatted = logging.Formatter.format(self, record)
        lines = formatted.splitlines()
        # continuation lines are indented to align past the "LEVEL: " prefix
        continuation_prefix = "c " + " " * (len(record.levelname) + 2)
        pieces = ["c " + lines[0]]
        pieces.extend(continuation_prefix + line for line in lines[1:])
        return "\n".join(pieces)
def enable_output():
    """Set up competition-compliant output."""
    # configure the default global level
    borg.get_logger(level = borg.defaults.root_log_level)
    # route every record to stdout through the competition formatter
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(CompetitionFormatter())
    stdout_handler.setLevel(logging.NOTSET)
    logging.root.addHandler(stdout_handler)
@plac.annotations(
    model_path = ("path to trained model pickle"),
    solvers_path = ("path to solvers bundle"),
    input_path = ("path to instance"),
    seed = ("PRNG seed", "option", None, int),
    budget = ("time limit (CPU or wall)", "option", None, float),
    cores = ("units of execution", "option", None, int),
    speed = ("machine calibration ratio", "option", "s", float),
    quiet = ("be less noisy", "flag", "q"),
    )
def main(
    model_path,
    solvers_path,
    input_path,
    seed = 42,
    budget = 3600.0,
    cores = 1,
    speed = borg.defaults.machine_speed,
    quiet = False
    ):
    """Solve a problem instance."""
    # XXX hackish
    # override the global machine-speed calibration for this run
    borg.defaults.machine_speed = speed
    try:
        # general setup
        enable_output()
        if not quiet:
            borg.get_logger("borg.solvers", level = "DETAIL")
        borg.statistics.set_prng_seeds(seed)
        # run the solver
        bundle = borg.load_solvers(solvers_path)
        logger.info("loaded portfolio model from %s", model_path)
        with open(model_path) as file:
            portfolio = pickle.load(file)
        logger.info("solving %s", input_path)
        with bundle.domain.task_from_path(input_path) as task:
            # charge already-spent CPU time against the budget
            remaining = budget - borg.get_accountant().total.cpu_seconds
            answer = portfolio(task, bundle, borg.Cost(cpu_seconds = remaining), cores)
            return bundle.domain.show_answer(task, answer)
    except KeyboardInterrupt:
        print "\nc terminating on SIGINT"
# command-line entry point
if __name__ == "__main__":
    plac.call(main)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/solve.py",
"copies": "1",
"size": "2758",
"license": "mit",
"hash": 8659637850224930000,
"line_mean": 26.3069306931,
"line_max": 87,
"alpha_frac": 0.6069615664,
"autogenerated": false,
"ratio": 3.6289473684210525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47359089348210526,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
# command-line entry point; imported lazily so the module can also be
# imported without running the tool
if __name__ == "__main__":
    from borg_explorer.tools.view_fit import main
    plac.call(main)
import os.path
import json
import cPickle as pickle
import tarfile
import cStringIO as StringIO
import numpy
import rpy2.robjects
import rpy2.robjects.packages
from rpy2.robjects.numpy2ri import numpy2ri
import cargo
import borg
logger = cargo.get_logger(__name__, default_level = "INFO")
class CategoryData(object):
    """Data for a category."""
    def fit(self, runs_path, budget_interval, budget_count):
        """Fit data for a category.

        Loads a runs CSV, builds per-(instance, solver) run matrices, fits
        the bilevel multinomial model, and computes an instance-similarity
        projection.  Returns self.
        """
        # load the runs data
        # assumed CSV column order: (_, instance, answer, cost, _, solver, _)
        # -- inferred from the tuple unpacking below; confirm against data
        logger.info("loading data from %s", runs_path)
        runs = numpy.recfromcsv(runs_path, usemask = True).tolist()
        # build the indices (first-seen order assigns matrix coordinates)
        solver_index = {}
        instance_index = {}
        for (_, instance, _, _, _, solver_name, _) in runs:
            instance_name = os.path.basename(instance)
            if instance_name not in instance_index:
                instance_index[instance_name] = len(instance_index)
            if solver_name not in solver_index:
                solver_index[solver_name] = len(solver_index)
        S = len(solver_index)
        N = len(instance_index)
        B = budget_count
        self.solvers = sorted(solver_index, key = lambda k: solver_index[k])
        self.instances = sorted(instance_index, key = lambda k: instance_index[k])
        # build the matrices
        budgets = [b * budget_interval for b in xrange(1, B + 1)]
        max_cost = budget_interval * budget_count
        attempts = numpy.zeros((N, S))
        costs = numpy.zeros((N, S))
        successes = numpy.zeros((N, S))
        answers = numpy.zeros((N, S))
        binned_successes = numpy.zeros((N, S, B))
        for (_, instance, answer, cost, _, solver_name, _) in runs:
            s = solver_index[solver_name]
            n = instance_index[os.path.basename(instance)]
            if attempts[n, s] == 0.0: # XXX support multiple runs
                attempts[n, s] = 1.0
                costs[n, s] = cost
                if cost <= max_cost and not (answer.startswith("UNKNOWN") or answer == "SIGNAL"):
                    # NOTE(review): digitize returns B when cost equals the
                    # top budget edge, which would index out of bounds here
                    # -- confirm costs are strictly below max_cost
                    b = numpy.digitize([cost], budgets)
                    successes[n, s] += 1.0
                    binned_successes[n, s, b] += 1.0
                    # answers: +1 for SAT, -1 for UNSAT, 0 for no answer
                    if answer == "SAT":
                        answers[n, s] = 1.0
                    elif answer == "UNSAT":
                        answers[n, s] = -1.0
                    else:
                        raise RuntimeError("unrecognized answer {0}".format(answer))
        # fit the model
        self.model = borg.models.BilevelMultinomialModel(binned_successes, attempts)
        # build the mean-cost table (one row per instance, one run per solver)
        self.table = []
        for n in xrange(N):
            task_runs_list = []
            for s in xrange(S):
                if answers[n, s] == 0.0:
                    answer = None
                elif answers[n, s] == 1.0:
                    answer = True
                else:
                    answer = False
                task_runs_list.append({
                    "solver": self.solvers[s],
                    "cost": costs[n, s],
                    "answer": answer
                    })
            self.table.append({
                "instance": self.instances[n],
                "runs": task_runs_list,
                })
        # generate cluster projection
        # pairwise instance divergence from model responsibilities
        # (KL-divergence-like form -- confirm against the model definition)
        self.similarity_NN = numpy.empty((N, N))
        for m in xrange(N):
            for n in xrange(N):
                rm_SK = numpy.sum(self.model._tclass_res_LN[:, m][:, None, None] * self.model._tclass_LSK, axis = 0)
                rn_SK = numpy.sum(self.model._tclass_res_LN[:, n][:, None, None] * self.model._tclass_LSK, axis = 0)
                self.similarity_NN[m, n] = numpy.sum(rm_SK * numpy.log(rm_SK / rn_SK))
        # classical multidimensional scaling (R cmdscale) to 2D coordinates
        self.projection_N2 = numpy.array(rpy2.robjects.r["cmdscale"](numpy2ri(1.0 - self.similarity_NN)))
        return self
class ViewData(object):
    """All data required for visualization."""

    def __init__(self, relative_to, setup):
        """Archive the data directory and fit one model per category."""
        self.setup = setup
        # archive the data directory as an in-memory gzipped tarball
        logger.info("archiving data as tarball")
        archive_buffer = StringIO.StringIO()
        archive = tarfile.open(None, "w:gz", archive_buffer)
        archive.add(relative_to)
        archive.close()
        self.data_archive = archive_buffer.getvalue()
        # fit a model to each configured category
        self.categories = {}
        for category in setup["categories"]:
            runs_path = os.path.join(relative_to, category["file"])
            self.categories[category["name"]] = \
                CategoryData().fit(
                    runs_path,
                    setup["budget_interval"],
                    setup["budget_count"],
                    )
@plac.annotations(
    out_path = ("path to write model(s)"),
    setup_path = ("path to configuration"),
    )
def main(out_path, setup_path):
    """Prepare to visualize run data."""
    cargo.enable_default_logging()
    # read the configuration and fit the visualization data; data files are
    # resolved relative to the configuration's directory
    with open(setup_path) as setup_file:
        configuration = json.load(setup_file)
    view = ViewData(os.path.dirname(setup_path), configuration)
    # persist the fitted data
    logger.info("writing visualization data to %s", out_path)
    with open(out_path, "w") as out_file:
        pickle.dump(view, out_file)
| {
"repo_name": "borg-project/borg-explorer",
"path": "src/python/borg_explorer/tools/view_fit.py",
"copies": "1",
"size": "5498",
"license": "mit",
"hash": -4274125806278486500,
"line_mean": 29.5444444444,
"line_max": 116,
"alpha_frac": 0.5383775919,
"autogenerated": false,
"ratio": 3.8022130013831257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48405905932831256,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
# command-line entry point; imported lazily so the module can also be
# imported without running the tool
if __name__ == "__main__":
    from borg_explorer.tools.view_write import main
    plac.call(main)
import os.path
import csv
import json
import cPickle as pickle
import distutils.dir_util
import numpy
import jinja2
import cargo
import borg
import borg_explorer
logger = cargo.get_logger(__name__, default_level = "INFO")
def sanitize(name):
    """Return *name* with path separators replaced by underscores."""
    return "_".join(name.split("/"))
def write_category(root_path, name, category):
    """Write one category's JSON data files and reify its UI URLs."""
    # write data files
    sanitized = sanitize(name)
    data_path = os.path.join(root_path, "data", sanitized)
    logger.info("writing %s files to %s", name, data_path)
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    outputs = [
        ("runs.json", category.table),
        ("solvers.json", category.solvers),
        ("instances.json", category.instances),
        ("membership.json", category.model._tclass_res_LN.T.tolist()),
        ("projection.json", category.projection_N2.tolist()),
        ]
    for (file_name, payload) in outputs:
        with open(os.path.join(data_path, file_name), "w") as output_file:
            json.dump(payload, output_file)
    # reify URLs to the filesystem: each pseudo-URL becomes a directory
    # containing a symlink to the shared index page
    logger.info("reifying interface URLs")
    def reify_ui_path(*components):
        pseudo_path = os.path.join(root_path, "ui", sanitized, *components)
        if not os.path.exists(pseudo_path):
            os.makedirs(pseudo_path)
        os.symlink(
            os.path.join(root_path, "index.html"),
            os.path.join(pseudo_path, "index.html"),
            )
    reify_ui_path()
    reify_ui_path("table")
    reify_ui_path("cluster")
    reify_ui_path("projection")
@plac.annotations(
    out_path = ("path to write visualization"),
    fit_path = ("path to visualization data"),
    )
def main(out_path, fit_path):
    """Visualize model parameters."""
    cargo.enable_default_logging()
    # copy over static content
    static_path = os.path.join(borg_explorer.__path__[0], "static")
    distutils.dir_util.copy_tree(static_path, out_path)
    # load the fitted visualization data
    logger.info("loading visualization data from %s", fit_path)
    with open(fit_path) as fit_file:
        view_data = pickle.load(fit_file)
    # write the inputs archive and the per-category data directories
    logger.info("writing inputs archive")
    with open(os.path.join(out_path, "inputs.tar.gz"), "w") as archive_file:
        archive_file.write(view_data.data_archive)
    for (category_name, category) in view_data.categories.items():
        write_category(out_path, category_name, category)
    # render the templated interface files
    template_env = jinja2.Environment(loader = jinja2.PackageLoader("borg_explorer", "templates"))
    def write_rendered(template_name, output_name, **kwargs):
        template = template_env.get_template(template_name)
        with open(os.path.join(out_path, output_name), "w") as output_file:
            output_file.write(template.render(**kwargs).encode("utf-8"))
    write_rendered("index.html", "index.html", base_url = view_data.setup["base_url"])
    write_rendered("borgview.js", "borgview.js", base_url = view_data.setup["base_url"])
    write_rendered("borgview.css", "borgview.css")
    write_rendered("analytics.js", "analytics.js")
    # emit the category index
    with open(os.path.join(out_path, "categories.json"), "w") as output_file:
        category_list = [{"name": k, "path": sanitize(k)} for k in view_data.categories.keys()]
        json.dump(category_list, output_file)
| {
"repo_name": "borg-project/borg-explorer",
"path": "src/python/borg_explorer/tools/view_write.py",
"copies": "1",
"size": "3699",
"license": "mit",
"hash": 12901482045695892,
"line_mean": 30.6153846154,
"line_max": 89,
"alpha_frac": 0.6526088132,
"autogenerated": false,
"ratio": 3.317488789237668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9411858665271863,
"avg_score": 0.011647787433161104,
"num_lines": 117
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
# command-line entry point; imported lazily so the module can also be
# imported without running the tool
if __name__ == "__main__":
    from borg.tools.armada import main
    plac.call(main)
import sys
import random
import logging
import cPickle as pickle
import numpy
import cargo
import borg
logger = cargo.get_logger(__name__, default_level = "INFO")
@plac.annotations(
    in_path = ("path to instance"),
    seed = ("PRNG seed", "option", None, int),
    workers = ("units of execution", "option", None, int),
    )
def main(in_path, seed = 42, budget = 2e6, workers = 1):
    """Solve a problem instance with an armada."""
    # fix: `math` was used below but never imported in this module
    import math

    # general setup
    numpy.random.seed(seed)
    random.seed(numpy.random.randint(2**31))
    # parse the instance
    with open(in_path) as in_file:
        cnf = borg.domains.sat.dimacs.parse_cnf(in_file)
    # generate splits by fixing the polarity of a few random variables
    # NOTE(review): math.log is the natural log; for 2**nbits >= workers
    # this should probably be math.log(workers, 2) -- confirm intent
    nbits = int(math.ceil(math.log(workers)))
    # fix: the original key lambda took no arguments, so sorted() raised
    # TypeError; a per-item random key draws a random sample of variables
    bits = sorted(xrange(cnf.N), key = lambda _: random.random())[:nbits]
    for bit in bits:
        for polarity in [-1, 1]:
            # append a unit clause forcing the chosen variable's polarity
            clauses = cnf.clauses + [[polarity * (bit + 1)]]
            subcnf = borg.domains.sat.dimacs.DIMACS_GraphFile([], clauses, cnf.N)
| {
"repo_name": "borg-project/borg",
"path": "borg/tools/armada.py",
"copies": "1",
"size": "1148",
"license": "mit",
"hash": -3209167832984324600,
"line_mean": 25.0909090909,
"line_max": 81,
"alpha_frac": 0.631533101,
"autogenerated": false,
"ratio": 3.162534435261708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4294067536261708,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
# command-line entry point; imported lazily so the module can also be
# imported without running the tool
if __name__ == "__main__":
    from cargo.tools.labor.work2 import main
    plac.call(main)
import numpy
import random
import traceback
import zmq
import cargo
logger = cargo.get_logger(__name__, level = "NOTSET")
def work_once(condor_id, req_socket, task):
    """Request and/or complete a single unit of work.

    Returns the next task (the master's reply to a completed task), or
    None to signal that the worker should terminate.
    """
    # get an assignment
    if task is None:
        cargo.send_pyobj_gz(
            req_socket,
            cargo.labor2.ApplyMessage(condor_id),
            )
        task = cargo.recv_pyobj_gz(req_socket)
    if task is None:
        logger.info("received null assignment; terminating")
        return None
    # complete the assignment
    try:
        # derive a deterministic per-task PRNG seed from the task key
        seed = abs(hash(task.key))
        logger.info("setting PRNG seed to %s", seed)
        numpy.random.seed(seed)
        random.seed(numpy.random.randint(2**32))
        logger.info("starting work on task %s", task.key)
        result = task()
    except KeyboardInterrupt, error:
        logger.warning("interruption during task %s", task.key)
        cargo.send_pyobj_gz(
            req_socket,
            cargo.labor2.InterruptedMessage(condor_id, task.key),
            )
        req_socket.recv()
    except BaseException, error:
        # report the failure (with traceback) rather than crashing silently
        description = traceback.format_exc(error)
        logger.warning("error during task %s:\n%s", task.key, description)
        cargo.send_pyobj_gz(
            req_socket,
            cargo.labor2.ErrorMessage(condor_id, task.key, description),
            )
        req_socket.recv()
    else:
        logger.info("finished task %s", task.key)
        cargo.send_pyobj_gz(
            req_socket,
            cargo.labor2.DoneMessage(condor_id, task.key, result),
            )
        # the reply to a completed task is the next assignment
        return cargo.recv_pyobj_gz(req_socket)
    return None
def work_loop(condor_id, req_socket):
    """Repeatedly request and complete units of work.

    Loops until work_once() returns None (a null assignment).
    """
    task = None
    while True:
        # the original wrapped this call in `try: ... except Exception:
        # raise`, which is a no-op handler; removed
        task = work_once(condor_id, req_socket, task)
        if task is None:
            break
@plac.annotations(
    req_address = ("zeromq address of master"),
    condor_id = ("condor process specifier"),
    )
def main(req_address, condor_id):
    """Do arbitrary distributed work."""
    cargo.enable_default_logging()
    # connect to the work server
    logger.info("connecting to %s", req_address)
    context = zmq.Context()
    request_socket = context.socket(zmq.REQ)
    request_socket.connect(req_address)
    # enter the work loop, releasing zeromq resources even on error
    try:
        work_loop(condor_id, request_socket)
    finally:
        logger.info("flushing sockets and terminating zeromq context")
        request_socket.close()
        context.term()
        logger.info("zeromq cleanup complete")
| {
"repo_name": "borg-project/cargo",
"path": "src/python/cargo/tools/labor/work2.py",
"copies": "1",
"size": "2831",
"license": "mit",
"hash": 5725056930277920000,
"line_mean": 22.3966942149,
"line_max": 74,
"alpha_frac": 0.5990815966,
"autogenerated": false,
"ratio": 3.6718547341115433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9719246924399616,
"avg_score": 0.0103378812623853,
"num_lines": 121
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import plac
# command-line entry point; imported lazily so the module can also be
# imported without running the tool
if __name__ == "__main__":
    from cargo.tools.triggered import main
    plac.call(main)
import subprocess
import pyinotify
import cargo
logger = cargo.get_logger(__name__, level = "NOTSET")
class TriggerHandler(pyinotify.ProcessEvent):
    """Handle relevant filesystem events."""

    def process_default(self, event):
        """Log every observed filesystem event."""
        logger.info("filesystem event: %s", event)
def execute_command(command, tmux, window):
    """Execute the triggered command, optionally reporting through tmux.

    With a window number, status is shown by recoloring that window's
    status bar; otherwise a tmux display-message is used.
    """
    logger.info("executing triggered command")
    # announce the start
    if tmux:
        if window is None:
            subprocess.call(["tmux", "-q", "display-message", "command active"])
        else:
            subprocess.call(["tmux", "-q", "setw", "-t", str(window), "window-status-bg", "green"])
    status = subprocess.call(command)
    # report the outcome
    if tmux:
        if window is None:
            message = "command successful" if status == 0 else "command failed"
            subprocess.call(["tmux", "-q", "display-message", message])
        else:
            color = "default" if status == 0 else "red"
            subprocess.call(["tmux", "-q", "setw", "-t", str(window), "window-status-bg", color])
@plac.annotations(
    path = ("path to watch"),
    executable = ("triggered command"),
    arguments = ("command arguments"),
    no_tmux = ("disable tmux interaction", "option"),
    window = ("tmux window number", "option"),
    timeout = ("wait; coalesce events", "option", "t", int),
    )
def main(path, executable, no_tmux = False, timeout = 250, window = None, *arguments):
    """
    Run something in response to changes in a directory.
    """
    # enable logging
    cargo.enable_default_logging()
    # prepare the notification framework
    command = [executable] + list(arguments)
    manager = pyinotify.WatchManager()
    handler = TriggerHandler()
    notifier = pyinotify.Notifier(manager, handler)
    manager.add_watch(
        path,
        pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY,
        rec = True,
        )
    # watch for and respond to events
    try:
        while True:
            triggered = notifier.check_events()
            if triggered:
                # coalesce events
                notifier.read_events()
                notifier.process_events()
                if timeout is not None:
                    # keep draining until no event arrives for `timeout` ms
                    while notifier.check_events(timeout = timeout):
                        notifier.read_events()
                        notifier.process_events()
                # run the command
                execute_command(command, not no_tmux, window)
    finally:
        # restore the tmux window status color on exit
        if not no_tmux:
            subprocess.call(["tmux", "-q", "setw", "-t", str(window), "window-status-bg", "default"])
| {
"repo_name": "borg-project/cargo",
"path": "src/python/cargo/tools/triggered.py",
"copies": "1",
"size": "2975",
"license": "mit",
"hash": 8037923821020988000,
"line_mean": 26.8037383178,
"line_max": 101,
"alpha_frac": 0.5613445378,
"autogenerated": false,
"ratio": 4.075342465753424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136687003553424,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import random
import cPickle as pickle
import condor
from . import log
logger = condor.log.get_logger(__name__, level = "INFO")
from . import defaults
from . import raw
from . import cache
from . import managers
from . import messages
# Prefer snappy for message compression when its bindings are importable;
# otherwise fall back to zlib at level 1 (speed over ratio, like snappy).
try:
    import snappy
except ImportError:
    import zlib
    logger.debug("using zlib for compression")
    # level 1 keeps compression cheap for the messaging hot path
    compress = lambda data: zlib.compress(data, 1)
    decompress = zlib.decompress
    compression_extension = "gz"
else:
    logger.debug("using snappy for compression")
    compress = snappy.compress
    decompress = snappy.decompress
    compression_extension = "snappy"
def send_pyobj_compressed(zmq_socket, message):
    """Pickle, compress, and send a Python object over a zeromq socket."""
    serialized = pickle.dumps(message, protocol = -1)
    zmq_socket.send(condor.compress(serialized))
def recv_pyobj_compressed(zmq_socket):
    """Receive, decompress, and unpickle a Python object from a zeromq socket."""
    raw = zmq_socket.recv()
    return pickle.loads(condor.decompress(raw))
def do(requests, workers, local = False):
    """Do work remotely or locally; yield (task, result) pairs.

    `workers` may be an int or "auto" (min of 256 and the task count);
    zero workers selects the in-process serial manager.
    """
    # shuffle so related requests are spread across workers
    tasks = sorted(map(condor.managers.Task.from_request, requests), key = lambda _: random.random())

    if workers == "auto":
        worker_count = min(256, len(tasks))
    else:
        worker_count = int(workers)

    if worker_count <= 0:
        manager = condor.managers.SerialManager(tasks)
    elif local:
        manager = condor.managers.ParallelManager(tasks, worker_count)
    else:
        manager = condor.managers.DistributedManager(tasks, worker_count)

    try:
        for item in manager.manage():
            yield item
    finally:
        # always release manager resources, even if the consumer bails early
        manager.clean()
def do_for(requests, workers, handler = lambda _, x: x, local = False):
    """Do work remotely or locally; apply handler to each (task, result) pair."""
    for pair in do(requests, workers, local = local):
        handler(pair[0], pair[1])
| {
"repo_name": "borg-project/utcondor",
"path": "condor/__init__.py",
"copies": "1",
"size": "1962",
"license": "mit",
"hash": 7856821494570038000,
"line_mean": 24.8157894737,
"line_max": 101,
"alpha_frac": 0.6712538226,
"autogenerated": false,
"ratio": 3.908366533864542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079620356464541,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import re
import borg
logger = borg.get_logger(__name__)
def parse_sat_output(stdout):
    """Parse a solver's standard competition-format output.

    Returns the certificate (list of literals) when satisfiable, False when
    unsatisfiable, and None when the output is inconclusive or malformed.
    """
    match = re.search(r"^s +(.+)$", stdout, re.M)

    if match:
        (answer_type,) = map(str.upper, match.groups())

        if answer_type == "SATISFIABLE":
            answer = []

            for line in re.findall(r"^v ([ \-0-9]*)$", stdout, re.M):
                answer.extend(map(int, line.split()))

            # a valid certificate is terminated by 0; an empty or
            # unterminated "v" section previously raised IndexError here
            if answer and answer[-1] == 0:
                return answer[:-1]
        elif answer_type == "UNSATISFIABLE":
            return False

    return None
class SAT_SolverFactory(object):
    """Construct a basic competition solver callable."""
    def __init__(self, root, command, library_paths = ()):
        """Initialize.

        root: solver installation directory, forwarded to the runner.
        command: argv template used to launch the solver.
        library_paths: accepted but never stored or forwarded --
            NOTE(review): confirm whether it should affect the environment.
        """
        self._root = root
        self._command = command
    def __call__(self, task, stm_queue = None, solver_id = None):
        """Launch the solver on `task`; return the running-solver handle."""
        return \
            borg.solver_io.RunningSolver(
                parse_sat_output,
                self._command,
                self._root,
                task.path,
                stm_queue = stm_queue,
                solver_id = solver_id,
                )
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/sat/solvers.py",
"copies": "1",
"size": "1254",
"license": "mit",
"hash": -3326308309528993300,
"line_mean": 25.125,
"line_max": 69,
"alpha_frac": 0.5183413078,
"autogenerated": false,
"ratio": 3.710059171597633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4728400479397633,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import re
import os
import os.path
import sys
import pipes
import datetime
import cStringIO as StringIO
import subprocess
import condor
logger = condor.log.get_logger(__name__, level = "INFO")
def call_capturing(arguments, input = None, preexec_fn = None):
    """Spawn a process; return (stdout, stderr, returncode).

    On any error while the child runs, the child is killed and reaped
    before the exception is propagated to the caller.
    """
    popened = None

    try:
        # launch the subprocess
        popened = \
            subprocess.Popen(
                arguments,
                stdin = subprocess.PIPE,
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE,
                preexec_fn = preexec_fn,
                )

        # wait for its natural death
        (stdout, stderr) = popened.communicate(input)
    except:
        # don't leave a zombie behind, but do surface the failure: the old
        # code swallowed the exception and implicitly returned None, which
        # made callers fail later when unpacking the result triple
        if popened is not None and popened.poll() is None:
            popened.kill()
            popened.wait()

        raise
    else:
        return (stdout, stderr, popened.returncode)
def check_call_capturing(arguments, input = None, preexec_fn = None):
    """Spawn a process; return (stdout, stderr), raising on nonzero exit."""
    (stdout, stderr, code) = call_capturing(arguments, input, preexec_fn)

    if code != 0:
        from subprocess import CalledProcessError

        # attach the captured streams so callers can inspect the failure
        error = CalledProcessError(code, arguments)
        error.stdout = stdout
        error.stderr = stderr

        raise error

    return (stdout, stderr)
class CondorSubmission(object):
    """Build the text of a Condor submission file incrementally.

    Every mutator returns self, so calls can be chained fluently.
    """

    def __init__(self):
        self._out = StringIO.StringIO()

    def blank(self, lines = 1):
        """Append one or more blank lines."""
        self._out.write("\n" * lines)
        return self

    def pair(self, name, value):
        """Append a single variable assignment line."""
        self._out.write("%s = %s\n" % (name, value))
        return self

    def pairs(self, **kwargs):
        """Append a block of variable assignment lines."""
        return self.pairs_dict(kwargs)

    def pairs_dict(self, pairs):
        """Append a block of assignments, padded to align the '=' column."""
        width = max(len(name) for name in pairs)
        for name in pairs:
            self.pair(name.ljust(width), pairs[name])
        return self

    def environment(self, **kwargs):
        """Append a multi-line environment assignment, sorted by name."""
        self._out.write("environment = \\\n")

        entries = sorted(kwargs.items())
        last = len(entries) - 1

        for (i, (key, value)) in enumerate(entries):
            self._out.write(" %s=%s;" % (key, value))
            # every line except the final one needs a continuation backslash
            if i < last:
                self._out.write(" \\")
            self._out.write("\n")

        return self

    def header(self, header):
        """Append an upper-cased comment header boxed by dashed lines."""
        dashes = "-" * len(header)
        return self.comment(dashes).comment(header.upper()).comment(dashes)

    def comment(self, comment):
        """Append one comment line."""
        self._out.write("# %s\n" % comment)
        return self

    def queue(self, count):
        """Append a queue instruction."""
        self._out.write("Queue %i\n" % count)
        return self

    @property
    def contents(self):
        """The accumulated file text."""
        return self._out.getvalue()
def condor_submit(submit_path):
    """Submit to condor; return the cluster number."""
    (stdout, stderr) = check_call_capturing(["/usr/bin/env", "condor_submit", submit_path])

    # condor_submit reports the cluster on its final output line
    last_line = stdout.splitlines()[-1]
    match = re.match(r"(\d+) job\(s\) submitted to cluster (\d+)\.", last_line)

    if match is None:
        raise RuntimeError("failed to submit to condor:%s" % stdout)

    (jobs, cluster) = map(int, match.groups())

    logger.info("submitted %i condor jobs as group %i", jobs, cluster)

    return cluster
def condor_rm(specifier):
    """Kill condor job(s); return True on success, False on failure."""
    logger.info("killing condor jobs matched by %s", specifier)

    try:
        check_call_capturing(["condor_rm", str(specifier)])
    except subprocess.CalledProcessError:
        return False

    return True
def condor_hold(*specifiers):
    """Hold condor job(s); return True on success, False on failure."""
    logger.info("holding condor job(s) matched by %s", specifiers)

    try:
        check_call_capturing(["condor_hold"] + map(str, specifiers))
    except subprocess.CalledProcessError:
        return False

    return True
def condor_release(specifiers):
    """Release condor job(s); return True on success, False on failure."""
    logger.info("releasing condor job(s) matched by %s", specifiers)

    try:
        check_call_capturing(["condor_release"] + map(str, specifiers))
    except subprocess.CalledProcessError:
        return False

    return True
def condor_vacate_job(specifier):
    """Bump condor job(s); return True on success, False on failure."""
    logger.info("vacating condor jobs matched by %s", specifier)

    try:
        check_call_capturing(["condor_vacate_job", str(specifier)])
    except subprocess.CalledProcessError:
        return False

    return True
def unclaimed(constraint = None):
    """Return the number of unclaimed nodes matching constraint."""
    command = ["condor_status"]

    if constraint is not None:
        command.extend(["-constraint", constraint])

    (stdout, stderr) = check_call_capturing(command)

    # the final condor_status line is the totals summary (8 fields)
    summary_fields = stdout.splitlines()[-1].split()

    if len(summary_fields) != 8:
        raise Exception("unable to parse condor_status output")

    # field 5 is the "Unclaimed" count on the totals line
    return int(summary_fields[4])
def default_condor_home():
    """Return a timestamped directory path under the configured condor home."""
    timestamp = datetime.datetime.now().replace(microsecond = 0).isoformat()
    return os.path.join(condor.defaults.home, timestamp)
def submit_condor_workers(
    workers,
    req_address,
    matching = None,
    description = "distributed Python worker process(es)",
    group = "GRAD",
    project = "AI_ROBOTICS",
    condor_home = None,
    ):
    """Submit `workers` condor worker jobs that connect back to `req_address`.

    Returns (condor_home, cluster_number).
    """
    # defaults; condor_home is resolved here, at call time -- the old
    # `condor_home = default_condor_home()` default argument was evaluated
    # once at import time, so every call shared a single timestamp directory
    if matching is None:
        matching = condor.defaults.condor_matching
    if condor_home is None:
        condor_home = default_condor_home()

    # prepare the working directories
    working_paths = [os.path.join(condor_home, "%i" % i) for i in xrange(workers)]

    for working_path in working_paths:
        os.makedirs(working_path)

    # provide a convenience symlink
    link_path = "workers-latest"

    if os.path.lexists(link_path):
        os.unlink(link_path)

    os.symlink(condor_home, link_path)

    # write the submit file, starting with job matching
    submit = CondorSubmission()

    if matching:
        submit \
            .header("matching") \
            .blank() \
            .pair("requirements", matching) \
            .blank()

    # write the general condor section
    submit \
        .header("configuration") \
        .blank() \
        .pairs_dict({
            "+Group": "\"%s\"" % group,
            "+Project": "\"%s\"" % project,
            "+ProjectDescription": "\"%s\"" % description,
            }) \
        .blank() \
        .pairs(
            universe = "vanilla",
            notification = "Error",
            kill_sig = "SIGINT",
            Log = "condor.log",
            Error = "condor.err",
            Output = "condor.out",
            Input = "/dev/null",
            Executable = os.environ.get("SHELL"),
            ) \
        .blank() \
        .environment(
            PATH = os.environ.get("PATH", ""),
            PYTHONPATH = os.environ.get("PYTHONPATH", ""),
            LD_LIBRARY_PATH = os.environ.get("LD_LIBRARY_PATH", ""),
            ) \
        .blank()

    # write the jobs section: one queue entry per worker directory
    submit \
        .header("jobs") \
        .blank()

    for working_path in working_paths:
        arg_format = '"-c \'%s ""$0"" $@\' -m condor.work %s $(Cluster).$(Process) %s"'
        main_path = os.path.abspath(sys.modules["__main__"].__file__)

        submit \
            .pairs(
                Initialdir = working_path,
                Arguments = arg_format % (sys.executable, req_address, pipes.quote(main_path)),
                ) \
            .queue(1) \
            .blank()

    # submit the job to condor
    submit_path = os.path.join(condor_home, "workers.condor")

    with open(submit_path, "w") as submit_file:
        submit_file.write(submit.contents)

    group_number = condor_submit(submit_path)

    logger.info("(cluster has %i matching node(s) unclaimed)", unclaimed(matching))

    return (condor_home, group_number)
| {
"repo_name": "borg-project/utcondor",
"path": "condor/raw.py",
"copies": "1",
"size": "8568",
"license": "mit",
"hash": 8850002276834373000,
"line_mean": 25.1219512195,
"line_max": 95,
"alpha_frac": 0.5704948646,
"autogenerated": false,
"ratio": 3.9814126394052045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9999964065634126,
"avg_score": 0.010388687674215684,
"num_lines": 328
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import re
import os
import os.path
import sys
import time
import datetime
import subprocess
import cargo
import cStringIO as StringIO
logger = cargo.get_logger(__name__, level = "INFO")
class CondorSubmission(object):
    """Build the text of a Condor submission file incrementally.

    Every mutator returns self, so calls can be chained fluently.
    """

    def __init__(self):
        self._out = StringIO.StringIO()

    def blank(self, lines = 1):
        """Append one or more blank lines."""
        self._out.write("\n" * lines)
        return self

    def pair(self, name, value):
        """Append a single variable assignment line."""
        self._out.write("%s = %s\n" % (name, value))
        return self

    def pairs(self, **kwargs):
        """Append a block of variable assignment lines."""
        return self.pairs_dict(kwargs)

    def pairs_dict(self, pairs):
        """Append a block of assignments, padded to align the '=' column."""
        width = max(len(name) for name in pairs)
        for name in pairs:
            self.pair(name.ljust(width), pairs[name])
        return self

    def environment(self, **kwargs):
        """Append a multi-line environment assignment, sorted by name."""
        self._out.write("environment = \\\n")

        entries = sorted(kwargs.items())
        last = len(entries) - 1

        for (i, (key, value)) in enumerate(entries):
            self._out.write(" %s=%s;" % (key, value))
            # every line except the final one needs a continuation backslash
            if i < last:
                self._out.write(" \\")
            self._out.write("\n")

        return self

    def header(self, header):
        """Append an upper-cased comment header boxed by dashed lines."""
        dashes = "-" * len(header)
        return self.comment(dashes).comment(header.upper()).comment(dashes)

    def comment(self, comment):
        """Append one comment line."""
        self._out.write("# %s\n" % comment)
        return self

    def queue(self, count):
        """Append a queue instruction."""
        self._out.write("Queue %i\n" % count)
        return self

    @property
    def contents(self):
        """The accumulated file text."""
        return self._out.getvalue()
def condor_submit(submit_path):
    """Submit to condor; return the cluster number."""
    (stdout, stderr) = cargo.check_call_capturing(["/usr/bin/env", "condor_submit", submit_path])

    # condor_submit reports the cluster on its final output line
    last_line = stdout.splitlines()[-1]
    match = re.match(r"(\d+) job\(s\) submitted to cluster (\d+)\.", last_line)

    if match is None:
        raise RuntimeError("failed to submit to condor:%s" % stdout)

    (jobs, cluster) = map(int, match.groups())

    logger.info("submitted %i condor jobs to cluster %i", jobs, cluster)

    return cluster
def condor_rm(specifier):
    """Kill condor job(s); return True on success, False on failure."""
    logger.debug("killing condor jobs matched by %s", specifier)

    try:
        cargo.check_call_capturing(["condor_rm", str(specifier)])
    except subprocess.CalledProcessError:
        return False

    return True
def condor_hold(specifiers):
    """Hold condor job(s); return True on success, False on failure.

    Brought in line with condor_rm/condor_release, which report failure as
    False instead of letting CalledProcessError escape.
    """
    logger.debug("holding condor job(s) matched by %s", specifiers)

    try:
        cargo.check_call_capturing(["/usr/bin/env", "condor_hold"] + map(str, specifiers))
    except subprocess.CalledProcessError:
        return False
    else:
        return True
def condor_release(specifiers):
    """Release condor job(s); return True on success, False on failure."""
    logger.debug("releasing condor job(s) matched by %s", specifiers)

    try:
        cargo.check_call_capturing(["condor_release"] + map(str, specifiers))
    except subprocess.CalledProcessError:
        return False

    return True
def default_condor_home():
    """Return a timestamped default worker directory path under "workers/"."""
    timestamp = datetime.datetime.now().replace(microsecond = 0).isoformat()
    return "workers/%s" % timestamp
def submit_condor_workers(
    workers,
    req_address,
    matching = None,
    description = "distributed Python worker process(es)",
    group = "GRAD",
    project = "AI_ROBOTICS",
    condor_home = None,
    ):
    """Submit `workers` condor worker jobs; return the cluster number.

    Both `matching` and `condor_home` are resolved at call time: the old
    default arguments were evaluated once at import, freezing the matching
    expression and reusing a single timestamped home directory per process.
    """
    if matching is None:
        matching = cargo.defaults.condor_matching
    if condor_home is None:
        condor_home = default_condor_home()

    # prepare the working directories
    working_paths = [os.path.join(condor_home, "%i" % i) for i in xrange(workers)]

    for working_path in working_paths:
        os.makedirs(working_path)

    # provide a convenience symlink
    link_path = "workers-latest"

    if os.path.lexists(link_path):
        os.unlink(link_path)

    os.symlink(condor_home, link_path)

    # write the submit file, starting with job matching
    submit = CondorSubmission()

    if matching:
        submit \
            .header("matching") \
            .blank() \
            .pair("requirements", matching) \
            .blank()

    # write the general condor section
    submit \
        .header("configuration") \
        .blank() \
        .pairs_dict({
            "+Group": "\"%s\"" % group,
            "+Project": "\"%s\"" % project,
            "+ProjectDescription": "\"%s\"" % description,
            }) \
        .blank() \
        .pairs(
            universe = "vanilla",
            notification = "Error",
            kill_sig = "SIGINT",
            Log = "condor.log",
            Error = "condor.err",
            Output = "condor.out",
            Input = "/dev/null",
            Executable = os.environ.get("SHELL"),
            ) \
        .blank() \
        .environment(
            CARGO_LOG_FILE_PREFIX = "log",
            PATH = os.environ.get("PATH", ""),
            PYTHONPATH = os.environ.get("PYTHONPATH", ""),
            LD_LIBRARY_PATH = os.environ.get("LD_LIBRARY_PATH", ""),
            ) \
        .blank()

    # write the jobs section: one queue entry per worker directory
    submit \
        .header("jobs") \
        .blank()

    for working_path in working_paths:
        arg_format = '"-c \'%s ""$0"" $@\' -m cargo.tools.labor.work2 %s $(Cluster).$(Process)"'

        submit \
            .pairs(
                Initialdir = working_path,
                Arguments = arg_format % (sys.executable, req_address),
                ) \
            .queue(1) \
            .blank()

    # submit the job to condor
    submit_path = os.path.join(condor_home, "workers.condor")

    with open(submit_path, "w") as submit_file:
        submit_file.write(submit.contents)

    return cargo.condor_submit(submit_path)
| {
"repo_name": "borg-project/cargo",
"path": "src/python/cargo/condor.py",
"copies": "1",
"size": "6173",
"license": "mit",
"hash": -2925866236527615000,
"line_mean": 25.156779661,
"line_max": 97,
"alpha_frac": 0.56423133,
"autogenerated": false,
"ratio": 3.848503740648379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4912735070648379,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import re
import os.path
import borg
logger = borg.get_logger(__name__)
def parse_max_sat_competition(stdout):
    """Parse output from a standard competition MAX-SAT solver.

    Returns (answer_type, certificate, optimum) on a conclusive answer,
    or None when the output is inconclusive or malformed.
    """
    # the solver may print several "o <bound>" lines; the last one wins
    optima = [int(o) for o in re.findall(r"^o +([0-9]+) *\r?$", stdout, re.M)]
    optimum = optima[-1] if optima else None

    answer_match = re.search(r"^s +([a-zA-Z ]+) *\r?$", stdout, re.M)

    if answer_match is None:
        return None

    (answer_type_raw,) = answer_match.groups()
    answer_type = answer_type_raw.strip().upper()

    if answer_type == "OPTIMUM FOUND":
        certificate = []

        for line in re.findall(r"^v ([ x\-0-9]*) *\r?$", stdout, re.M):
            certificate.extend(line.split())

        # an optimum claim with no model is malformed
        if len(certificate) == 0:
            return None
    elif answer_type == "UNSATISFIABLE":
        certificate = None
    else:
        return None

    return (answer_type, certificate, optimum)
class MAX_SAT_BasicSolverFactory(object):
    """Build a competition MAX-SAT solver callable from a fixed command."""

    def __init__(self, root, command):
        self._root = root
        self._command = command

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Launch the solver on `task`; return the running-solver handle."""
        return borg.solver_io.RunningSolver(
            parse_max_sat_competition,
            self._command,
            self._root,
            task.path,
            stm_queue = stm_queue,
            solver_id = solver_id,
            )
class MAX_SAT_WBO_SolverFactory(object):
    """Build a WBO solver callable; the file-format flag is derived per task."""

    def __init__(self, root, prefix):
        self._root = root
        self._prefix = prefix

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Launch the solver on `task`; return the running-solver handle."""
        # pass the task's extension (without the dot) as the file format
        extension = os.path.splitext(task.path)[1]
        command = self._prefix + ["-file-format={0}".format(extension[1:]), "{task}"]

        return borg.solver_io.RunningSolver(
            parse_max_sat_competition,
            command,
            self._root,
            task.path,
            stm_queue = stm_queue,
            solver_id = solver_id,
            )
class MAX_SAT_IncSatzSolverFactory(object):
    """Build an incsatz solver callable.

    Chooses the unweighted or weighted command based on the task's extension.
    """

    def __init__(self, root, commands):
        # callers pass a (inc_command, incw_command) pair positionally; the
        # py2 tuple-parameter form is unpacked here instead, call-compatibly
        (inc_command, incw_command) = commands

        self._root = root
        self._inc_command = inc_command
        self._incw_command = incw_command

    def __call__(self, task, stm_queue = None, solver_id = None):
        """Launch the appropriate solver on `task`."""
        extension = os.path.splitext(task.path)[1]

        # plain .cnf tasks are unweighted; anything else uses the weighted binary
        if extension[1:] == "cnf":
            command = self._inc_command
        else:
            command = self._incw_command

        return borg.solver_io.RunningSolver(
            parse_max_sat_competition,
            command,
            self._root,
            task.path,
            stm_queue = stm_queue,
            solver_id = solver_id,
            )
| {
"repo_name": "borg-project/borg",
"path": "borg/domains/max_sat/solvers.py",
"copies": "1",
"size": "2888",
"license": "mit",
"hash": 1393598621557451800,
"line_mean": 27.88,
"line_max": 85,
"alpha_frac": 0.5162742382,
"autogenerated": false,
"ratio": 3.6883780332056193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624766948277245,
"avg_score": 0.015977064625674952,
"num_lines": 100
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import sys
import time
import random
import collections
import condor
logger = condor.log.get_logger(__name__, default_level = "INFO")
from .distributed import DistributedManager
from .parallel import ParallelManager
from .serial import SerialManager
from .http_status import serve_http_status
class Task(object):
    """One unit of distributable work.

    Wraps a callable plus its positional and keyword arguments; `key`
    identifies the task (defaulting to the instance's id).
    """
    def __init__(self, call, args = None, kwargs = None, key = None):
        # avoid shared mutable default arguments ([] and {} were created
        # once at def time and shared by every Task using the defaults)
        self.call = call
        self.args = [] if args is None else args
        self.kwargs = {} if kwargs is None else kwargs

        if key is None:
            self.key = id(self)
        else:
            self.key = key

    def __hash__(self):
        # NOTE(review): __eq__ is not defined, so equal keys do not imply
        # equal tasks; hashing on key only affects dict/set bucketing
        return hash(self.key)

    def __call__(self):
        return self.call(*self.args, **self.kwargs)

    def __str__(self):
        return '%s(*%s, **%s)' % (self.call.__name__, self.args, self.kwargs)

    @staticmethod
    def from_request(request):
        """Build a task, if necessary: accepts a Task, a mapping of kwargs,
        or a positional-argument sequence."""
        if isinstance(request, Task):
            return request
        elif isinstance(request, collections.Mapping):
            return Task(**request)
        else:
            return Task(*request)
class TaskState(object):
    """Current state of progress on a task."""

    def __init__(self, task):
        self.task = task
        self.done = False
        self.working = set()

    def score(self):
        """Score the urgency of this task; lower tuples sort sooner.

        Completed tasks sort last; untouched tasks first; otherwise order by
        worker count, then by the most recent assignment timestamp.
        """
        if self.done:
            return (sys.maxint, sys.maxint, random.random())

        if not self.working:
            return (0, 0, random.random())

        newest = max(wstate.timestamp for wstate in self.working)

        return (len(self.working), newest, random.random())

    def is_finished(self):
        """A task is unfinished until its score saturates."""
        return self.score()[0] != sys.maxint
class WorkerState(object):
    """Current state of a known worker process."""

    def __init__(self, condor_id):
        self.condor_id = condor_id
        self.assigned = None
        self.timestamp = None

    def set_done(self):
        """Mark the assigned task complete; return whether it already was."""
        task_state = self.assigned

        task_state.working.remove(self)

        already_done = task_state.done
        task_state.done = True

        self.assigned = None

        return already_done

    def set_assigned(self, tstate):
        """Change worker state in response to assignment."""
        self.disassociate()

        self.assigned = tstate
        self.timestamp = time.time()

        tstate.working.add(self)

    def set_interruption(self):
        """Change worker state in response to interruption."""
        self.disassociate()

    def set_error(self):
        """Change worker state in response to error."""
        self.disassociate()

    def disassociate(self):
        """Detach from the currently-assigned task, if any."""
        if self.assigned is not None:
            self.assigned.working.remove(self)
            self.assigned = None
class ManagerCore(object):
    """Maintain the task queue and worker assignments."""
    def __init__(self, tasks):
        """Initialize.

        tasks: iterable of Task objects; state is keyed by task key.
        """
        self.tstates = dict((t.key, TaskState(t)) for t in tasks)
        self.wstates = {}
        # expose live progress over HTTP for monitoring
        serve_http_status(self)
    def handle(self, message):
        """Manage workers and tasks.

        Returns a (next_task, completed) pair: `next_task` is the task to
        hand back to the sending worker (or None), and `completed` is a
        (task, result) pair when this message finished a task (else None).
        """
        logger.info(
            "[%s/%i] %s",
            str(self.done_count()).rjust(len(str(len(self.tstates))), "0"),
            len(self.tstates),
            message.get_summary(),
            )
        # look up (or lazily register) the sending worker
        sender = self.wstates.get(message.sender)
        if sender is None:
            sender = WorkerState(message.sender)
            self.wstates[sender.condor_id] = sender
        if isinstance(message, condor.messages.ApplyMessage):
            # task request
            sender.disassociate()
            sender.set_assigned(self.next_task())
            return (sender.assigned.task, None)
        elif isinstance(message, condor.messages.DoneMessage):
            # task result
            finished = sender.assigned
            was_done = sender.set_done()
            assert finished.task.key == message.key
            # immediately hand the worker its next assignment, if any remain
            selected = self.next_task()
            if selected is None:
                selected_task = None
            else:
                selected_task = selected.task
                sender.set_assigned(selected)
            # a duplicate result for an already-done task is suppressed
            if was_done:
                return (selected_task, None)
            else:
                return (selected_task, (finished.task, message.result))
        elif isinstance(message, condor.messages.InterruptedMessage):
            # worker interruption
            sender.set_interruption()
            return (None, None)
        elif isinstance(message, condor.messages.ErrorMessage):
            # worker exception
            sender.set_error()
            return (None, None)
        else:
            raise TypeError("unrecognized message type")
    def next_task(self):
        """Select the next task on which to work, or None if all are done."""
        tstate = min(self.tstates.itervalues(), key = TaskState.score)
        if tstate.done:
            return None
        else:
            return tstate
    def done_count(self):
        """Return the number of completed tasks."""
        return sum(1 for t in self.tstates.itervalues() if t.done)
    def unfinished_count(self):
        """Return the number of unfinished tasks."""
        return sum(1 for t in self.tstates.itervalues() if not t.done)
| {
"repo_name": "borg-project/utcondor",
"path": "condor/managers/__init__.py",
"copies": "1",
"size": "5445",
"license": "mit",
"hash": -4141308458064184300,
"line_mean": 25.432038835,
"line_max": 77,
"alpha_frac": 0.5687786961,
"autogenerated": false,
"ratio": 4.247269890795632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5316048586895632,
"avg_score": null,
"num_lines": null
} |
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import time
import operator
import resource
import contextlib
import borg
class Cost(object):
    """A bundle of resource usage; either component may be None (unknown)."""

    def __init__(self, cpu_seconds = None, wall_seconds = None):
        self.cpu_seconds = float(cpu_seconds) if cpu_seconds is not None else None
        self.wall_seconds = float(wall_seconds) if wall_seconds is not None else None

    def __str__(self):
        return "(CPU seconds: {0}; wall seconds: {1})".format(self.cpu_seconds, self.wall_seconds)

    def __add__(self, other):
        return Cost(
            cpu_seconds = none_op(operator.add, self.cpu_seconds, other.cpu_seconds),
            wall_seconds = none_op(operator.add, self.wall_seconds, other.wall_seconds),
            )

    def __sub__(self, other):
        return Cost(
            cpu_seconds = none_op(operator.sub, self.cpu_seconds, other.cpu_seconds),
            wall_seconds = none_op(operator.sub, self.wall_seconds, other.wall_seconds),
            )
class Accountant(object):
    """Track CPU and wall-clock resources used, optionally nested under a parent."""

    def __init__(self, parent = None, eve = False):
        """Start tracking; `eve` counts CPU already spent by this process."""
        self._parent = parent

        if eve:
            already_spent = resource.getrusage(resource.RUSAGE_SELF).ru_utime
            self._past = Cost(cpu_seconds = already_spent, wall_seconds = 0.0)
        else:
            self._past = Cost(cpu_seconds = 0.0, wall_seconds = 0.0)

        self.start()

    def start(self):
        """Start or restart tracking the current interval."""
        self._start_cpu_seconds = resource.getrusage(resource.RUSAGE_SELF).ru_utime
        self._start_wall_seconds = time.time()

    def stop(self):
        """Stop tracking; fold the open interval into the running total."""
        self._past = self.total
        self._start_cpu_seconds = None
        self._start_wall_seconds = None

    def charge(self, cost):
        """Add an external cost, propagating it up to the parent accountant."""
        self._past += cost

        if self._parent is not None:
            self._parent.charge(cost)

    def charge_cpu(self, cpu_seconds):
        """Add an external CPU-only cost."""
        self.charge(Cost(cpu_seconds = float(cpu_seconds)))

    @property
    def total(self):
        """The total accumulated cost, including any open interval."""
        if self._start_cpu_seconds is None:
            return self._past + Cost()

        cpu_now = resource.getrusage(resource.RUSAGE_SELF).ru_utime
        recent = Cost(
            cpu_seconds = cpu_now - self._start_cpu_seconds,
            wall_seconds = time.time() - self._start_wall_seconds,
            )

        return self._past + recent
# root accountant ("eve") charges this process's CPU usage accumulated so far
accountant_stack = [Accountant(eve = True)]

def get_accountant():
    """Return the innermost active accountant, or None if the stack is empty."""
    return accountant_stack[-1] if accountant_stack else None
@contextlib.contextmanager
def accounting():
    """Track resources used within the managed block.

    Yields a child Accountant pushed onto the accountant stack; on exit --
    success or error -- the accountant is stopped and popped.
    """
    accountant = Accountant(get_accountant())

    accountant_stack.append(accountant)

    try:
        yield accountant
    finally:
        # the old `except: raise` clause was a no-op; finally alone already
        # guarantees cleanup on both the success and error paths
        accountant.stop()
        accountant_stack.pop()
def normal_to_machine(machine_cpu_seconds):
    """Scale a CPU duration up by the configured machine speed."""
    # NOTE(review): the parameter names here and in machine_to_normal look
    # swapped relative to the function names; the arithmetic (multiply here,
    # divide below) is what callers rely on, so only flagging, not changing.
    return machine_cpu_seconds * borg.defaults.machine_speed
def machine_to_normal(normal_cpu_seconds):
    """Scale a CPU duration down by the configured machine speed."""
    return normal_cpu_seconds / borg.defaults.machine_speed
def none_op(op, x, y):
    """Apply `op` with None-tolerant semantics.

    Returns None when x is None, x when y is None, otherwise op(x, y).
    """
    if x is None:
        return None

    if y is None:
        return x

    return op(x, y)
def unicore_cpu_budget(budget):
    """The maximum single-core CPU budget implied by a Cost-like budget."""
    cpu = budget.cpu_seconds
    wall = budget.wall_seconds

    # with neither limit set, fall back to an effectively-unbounded budget
    if cpu is None and wall is None:
        return 1e8

    if cpu is None:
        return wall

    if wall is None:
        return cpu

    return min(cpu, wall)
| {
"repo_name": "borg-project/borg",
"path": "borg/expenses.py",
"copies": "1",
"size": "3847",
"license": "mit",
"hash": -7306897675532324000,
"line_mean": 25.7152777778,
"line_max": 114,
"alpha_frac": 0.5812321289,
"autogenerated": false,
"ratio": 3.7901477832512316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48713799121512313,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brycedcarter'
# input file; appears to store one bit per byte (read back via ord()) -- TODO confirm
filename = "sampleData/data_test.dat"
packetStream = []  # this will contain the final list of packet objects
# this is the preamble and address that should be used for matching a valid packet;
# 48 bits total: Packet.__init__ treats the first 8 as sync/preamble and the
# remaining 40 as the address
preambleAndAddress = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0,
                      1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0]
# Class to represent data packets
class Packet(object):
def __init__(self, preBits, pcBits, payloadBits):
self.address = preBits[8:]
self.dataLength = self.getPayloadLength(pcBits)
self.data = payloadBits[0:self.dataLength * 8]
# TODO implement use of the rest of the packet control bits other than just the packet length
# TODO implement CRC check
@staticmethod
def getPayloadLength(pcBits): # gives the payload length given a set of (9) packet control bits
payloadLength = int("".join(str(i) for i in pcBits[0:6]), 2)
return payloadLength
# Open the target file and start reading bits out of it, one byte per bit
with open(filename, 'r') as f:
    preBits = []  # this holds the set of preamble and address bits
    pcBits = []  # this holds the packet control bits
    payloadBits = []  # this holds the payload and crc bits
    # index below which data is absent or already processed
    # NOTE(review): lockIndex is incremented but never read -- the matcher
    # restarts at the next bit rather than backtracking, so overlapping
    # preamble occurrences can be missed; confirm this is acceptable
    lockIndex = 0
    # how far into preambleAndAddress we have matched; reaching its length
    # means a packet header has been successfully matched
    lockCount = 0
    payloadLength = None  # payload length decoded from the pc bits
    for bitByte in f.read():
        bit = ord(bitByte)  # the newest bit read from the file
        if lockCount == len(
                preambleAndAddress):  # full preamble+address matched: collect the rest of the packet
            if len(pcBits) < 9:  # read 9 bits into the set of packet control bits
                pcBits.append(bit)
                continue  # don't move on with processing until we have all of the pc bits
            if len(
                    pcBits) == 9 and payloadLength is None:  # decode the payload length once the pc bits are complete
                payloadLength = Packet.getPayloadLength(pcBits)
            if len(payloadBits) < payloadLength * 8 + 15:  # collect the payload and the crc bits
                payloadBits.append(bit)
                continue
            payloadBits.append(bit)  # grab the last straggler bit that was missed by the last if
            packetStream.append(Packet(preBits, pcBits, payloadBits))  # build a packet and add it to the stream
            # reset all of the buffers and indexes to look for the next packet
            preBits = []
            pcBits = []
            payloadBits = []
            lockIndex = 0
            lockCount = 0
            payloadLength = None
        # if we did not continue on the loop before, try to match the current
        # bit against the expected position of the preamble and address
        if bit == preambleAndAddress[lockCount]:
            lockCount += 1
            preBits.append(bit)
        # if we can't match it... reset and start again
        else:
            lockIndex += 1
            lockCount = 0
            preBits = []
print packetStream[0].data  # print the data of the first packet
| {
"repo_name": "ProjectKarman/comm-sys-protocol-implementation",
"path": "bitStreamProcssor.py",
"copies": "1",
"size": "3471",
"license": "mit",
"hash": -1261023049258103000,
"line_mean": 43.5,
"line_max": 208,
"alpha_frac": 0.6277729761,
"autogenerated": false,
"ratio": 4.022016222479722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149789198579722,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
# Debugging parameters
WRITE_LEFT_IMAGE = True
WRITE_LOCATION = "/home/bs/Desktop/out.jpg"
# Input video
# NOTE(review): every path below is a machine-specific absolute path; consider
# deriving them from an environment variable or a relative project root.
INPUT_VIDEOS = '/home/bs/itu/graphics_and_image_analysis_SIGB/code/Stereo-Vision-System/bergar/Videos/'
VIDEO_LEFT_1 = INPUT_VIDEOS + "cameraLeft.mov"
VIDEO_RIGHT_1 = INPUT_VIDEOS + "cameraRight.mov"
VIDEO_LEFT_2 = INPUT_VIDEOS + "cameraLeft1.mov"
VIDEO_RIGHT_2 = INPUT_VIDEOS + "cameraRight1.mov"
VIDEO_LEFT_3 = INPUT_VIDEOS + "left.mp4"
VIDEO_RIGHT_3 = INPUT_VIDEOS + "right.mp4"
VIDEO_LEFT_4 = INPUT_VIDEOS + "cameraLeft2.mov"
VIDEO_RIGHT_4 = INPUT_VIDEOS + "cameraRight2.mov"
# Texture images (one face texture per cube side)
TEXTURE_IMAGES = '/home/bs/itu/graphics_and_image_analysis_SIGB/code/Stereo-Vision-System/bergar/Images/'
TEXTURE_UP = TEXTURE_IMAGES + "Up.jpg"
TEXTURE_DOWN = TEXTURE_IMAGES + "Down.jpg"
TEXTURE_LEFT = TEXTURE_IMAGES + "Left.jpg"
TEXTURE_RIGHT = TEXTURE_IMAGES + "Right.jpg"
TEXTURE_TOP = TEXTURE_IMAGES + "Top.jpg"
# Input images (for testing instead of video); l*/r* appear to be left/right
# frames by index -- TODO confirm naming convention
INPUT_IMAGES = '/home/bs/itu/graphics_and_image_analysis_SIGB/code/Stereo-Vision-System/bergar/Project/InputImages/'
II_1 = INPUT_IMAGES + "l113.jpg"
II_2 = INPUT_IMAGES + "l25.jpg"
II_3 = INPUT_IMAGES + "l37.jpg"
II_4 = INPUT_IMAGES + "l74.jpg"
II_5 = INPUT_IMAGES + "l80.jpg"
II_6 = INPUT_IMAGES + "r25.jpg"
II_7 = INPUT_IMAGES + "r37.jpg"
II_8 = INPUT_IMAGES + "r74.jpg"
II_9 = INPUT_IMAGES + "r80.jpg"
| {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Stereo-Vision-System/bergar/com.simonsen.stereovision/Settings/Constant.py",
"copies": "1",
"size": "1753",
"license": "apache-2.0",
"hash": -3348677096725438500,
"line_mean": 46.3783783784,
"line_max": 127,
"alpha_frac": 0.5698802054,
"autogenerated": false,
"ratio": 2.9561551433389543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40260353487389544,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
from SIGBTools import *
from Tracker import *
import numpy as np
import cv as cv
# -----------------------------
# Global variables
# -----------------------------
# Available eye-video sequences; select one via the files[...] index below.
files = [
    "eye1.avi",
    "eye2.avi",
    "eye3.avi",
    "eye4.avi",
    "eye5.avi",
    "eye6.avi",
    "eye7.avi",
    "eye8.avi",
    "eye9.avi",
    "eye10.avi",
    "eye11.avi",
    "eye12.avi",
    "eye13.mp4",
    "eye14.mp4",
    "eye15.mp4",
    "eye16.mp4",
    "eye17.mp4",
    "eye18.mp4",
    "eye19.mp4",
    "EyeBizaro.avi"
]
dir = "../Sequences/"  # NOTE(review): shadows the builtin dir()
inputFile = dir + files[6]
outputFile = dir + "eyeTrackerResult.mp4"
imgOrig = []  # most recently read frame
#These are used for template matching (set via the 'm' key in run())
leftTemplate = []
rightTemplate = []
frameNr =0
# counter for storing images (snapshots taken with the 'i' key)
imgNr = 0
def setText(dst, (x, y), s):
    ''' Draw string s at position (x, y) in image dst (Python 2
    tuple-parameter syntax): a black offset shadow first, then the
    white text on top, for readability on any background. '''
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
def update(I):
    ''' Process a single frame I and display the (intermediate) results.

    Runs eye-corner template matching once both templates have been marked,
    overlays the current threshold values on non-Windows platforms, shows
    the annotated frame and keeps a copy in the global drawImg so it can be
    written to the output video by run().
    '''
    # fix: the duplicated 'global frameNr,drawImg' statement and the unused
    # early getSliderVals() fetch were removed
    global frameNr, drawImg
    global leftTemplate
    global rightTemplate
    img = I.copy()
    # gray is consumed by the (currently commented-out) detectors below
    gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    # kmeanTresh = detectPupilKMeans(gray, K=12, distanceWeight=2, reSize=(40,40))
    # Do the magic
    # pupils = GetPupil(gray,sliderVals['pupilThr'], sliderVals['minSize'], sliderVals['maxSize'])
    # pupils2 = GetPupil(gray,kmeanTresh, sliderVals['minSize'], sliderVals['maxSize'])
    # glints = GetGlints(gray,sliderVals['glintThr'], 0, 150)
    # glints = FilterPupilGlint(pupils,glints)
    #Do template matching once both corner templates exist
    if len(leftTemplate) > 0 and len(rightTemplate) > 0:
        GetEyeCorners(img, leftTemplate, rightTemplate)
    #Display results
    x,y = 10,10
    #setText(img,(x,y),"Frame:%d" %frameNr)
    sliderVals = getSliderVals()
    # for non-windows machines we print the values of the threshold in the original image
    if sys.platform != 'win32':
        step=18
        cv2.putText(img, "pupilThr :"+str(sliderVals['pupilThr']), (x, y+step), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
        cv2.putText(img, "glintThr :"+str(sliderVals['glintThr']), (x, y+2*step), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
    cv2.imshow('Result',img)
    #Uncomment these lines as your methods start to work to display the result in the
    #original image using blob detection and sliders for treshold
    # for pupil in pupils:
    #     cv2.ellipse(img,pupil,(0,255,0),1)
    #     C = int(pupil[0][0]),int(pupil[0][1])
    #     cv2.circle(img,C, 2, (0,0,255),4)
    # for glint in glints:
    #     C = int(glint[0][0]),int(glint[0][1])
    #     cv2.circle(img,C, 2,(255,0,255),5)
    # cv2.imshow("Result", img)
    # draw results using kmeans clustering
    # for pupil in pupils2:
    #     cv2.ellipse(img,pupil,(255,255,0),1)
    #     C = int(pupil[0][0]),int(pupil[0][1])
    #     cv2.circle(img,C, 2, (0,255,255),4)
    # for glint in glints:
    #     C = int(glint[0][0]),int(glint[0][1])
    #     cv2.circle(img,C, 2,(255,0,255),5)
    # cv2.imshow("Result", img)
    #For Iris detection - Week 2
    #circularHough(gray)
    #copy the image so that the result image (img) can be saved in the movie
    drawImg = img.copy()
def printUsage():
print "Q or ESC: Stop"
print "SPACE: Pause"
print "r: reload video"
print 'm: Mark region when the video has paused'
print 's: toggle video writing'
print 'c: close video sequence'
def onSlidersChange(dummy=None):
    ''' Trackbar callback: redraw the current frame when paused.

    While the video is running the main loop refreshes the display anyway,
    so an explicit update is only needed when the video is stopped. '''
    global imgOrig
    if not getSliderVals()['Running']:
        update(imgOrig)
def setupWindowSliders():
    ''' Define windows for displaying the results and create trackbars. '''
    cv2.namedWindow("Result")
    cv2.namedWindow('Threshold')
    #cv2.namedWindow("TempResults")
    # (name, initial value, maximum) for every slider in the Threshold window
    sliders = [
        ('pupilThr', 90, 255),    # threshold value for the pupil intensity
        ('glintThr', 240, 255),   # threshold value for the glint intensities
        ('minSize', 20, 200),     # minimum pupil area
        ('maxSize', 200, 200),    # maximum pupil area
        ('Stop/Start', 0, 1),     # run (1) / pause (0) the video
    ]
    for name, value, maxValue in sliders:
        cv2.createTrackbar(name, 'Threshold', value, maxValue, onSlidersChange)
def getSliderVals():
    '''Read every trackbar and return the (scaled) values in a dictionary.

    minSize/maxSize are multiplied by 50 to convert slider units to areas;
    Running is True when the Stop/Start trackbar sits at 1.'''
    pos = lambda name: cv2.getTrackbarPos(name, 'Threshold')
    return {
        'pupilThr': pos('pupilThr'),
        'glintThr': pos('glintThr'),
        'minSize': 50 * pos('minSize'),
        'maxSize': 50 * pos('maxSize'),
        'Running': pos('Stop/Start') == 1,
    }
def run(fileName,resultFile='eyeTrackingResults.avi'):
''' MAIN Method to load the image sequence and handle user inputs'''
global imgOrig, frameNr,drawImg
setupWindowSliders()
props = RegionProps()
cap,imgOrig,sequenceOK = getImageSequence(fileName)
videoWriter = 0
frameNr =0
if(sequenceOK):
#detectPupilKMeans(gray = cv2.cvtColor(imgOrig, cv2.COLOR_RGB2GRAY))
update(imgOrig)
printUsage()
frameNr=0;
saveFrames = False
while(sequenceOK):
sliderVals = getSliderVals();
frameNr=frameNr+1
ch = cv2.waitKey(1)
#Select regions
if(ch==ord('m')):
if(not sliderVals['Running']):
roiSelect=ROISelector(imgOrig)
pts,regionSelected= roiSelect.SelectArea('Select left eye corner',(400,200))
roiSelect2=ROISelector(imgOrig)
pts2,regionSelected2= roiSelect2.SelectArea('Select right eye corner',(400,200))
# TODO: Optimize
global leftTemplate
global rightTemplate
if(regionSelected):
leftTemplate = imgOrig[pts[0][1]:pts[1][1],pts[0][0]:pts[1][0]]
if(regionSelected2):
rightTemplate = imgOrig[pts2[0][1]:pts2[1][1],pts2[0][0]:pts2[1][0]]
if ch == 27:
break
if (ch==ord('s')):
if((saveFrames)):
videoWriter.release()
saveFrames=False
print "End recording"
else:
imSize = np.shape(imgOrig)
videoWriter = cv2.VideoWriter(resultFile, cv.CV_FOURCC('D','I','V','3'), 15.0,(imSize[1],imSize[0]),True) #Make a video writer
saveFrames = True
print "Recording..."
if (ch == ord('i')):
global imgNr
cv2.imwrite("tmp_" + str(imgNr) + ".png", drawImg)
imgNr = imgNr + 1
if(ch==ord('q')):
break
if(ch==32): #Spacebar
sliderVals = getSliderVals()
cv2.setTrackbarPos('Stop/Start','Threshold',not sliderVals['Running'])
if(ch==ord('r')):
frameNr =0
sequenceOK=False
cap,imgOrig,sequenceOK = getImageSequence(fileName)
(imgOrig)
sequenceOK=True
sliderVals=getSliderVals()
if(sliderVals['Running']):
sequenceOK, imgOrig = cap.read()
if(sequenceOK): #if there is an image
#detectPupilKMeans(gray = cv2.cvtColor(imgOrig, cv2.COLOR_RGB2GRAY))
update(imgOrig)
if(saveFrames):
videoWriter.write(drawImg)
if(videoWriter!=0):
videoWriter.release()
print "Closing videofile..."
def main():
    """Entry point: run the eye tracker on the configured input file."""
    run(inputFile)
if __name__ == "__main__":
main() | {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Eye-Tracking-System/bergar/com.bergar.simonsen.eyetracker/Main.py",
"copies": "1",
"size": "7998",
"license": "apache-2.0",
"hash": 8514558339414949000,
"line_mean": 31.7827868852,
"line_max": 147,
"alpha_frac": 0.6029007252,
"autogenerated": false,
"ratio": 3.143867924528302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4246768649728302,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
import cv2
from matplotlib import *
from tools import IO
import numpy as np
from matplotlib.pyplot import *
from config.Const import *
from tools import Utils
from tools import Calc
import personMapLocation as pml
import textureMapping as tm
def texturemapObjectSequence():
    """ Poor implementation of simple texturemap:
    steps through the BOOK_3 video 20 frames at a time, detects planar
    square objects and marks their corners/contours; the actual texture
    mapping is still a TODO. """
    fn = BOOK_3
    cap = cv2.VideoCapture(fn)
    drawContours = True;
    texture = cv2.imread(ITU_LOGO)
    #texture = cv2.transpose(texture)
    mTex,nTex,t = texture.shape
    #load Tracking data
    running, imgOrig = cap.read()
    mI,nI,t = imgOrig.shape
    print running
    while(running):
        # skip ahead 20 frames per display iteration
        for t in range(20):
            running, imgOrig = cap.read()
        if(running):
            squares = Calc.DetectPlaneObject(imgOrig)
            for sqr in squares:
                #TODO Do texturemap here!!!!
                if(drawContours):
                    for p in sqr:
                        cv2.circle(imgOrig,(int(p[0]),int(p[1])),3,(255,0,0))
            if(drawContours and len(squares)>0):
                cv2.drawContours( imgOrig, squares, -1, (0, 255, 0), 3 )
            cv2.circle(imgOrig,(100,100),10,(255,0,0))
            cv2.imshow("Detection",imgOrig)
            cv2.waitKey(1)
def showImageandPlot(N):
    '''Show the ITU map in two subplots, collect N mouse clicks, mark the
    clicked points in both the matplotlib figure and an OpenCV copy, then
    save the figure ('somefig.jpg') and the image ('drawImage.jpg').

    N : number of mouse clicks to collect (previously ignored: the call
        was hard-coded to 5 points).
    '''
    # I = cv2.imread('groundfloor.bmp')
    I = cv2.imread(ITU_MAP)
    drawI = I.copy()
    #make figure and two subplots
    fig = figure(1)
    ax1 = subplot(1,2,1)
    ax2 = subplot(1,2,2)
    ax1.imshow(I)
    ax2.imshow(drawI)
    ax1.axis('image')
    ax1.axis('off')
    points = fig.ginput(N)  # bug fix: parameter N was ignored (hard-coded 5)
    fig.hold('on')
    for p in points:
        #Draw on figure
        subplot(1,2,1)
        plot(p[0],p[1],'rx')
        #Draw in image
        cv2.circle(drawI,(int(p[0]),int(p[1])),2,(0,255,0),10)
        # ax2.cla
        ax2.imshow(drawI)
        draw() #update display: updates are usually defered
    show()
    savefig('somefig.jpg')
    cv2.imwrite("drawImage.jpg", drawI)
def realisticTexturemap(scale,point,map):
    ''' Placeholder for the realistic texture-mapping exercise.
    Parameters are accepted but ignored; only prints a reminder. '''
    #H = np.load('H_G_M')
    print "Not implemented yet\n"*30
def main():
    # Uncomment the exercise to run; only the grid-sequence mapping is active.
    # pml.showFloorTrackingData()
    # tm.simpleTextureMap()
    # tm.textureMapGroundFloor()
    # realisticTexturemap(0,0,0)
    tm.texturemapGridSequence()
if __name__ == "__main__":
main() | {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/main.py",
"copies": "1",
"size": "2384",
"license": "apache-2.0",
"hash": -2883631979372783600,
"line_mean": 22.1553398058,
"line_max": 78,
"alpha_frac": 0.5952181208,
"autogenerated": false,
"ratio": 3.195710455764075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9210793158260229,
"avg_score": 0.016027083660769246,
"num_lines": 103
} |
__author__ = 'bs'
import cv2
from matplotlib.pyplot import *
from tools import Utils
from tools import IO
from tools import Calc
from config.Const import *
from pylab import *
def showFloorTrackingData():
    '''Play the ground-floor sequence, map the tracked person's feet through
    a homography onto the ITU map and draw the trace (see displayTrace).

    The homography is hard-coded when PML_AUTO_POINTS is set, otherwise it
    is obtained from mouse clicks on the first frame.
    '''
    #Load videodata
    map = cv2.imread(ITU_MAP)
    fn = GROUND_FLOOR_VIDEO
    cap = cv2.VideoCapture(fn)
    #load Tracking data
    running, imgOrig = cap.read()
    dataFile = np.loadtxt(TRACKING_DATA)
    m,n = dataFile.shape
    getPoints = True
    H = []
    imagePoints = []
    # Hardcoded points for simpler testing
    if PML_AUTO_POINTS:
        p1 = array([[85.74780454, 194.09873055], [258.12098948, 174.51086862], [270.36340318, 192.13994435], [89.17568038, 218.0938614]])
        p2 = array([[332.45191722, 179.62120685], [330.00343448, 100.04551778], [338.57312408, 103.71824189], [338.57312408, 179.62120685]])
        H = Utils.estimateHomography(p1, p2)
    fig = figure()
    # fix: the frame loop and the box loop both used 'k'; the inner loop
    # shadowed the outer index, which is confusing (though not incorrect,
    # since 'for' re-reads its iterator). Distinct names are used now.
    for frameIdx in range(m):
        curData = frameIdx
        running, imgOrig = cap.read()
        # only do the calibration once
        if getPoints and not PML_AUTO_POINTS:
            H, imagePoints = Utils.getHomographyFromMouse(imgOrig, map, 4)
            getPoints = False
        if(running):
            boxes = frameTrackingData2BoxData(dataFile[frameIdx,:])
            boxColors = [(255,0,0),(0,255,0),(0,0,255)]
            for b in range(0,3):
                aBox = boxes[b]
                cv2.rectangle(imgOrig, aBox[0], aBox[1], boxColors[b])
                if b == 1: # only use the "legs"
                    x1, y1 = Calc.getRectangleLowerCenter(aBox[0], aBox[1]) # calculate the center of the "legs" rectangle
                    displayTrace(map, H, x1, y1, curData, m-2)
            cv2.imshow("boxes",imgOrig)
            cv2.waitKey(DELAY)
def displayTrace(I, H, x, y, data, maxData):
    ''' Map the image point (x, y) through homography H and draw it on map I.

    data / maxData : current and final frame index; reaching maxData
    triggers the optional saving of the map image and the homography. '''
    # Embed (x, y) as the translation column of an identity matrix; the
    # product H * vec then carries H applied to (x, y, 1) in its third column.
    vec = [
        [1, 0 ,x],
        [0, 1, y],
        [0, 0, 1]
    ]
    # calculate the transformed coordinates
    p = H * vec
    # divide coordinates with the homogenous scaling factor
    # (items 2, 5, 8 of the flattened 3x3 product = its third column)
    x = p.item(2) / p.item(8)
    y = p.item(5) / p.item(8)
    cv2.circle(I, (int(x), int(y)), 1, (0, 0, 255), thickness=0, lineType=cv2.CV_AA, shift=0)
    # save image when sequence is done i.e., when data == maxData
    if data == maxData:
        if SAVE_MAP_IMAGE:
            IO.writeImage(I)
        if SAVE_H_M_G:
            IO.writeHomography(H)
        # TODO: Write video
    cv2.imshow("map", I)
def frameTrackingData2BoxData(data):
    '''Convert one row of tracking data into per-rectangle corner tuples.

    data holds 12 values = 6 (x, y) points = 3 rectangles.
    Returns a list of 3 boxes, each a tuple ((x1, y1), (x2, y2)).
    '''
    pts = [(int(data[i]), int(data[i + 1])) for i in range(0, 11, 2)]
    boxes = []
    # Pair consecutive points; range(0, 6, 2) yields exactly the 3 boxes.
    # (bug fix: the previous upper bound of 7 appended a spurious empty
    # fourth tuple built from the out-of-range slice pts[6:8])
    for i in range(0, 6, 2):
        boxes.append(tuple(pts[i:i + 2]))
    return boxes
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/personMapLocation.py",
"copies": "1",
"size": "2759",
"license": "apache-2.0",
"hash": -7008032015193684000,
"line_mean": 28.3617021277,
"line_max": 140,
"alpha_frac": 0.5871692642,
"autogenerated": false,
"ratio": 3.0689655172413794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41561347814413796,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
import cv2
from SIGBTools import *
import pylab
import numpy as np
import sys
from scipy.cluster.vq import *
from scipy.misc import imresize
from matplotlib.pyplot import *
from matplotlib import pyplot as plt
from Filter import *
def FilterPupilGlint(pupils,glints):
    ''' Given a list of pupil candidates and glint candidates, return the
    glints lying close enough to some pupil centre.

    Both lists hold cv2.fitEllipse-style tuples ((cx, cy), (w, h), angle).
    A glint is kept when its Euclidean distance to a pupil centre is below
    int(p[2]).
    NOTE(review): element [2] of a fitEllipse result is the rotation angle,
    not a radius -- confirm that this is the intended cut-off. '''
    accepted = []
    for candidate in pupils:
        cx, cy = candidate[0]
        cutoff = int(candidate[2])
        for g in glints:
            gx, gy = g[0]
            # hypot(dx, dy) == sqrt(dx**2 + dy**2)
            if math.hypot(cx - gx, cy - gy) < cutoff:
                accepted.append(g)
    return accepted
def GetPupil(gray, thr, areaMin, areaMax):
    ''' Threshold the gray image at thr (inverse binary: pupils are dark)
    and return cv2.fitEllipse results for blobs whose area lies within
    (areaMin, areaMax) -- the pupil candidates. '''
    props = RegionProps()
    val,binI = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow("Threshold",binI)
    #Calculate blobs
    contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    pupils = []
    for c in contours:
        tmp = props.CalcContourProperties(c, properties=["centroid", "area", "extend"])
        x, y = tmp['Centroid']
        area = tmp['Area']
        extend = tmp['Extend']
        # extend < 1 discards blobs that completely fill their bounding box
        if area > areaMin and area < areaMax and extend < 1:
            if len(c) >= 5:  # fitEllipse needs at least 5 contour points
                el = cv2.fitEllipse(c)
                pupils.append(el)
            #cv2.ellipse(tempResultImg, el, (0, 255, 0), 4)
        #cv2.circle(tempResultImg,(int(x),int(y)), 2, (0,0,255),4) #draw a circle
    #cv2.imshow("TempResults",tempResultImg)
    return pupils
def GetGlints(gray, thr, areaMin, areaMax):
    ''' Threshold the (blurred) gray image at thr (glints are bright) and
    return cv2.fitEllipse results for blobs whose area lies within
    (areaMin, areaMax) -- the glint candidates. '''
    props = RegionProps()
    # blur first so small specular highlights merge into stable blobs
    gray = cv2.GaussianBlur(gray, (11, 11), 0)
    val,binI = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)
    cv2.imshow("Threshold Glint",binI)
    #cv2.imshow("Gray", gray)
    #Calculate blobs
    contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    glints = [];
    for c in contours:
        tmp = props.CalcContourProperties(c, properties=["centroid", "area", "extend"])
        area = tmp['Area']
        extend = tmp['Extend']
        #print tmp['Area']
        # extend < 1 discards blobs that completely fill their bounding box
        if area > areaMin and area < areaMax and extend < 1:
            if len(c) >= 5:  # fitEllipse needs at least 5 contour points
                el = cv2.fitEllipse(c)
                glints.append(el)
            #canny = cv2.Canny(tempResultImg, 30, 150)
            #cv2.imshow("Canny", canny)
            #x, y = tmp['Centroid']
            #cv2.circle(tempResultImg,(int(x), int(y)), 2, (0,0,255),4)
            #cv2.ellipse(tempResultImg,el,(0,255,0),1)
    #cv2.imshow("Glint detect", tempResultImg)
    return glints
def detectPupilKMeans(gray,K=2,distanceWeight=2,reSize=(40,40)):
    ''' Detects the pupil in the image, gray, using k-means
    gray : grays scale image
    K : Number of clusters
    distanceWeight : Defines the weight of the position parameters
    reSize : the size of the image to do k-means on

    Returns the intensity of the darkest cluster centre, intended as a
    threshold value for pupil segmentation.
    '''
    #Resize for faster performance
    smallI = cv2.resize(gray, reSize)
    # smallI = Filter.blur(smallI)
    smallI = Filter.gaussianBlur(smallI)
    M,N = smallI.shape
    #Generate coordinates in a matrix
    X,Y = np.meshgrid(range(M),range(N))
    #Make coordinates and intensity into one vectors
    z = smallI.flatten()
    x = X.flatten()
    y = Y.flatten()
    O = len(x)
    #make a feature vectors containing (x,y,intensity)
    features = np.zeros((O,3))
    features[:,0] = z;
    # NOTE(review): integer division under Python 2 -- position components
    # are truncated before clustering
    features[:,1] = y/distanceWeight; #Divide so that the distance of position weighs less than intensity
    features[:,2] = x/distanceWeight;
    features = np.array(features,'f')
    # cluster data
    centroids,variance = kmeans(features,K)
    #use the found clusters to map
    label,distance = vq(features,centroids)
    # re-create image from
    labelIm = np.array(np.reshape(label,(M,N)))
    # column 0 of the centroids holds the intensity; return the darkest one
    tmp = centroids[[range(0, K)], [0]]
    return min(tmp[0])
    # Debugging
    # print "-----"
    # print tmp
    # print darkest
    # print "-----"
    # Show figure ?
    # f = figure(1)
    # f = figure(distanceWeight)
    # imshow(labelIm)
    # f.savefig("gaussian_" + str(K) + "_" + str(distanceWeight))
    # f.show()
def detectPupilHough(gray):
    '''Detect pupil candidates with the circular Hough transform and show
    them: all circles in graded colours, the circle with most votes in red.'''
    #Using the Hough transform to detect ellipses
    blur = cv2.GaussianBlur(gray, (9,9),3)
    ##Pupil parameters
    dp = 6; minDist = 10
    highThr = 30 #High threshold for canny
    accThr = 600; #accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected
    maxRadius = 70;
    minRadius = 20;
    #See help for http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=houghcircle#cv2.HoughCirclesIn thus
    circles = cv2.HoughCircles(blur,cv2.cv.CV_HOUGH_GRADIENT, dp,minDist, None, highThr,accThr,minRadius, maxRadius)
    #Print the circles
    gColor = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # bug fix: '!= None' performs elementwise comparison on numpy arrays
    if circles is not None:
        #print circles
        all_circles = circles[0]
        M,N = all_circles.shape
        k=1
        for c in all_circles:
            cv2.circle(gColor, (int(c[0]),int(c[1])),c[2], (int(k*255/M),k*128,0))
            k = k + 1  # bug fix: was 'K=k+1', so the colour gradient never advanced
        #Circle with max votes
        c=all_circles[0,:]
        cv2.circle(gColor, (int(c[0]),int(c[1])),c[2], (0,0,255))
        cv2.imshow("hough",gColor)
def GetEyeCorners(img, leftTemplate, rightTemplate,pupilPosition=None):
    '''Locate the eye corners via template matching and draw a rectangle per
    matching method (one colour each) directly into img.

    img                        : BGR frame, modified in place
    leftTemplate/rightTemplate : corner patches selected by the user
    pupilPosition              : unused, kept for interface compatibility
    '''
    grayImg = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    leftTemplate = cv2.cvtColor(leftTemplate, cv2.COLOR_RGB2GRAY)
    rightTemplate = cv2.cvtColor(rightTemplate, cv2.COLOR_RGB2GRAY)
    lw, lh = leftTemplate.shape[::-1]
    rw, rh = rightTemplate.shape[::-1]
    # improvement: the method constants are used directly instead of
    # eval()-ing their names from strings
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
    colors = [(0,255,0), (255,0,0), (0,0,255), (255,255,0), (0,255,255), (255,0,255)] # [green, red, blue, yellow, teal, purple]
    for method, color in zip(methods, colors):
        i = grayImg.copy()
        # apply template matching for both corners
        res = cv2.matchTemplate(i, leftTemplate, method)
        resRight = cv2.matchTemplate(i, rightTemplate, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        min_valr, max_valr, min_locr, max_locr = cv2.minMaxLoc(resRight)
        # for the SQDIFF variants the best match is the minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
            top_leftr = min_locr
        else:
            top_left = max_loc
            top_leftr = max_locr
        bottom_right = (top_left[0] + lw, top_left[1] + lh)
        bottom_rightr = (top_leftr[0] + rw, top_leftr[1] + rh)
        cv2.rectangle(img, top_left, bottom_right, color, 1)
        cv2.rectangle(img, top_leftr, bottom_rightr, color, 1)
def GetIrisUsingThreshold(gray,pupil):
    ''' Given a gray level image, gray and threshold
    value return a list of iris locations.
    (Course exercise stub -- not implemented.)'''
    # YOUR IMPLEMENTATION HERE !!!!
    pass
def circularHough(gray):
    ''' Performs a circular hough transform of the image, gray, and shows the
    detected circles: the circle with most votes in red, the rest in graded
    green/blue colours. '''
    #See help for http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=houghcircle#cv2.HoughCircles
    blur = cv2.GaussianBlur(gray, (31,31), 11)
    dp = 6; minDist = 30
    highThr = 20 #High threshold for canny
    accThr = 850; #accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected
    maxRadius = 50;
    minRadius = 155;
    # NOTE(review): maxRadius/minRadius are passed swapped relative to the
    # HoughCircles signature (minRadius, maxRadius) and minRadius > maxRadius
    # -- the call is preserved as-is, but confirm the intended search range.
    circles = cv2.HoughCircles(blur,cv2.cv.CV_HOUGH_GRADIENT, dp,minDist, None, highThr,accThr,maxRadius, minRadius)
    #Make a color image from gray for display purposes
    gColor = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # bug fix: '!= None' performs elementwise comparison on numpy arrays
    if circles is not None:
        #print circles
        all_circles = circles[0]
        M,N = all_circles.shape
        k=1
        for c in all_circles:
            cv2.circle(gColor, (int(c[0]),int(c[1])),c[2], (int(k*255/M),k*128,0))
            k = k + 1  # bug fix: was 'K=k+1', so the colour gradient never advanced
        c=all_circles[0,:]
        cv2.circle(gColor, (int(c[0]),int(c[1])),c[2], (0,0,255),5)
        cv2.imshow("hough",gColor)
def GetIrisUsingNormals(gray,pupil,normalLength):
    ''' Given a gray level image, gray and the length of the normals, normalLength
    return a list of iris locations.
    (Course exercise stub -- not implemented.)'''
    # YOUR IMPLEMENTATION HERE !!!!
    pass
def GetIrisUsingSimplifyedHough(gray,pupil):
    ''' Given a gray level image, gray
    return a list of iris locations using a simplified Hough transformation.
    (Course exercise stub -- not implemented.)'''
    # YOUR IMPLEMENTATION HERE !!!!
    pass
| {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Eye-Tracking-System/bergar/com.bergar.simonsen.eyetracker/Tracker.py",
"copies": "1",
"size": "8907",
"license": "apache-2.0",
"hash": -2601893713989755000,
"line_mean": 35.805785124,
"line_max": 145,
"alpha_frac": 0.6165936904,
"autogenerated": false,
"ratio": 3.0916348490107604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42082285394107605,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
import cv2
import numpy as np
from config.Const import *
from tools import Utils
from matplotlib.pyplot import figure
def simpleTextureMap():
    '''Warp the ITU logo onto the ITU map using a homography obtained from
    four mouse clicks in each image, then blend the two 50/50 and display.'''
    I1 = cv2.imread(ITU_LOGO)
    I2 = cv2.imread(ITU_MAP)
    #Print Help
    H,Points = Utils.getHomographyFromMouse(I1,I2,4)
    h, w,d = I2.shape
    overlay = cv2.warpPerspective(I1, H,(w, h))
    M = cv2.addWeighted(I2, 0.5, overlay, 0.5,0)
    cv2.imshow("Overlayed Image",M)
    cv2.waitKey(0)
def textureMapGroundFloor():
    '''Overlay a texture onto the ground-floor video via a homography picked
    on the first frame; 'q' quits, end of sequence stops the loop.'''
    #Load videodata
    # logo = cv2.imread(ITU_LOGO)
    texture = cv2.imread(TEXTURE)
    fn = GROUND_FLOOR_VIDEO
    cap = cv2.VideoCapture(fn)
    #load Tracking data
    running, imgOrig = cap.read()
    H,Points = Utils.getHomographyFromMouse(texture, imgOrig, -1)
    h, w,d = imgOrig.shape
    while(cap.isOpened()):
        ret, frame = cap.read()
        # improvement: stop explicitly at end of sequence instead of
        # relying on addWeighted raising on a None frame
        if not ret:
            break
        try:
            overlay = cv2.warpPerspective(texture, H,(w, h))
            wFirst = 0.9
            wSecond = 0.1
            gamma = 9
            M = cv2.addWeighted(frame, wFirst, overlay, wSecond, gamma)
        except:
            # kept as a best-effort guard against malformed frames
            break
        cv2.imshow("Overlayed Image",M)
        if cv2.waitKey(DELAY) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def texturemapGridSequence():
    """ Skeleton for texturemapping on a video sequence:
    finds a 9x6 chessboard in each (downsampled) frame, refines and draws
    the corners, and highlights the four outer corners (idx) that would
    anchor the texture. The warp itself is left as an exercise. """
    fn = GRID_1
    cap = cv2.VideoCapture(fn)
    drawContours = True
    texture = cv2.imread(ITU_LOGO)
    texture = cv2.pyrDown(texture)
    mTex,nTex,t = texture.shape
    #load Tracking data
    running, imgOrig = cap.read()
    mI,nI,t = imgOrig.shape
    cv2.imshow("win2",imgOrig)
    pattern_size = (9, 6)
    # indices of the four outermost chessboard corners
    idx = [0,8,45,53]
    while(running):
        #load Tracking data
        running, imgOrig = cap.read()
        if(running):
            imgOrig = cv2.pyrDown(imgOrig)
            gray = cv2.cvtColor(imgOrig,cv2.COLOR_BGR2GRAY)
            found, corners = cv2.findChessboardCorners(gray, pattern_size)
            if found:
                # sub-pixel corner refinement before drawing
                term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
                cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
                cv2.drawChessboardCorners(imgOrig, pattern_size, corners, found)
                for t in idx:
                    cv2.circle(imgOrig,(int(corners[t,0,0]),int(corners[t,0,1])),10,(255,t,t))
            cv2.imshow("win2",imgOrig)
cv2.waitKey(DELAY) | {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/textureMapping.py",
"copies": "1",
"size": "2441",
"license": "apache-2.0",
"hash": 8030232565652007000,
"line_mean": 25.2580645161,
"line_max": 94,
"alpha_frac": 0.589922163,
"autogenerated": false,
"ratio": 3.0360696517412937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9054955831865958,
"avg_score": 0.014207196575067117,
"num_lines": 93
} |
__author__ = 'bs'
import cv2
import numpy as np
import pylab
from pylab import *
import matplotlib as mpl
import math
from scipy import linalg
import os.path
''' This module contains sets of functions useful for basic image analysis and should be useful in the SIGB course.
Written and Assembled (2012,2013) by Dan Witzner Hansen, IT University.
'''
def doesFileExist(file):
    """Return True when *file* names an existing regular file."""
    exists = os.path.isfile(file)
    return exists
def hFromPoints(fp, tp):
    """Estimate the homography mapping homogeneous points fp to tp with the
    normalized DLT algorithm (Hartley conditioning + SVD).

    fp, tp : 3xN arrays of homogeneous 2D points, same shape.
    Returns the 3x3 homography scaled so that H[2, 2] == 1.
    Raises RuntimeError when the point sets differ in shape.
    """
    if fp.shape != tp.shape:
        raise RuntimeError('Number of points do not match')
    # from points: condition by centring on the mean, scaling by the std
    m = mean(fp[:2], axis = 1)
    maxstd = max(std(fp[:2], axis = 1)) + 1e-9
    T1 = diag([1 / maxstd, 1 / maxstd, 1])
    T1[0][2] = -m[0] / maxstd
    T1[1][2] = -m[1] / maxstd
    fp = dot(T1, fp)
    # to points: same conditioning with a second transform
    m = mean(tp[:2], axis = 1)
    maxstd = max(std(tp[:2], axis = 1)) + 1e-9
    T2 = diag([1 / maxstd, 1 / maxstd, 1])
    T2[0][2] = -m[0] / maxstd
    T2[1][2] = -m[1] / maxstd
    tp = dot(T2, tp)
    # two DLT constraint rows per correspondence
    nbr_correspondence = fp.shape[1]
    A = zeros((2 * nbr_correspondence, 9))
    for i in range(nbr_correspondence):
        A[2 * i] = [
            -fp[0][i], -fp[1][i], -1,
            0, 0, 0,
            tp[0][i] * fp[0][i], tp[0][i] * fp[1][i], tp[0][i]
        ]
        A[2 * i +1] = [
            0, 0, 0,
            -fp[0][i], -fp[1][i], -1,
            tp[1][i] * fp[0][i], tp[1][i] * fp[1][i], tp[1][i]
        ]
    U, S, V = linalg.svd(A)
    # the right singular vector of the smallest singular value solves Ah ~ 0
    H = V[8].reshape((3, 3))
    # decondition
    H = dot(linalg.inv(T2), dot(H, T1)) # normalize and return
    return H / H[2, 2]
def computeRigidTransform(refpoints, points):
    """Estimate the 2D similarity transform (rotation+scale R, translation
    tx, ty) mapping three points onto three reference points, least squares.

    refpoints, points : flat sequences [x1, y1, x2, y2, x3, y3].
    Returns (R, tx, ty) with R = [[a, -b], [b, a]].
    """
    # Each point contributes two rows, [x, -y, 1, 0] and [y, x, 0, 1],
    # in the unknowns (a, b, tx, ty).
    A = array([
        [points[0], -points[1], 1, 0],  # bug fix: the trailing entry was 9
        [points[1], points[0], 0, 1],
        [points[2], -points[3], 1, 0],
        [points[3], points[2], 0, 1],
        [points[4], -points[5], 1, 0],
        [points[5], points[4], 0, 1]
    ])
    y = array([refpoints[0], refpoints[1], refpoints[2], refpoints[3], refpoints[4], refpoints[5]])
    a, b, tx, ty = linalg.lstsq(A, y)[0]
    R = array([
        [a, -b],
        [b, a]
    ])
    return R, tx, ty
def getHomographyFromMouse(I1,I2,N=4):
    """
    getHomographyFromMouse(I1,I2,N=4)-> homography, mousePoints

    Calculates the homography from a plane in image I1 to a plane in image I2 by using the mouse to define corresponding points
    Returns: the 3x3 homography matrix and the set of corresponding points used to define the homography
    Parameters: N>=4 is the number of expected mouse points in each image.
        when N<0: then the corners of image I1 will be used as input and thus only 4 mouse clicks are needed in I2
    Usage: use left click to select a point and right click to remove the most recently selected point
    """
    #Copy images (converted to RGB for matplotlib display)
    drawImg = []
    drawImg.append(copy(cv2.cvtColor(I1,cv2.COLOR_BGR2RGB)))
    drawImg.append(copy(cv2.cvtColor(I2,cv2.COLOR_BGR2RGB)))
    imagePoints = []
    firstImage = 0
    if(N<0):
        # negative N: take the 4 corners of I1 and only click in I2
        N=4 #Force 4 points to be selected
        firstImage = 1
        m,n,d = I1.shape
        #Define corner points
        imagePoints.append([(float(0.0),float(0.0)),(float(n),0),(float(n),float(m)),(0,m)])
    if(math.fabs(N)<4):
        print('at least 4 points are needed')
    #Make figure
    fig = figure(1)
    # collect N clicks in each image that still needs them
    for k in range(firstImage,2):
        ax= subplot(1,2,k+1)
        ax.imshow(drawImg[k])
        ax.axis('image')
        title("Click" + str(N)+ " times in the image")
        fig.canvas.draw()
        ax.hold('On')
        #Get mouse inputs (no timeout)
        imagePoints.append(fig.ginput(N, -1))
        # Draw selected points
        for p in imagePoints[k]:
            cv2.circle(drawImg[k],(int(p[0]),int(p[1])),2,(0,255,0),2)
        ax.imshow(drawImg[k])
        for (x,y) in imagePoints[k]:
            plot(x,y,'rx')
        fig.canvas.draw()
    #Convert to openCV format
    ip1 = np.array([[x,y] for (x,y) in imagePoints[0]])
    ip2 = np.array([[x,y] for (x,y) in imagePoints[1]])
    #Calculate homography
    H = estimateHomography(ip1, ip2)
    # H,mask = cv2.findHomography(ip1, ip2)
    return H, imagePoints
def getCircleSamples(center=(0,0),radius=1,nPoints=30):
    ''' Samples a circle with center center = (x,y), radius = radius, in nPoints on the circle.

    Returns a list of tuples (x, y, dx, dy) where (x, y) is a point on the
    circle and (dx, dy) is the unit-length curve gradient at that point.
    '''
    s = np.linspace(0, 2*math.pi, nPoints)
    #points (bug fix: the y coordinate previously omitted the radius factor)
    P = [(radius*np.cos(t)+center[0], radius*np.sin(t)+center[1], np.cos(t), np.sin(t)) for t in s]
    return P
def estimateHomography(points1, points2):
    """ Calculates a homography from one plane to another using
    a set of 4 points from each plane.

    points1 is 4 points (nparray) on a plane
    points2 is the corresponding 4 points (nparray) on an other plane
    Returns the 3x3 homography (np.matrix) normalized so H[2,2] == 1,
    or None when either set does not contain exactly 4 points.
    """
    # Require exactly 4 correspondences
    if len(points1) == 4 and len(points2) == 4:
        # Build the 8x9 DLT constraint matrix: two rows per correspondence
        rows = []
        for (x, y), (xt, yt) in zip(points1, points2):
            rows.append([-x, -y, -1, 0, 0, 0, x * xt, y * xt, xt])
            rows.append([0, 0, 0, -x, -y, -1, x * yt, y * yt, yt])
        A = np.matrix(rows)
        # The homography is the right singular vector of the smallest
        # singular value, i.e. the last row of V
        U, D, V = np.linalg.svd(A)
        h = V[8]
        Homography = np.matrix([
            [h[0, 0], h[0, 1], h[0, 2]],
            [h[0, 3], h[0, 4], h[0, 5]],
            [h[0, 6], h[0, 7], h[0, 8]]
        ])
        # Normalize so the bottom-right element equals 1
        Homography = Homography / Homography[2, 2]
        return Homography
def getImageSequence(fn,fastForward =2):
    '''Load the video sequence (fn) and proceeds, fastForward number of frames.

    Returns (cap, lastFrameRead, running) where running is the success flag
    of the last read.
    '''
    cap = cv2.VideoCapture(fn)
    # robustness fix: safe defaults so fastForward=0 no longer raises
    # NameError on the unbound return values
    running, imgOrig = False, None
    for t in range(fastForward):
        running, imgOrig = cap.read() # Get the first frames
    return cap,imgOrig,running
def getLineCoordinates(p1, p2):
    """Get integer coordinates between p1 and p2 using Bresenhams algorithm.

    Usage: coordinates = getLineCoordinates((x1, y1), (x2, y2))
    Returns an Nx2 numpy array of (x, y) points ordered from p1 to p2.
    """
    (x1, y1) = p1
    x1 = int(x1); y1 = int(y1)
    (x2, y2) = p2
    x2 = int(x2); y2 = int(y2)
    points = []
    issteep = abs(y2 - y1) > abs(x2 - x1)
    if issteep:
        # iterate along y instead of x; coordinates are swapped back on append
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    rev = False
    if x1 > x2:
        # always iterate left-to-right; remember to restore the order at the end
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        rev = True
    deltax = x2 - x1
    deltay = abs(y2 - y1)
    error = int(deltax / 2)
    y = y1
    ystep = None
    if y1 < y2:
        ystep = 1
    else:
        ystep = -1
    for x in range(x1, x2 + 1):
        if issteep:
            points.append([y, x])
        else:
            points.append([x, y])
        error -= deltay
        if error < 0:
            y += ystep
            error += deltax
    # Reverse the list if the coordinates were reversed
    if rev:
        points.reverse()
    # cleanup: the dead X/Y column extraction was removed
    return np.array(points)
class RegionProps:
'''Class used for getting descriptors of contour-based connected components
The main method to use is: CalcContourProperties(contour,properties=[]):
contour: a contours found through cv2.findContours
properties: list of strings specifying which properties should be calculated and returned
The following properties can be specified:
Area: Area within the contour - float
Boundingbox: Bounding box around contour - 4 tuple (topleft.x,topleft.y,width,height)
Length: Length of the contour
Centroid: The center of contour: (x,y)
Moments: Dictionary of moments: see
Perimiter: Permiter of the contour - equivalent to the length
Equivdiameter: sqrt(4*Area/pi)
Extend: Ratio of the area and the area of the bounding box. Expresses how spread out the contour is
Convexhull: Calculates the convex hull of the contour points
IsConvex: boolean value specifying if the set of contour points is convex
Returns: Dictionary with key equal to the property name
Example:
contours, hierarchy = cv2.findContours(I, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
goodContours = []
for cnt in contours:
vals = props.CalcContourProperties(cnt,['Area','Length','Centroid','Extend','ConvexHull'])
if vals['Area']>100 and vals['Area']<200
goodContours.append(cnt)
'''
def __calcArea(self,m,c):
return cv2.contourArea(c) #,m['m00']
def __calcLength(self,c):
return cv2.arcLength(c, True)
def __calcPerimiter(self,c):
return cv2.arcLength(c,True)
def __calcBoundingBox(self,c):
return cv2.boundingRect(c)
def __calcCentroid(self,m):
if(m['m00']!=0):
retVal = ( m['m10']/m['m00'],m['m01']/m['m00'] )
else:
retVal = (-1,-1)
return retVal
def __calcEquivDiameter(self,contur):
Area = self.__calcArea(m)
return np.sqrt(4*Area/np.pi)
def __calcExtend(self,m,c):
Area = self.__calcArea(m,c)
BoundingBox = self.__calcBoundingBox(c)
return Area/(BoundingBox[2]*BoundingBox[3])
def __calcConvexHull(self,m,c):
#try:
CH = cv2.convexHull(c)
#ConvexArea = cv2.contourArea(CH)
#Area = self.__calcArea(m,c)
#Solidity = Area/ConvexArea
return {'ConvexHull':CH} #{'ConvexHull':CH,'ConvexArea':ConvexArea,'Solidity':Solidity}
#except:
# print "stuff:", type(m), type(c)
def CalcContourProperties(self,contour,properties=[]):
failInInput = False;
propertyList=[]
contourProps={};
for prop in properties:
prop = str(prop).lower()
m = cv2.moments(contour) #Always call moments
if (prop=='area'):
contourProps.update({'Area':self.__calcArea(m,contour)});
elif (prop=="boundingbox"):
contourProps.update({'BoundingBox':self.__calcBoundingBox(contour)});
elif (prop=="length"):
contourProps.update({'Length':self.__calcLength(contour)});
elif (prop=="centroid"):
contourProps.update({'Centroid':self.__calcCentroid(m)});
elif (prop=="moments"):
contourProps.update({'Moments':m});
elif (prop=="perimiter"):
contourProps.update({'Perimiter':self.__calcPermiter(contour)});
elif (prop=="equivdiameter"):
contourProps.update({'EquivDiameter':self.__calcEquiDiameter(m,contour)});
elif (prop=="extend"):
contourProps.update({'Extend':self.__calcExtend(m,contour)});
elif (prop=="convexhull"): #Returns the dictionary
contourProps.update(self.__calcConvexHull(m,contour));
elif (prop=="isConvex"):
contourProps.update({'IsConvex': cv2.isContourConvex(contour)});
elif failInInput:
pass
else:
print "--"*20
print "*** PROPERTY ERROR "+ prop+" DOES NOT EXIST ***"
print "THIS ERROR MESSAGE WILL ONLY BE PRINTED ONCE"
print "--"*20
failInInput = True;
return contourProps
class ROISelector:
    """Interactive rectangular region-of-interest selector (OpenCV HighGUI).

    One mouse button sets each corner; Enter/Space confirms, Esc cancels.
    Entry point: SelectArea().
    """
    def __resetPoints(self):
        # Forget previously clicked corners before a fresh selection.
        self.seed_Left_pt = None
        self.seed_Right_pt = None
    def __init__(self,inputImg):
        # Keep a private copy so the preview rectangle never draws on the
        # caller's image.
        self.img=inputImg.copy()
        self.seed_Left_pt = None
        self.seed_Right_pt = None
        self.winName ='SELECT AN AREA'
        self.help_message = '''This function returns the corners of the selected area as: [(UpperLeftcorner),(LowerRightCorner)]
Use the Right Button to set Upper left hand corner and and the Left Button to set the lower righthand corner.
Click on the image to set the area
Keys:
Enter/SPACE - OK
ESC - exit (Cancel)
'''
    def update(self):
        # Redraw: plain image until both corners are set, then the image
        # with the current selection rectangle overlaid.
        if (self.seed_Left_pt is None) | (self.seed_Right_pt is None):
            cv2.imshow(self.winName, self.img)
            return
        flooded = self.img.copy()
        cv2.rectangle(flooded, self.seed_Left_pt, self.seed_Right_pt, (0, 0, 255),1)
        cv2.imshow(self.winName, flooded)
    def onmouse(self, event, x, y, flags, param):
        # Mouse callback: left button moves one corner, right button the other.
        if flags & cv2.EVENT_FLAG_LBUTTON:
            self.seed_Left_pt = x, y
        if flags & cv2.EVENT_FLAG_RBUTTON:
            self.seed_Right_pt = x, y
        self.update()
    def setCorners(self):
        # Normalize the two clicked points into (upper-left, lower-right)
        # regardless of which button set which corner.
        points=[]
        UpLeft=(min(self.seed_Left_pt[0],self.seed_Right_pt[0]),min(self.seed_Left_pt[1],self.seed_Right_pt[1]))
        DownRight=(max(self.seed_Left_pt[0],self.seed_Right_pt[0]),max(self.seed_Left_pt[1],self.seed_Right_pt[1]))
        points.append(UpLeft)
        points.append(DownRight)
        return points
    def SelectArea(self,winName='SELECT AN AREA',winPos=(400,400)):# This function returns the corners of the selected area as: [(UpLeftcorner),(DownRightCorner)]
        # Run the modal selection loop.  Returns (corners, True) on
        # Enter/Space, or (None, False) when the user presses Esc.
        self.__resetPoints()
        self.winName = winName
        print self.help_message
        self.update()
        cv2.namedWindow(self.winName, cv2.WINDOW_AUTOSIZE )# cv2.WINDOW_AUTOSIZE
        cv2.setMouseCallback(self.winName, self.onmouse)
        cv2.moveWindow(self.winName, winPos[0],winPos[1])
        while True:
            ch = cv2.waitKey()
            if ch == 27:#Escape
                cv2.destroyWindow(self.winName)
                return None,False
            if ((ch == 13) or (ch==32)): #enter or space key
                cv2.destroyWindow(self.winName)
                return self.setCorners(),True
def splitfn(fn):
    """Split a path into (directory, basename-without-extension, extension)."""
    directory, base = os.path.split(fn)
    stem, extension = os.path.splitext(base)
    return directory, stem, extension
def anorm2(a):
    """Return the squared L2 norm of a along its last axis."""
    squared = a * a
    return squared.sum(-1)
def anorm(a):
    """Return the L2 norm of a along its last axis (sqrt of anorm2)."""
    return np.sqrt((a * a).sum(-1))
def to_rect(a):
    """Normalize a into a 2x2 float rectangle [[x0, y0], [x1, y1]].

    A 2-element input (w, h) is interpreted as the rectangle (0, 0, w, h).
    """
    flat = np.ravel(a)
    if len(flat) == 2:
        flat = (0, 0, flat[0], flat[1])
    return np.array(flat, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
    """Return the 3x3 scale-and-translate matrix mapping rectangle src onto dst."""
    src_r = to_rect(src)
    dst_r = to_rect(dst)
    cx, cy = (dst_r[1] - dst_r[0]) / (src_r[1] - src_r[0])
    tx, ty = dst_r[0] - src_r[0] * (cx, cy)
    return np.float64([[cx, 0, tx],
                       [0, cy, ty],
                       [0, 0, 1]])
def rotateImage(I, angle):
    """Rotate image I by `angle` degrees around its center and return the result.

    Fixes vs. the original: the input was referenced through an undefined
    name `image` (a guaranteed NameError); the rotation center and dsize
    were built directly from I.shape, which is (rows, cols[, channels])
    while OpenCV expects (x, y) / (width, height) ordering.
    """
    h, w = I.shape[:2]
    center = (w / 2.0, h / 2.0)  # (x, y) as OpenCV expects
    rot_mat = cv2.getRotationMatrix2D(center, angle, 1)
    return cv2.warpAffine(I, rot_mat, dsize=(w, h), flags=cv2.INTER_LINEAR)
def lookat(eye, target, up = (0, 0, 1)):
    """Build a look-at rotation R and translation tvec for a camera at `eye`
    looking towards `target`, with `up` as the approximate up direction."""
    fwd = np.asarray(target, np.float64) - eye
    fwd /= np.sqrt((fwd * fwd).sum(-1))          # unit forward axis
    right = np.cross(fwd, up)
    right /= np.sqrt((right * right).sum(-1))    # unit right axis
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec
def mtx2rvec(R):
    """Convert a 3x3 rotation matrix R to a rotation vector (axis * angle).

    Works from the SVD of (R - I): the leading right singular vectors span
    the plane of rotation, from which the angle is recovered via atan2.
    NOTE(review): assumes R is a proper rotation matrix -- confirm at call sites.
    """
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)    # cosine component of the rotation angle
    s = np.dot(vt[1], p)    # sine component of the rotation angle
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
    """Draw string s at (x, y) on dst as white text over a black drop shadow.

    NOTE(review): uses Python 2 tuple-parameter unpacking and the legacy
    `linetype` keyword / cv2.CV_AA constant; this matches the old OpenCV
    Python API the module targets -- confirm before running on newer cv2.
    """
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, linetype=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), linetype=cv2.CV_AA)
class Sketcher:
    """Freehand drawing helper: dragging with the left mouse button paints
    5px-wide lines onto every destination image in `dests`, using the
    colours supplied by colors_func()."""
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None             # last mouse position while dragging
        self.windowname = windowname
        self.dests = dests              # images drawn on in lockstep
        self.colors_func = colors_func  # () -> iterable of per-image colours
        self.dirty = False              # becomes True once anything is drawn
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
    def show(self):
        # Display the first destination image in the sketch window.
        cv2.imshow(self.windowname, self.dests[0])
    def on_mouse(self, event, x, y, flags, param):
        # Start a stroke on button-down; extend it while the button is held.
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
        else:
            self.prev_pt = None
# palette data from matplotlib/_cm.py
# Each channel is a tuple of (x, y_left, y_right) breakpoints describing a
# piecewise-linear colour ramp over x in [0, 1]; consumed by make_cmap().
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                     (1, 0.5, 0.5)),
             'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                       (0.91,0,0), (1, 0, 0)),
             'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                      (1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
    """Build an n-entry BGR uint8 colour map from the named palette in cmap_data."""
    segments = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    eps = 1e-6
    channels = []
    for ch_name in ['blue', 'green', 'red']:
        xp = []
        yp = []
        for x, y1, y2 in segments[ch_name]:
            # Duplicate each breakpoint at (x, x+eps) so step
            # discontinuities survive the linear interpolation.
            xp.extend([x, x + eps])
            yp.extend([y1, y2])
        channels.append(np.interp(xs, xp, yp))
    return np.uint8(np.array(channels).T * 255)
def nothing(*arg, **kw):
    """No-op callback accepting anything (e.g. for cv2.createTrackbar)."""
    return None
def clock():
    """Return the current time in seconds from OpenCV's tick counter."""
    ticks = cv2.getTickCount()
    return ticks / cv2.getTickFrequency()
def imWeight(x, c):
    """Triangular weight: 1 at x == c, falling linearly to 0 at x == 0 and x == 2c."""
    return 1 - np.abs(x - c) / float(c)
def HatFunction(I):
    """Return a weight matrix the size of image I, peaking at the image center.

    The weight is the separable product of two triangular ('hat') ramps,
    one along each axis.
    """
    m, n = I.shape[0], I.shape[1]
    cx, cy = n / 2, m / 2
    col_w = [1 - np.abs(i - cx) / float(cx) for i in range(n)]
    row_w = [1 - np.abs(j - cy) / float(cy) for j in range(m)]
    return np.array([[wc * wr for wc in col_w] for wr in row_w])
def toHomogenious(points):
    """Append a row of ones to a dim*n point array (homogeneous coordinates)."""
    ones_row = np.ones((1, points.shape[1]))
    return np.vstack((points, ones_row))
def normalizeHomogenious(points):
    """Scale homogeneous points in-place so their last row becomes 1; return points."""
    last = points[-1]
    for row in points:
        # The final iteration divides the last row by itself, yielding ones.
        row /= last
    return points
def H_from_points(fp,tp):
    """ Find homography H, such that fp is mapped to tp
        using the linear DLT method. Points are conditioned automatically. """
    # fp, tp: 3 x n arrays of homogeneous 2D points.
    # NOTE(review): mean/std/diag/dot/zeros/linalg resolve through the
    # module's star import (pylab) -- confirm they are the numpy versions.
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')
    # condition points (important for numerical reasons)
    #--from points
    m = mean(fp[:2], axis=1)
    maxstd = max(std(fp[:2], axis=1)) + 1e-9
    T1 = diag([1/maxstd, 1/maxstd, 1])
    T1[0][2] = -m[0]/maxstd
    T1[1][2] = -m[1]/maxstd
    fp = dot(T1,fp)
    # --to points--
    m = mean(tp[:2], axis=1)
    maxstd = max(std(tp[:2], axis=1)) + 1e-9
    T2 = diag([1/maxstd, 1/maxstd, 1])
    T2[0][2] = -m[0]/maxstd
    T2[1][2] = -m[1]/maxstd
    tp = dot(T2,tp)
    # create matrix for linear method, 2 rows for each correspondence pair
    nbr_correspondences = fp.shape[1]
    A = zeros((2*nbr_correspondences,9))
    for i in range(nbr_correspondences):
        A[2*i] = [-fp[0][i],-fp[1][i],-1,0,0,0, tp[0][i]*fp[0][i],tp[0][i]*fp[1][i],tp[0][i]]
        A[2*i+1] = [0,0,0,-fp[0][i],-fp[1][i],-1, tp[1][i]*fp[0][i],tp[1][i]*fp[1][i],tp[1][i]]
    # Solve Ah = 0: H is the right singular vector of the smallest singular value.
    U,S,V = linalg.svd(A)
    H = V[8].reshape((3,3))
    # decondition
    H = dot(linalg.inv(T2),dot(H,T1)) # normalize and return
    return H / H[2,2]
def calibrateCamera(camNum =0,nPoints=5,patternSize=(9,6),saveImage=False):
    ''' CalibrateCamera captures images from camera (camNum)
        The user should press spacebar when the calibration pattern
        is in view.
        When saveImage is a boolean it indicates whether the images used for calibration should be saved
        When it is True the image will be save into a default filename. When saveImage is a string the images
        will be saved using the string as the filename.
    '''
    # Returns (True, camera_matrix, dist_coefs, rms) after nPoints accepted
    # samples, or (False, None, None, None) when the user presses Esc.
    print('click on the image window and then press the space key to take samples')
    cv2.namedWindow("camera",1)
    pattern_size=patternSize
    n=nPoints #number of images before calibration
    #temp=n
    calibrated=False
    square_size=1
    # Ideal 3D chessboard corner positions on the z = 0 plane.
    pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size
    camera_matrix = np.zeros((3, 3))
    dist_coefs = np.zeros(4)
    rvecs=np.zeros((3, 3))
    tvecs=np.zeros((3, 3))
    obj_points = []  # one copy of pattern_points per accepted sample
    img_points = []  # matching refined 2D corner detections
    capture = cv2.VideoCapture(camNum)
    imgCnt = 0
    running = True
    while running:
        ret, imgOrig =capture.read()
        img= imgOrig.copy()
        h, w = img.shape[:2]
        imgGray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if (calibrated==False):
            found,corners=cv2.findChessboardCorners(imgGray, pattern_size )
            ch = cv2.waitKey(1)
            if(ch==27): #ESC
                running = False
                found = False
                # NOTE(review): `d` is never read afterwards; the sibling
                # SIGBTools copy sets `calibrated` here -- confirm intent.
                d = False
                return (calibrated,None,None,None)
            if (found!=0)&(n>0):
                cv2.drawChessboardCorners(img, pattern_size, corners,found)
                if ((ch == 13) or (ch==32)): #enter or space key :
                    # Refine corner positions to sub-pixel accuracy before storing.
                    term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
                    cv2.cornerSubPix(imgGray, corners, (5, 5), (-1, -1), term)
                    img_points.append(corners.reshape(-1, 2))
                    obj_points.append(pattern_points)
                    n=n-1
                    imgCnt=imgCnt+1;
                    # Python 2 idiom: % binds to the string before print consumes it.
                    print('sample %s taken')%(imgCnt)
                    if(saveImage!=False):
                        if(saveImage==True):
                            fileName = 'CalibrationImage'+str(imgCnt)+".jpg"
                        else:
                            fileName =saveImage+str(imgCnt)+".jpg"
                        print("saving Image " + fileName)
                        cv2.imwrite(fileName,imgOrig)
            if n==0:
                # print( img_points)
                # print(obj_points)
                rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h),camera_matrix,dist_coefs,flags = 0)
                # print "RMS:", rms
                print "camera matrix:\n", camera_matrix
                print "distortion coefficients: ", dist_coefs
                calibrated=True
                return (calibrated, camera_matrix, dist_coefs,rms)
            elif(found==0)&(n>0):
                print("chessboard not found")
        #if (calibrated):
            # img=cv2.undistort(img, camera_matrix, dist_coefs )
            # found,corners=cv2.findChessboardCorners(imgGray, pattern_size )
            # if (found!=0):
                # cv2.drawChessboardCorners(img, pattern_size, corners,found)
        cv2.imshow("camera", img)
class Camera:
    """Pin-hole camera model, P = K[R|t] (3x4 projection matrix).

    Fixes vs. the original: calibrate_from_points and simple_calibrate
    were defined without `self` yet returned self.K, so any call raised;
    they now take `self` like every other method.
    """
    def __init__(self, P):
        """Store the 3x4 projection matrix P; derived quantities start unset."""
        self.P = P
        self.K = None  # calibration matrix
        self.R = None  # rotation
        self.t = None  # translation
        self.c = None  # camera center
    def project(self, X):
        """Project homogeneous points X (4*n array) and normalize so row 3 is 1."""
        x = np.dot(self.P, X)
        for i in range(3):
            x[i] /= x[2]
        return x
    def factor(self):
        """Factorize P into K (intrinsics), R (rotation) and t (translation).

        NOTE(review): numpy.linalg has no `rq`; this relies on `linalg`
        being scipy.linalg (or the module-level rq() helper) -- confirm
        the import source before use.
        """
        K, R = linalg.rq(self.P[:, :3])
        # Flip signs so the diagonal of K is positive (T is its own inverse).
        T = np.diag(np.sign(np.diag(K)))
        self.K = np.dot(K, T)
        self.R = np.dot(T, R)
        self.t = np.dot(np.linalg.inv(self.K), self.P[:, 3])
        return self.K, self.R, self.t
    def center(self):
        """Return (computing and caching on first use) the camera center -R^T t."""
        if self.c is not None:
            return self.c
        self.factor()
        self.c = -np.dot(self.R.T, self.t)
        return self.c
    def calibrate_from_points(self, x1, x2):
        """Stub: return the current calibration matrix (unimplemented estimator)."""
        return self.K
    def simple_calibrate(self, a, b):
        """Stub: return the current calibration matrix (unimplemented estimator)."""
        return self.K
# helper functions
def rotation_matrix(a):
    """ Creates a 3D rotation matrix for rotation
        around the axis of the vector a. """
    # Matrix exponential of the skew-symmetric cross-product matrix of a
    # (Rodrigues form), embedded in a 4x4 homogeneous transform.
    # NOTE(review): `expm` lives in scipy.linalg, not numpy.linalg --
    # confirm which module the star import binds `linalg` to here.
    R = eye(4)
    R[:3,:3] = linalg.expm([[0,-a[2],a[1]],[a[2],0,-a[0]],[-a[1],a[0],0]])
    return R
def rq(A):
    """RQ-decompose A: return (R, Q) with A = R Q, R upper-triangular, Q orthogonal.

    Implemented via the QR decomposition of the flipped transpose.
    """
    from scipy.linalg import qr
    Q, R = qr(np.flipud(A).T)
    R = np.flipud(R.T)
    return R[:, ::-1], Q.T[::-1, :]
| {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/tools/Utils.py",
"copies": "1",
"size": "25106",
"license": "apache-2.0",
"hash": -7882189912306916000,
"line_mean": 31.5207253886,
"line_max": 162,
"alpha_frac": 0.5548872779,
"autogenerated": false,
"ratio": 3.0914911956655584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41463784735655584,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
import cv2
import numpy as np
import pylab
from pylab import *
import matplotlib as mpl
import math
''' This module contains sets of functions useful for basic image analysis and should be useful in the SIGB course.
Written and Assembled (2012,2013) by Dan Witzner Hansen, IT University.
'''
def getCircleSamples(center=(0, 0), radius=1, nPoints=30):
    """Sample nPoints positions on a circle.

    Returns a list of 4-tuples (x, y, dx, dy): the point on the circle and
    the unit outward gradient direction at that point.
    """
    angles = np.linspace(0, 2 * math.pi, nPoints)
    cx, cy = center[0], center[1]
    return [(radius * np.cos(t) + cx,
             radius * np.sin(t) + cy,
             np.cos(t),
             np.sin(t)) for t in angles]
def getImageSequence(fn, fastForward=2):
    """Open video file fn, read `fastForward` frames, and return
    (capture, lastFrame, ok).

    Fixes vs. the original: with fastForward == 0 the loop body never ran
    and the return statement referenced unbound names; the results are now
    initialized so the call degrades gracefully to (cap, None, False).
    """
    cap = cv2.VideoCapture(fn)
    running, imgOrig = False, None
    for _ in range(fastForward):
        running, imgOrig = cap.read()
    return cap, imgOrig, running
def getLineCoordinates(p1, p2):
    """Return integer pixel coordinates on the segment p1 -> p2 (Bresenham).

    Usage: coordinates = getLineCoordinates((x1, y1), (x2, y2))
    Returns an N x 2 numpy array of [x, y] rows ordered from p1 to p2.

    Fixes vs. the original: two dead bare-string statements were folded
    into this docstring and the unused trailing X/Y locals were removed.
    """
    x1, y1 = int(p1[0]), int(p1[1])
    x2, y2 = int(p2[0]), int(p2[1])
    points = []
    # Work in the octant where |slope| <= 1 by mirroring steep lines.
    issteep = abs(y2 - y1) > abs(x2 - x1)
    if issteep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    rev = False
    if x1 > x2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        rev = True
    deltax = x2 - x1
    deltay = abs(y2 - y1)
    error = int(deltax / 2)
    y = y1
    ystep = 1 if y1 < y2 else -1
    for x in range(x1, x2 + 1):
        # Un-mirror steep lines when emitting coordinates.
        points.append([y, x] if issteep else [x, y])
        error -= deltay
        if error < 0:
            y += ystep
            error += deltax
    if rev:
        # Restore the original p1 -> p2 ordering.
        points.reverse()
    return np.array(points)
class RegionProps:
    '''Descriptors of contour-based connected components.

    Main entry point: CalcContourProperties(contour, properties=[])
      contour:    a contour found through cv2.findContours
      properties: list of property-name strings (case-insensitive):
        Area:          area within the contour - float
        BoundingBox:   bounding box (topleft.x, topleft.y, width, height)
        Length:        length of the contour
        Centroid:      center of the contour (x, y)
        Moments:       dictionary of image moments
        Perimiter:     perimeter of the contour (equivalent to Length)
        EquivDiameter: sqrt(4 * Area / pi)
        Extend:        ratio of contour area to bounding-box area
        ConvexHull:    convex hull of the contour points
        IsConvex:      whether the contour point set is convex
    Returns a dictionary keyed by the canonical property name.

    Example:
        contours, hierarchy = cv2.findContours(I, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        goodContours = []
        for cnt in contours:
            vals = props.CalcContourProperties(cnt, ['Area', 'Length', 'Centroid', 'Extend', 'ConvexHull'])
            if vals['Area'] > 100 and vals['Area'] < 200:
                goodContours.append(cnt)

    Fixes vs. the original: moments are computed once per call instead of
    once per property; the EquivDiameter helper is callable (it previously
    referenced an undefined name and mis-called __calcArea) and is invoked
    by its real name; the IsConvex branch is reachable (compared against
    the lower-cased property string).
    '''
    def __calcArea(self, m, c):
        # Contour area; moments m accepted for a uniform helper signature.
        return cv2.contourArea(c)
    def __calcLength(self, c):
        # Arc length of the closed contour.
        return cv2.arcLength(c, True)
    def __calcPerimiter(self, c):
        # Perimeter == closed arc length.
        return cv2.arcLength(c, True)
    def __calcBoundingBox(self, c):
        # Upright bounding rectangle (x, y, w, h).
        return cv2.boundingRect(c)
    def __calcCentroid(self, m):
        # Centroid from moments; (-1, -1) signals a degenerate (zero-area) contour.
        if m['m00'] != 0:
            retVal = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        else:
            retVal = (-1, -1)
        return retVal
    def __calcEquivDiameter(self, m, c):
        # Diameter of the circle with the same area as the contour.
        Area = self.__calcArea(m, c)
        return np.sqrt(4 * Area / np.pi)
    def __calcExtend(self, m, c):
        # Contour area relative to its bounding-box area.
        Area = self.__calcArea(m, c)
        BoundingBox = self.__calcBoundingBox(c)
        return Area / (BoundingBox[2] * BoundingBox[3])
    def __calcConvexHull(self, m, c):
        # Returned as a dict so the caller can merge it directly.
        return {'ConvexHull': cv2.convexHull(c)}
    def CalcContourProperties(self, contour, properties=[]):
        """Compute the requested properties for one contour; see the class docstring."""
        failInInput = False
        contourProps = {}
        m = cv2.moments(contour)  # loop-invariant: compute once per call
        for prop in properties:
            prop = str(prop).lower()
            if prop == 'area':
                contourProps.update({'Area': self.__calcArea(m, contour)})
            elif prop == "boundingbox":
                contourProps.update({'BoundingBox': self.__calcBoundingBox(contour)})
            elif prop == "length":
                contourProps.update({'Length': self.__calcLength(contour)})
            elif prop == "centroid":
                contourProps.update({'Centroid': self.__calcCentroid(m)})
            elif prop == "moments":
                contourProps.update({'Moments': m})
            elif prop == "perimiter":
                contourProps.update({'Perimiter': self.__calcPerimiter(contour)})
            elif prop == "equivdiameter":
                contourProps.update({'EquivDiameter': self.__calcEquivDiameter(m, contour)})
            elif prop == "extend":
                contourProps.update({'Extend': self.__calcExtend(m, contour)})
            elif prop == "convexhull":  # helper already returns a dict
                contourProps.update(self.__calcConvexHull(m, contour))
            elif prop == "isconvex":
                contourProps.update({'IsConvex': cv2.isContourConvex(contour)})
            elif failInInput:
                pass
            else:
                # Warn once about unknown property names, then stay silent.
                print("--" * 20)
                print("*** PROPERTY ERROR " + prop + " DOES NOT EXIST ***")
                print("THIS ERROR MESSAGE WILL ONLY BE PRINTED ONCE")
                print("--" * 20)
                failInInput = True
        return contourProps
class ROISelector:
    """Interactive rectangular region-of-interest selector (OpenCV HighGUI).

    One mouse button sets each corner; Enter/Space confirms, Esc cancels.
    Entry point: SelectArea().

    Fixes vs. the original: the obsolete `cv2.cv.MoveWindow` call (legacy
    cv API, removed from modern OpenCV; the Utils.py sibling already uses
    cv2.moveWindow) is replaced with cv2.moveWindow, and the unreachable
    `break` statements after `return` were dropped.
    """
    def __resetPoints(self):
        # Forget previously clicked corners before a fresh selection.
        self.seed_Left_pt = None
        self.seed_Right_pt = None
    def __init__(self, inputImg):
        # Keep a private copy so the preview rectangle never draws on the
        # caller's image.
        self.img = inputImg.copy()
        self.seed_Left_pt = None
        self.seed_Right_pt = None
        self.winName = 'SELECT AN AREA'
        self.help_message = '''This function returns the corners of the selected area as: [(UpperLeftcorner),(LowerRightCorner)]
Use the Right Button to set Upper left hand corner and and the Left Button to set the lower righthand corner.
Click on the image to set the area
Keys:
Enter/SPACE - OK
ESC - exit (Cancel)
'''
    def update(self):
        # Show the raw image until both corners exist, then the preview.
        if (self.seed_Left_pt is None) | (self.seed_Right_pt is None):
            cv2.imshow(self.winName, self.img)
            return
        flooded = self.img.copy()
        cv2.rectangle(flooded, self.seed_Left_pt, self.seed_Right_pt, (0, 0, 255), 1)
        cv2.imshow(self.winName, flooded)
    def onmouse(self, event, x, y, flags, param):
        # Left button moves one corner, right button the other.
        if flags & cv2.EVENT_FLAG_LBUTTON:
            self.seed_Left_pt = x, y
        if flags & cv2.EVENT_FLAG_RBUTTON:
            self.seed_Right_pt = x, y
        self.update()
    def setCorners(self):
        # Normalize the two clicked points into (upper-left, lower-right)
        # regardless of which button set which corner.
        points = []
        UpLeft = (min(self.seed_Left_pt[0], self.seed_Right_pt[0]), min(self.seed_Left_pt[1], self.seed_Right_pt[1]))
        DownRight = (max(self.seed_Left_pt[0], self.seed_Right_pt[0]), max(self.seed_Left_pt[1], self.seed_Right_pt[1]))
        points.append(UpLeft)
        points.append(DownRight)
        return points
    def SelectArea(self, winName='SELECT AN AREA', winPos=(400, 400)):
        """Run the modal selection loop.

        Returns ([(UpLeftCorner), (DownRightCorner)], True) on Enter/Space,
        or (None, False) when the user presses Esc.
        """
        self.__resetPoints()
        self.winName = winName
        print(self.help_message)
        self.update()
        cv2.namedWindow(self.winName, cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback(self.winName, self.onmouse)
        cv2.moveWindow(self.winName, winPos[0], winPos[1])
        while True:
            ch = cv2.waitKey()
            if ch == 27:  # Escape
                cv2.destroyWindow(self.winName)
                return None, False
            if (ch == 13) or (ch == 32):  # Enter or Space
                cv2.destroyWindow(self.winName)
                return self.setCorners(), True
def splitfn(fn):
    """Split a path into (directory, basename-without-extension, extension).

    Fixes vs. the original: this module's visible import block never brings
    `os` into scope, so the name is bound locally here to avoid a NameError.
    """
    import os
    path, fn = os.path.split(fn)
    name, ext = os.path.splitext(fn)
    return path, name, ext
def anorm2(a):
    """Squared L2 norm of a, taken along the last axis."""
    prod = a * a
    return prod.sum(-1)
def anorm(a):
    """L2 norm of a along the last axis (square root of anorm2)."""
    return np.sqrt((a * a).sum(-1))
def to_rect(a):
    """Coerce a into a 2x2 float rectangle [[x0, y0], [x1, y1]].

    A 2-element input (w, h) means the rectangle (0, 0, w, h).
    """
    vals = np.ravel(a)
    if len(vals) == 2:
        vals = (0, 0, vals[0], vals[1])
    return np.array(vals, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
    """3x3 scale-and-translate matrix mapping rectangle src onto rectangle dst."""
    src_r, dst_r = to_rect(src), to_rect(dst)
    scale = (dst_r[1] - dst_r[0]) / (src_r[1] - src_r[0])
    cx, cy = scale
    tx, ty = dst_r[0] - src_r[0] * scale
    return np.float64([[cx, 0, tx],
                       [0, cy, ty],
                       [0, 0, 1]])
def rotateImage(I, angle):
    """Rotate image I by `angle` degrees around its center and return the result.

    Fixes vs. the original: the input was referenced through an undefined
    name `image` (a guaranteed NameError); the rotation center and dsize
    were built directly from I.shape, which is (rows, cols[, channels])
    while OpenCV expects (x, y) / (width, height) ordering.
    """
    h, w = I.shape[:2]
    center = (w / 2.0, h / 2.0)  # (x, y) as OpenCV expects
    rot_mat = cv2.getRotationMatrix2D(center, angle, 1)
    return cv2.warpAffine(I, rot_mat, dsize=(w, h), flags=cv2.INTER_LINEAR)
def lookat(eye, target, up = (0, 0, 1)):
    """Compute a look-at rotation R and translation tvec for a camera placed
    at `eye`, aimed at `target`, with approximate up direction `up`."""
    fwd = np.asarray(target, np.float64) - eye
    fwd /= np.sqrt((fwd * fwd).sum(-1))
    right = np.cross(fwd, up)
    right /= np.sqrt((right * right).sum(-1))
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec
def mtx2rvec(R):
    """Convert a 3x3 rotation matrix R to a rotation vector (axis * angle).

    Works from the SVD of (R - I): the leading right singular vectors span
    the rotation plane, from which the angle is recovered via atan2.
    NOTE(review): assumes R is a proper rotation matrix -- confirm at call sites.
    """
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)    # cosine component of the rotation angle
    s = np.dot(vt[1], p)    # sine component of the rotation angle
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
    """Draw string s at (x, y) on dst: white text over a black drop shadow.

    NOTE(review): relies on Python 2 tuple-parameter unpacking and the
    legacy `linetype` keyword / cv2.CV_AA constant of the old OpenCV
    Python API -- confirm before running on newer cv2.
    """
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, linetype=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), linetype=cv2.CV_AA)
class Sketcher:
    """Freehand drawing helper: while the left mouse button is held, paint
    5px-wide line segments onto every image in `dests`, using the colours
    supplied by colors_func()."""
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None             # previous mouse position of the stroke
        self.windowname = windowname
        self.dests = dests              # destination images, drawn in lockstep
        self.colors_func = colors_func  # () -> iterable of per-image colours
        self.dirty = False              # set once any drawing has happened
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
    def show(self):
        # Refresh the window with the first destination image.
        cv2.imshow(self.windowname, self.dests[0])
    def on_mouse(self, event, x, y, flags, param):
        # Begin a stroke on button-down, extend it while dragging.
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
        else:
            self.prev_pt = None
# palette data from matplotlib/_cm.py
# Each channel is a tuple of (x, y_left, y_right) breakpoints for a
# piecewise-linear colour ramp over x in [0, 1]; used by make_cmap().
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                     (1, 0.5, 0.5)),
             'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                       (0.91,0,0), (1, 0, 0)),
             'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                      (1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
    """Build an n-entry BGR uint8 colour map from the palette named in cmap_data."""
    palette = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    eps = 1e-6
    channels = []
    for ch_name in ['blue', 'green', 'red']:
        breakpoints_x = []
        breakpoints_y = []
        # Duplicate every breakpoint at (x, x+eps) so step discontinuities
        # survive the linear interpolation below.
        for x, y1, y2 in palette[ch_name]:
            breakpoints_x.extend([x, x + eps])
            breakpoints_y.extend([y1, y2])
        channels.append(np.interp(xs, breakpoints_x, breakpoints_y))
    return np.uint8(np.array(channels).T * 255)
def nothing(*arg, **kw):
    """Accept any arguments and do nothing (placeholder GUI callback)."""
    return None
def clock():
    """Current time in seconds, derived from OpenCV's tick counter."""
    return cv2.getTickCount() * (1.0 / cv2.getTickFrequency())
def toHomogenious(points):
    """Stack a row of ones under a dim*n point array (homogeneous coordinates)."""
    n = points.shape[1]
    return np.vstack((points, np.ones((1, n))))
def normalizeHomogenious(points):
    """Divide every row by the last row, in place, so the last row becomes 1."""
    divisor = points[-1]
    for current in points:
        # On the last iteration `current` IS the divisor, yielding ones.
        current /= divisor
    return points
def H_from_points(fp,tp):
    """ Find homography H, such that fp is mapped to tp
        using the linear DLT method. Points are conditioned automatically. """
    # fp, tp: 3 x n arrays of homogeneous 2D points.
    # NOTE(review): mean/std/diag/dot/zeros/linalg resolve through the
    # module's star import (pylab) -- confirm they are the numpy versions.
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')
    # condition points (important for numerical reasons)
    #--from points
    m = mean(fp[:2], axis=1)
    maxstd = max(std(fp[:2], axis=1)) + 1e-9
    T1 = diag([1/maxstd, 1/maxstd, 1])
    T1[0][2] = -m[0]/maxstd
    T1[1][2] = -m[1]/maxstd
    fp = dot(T1,fp)
    # --to points--
    m = mean(tp[:2], axis=1)
    maxstd = max(std(tp[:2], axis=1)) + 1e-9
    T2 = diag([1/maxstd, 1/maxstd, 1])
    T2[0][2] = -m[0]/maxstd
    T2[1][2] = -m[1]/maxstd
    tp = dot(T2,tp)
    # create matrix for linear method, 2 rows for each correspondence pair
    nbr_correspondences = fp.shape[1]
    A = zeros((2*nbr_correspondences,9))
    for i in range(nbr_correspondences):
        A[2*i] = [-fp[0][i],-fp[1][i],-1,0,0,0, tp[0][i]*fp[0][i],tp[0][i]*fp [1][i],tp[0][i]]
        A[2*i+1] = [0,0,0,-fp[0][i],-fp[1][i],-1, tp[1][i]*fp[0][i],tp[1][i]*fp [1][i],tp[1][i]]
    # Solve Ah = 0: H is the right singular vector of the smallest singular value.
    U,S,V = linalg.svd(A)
    H = V[8].reshape((3,3))
    # decondition
    H = dot(linalg.inv(T2),dot(H,T1)) # normalize and return
    return H / H[2,2]
def calibrateCamera(camNum =0,nPoints=5,patternSize=(9,6)):
    ''' CalibrateCamera captures images from camera (camNum)
        The user should press spacebar when the calibration pattern
        is in view.
    '''
    # Returns (True, camera_matrix, dist_coefs, rms) after nPoints accepted
    # samples, or (False, None, None, None) when the user presses Esc.
    print('click on the image window and then press space key to take some samples')
    cv2.namedWindow("camera",1)
    pattern_size=patternSize
    n=nPoints #number of images before calibration
    #temp=n
    calibrated=False
    square_size=1
    # Ideal 3D chessboard corner positions on the z = 0 plane.
    pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size
    camera_matrix = np.zeros((3, 3))
    dist_coefs = np.zeros(4)
    rvecs=np.zeros((3, 3))
    tvecs=np.zeros((3, 3))
    obj_points = []  # one copy of pattern_points per accepted sample
    img_points = []  # matching refined 2D corner detections
    capture = cv2.VideoCapture(camNum)
    imgCnt = 0
    running = True
    while running:
        ret, img =capture.read()
        h, w = img.shape[:2]
        imgGray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if (calibrated==False):
            found,corners=cv2.findChessboardCorners(imgGray, pattern_size )
            ch = cv2.waitKey(1)
            if(ch==27): #ESC
                running = False
                found = False
                calibrated = False
                return (calibrated,None,None,None)
            if (found!=0)&(n>0):
                cv2.drawChessboardCorners(img, pattern_size, corners,found)
                if ((ch == 13) or (ch==32)): #enter or space key :
                    # Refine corner positions to sub-pixel accuracy before storing.
                    term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
                    cv2.cornerSubPix(imgGray, corners, (5, 5), (-1, -1), term)
                    img_points.append(corners.reshape(-1, 2))
                    obj_points.append(pattern_points)
                    n=n-1
                    imgCnt=imgCnt+1;
                    # Python 2 idiom: % binds to the string before print consumes it.
                    print('sample %s taken')%(imgCnt)
            if n==0:
                # print( img_points)
                # print(obj_points)
                rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h),camera_matrix,dist_coefs,flags = 0)
                # print "RMS:", rms
                print "camera matrix:\n", camera_matrix
                print "distortion coefficients: ", dist_coefs
                calibrated=True
                return (calibrated, camera_matrix, dist_coefs,rms)
            elif(found==0)&(n>0):
                print("chessboard not found")
        #if (calibrated):
            # img=cv2.undistort(img, camera_matrix, dist_coefs )
            # found,corners=cv2.findChessboardCorners(imgGray, pattern_size )
            # if (found!=0):
                # cv2.drawChessboardCorners(img, pattern_size, corners,found)
        cv2.imshow("camera", img)
| {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Eye-Tracking-System/bergar/com.bergar.simonsen.eyetracker/SIGBTools.py",
"copies": "1",
"size": "17102",
"license": "apache-2.0",
"hash": -3763092544119162000,
"line_mean": 33.9020408163,
"line_max": 162,
"alpha_frac": 0.5687054146,
"autogenerated": false,
"ratio": 3.1859165424739193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.917602868390422,
"avg_score": 0.015718654633939698,
"num_lines": 490
} |
__author__ = 'bs'
import numpy as np
from tools import Utils
import cv2
from tools.Utils import Camera
def calibrationExample():
    """Calibrate camera 0 from 5 chessboard views, print the factored
    intrinsics/extrinsics, then show a live undistorted preview until
    Esc or 'q' is pressed."""
    camNum =0 # The number of the camera to calibrate
    nPoints = 5 # number of images used for the calibration (space presses)
    patternSize=(9,6) #size of the calibration pattern
    saveImage = False
    calibrated, camera_matrix,dist_coefs,rms = Utils.calibrateCamera(camNum,nPoints,patternSize,saveImage)
    K = camera_matrix
    # Build a camera P = K[I | (0, 0, -1)] and factor it back as a sanity check.
    cam1 =Camera( np.hstack((K,np.dot(K,np.array([[0],[0],[-1]])) )) )
    cam1.factor()
    #Factor projection matrix into intrinsic and extrinsic parameters
    print "K=", cam1.K
    print "R=", cam1.R
    print "t", cam1.t
    if (calibrated):
        capture = cv2.VideoCapture(camNum)
        running = True
        while running:
            running, img =capture.read()
            imgGray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ch = cv2.waitKey(1)
            if(ch==27) or (ch==ord('q')): #ESC
                running = False
            # Undistort with the recovered parameters and overlay any
            # detected chessboard corners as a visual check.
            img=cv2.undistort(img, camera_matrix, dist_coefs )
            found,corners=cv2.findChessboardCorners(imgGray, patternSize )
            if (found!=0):
                cv2.drawChessboardCorners(img, patternSize, corners,found)
            cv2.imshow("Calibrated",img)
# NOTE(review): runs at import time -- consider an
# `if __name__ == '__main__':` guard.
calibrationExample()
| {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/tools/calibrationExample.py",
"copies": "1",
"size": "1340",
"license": "apache-2.0",
"hash": 7650915450502721000,
"line_mean": 34.2631578947,
"line_max": 106,
"alpha_frac": 0.6231343284,
"autogenerated": false,
"ratio": 3.462532299741602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9503824943887329,
"avg_score": 0.016368336850854568,
"num_lines": 38
} |
__author__ = 'bs'
import numpy as np
import cv2
def getRectangleLowerCenter(pt1, pt2):
    """Return the midpoint of the lower edge of the rectangle spanned by
    corners pt1 (top-left) and pt2 (bottom-right)."""
    half_width = abs(pt2[0] - pt1[0]) / 2
    return pt1[0] + half_width, pt2[1]
def angle_cos(p0, p1, p2):
    """Return |cos| of the angle at vertex p1 formed by points p0, p1, p2
    (numpy coordinate arrays)."""
    u = p0 - p1
    v = p2 - p1
    return abs(np.dot(u, v) / np.sqrt(np.dot(u, u) * np.dot(v, v)))
def findSquares(img, minSize=2000, maxAngle=1):
    """Locate quadrilaterals in binary image `img` whose area exceeds
    minSize and whose corner angles have |cos| below maxAngle.

    Fixes vs. the original: `xrange` (Python 2 only) is replaced with
    `range`, which behaves identically here and also runs under Python 3.
    """
    squares = []
    contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        cnt_len = cv2.arcLength(cnt, True)
        cnt = cv2.approxPolyDP(cnt, 0.08 * cnt_len, True)
        # Keep convex quadrilaterals of sufficient area whose corners are
        # close enough to right angles.
        if len(cnt) == 4 and cv2.contourArea(cnt) > minSize and cv2.isContourConvex(cnt):
            cnt = cnt.reshape(-1, 2)
            max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
            if max_cos < maxAngle:
                squares.append(cnt)
    return squares
def DetectPlaneObject(I,minSize=1000):
    """ A simple attempt to detect rectangular
        color regions in the image"""
    # Shows the detection mask in a 'ColorDetection' window as a side effect.
    HSV = cv2.cvtColor(I, cv2.COLOR_BGR2HSV)
    # NOTE(review): h, s, v, b and g below are computed but never used --
    # `s` is overwritten by the red-channel threshold before use; the HSV
    # path looks like an abandoned alternative detection mode -- confirm.
    h = HSV[:,:,0].astype('uint8')
    s = HSV[:,:,1].astype('uint8')
    v = HSV[:,:,2].astype('uint8')
    b = I[:,:,0].astype('uint8')
    g = I[:,:,1].astype('uint8')
    r = I[:,:,2].astype('uint8')
    # use red channel for detection.
    s = (255*(r>230)).astype('uint8')   # binary mask of strongly red pixels
    iShow = cv2.cvtColor(s, cv2.COLOR_GRAY2BGR)
    cv2.imshow('ColorDetection',iShow)
    squares = findSquares(s,minSize)
return squares | {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/tools/Calc.py",
"copies": "1",
"size": "1640",
"license": "apache-2.0",
"hash": -8069965697174358000,
"line_mean": 33.1875,
"line_max": 122,
"alpha_frac": 0.6036585366,
"autogenerated": false,
"ratio": 2.774957698815567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3878616235415567,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bs'
# Path and tuning constants for the homography project; all media paths
# are resolved relative to the project root (PREFIX).
# Sequence & image files
PREFIX = "../"
BOOK = PREFIX + "BOOK/"
GRID_VIDEOS = PREFIX + "GridVideos/"
GROUND_FLOOR_DATA = PREFIX + "GroundFloorData/"
IMAGES = PREFIX + "Images/"
# Book files
BOOK_1 = BOOK + "Seq1_scene.mp4"
BOOK_2 = BOOK + "Seq2_scene.mp4"
BOOK_3 = BOOK + "Seq3_scene.mp4"
BOOK_4 = BOOK + "Seq4_scene.mp4"
BOOK_5 = BOOK + "Seq5_scene.mp4"
BOOK_6 = BOOK + "SunClipDS.avi"
# Grid videos
GRID_1 = GRID_VIDEOS + "grid1.mp4"
GRID_2 = GRID_VIDEOS + "grid2.mp4"
GRID_3 = GRID_VIDEOS + "grid3.mp4"
GRID_4 = GRID_VIDEOS + "grid4.mp4"
GRID_5 = GRID_VIDEOS + "Grid_converted.mp4"
# Ground floor data
GROUND_FLOOR_VIDEO = GROUND_FLOOR_DATA + "SunClipDS.avi"
TRACKING_DATA = GROUND_FLOOR_DATA + "trackingdata.dat"
# Image files
CALIBRATION_PATTERN = IMAGES + "CalibrationPattern.png"
ITU_LOGO = IMAGES + "ITULogo.jpg"
ITU_MAP = IMAGES + "ITUMap.bmp"
TEXTURE = IMAGES + "texture.jpg"
# Folder for saving files
SAVE_FOLDER = PREFIX + "SavedFiles/"
OUTPUT_IMAGE = "output_image"
OUTPUT_MATRIX = "homography"
JPG_EXTENSION = ".jpg"
NPY_EXTENSION = ".npy"
# Max files
MAX_FILES = 100
# Global variables
DELAY = 1                 # cv2.waitKey delay in ms
SAVE_MAP_IMAGE = False    # persist the annotated map image
SAVE_H_M_G = False        # persist the homography matrix
# Testing
PML_AUTO_POINTS = True    # use automatic point selection in tests
| {
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"path": "Projective-Geometry/bergar/com.bergar.simonsen.homography/config/Const.py",
"copies": "1",
"size": "1525",
"license": "apache-2.0",
"hash": 5504885270041008000,
"line_mean": 28.3269230769,
"line_max": 60,
"alpha_frac": 0.5436065574,
"autogenerated": false,
"ratio": 2.850467289719626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8817540251675449,
"avg_score": 0.01530671908883553,
"num_lines": 52
} |
__author__ = 'bsoer'
from crypto.algorithms.algorithminterface import AlgorithmInterface
from tools.argparcer import ArgParcer
import tools.rsatools as RSATools
import math
import sys
class PureRSA(AlgorithmInterface):
    """Textbook (unpadded) RSA key exchange and message encryption.

    The key pair is derived from two primes supplied on the command line
    (``-p1``/``-p2``).  Each public key is serialised as
    ``<len(e)>:<e><n>`` so the peer can split the concatenated integers.

    WARNING: this is raw RSA without padding (no OAEP/PKCS#1) and is not
    semantically secure; it exists for educational purposes only.
    ``self.logger`` is assumed to be provided by AlgorithmInterface.
    """

    n = None                # modulus p1*p2
    totient = None          # Euler's totient (p1-1)*(p2-1)
    e = None                # public exponent
    d = None                # private exponent
    publicKey = None        # serialised "<len(e)>:<e><n>"
    privateKey = None       # serialised "<len(d)>:<d><n>"
    other_publicKey = None  # peer's serialised public key
    other_e = 0             # peer's public exponent
    other_n = 0             # peer's modulus

    def __init__(self, arguments):
        """Derive the RSA key pair from the -p1/-p2 prime arguments.

        Raises:
            AttributeError: if either prime argument is missing.
        """
        prime1 = ArgParcer.getValue(arguments, "-p1")
        prime2 = ArgParcer.getValue(arguments, "-p2")

        if prime1 == "" or prime2 == "":
            raise AttributeError("Two Prime Number Parameters -p1 and -p2 are required to use PureRSA Encryption")
        else:
            intPrime1 = int(prime1)
            intPrime2 = int(prime2)

            # calculate all components
            self.n = intPrime1 * intPrime2
            self.totient = (intPrime1 - 1) * (intPrime2 - 1)

            # derive e from the larger prime (matches RSATools' contract)
            if intPrime1 > intPrime2:
                self.e = RSATools.findCoPrimeToTotient(self.totient, intPrime1)
            else:
                self.e = RSATools.findCoPrimeToTotient(self.totient, intPrime2)

            self.d = RSATools.findDFromTotientAndE(self.totient, self.e)

            # e and n make our public key; arbitrary wire format:
            # <eLength>:<eValue><nValue>
            strE = str(self.e)
            strELen = len(strE)
            self.publicKey = str(strELen) + ":" + strE + str(self.n)

            # d and n make our private key (same format)
            strD = str(self.d)
            strDLen = len(strD)
            self.privateKey = str(strDLen) + ":" + strD + str(self.n)

    def sendFirstMessage(self):
        """Handshake step 1: send our serialised public key."""
        return self.publicKey.encode()

    def receiveFirstMessage(self, firstMessage):
        """Handshake step 2: parse the peer's "<len(e)>:<e><n>" public key."""
        self.other_publicKey = firstMessage.decode()
        colonIndex = self.other_publicKey.index(':')
        strELen = self.other_publicKey[0:colonIndex]
        eLen = int(strELen)
        strE = self.other_publicKey[colonIndex + 1:colonIndex + 1 + eLen]
        self.other_e = int(strE)
        strN = self.other_publicKey[colonIndex + 1 + eLen:]
        self.other_n = int(strN)
        self.logger.debug("Received Public Key Values: N " + str(self.other_n) + ", E " + str(self.other_e))
        return False  # return true for debug to display public key

    def encryptString(self, unencryptedMessage):
        """Encrypt a text message for the peer, returning raw bytes.

        The plaintext is rendered as a bit string, split into
        floor(log2(other_n))-bit segments (guaranteed < n), each segment
        RSA-encrypted, and the ceil(log2(other_n))-bit ciphertext segments
        are concatenated into one big integer.
        """
        plaintext_message_seg_length = int(math.floor(math.log(float(self.other_n), 2)))
        encrypted_message_seg_length = int(math.ceil(math.log(float(self.other_n), 2)))
        self.logger.debug("Based On Key Parameters, the maximum message lengths are as follows: Plaintext: "
                          + str(plaintext_message_seg_length) + " Ciphertext: " + str(encrypted_message_seg_length))
        # convert the message to binary, padding each char to 8 bits
        binaryUnencryptedMessage = ''.join(format(ord(x), '08b') for x in unencryptedMessage)
        self.logger.debug(binaryUnencryptedMessage)
        # post-pad with zero bits so the stream splits evenly into segments
        while len(binaryUnencryptedMessage) % plaintext_message_seg_length != 0:
            binaryUnencryptedMessage += '0'
        self.logger.debug(binaryUnencryptedMessage)
        # split into segments of plaintext_message_seg_length bits
        unencryptedMessageSegments = list()
        for i in range(0, len(binaryUnencryptedMessage), plaintext_message_seg_length):
            unencryptedMessageSegments.append(binaryUnencryptedMessage[i: i + plaintext_message_seg_length])
        self.logger.debug(unencryptedMessageSegments)
        # encrypt each segment using RSA
        encryptedMessageSegments = list()
        for i in unencryptedMessageSegments:
            segmentInt = int(i, 2)  # interpret the bit string as an integer
            # bug fix: use 3-arg pow() for modular exponentiation; the
            # original (m ** e) % n materialises the full power first, which
            # is intractably slow for realistic exponents/moduli.
            encryptedSegmentInt = pow(segmentInt, self.other_e, self.other_n)
            encryptedSegmentBinary = format(encryptedSegmentInt, '0' + str(encrypted_message_seg_length) + 'b')
            encryptedMessageSegments.append(encryptedSegmentBinary)
        self.logger.debug(encryptedMessageSegments)
        encryptedMessageBinaryString = ''.join(encryptedMessageSegments)
        self.logger.debug(encryptedMessageBinaryString)
        encryptedMessageInt = int(encryptedMessageBinaryString, 2)
        self.logger.debug(encryptedMessageInt)
        self.logger.debug(bin(encryptedMessageInt))
        encryptedMessage = encryptedMessageInt.to_bytes(byteorder=sys.byteorder,
                                                        length=math.ceil(len(encryptedMessageBinaryString) / 8))
        return encryptedMessage

    def decryptString(self, encryptedMessage):
        """Invert encryptString using our private key; returns the plaintext."""
        plaintext_message_seg_length = int(math.floor(math.log(self.n, 2)))
        encrypted_message_seg_length = int(math.ceil(math.log(self.n, 2)))
        self.logger.debug("Based On Key Parameters, the maximum message lengths are as follows: Plaintext: "
                          + str(plaintext_message_seg_length) + " Ciphertext: " + str(encrypted_message_seg_length))
        number = int.from_bytes(encryptedMessage, byteorder=sys.byteorder, signed=False)
        self.logger.debug(number)
        binaryEncryptedMessage = str(bin(number))[2:]
        self.logger.debug(binaryEncryptedMessage)
        # left-pad with zero bits: bin() drops leading zeros
        while len(binaryEncryptedMessage) % encrypted_message_seg_length != 0:
            binaryEncryptedMessage = '0' + binaryEncryptedMessage
        encryptedMessageSegments = list()
        for i in range(0, len(binaryEncryptedMessage), encrypted_message_seg_length):
            encryptedMessageSegments.append(binaryEncryptedMessage[i: i + encrypted_message_seg_length])
        self.logger.debug(encryptedMessageSegments)
        unencryptedSegments = list()
        for i in encryptedMessageSegments:
            segmentInt = int(i, 2)  # interpret the bit string as an integer
            # bug fix: modular exponentiation via 3-arg pow() (see encryptString)
            unencryptedSegmentInt = pow(segmentInt, self.d, self.n)
            unencryptedSegmentBinary = format(unencryptedSegmentInt, '0' + str(plaintext_message_seg_length) + 'b')
            unencryptedSegments.append(unencryptedSegmentBinary)
        self.logger.debug(unencryptedSegments)
        joinedSegments = ''.join(unencryptedSegments)
        self.logger.debug(joinedSegments)
        # regroup into 8-bit characters (trailing pad bits decode to NULs)
        letters = list()
        for i in range(0, len(joinedSegments), 8):
            letters.append(joinedSegments[i: i + 8])
        self.logger.debug(letters)
        plainMessage = ""
        for letter in letters:
            letterInt = int(letter, 2)
            character = chr(letterInt)
            plainMessage += character
        return plainMessage
| {
"repo_name": "bensoer/pychat",
"path": "crypto/algorithms/purersa.py",
"copies": "1",
"size": "6730",
"license": "mit",
"hash": 7783865099114733000,
"line_mean": 40.0365853659,
"line_max": 116,
"alpha_frac": 0.6494799406,
"autogenerated": false,
"ratio": 3.9449003516998826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094380292299883,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buckbaskin'
from graphics import *
from mathiz import locate, size, dist_sort
from math import sin, cos
from random import uniform
from visualization.NetViz import *
from leap_motion.location_sim import *
from leap_motion.simple_motion import *
def main():
    """Render a field of random spheres projected through a Viewer whose pose
    is driven (when the animation loop is enabled) by a LeapSimulator."""
    win = GraphWin("My 3D view", 1920, 1080)
    # Pixel-space origin offset (screen centre) and metres-to-pixels scale.
    shift_x = 1920 / 2
    shift_y = 1080 / 2
    m2pix = 1920 / 8
    stopper = 8          # world-space extent: coordinates drawn from +/- stopper/2
    screen_distance = 1  # projection plane distance
    # (start_x, start_y, start_theta, start_v, start_w, start_pitch, start_roll)
    # NOTE(review): the comment above disagrees with LeapSimulator.__init__'s
    # signature (x, y, theta, pitch, v, w, roll) -- confirm which order is
    # intended; as written, -.735 lands in start_pitch.
    lsim = LeapSimulator(4, 4, -2.356, -.735, 0.0, 0.0, 0.0)
    controller = Leap.Controller()
    # self , x, y, z, theta, pitch, screen_distance, sim_hook
    # NOTE(review): comment above does not match Viewer.__init__'s actual
    # (screen_distance, window, shift_x, shift_y, m2pix, sim_hook) signature.
    v = Viewer(screen_distance, win, shift_x, shift_y, m2pix, lsim)
    obs = []
    for i in range(0, 50):
        obs.append(ViewNode(rand_location_gen(stopper), rand_location_gen(stopper), rand_location_gen(stopper), 0.1, v))
    # Debug loop left disabled; has no effect as written.
    for obj in obs:
        #print 'object '+str(obj)
        pass
    for obj in obs:
        obj.draw(win)
    print 'create vn'
    vn = ViewNode(rand_location_gen(stopper), rand_location_gen(stopper), rand_location_gen(stopper), 0.1, v)
    vn.draw(win)
    print 'draw vn '+str(vn)
    # Axes and a one-metre tick mark for visual reference.
    x_axis = Line(Point(.4 * (2 * shift_x), shift_y), Point(.6 * (2 * shift_x), shift_y))
    x_scale = Line(Point(shift_x + m2pix, shift_y - 20), Point(shift_x + m2pix, shift_y + 20))
    y_axis = Line(Point(shift_x, .4 * (2 * shift_y)), Point(shift_x, .6 * (2 * shift_y)))
    x_axis.setOutline('black')
    x_scale.setOutline('black')
    y_axis.setOutline('black')
    x_axis.draw(win)
    x_scale.draw(win)
    y_axis.draw(win)
    print 'animate'
    ### Update and run forward ###
    # Animation loop intentionally disabled (string literal, not executed).
    '''
    while (True):
        try:
            print 'update'
            lsim.update(controller)
            v.update()
            for obj in obs:
                obj.update(win)
        except Exception as e:
            print str(e)
            print 'error'
            raise e
        time.sleep(.01)'''
    win.getMouse() # Pause to view result
    win.close() # Close window when done
if __name__ == '__main__':
main() | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "visualization/graphClass3D.py",
"copies": "1",
"size": "2122",
"license": "mit",
"hash": -7192740826703281000,
"line_mean": 25.5375,
"line_max": 120,
"alpha_frac": 0.5815268615,
"autogenerated": false,
"ratio": 3.066473988439306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4148000849939306,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buckbaskin'
from graphics import *
from mathiz import locate, size, dist_sort
from math import sin, cos
from random import uniform
import copy
### UTILS ###
def rand_location_gen( stopper ):
    """Return a random coordinate uniformly drawn from [-stopper/2, stopper/2]."""
    half_span = stopper / 2
    return uniform(-half_span, half_span)
class ViewNode(object):
    """A sphere in world space together with its 2D screen projection.

    Projection maths (locate/size) come from the `mathiz` module; the
    drawable Circle comes from the `graphics` package.
    """
    def __init__(self, x, y, z, radius, viewer):
        # World-space position and physical radius.
        self.x = x
        self.y = y
        self.z = z
        self.radius = radius
        self.viewer = viewer
        # Projected screen coordinates and apparent (projected) size.
        self.xy = locate(self.location_tup(), viewer.location_tup(), viewer.theta, viewer.pitch, viewer.sd)
        self.apparent_size = size(self.radius, self.location_tup(), viewer.location_tup(), viewer.theta, viewer.pitch, viewer.sd)
        # NOTE(review): the circle is placed from the raw world x/y and raw
        # radius, not from the projected self.xy / self.apparent_size --
        # confirm whether that is intentional (update() uses the projection).
        self.shape = Circle(Point(viewer.shift_x+viewer.m2pix*x,viewer.shift_y+viewer.m2pix*y),radius)
    def __str__(self):
        return '[VN:'+str(self.xy)+' sz:'+str(self.apparent_size)+']'
    # NOTE(review): this method is shadowed by the `self.shape` attribute
    # assigned in __init__, so it is unreachable on instances.
    def shape(self):
        return self.shape
    def draw(self, window):
        # Render the circle onto the given graphics window.
        self.shape.draw(window)
    def update(self, window):
        # Re-project after the viewer moved, then shift the circle by the delta.
        old_xy = copy.copy(self.xy)
        self.xy = locate(self.location_tup(), self.viewer.location_tup(), self.viewer.theta,
                     self.viewer.pitch, self.viewer.sd)
        dx = (self.xy[0]-old_xy[0] , self.xy[1]-old_xy[1])
        self.apparent_size = size(self.radius, self.location_tup(), self.viewer.location_tup(),
                     self.viewer.theta, self.viewer.pitch, self.viewer.sd)
        self.shape.radius = self.apparent_size
        self.shape.move(self.viewer.m2pix*dx[0],self.viewer.m2pix*dx[1])
    def location_tup(self):
        # (x, y, z) world-position tuple.
        return (self.x, self.y, self.z)
class Viewer(object):
    """Camera/viewport state seeded from a LeapSimulator's pose vector."""
    def __init__(self , screen_distance, window, shift_x, shift_y, m2pix, sim_hook):
        # Pose pulled from the simulator state list: [x, y, theta, pitch, ...].
        self.x = sim_hook.state[0]
        self.y = sim_hook.state[1]
        self.z = 0.0
        self.theta = sim_hook.state[2]
        self.pitch = sim_hook.state[3]
        self.sd = screen_distance  # projection plane distance
        self.simulator = sim_hook
        self.window = window
        # Pixel-space origin offset (screen centre) and metres-to-pixels scale.
        self.shift_x = shift_x
        self.shift_y = shift_y
        self.m2pix = m2pix
    def location_tup(self):
        # (x, y, z) camera position tuple.
        return (self.x, self.y, self.z)
def update(self):
self.x = self.simulator.state[0]
self.y = self.simulator.state[1]
self.z = 0.0
self.theta = self.simulator.state[2]
self.pitch = self.simulator.state[3] | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "visualization/NetViz.py",
"copies": "1",
"size": "2382",
"license": "mit",
"hash": -6835856739101909000,
"line_mean": 33.0428571429,
"line_max": 129,
"alpha_frac": 0.6015952981,
"autogenerated": false,
"ratio": 3.2016129032258065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9273426036986728,
"avg_score": 0.005956432867815623,
"num_lines": 70
} |
__author__ = 'buckbaskin'
from graphics import *
from mathiz import locate, size, dist_sort
from math import sin, cos
from random import uniform
def main():
    """Project 50 random 3D points onto a 2D window as orange circles."""
    win = GraphWin("My 3D view", 1920, 1080)
    stopper = 8  # world extent: coordinates drawn from +/- stopper/2
    points = [None]*50
    for i in range(0,len(points),1):
        points[i] = tuple([uniform(-stopper/2,stopper/2),uniform(-stopper/2,stopper/2),uniform(-stopper/2,stopper/2)])
    print 'points: '+str(points)
    # Camera position r, yaw theta, pitch; r1 is the unit view-direction vector.
    r = (3,3,3)
    theta = -2.356
    pitch = -0.735
    r1 = (sin(theta)*cos(pitch),cos(theta)*cos(pitch),sin(pitch))
    screen_distance = 1
    # Pixel-space origin offset and metres-to-pixels scale.
    shift_x = 1920/2
    shift_y = 1080/2
    m2pix = 1920/8
    scalar = 10000
    # Depth-sort so nearer circles are drawn last (painter's algorithm).
    points = dist_sort(points, r, r1)
    locs = [locate(p,r,theta,pitch,screen_distance) for p in points]
    # NOTE(review): this loop only rebinds the loop variable, so the scaled
    # coordinates are discarded -- locs stays unscaled and `scalar` is
    # effectively unused. Confirm whether a list rebuild was intended.
    for loc in locs:
        loc = (m2pix*scalar*loc[0]+shift_x,m2pix*scalar*loc[1]+shift_y)
    print 'locs: '+str(locs)
    actual_size = 0.10  # physical radius of every sphere (metres)
    loc = locate((2,-1,0.25),r,theta,pitch,screen_distance)
    #print 'loc: '+str(loc)
    # Reference line from the 1 m tick toward a fixed projected point.
    depth_y = Line(Point(shift_x+m2pix,shift_y),
                   Point(m2pix*loc[0]+shift_x,
                         m2pix*loc[1]+shift_y))
    print 'list comprehension'
    # S = [x**2 for x in range(10)]
    # NOTE(review): locate() is evaluated twice per point below; hoisting one
    # call per point would halve the projection work.
    circles = [Circle(Point(shift_x+m2pix*locate(point,r,theta,pitch,screen_distance)[0],
                            shift_y+m2pix*locate(point,r,theta,pitch,screen_distance)[1]),
                      m2pix*size(actual_size,point,r,theta,pitch,screen_distance)) for point in points]
    print 'list comprehension complete'
    count = 0
    for c in circles:
        c.setFill('orange')
        c.draw(win)
        count = count + 1
        print count
    print 'done with circles loop'
    # Axes and a one-metre tick mark for visual reference.
    x_axis = Line(Point(.4*(2*shift_x),shift_y) , Point(.6*(2*shift_x),shift_y))
    x_scale = Line(Point(shift_x+m2pix,shift_y-20), Point(shift_x+m2pix,shift_y+20))
    y_axis = Line(Point(shift_x,.4*(2*shift_y)) , Point(shift_x,.6*(2*shift_y)))
    x_axis.setOutline('black')
    x_scale.setOutline('black')
    y_axis.setOutline('black')
    depth_y.setOutline('black')
    x_axis.draw(win)
    x_scale.draw(win)
    y_axis.draw(win)
    depth_y.draw(win)
    win.getMouse() # Pause to view result
    win.close() # Close window when done
if __name__=='__main__':
main() | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "visualization/graph3D.py",
"copies": "1",
"size": "2287",
"license": "mit",
"hash": -7222961931492593000,
"line_mean": 30.7777777778,
"line_max": 118,
"alpha_frac": 0.6003498032,
"autogenerated": false,
"ratio": 2.905972045743329,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8929535296273983,
"avg_score": 0.015357310533869082,
"num_lines": 72
} |
__author__ = 'buckbaskin'
from twitter import *
import os
class TheTwitter(object):
def __init__(self,smile_file):
self.consumer_key = smile_file.readline()[:-1]
self.consumer_secret = smile_file.readline()
if (os.path.isfile('..\\simile2.smile'))==True:
print 'use shifted file'
with open('..\\simile2.smile') as f:
token = f.readline()[:-1]
token_key = f.readline()[:-1]
elif (os.path.isfile('simile2.smile') == False):
print('oauth_dance')
token,token_key = oauth_dance('The Insight Project',self.consumer_key,self.consumer_secret,token_filename='..\\simile2.smile')
else:
print 'use existing file'
with open('simile2.smile','r') as f:
token = f.readline()[:-1]
token_key = f.readline()[:-1]
#print '|'+token+'|'
#print '|'+token_key+'|'
#print 'making the twitter_object'
self.twitter_search = Twitter(auth=OAuth(token, token_key,
self.consumer_key, self.consumer_secret))
self.twitter_stream = TwitterStream(auth=OAuth(token, token_key, self.consumer_key, self.consumer_secret))
search_result = self.twitter_search.search.tweets(q='test')
#print 'twitter search test'
#print search_result['statuses'][0]['text']
stream_result = self.twitter_stream.statuses.sample(language='en')
#print 'twitter stream test'
for tweet in stream_result:
#print tweet
break
print 'successful twitter creation test'
def tweet_stream(self, mode='sample', lang='en'):
if mode == 'firehose':
stream_result = self.twitter_stream.statuses.firehose(lanugage=lang)
else:
stream_result = self.twitter_stream.statuses.sample(language=lang)
return stream_result
def tweet_search(self, query = 'test', lang= 'en'):
return self.twitter_search.search.tweets(q=query,language=lang)
def user_search(self, query):
return self.twitter_search.search.tweets(q=query)
def friend_lookup(self, usr_id):
print 'friend lookup for |'+str(usr_id)+'|'
a = self.twitter_search.friends.ids(user_id=usr_id)
print 'a a a '+str(a)
print 'friend lookup complete for |'+str(usr_id)+'|'
return a
def user_lookup_id(self, user_id):
'''returns a twitter user object, lookup 1 by id'''
print 'lookup by id '+str(user_id)
return self.twitter_search.users.lookup(user_id=user_id)
def user_lookup_name(self, user_name):
print 'lookup by name '+str(user_name)
'''returns a twitter user object, lookup 1 by screen_name'''
return self.twitter_search.users.lookup(screen_name=user_name)
def bfs(self, root_id):
b = BreadthFirst(root_id)
return b
### API ###
def live(self):
'''Use this for streaming live generic data'''
return self.twitter_stream.statuses.sample(language='en')
def fill(self, user_id):
'''Use this for filling out a network around a user'''
return self.bfs(user_id)
def user(self, user_id):
'''Equivalent to fill. Complete a network around a user'''
return self.fill(user_id)
class BreadthFirst(object):
    """Iterator intended to walk a follower graph breadth-first from a root id.

    NOTE(review): the queue holds plain ids, but next() reads
    `node.children`, which an id does not have -- this raises
    AttributeError on the first expansion. Also a Python-2 style iterator
    (`next`, not `__next__`). Confirm intended node type before fixing.
    """
    def __init__(self, node_id):
        # FIFO frontier of ids still to expand.
        self.queue = [node_id]
    def __iter__(self):
        return self
    def next(self):
        if not self.queue: raise StopIteration
        node = self.queue.pop(0) #node id
        self.queue += node.children
        return node
def test():
    """Smoke test: constructing TheTwitter performs the full OAuth handshake."""
    t = TheTwitter(open('..\\simile.smile','r'))
if __name__ == '__main__':
test() | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "data_collection/Twitter.py",
"copies": "1",
"size": "3775",
"license": "mit",
"hash": -5018149402096177000,
"line_mean": 33.962962963,
"line_max": 138,
"alpha_frac": 0.5864900662,
"autogenerated": false,
"ratio": 3.704612365063788,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4791102431263788,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buckbaskin'
import os, sys, inspect, thread, time
import datetime
import math
from math import cos, sin, tan, atan2
src_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
arch_dir = '../lib/x64'
sys.path.insert(0, os.path.abspath(os.path.join(src_dir, arch_dir)))
import Leap
class LeapSimulator(object):
def __init__(self, start_x, start_y, start_theta, start_pitch, start_v, start_w, start_roll):
# [ 0 1 2 3 4 5 6 ]
self.state = [start_x, start_y, start_theta, start_pitch, start_v, start_w, start_roll]
#controller
self.last_frame = None
self.this_frame = None
self.last_v = 0.0
self.last_w = 0.0
self.last_r = 0.0
self.last_time = datetime.datetime.now()
self.this_time = datetime.datetime.now()
def update_frames(self, zero, one):
self.this_frame = zero
self.last_frame = one
def update_cmd(self):
# calculated from frames
if self.this_frame and len(self.this_frame.hands) == 2:
hands = self.this_frame.hands
leftmost = hands.leftmost.palm_position
rightmost = hands.rightmost.palm_position
print 'rightmost '+str(rightmost)
print ' --> x: '+str(rightmost.x)+' y: '+str(rightmost.y)+' z: '+str(rightmost.z)
print 'leftmost '+str(leftmost)
print ' --> x: '+str(rightmost.x)+' y: '+str(rightmost.y)+' z: '+str(rightmost.z)
print 't? '+str(not abs(leftmost.x))
if ((abs(leftmost.x) or abs(leftmost.y) or abs(leftmost.z)) and ( abs(rightmost.x) or abs(rightmost.y) or abs(rightmost.x))):
print str(len(self.this_frame.hands))+ ' hands visible'
print 'L x: '+str(leftmost.x)+' y: '+str(leftmost.y)+' z: '+str(leftmost.z)
print 'R x: '+str(rightmost.x)+' y: '+str(rightmost.y)+' z: '+str(rightmost.z)
print '\n\n'
self.last_v = .0125 * (-rightmost.z-leftmost.z)*.5
#print '|v|'
self.last_w = .4 * (rightmost.z-leftmost.z)*(1.0/self.xzdist(leftmost, rightmost))
if (abs(self.last_v) < .1):
self.last_v = 0.0
if (abs(self.last_w) < .03):
self.last_w = 0.0
#print '|w|'
self.last_r = 0.0
#print '|r|'
else:
self.last_v = 0.0
self.last_w = 0.0
self.last_r = 0.0
#print '|'
print '( v w r ) ( '+str(self.last_v)+' '+str(self.last_w)+' '+str(self.last_r)+' )'
def xzdist(self, l_palm, r_palm):
print 'xzdist'
print 'lpalm '+str(l_palm)
dx = l_palm.x-r_palm.x
print 'dx'
dz = l_palm.z-r_palm.z
print 'dz'
return math.sqrt( dx * dx + dz * dz)
def update_state(self):
self.this_time = datetime.datetime.now()
dt = (self.this_time-self.last_time).total_seconds()
new_s = [None]*7
new_s[0] = self.state[0]+cos(self.state[2])*self.state[4]*dt
new_s[1] = self.state[1]+sin(self.state[2])*self.state[4]*dt
new_s[2] = self.state[2]+self.state[5]*dt
new_s[3] = max(-1, min(1, self.state[3]+self.state[6]*dt)) #bounded to 1 to -1 radians
new_s[4] = self.last_v
new_s[5] = self.last_w
new_s[6] = self.last_r
self.last_time = datetime.datetime.now()
def update(self, controller):
self.update_frames(controller.frame(), controller.frame(1))
self.update_cmd()
self.update_state()
def state(self):
return self.state
class SampleListener(Leap.Listener):
    """Leap listener that forwards frames into an attached simulator."""
    def on_connect(self, controller):
        print "Connected"
    def on_frame(self, controller):
        print 'on frame'
        # NOTE(review): `self.simulator` is never assigned in this class; it
        # must be attached externally before any frame arrives, otherwise
        # this raises AttributeError.
        if self.simulator:
            self.simulator.this_frame = controller.frame()
            self.simulator.last_frame = controller.frame(1)
        #print "Frame id: %d, timestamp: %d, hands: %d, fingers: %d, tools: %d" % ...
        # (frame.id, frame.timestamp, len(frame.hands), len(frame.fingers), len(frame.tools))
def main():
    """Drive the simulator from a live Leap controller forever (errors re-raise)."""
    #def __init__(start_x, start_y, start_theta, start_v, start_w, start_pitch, start_roll):
    # NOTE(review): the comment above disagrees with LeapSimulator.__init__'s
    # actual (..., start_pitch, start_v, start_w, start_roll) order.
    lsim = LeapSimulator(0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0)
    controller = Leap.Controller()
    while(True):
        try:
            print 'update'
            lsim.update(controller)
        except Exception as e:
            print str(e)
            print 'error'
            raise e
        time.sleep(.1)
if __name__ == '__main__':
main() | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "leap_motion/location_sim.py",
"copies": "1",
"size": "4719",
"license": "mit",
"hash": -4329353356843871700,
"line_mean": 34.7575757576,
"line_max": 137,
"alpha_frac": 0.5308328036,
"autogenerated": false,
"ratio": 3.1251655629139075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9123873831885312,
"avg_score": 0.006424906925718993,
"num_lines": 132
} |
__author__ = 'buckbaskin'
import os, sys, inspect, thread, time
src_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
arch_dir = '../lib/x64'
sys.path.insert(0, os.path.abspath(os.path.join(src_dir, arch_dir)))
import Leap
class SampleListener(Leap.Listener):
    """Minimal Leap listener that logs a summary line for every frame."""
    def on_connect(self, controller):
        print "Connected"
    def on_frame(self, controller):
        # Log frame id, timestamp, and the counts of tracked entities.
        frame = controller.frame()
        print "Frame id: %d, timestamp: %d, hands: %d, fingers: %d, tools: %d" % (
              frame.id, frame.timestamp, len(frame.hands), len(frame.fingers), len(frame.tools))
def main():
    """Attach the listener to a live controller and stream frames until Enter."""
    # Create a sample listener and controller
    listener = SampleListener()
    controller = Leap.Controller()
    # Have the sample listener receive events from the controller
    controller.add_listener(listener)
    # Keep this process running until Enter is pressed
    print "Press Enter to quit..."
    try:
        sys.stdin.readline()
    except KeyboardInterrupt:
        pass
    finally:
        # Remove the sample listener when done
        controller.remove_listener(listener)
if __name__ == '__main__':
main() | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "leap_motion/simple_motion.py",
"copies": "1",
"size": "1140",
"license": "mit",
"hash": -1579335045437132500,
"line_mean": 27.525,
"line_max": 96,
"alpha_frac": 0.6561403509,
"autogenerated": false,
"ratio": 3.7012987012987013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9806541331060727,
"avg_score": 0.010179544227594813,
"num_lines": 40
} |
__author__ = 'buckbaskin'
import time
import twitter
from data_collection.Twitter import TheTwitter
from data_representation.Network import Network
def testBFS(network, length, user_id):
    """Populate `network` with up to `length` users crawled breadth-first
    from `user_id` (blocks the calling thread)."""
    network.add_local_blocking(user_id, length)
def testLive(network, length):
    """Populate `network` with up to `length` users from the live sample
    stream (blocks the calling thread)."""
    network.add_stream_blocking(length)
if __name__ == '__main__':
    # Manual integration test: resolve a user both ways, then stream-fill a
    # small network. On a rate-limit error, sleep out the 15-minute window.
    length = 10
    t = TheTwitter(open('..\\simile.smile','r'))
    n = Network(t)
    screenname = 'beBaskin'
    print 'name: '+screenname
    user_id = n.twitter.user_lookup_name(screenname)[0]['id']
    print 'id: '+str(user_id)
    u_id = 2618802005
    print 'id: '+str(u_id)
    print 'screen_name: '+str(n.twitter.user_lookup_id(u_id)[0]['screen_name'])
    #print 'test: id 564193304536166400 '+n.twitter.user_lookup_id(5641933045361664)[0]
    try:
        #testBFS(n, length, user_id)
        testLive(n, length)
    except twitter.TwitterHTTPError as the:
        print 'Please wait for 15 minutes due to rate error'
        time.sleep(15*60)
        print 'And now, what you have all been waiting for....'
| {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "tests/search_test.py",
"copies": "1",
"size": "1062",
"license": "mit",
"hash": -8918694805675430000,
"line_mean": 27.7027027027,
"line_max": 87,
"alpha_frac": 0.6563088512,
"autogenerated": false,
"ratio": 3.1607142857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9299717521620128,
"avg_score": 0.003461123058831366,
"num_lines": 37
} |
__author__ = 'buckbaskin'
import twitter
from data_collection.Twitter import TheTwitter
import threading
import os
class Network(object):
def __init__(self, twitter):
self.nodes = dict()# list of nodes (id , Node object)
self.connections = dict() # list of connections (id , connection weight))
self.twitter = twitter
def add_node(self, node, connections_list):
print 'add node'
print 'node node '+str(node)
self.nodes[node.user_id] = node
print 'self.nodes worked'
self.connections[node.user_id] = dict()
print 'enter try catch'
try:
print 'try connections_list as a list'
for connection in connections_list: #connection is a node_id
self.connections[node.user_id][connection.user_id] = 1.0
print 'connections_list as a list'
except:
print 'try connections_list as a dict'
self.connections[node.user_id] = connections_list
print 'connections_list as a dict'
return True
def add_nodes(self, node_list, max_depth):
count = 0
for node in node_list:
print 'node '+str(node['user']['id'])+' added as part of node_list'
try:
print 'trying [u][i] --> '+ str(node['user']['id'])
a = self.make_node(node['user']['id'])
print 'just a for now'
b = self.make_connections(node['user']['id'])
print 'both a and b'
self.add_node(a, b)
print 'add node with user extra'
except twitter.TwitterHTTPError as te:
print 'pass up twitter rate error'
raise te
except:
try:
print 'trying [i]'
self.add_node(self.make_node(node['id']), self.make_connections(node['id']))
print 'add node without user extra'
except:
print 'trying _'
self.add_node(self.make_node(node), self.make_connections(node))
print 'add node plain pain train crane'
count = count+1
if (count == max_depth):
break
print 'return that thing to be true cuz I\'m done'
return True
def lookup_node(self, node_id):
print 'lookup_node '+str(node_id)
try:
return self.nodes[node_id]
except KeyError as ke:
self.add_node(self.make_node(node_id))
return self.lookup_node(node_id)
def get_connections(self, node_id):
return self.connections[node_id]
def help_stream(self, stream_iterator, length):
count = 0
for tweet in stream_iterator:
self.add_node(self.make_node(tweet['id']), self.make_connections(tweet['id']))
count = count + 1
if count == length:
break
def help_network(self, bfs_iterator, length):
count = 0
for tweet in bfs_iterator:
self.add_node(self.make_node(tweet['id']), self.make_connections(tweet['id']))
count = count + 1
if count == length:
break
def make_node(self, user_id):
print 'making a node for id '+str(user_id)
data = self.twitter.user_lookup_id(user_id)
n = Node(user_id, self)
return n
def make_connections(self, user_id):
print 'make connections for '+str(user_id)
print 'user_id goes to screen name --> '+str(self.twitter.user_lookup_id(user_id)[0]['screen_name'])
try:
'friend lookup by user id'
data = self.twitter.friend_lookup(user_id)
data = data['ids']
'friend lookup by user id worked'
except Exception as e:
print '----EXCEPTION type '+e.__class__.__name__+'----\n'+str(e)+'\n----EXCEPTION----'
'friend lookup by screenname'
#if (isinstance(e,twitter.TwitterHttpError)):
# print 'pass up the twitter error, this means I\'m over rate'
raise e
#data = self.twitter.friend_lookup(self.twitter.user_lookup_id(user_id)[0]['screen_name'])['ids']
print 'data data '+str(data)
connect = dict()
print 'made connect dict. start for loop'
for friend in data:
print 'friend friend '+str(friend)
#if (not str(friend) == 'next_cursor_str' and not str(friend) == 'previous_cursor'):
try:
connect[friend['user']['id']] = 1.0
except:
try:
connect[friend['id']] = 1.0
except:
connect[friend] = 1.0
print('return connect')
return connect
### API ###
def add_stream_thread(self, max_depth):
'''use this for continuous adding from stream on a separate thread. Possibly infinite calls? planned'''
t = threading.Thread(target=self.help_stream, args=(self.twitter.live(), max_depth))
t.daemon = True
t.start()
def add_stream_blocking(self, max_depth):
'''use this for finite adding from stream on the same thread.'''
print 'add stream blocking '
self.add_nodes(self.twitter.live(), max_depth)
def add_local_blocking(self, user_id, max_depth):
''' use this for finite adding from a local network on the same thread. '''
self.add_nodes(self.twitter.fill(user_id), max_depth)
def add_local_thread(self, max_depth):
'''Use this for continuous adding from stream on a separate thread. Possibly infinite calls? planned'''
t = threading.Thread(target=self.help_network, args=(self.twitter.fill(), max_depth))
t.daemon = True
t.start()
class Node(object):
    """A lightweight handle pairing a Twitter user id with its owning Network."""
    def __init__(self,user_id, network):
        self.user_id = user_id
        self.network = network
    def get_connections(self):
        """Delegate connection lookup to the owning network."""
        return self.network.get_connections(self.user_id)
    def __str__(self):
        return '<Node user_id: {0} network {1} >'.format(self.user_id, self.network)
def test():
    """Smoke test: build a client, an empty network, and one node handle."""
    #print 'cwd directory'+str(os.getcwd())
    t = TheTwitter(open('..\\simile.smile','r'))
    net = Network(t)
    #print 'network made'
    n = Node('1',net)
    #print 'new node n'
    print 'successful network representation test'
if __name__ == '__main__':
test() | {
"repo_name": "buckbaskin/CWRUHacks2015",
"path": "data_representation/Network.py",
"copies": "1",
"size": "6442",
"license": "mit",
"hash": 1426743462984351200,
"line_mean": 36.6783625731,
"line_max": 111,
"alpha_frac": 0.5577460416,
"autogenerated": false,
"ratio": 3.974090067859346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031836109459346,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buddha'
from . import app
import models
from BeautifulSoup import BeautifulStoneSoup
from pprint import pprint
# import simplejson
import urllib, urllib2
from flask import render_template, flash, redirect, url_for
class TMError(Exception):
    """Raised when a Metro TransitMix API response reports an error."""
@app.route('/')
@app.route('/index')
def index():
    """Health-check route: serves a plain greeting at / and /index."""
    return "Hello, World!"
@app.route('/busroutes')
def busroutes(carrier=40, debug=1, format='json', **kwargs):
    """Proxy the LA Metro TransitMix routes feed.

    carrier: TransitMix carrier id (40 is the default here).
    format: 'json' returns the raw response stream; anything else fetches
    XML and returns it pretty-printed via BeautifulStoneSoup.
    """
    TM_BASE = 'http://developer.metro.net/tm/routes.php'
    APIKEY = 'euVh%2A%26Kz.%3FrA.fQ%3A%7ER%23O'  # already URL-encoded
    kwargs.update({
        'apikey': APIKEY,
        'carrier': carrier,
        'format': format
    })
    url = TM_BASE + '?' + urllib.urlencode(kwargs)
    if debug: print url
    if (format=='json'):
        # result = simplejson.load(urllib.urlopen(url))
        result = urllib.urlopen(url)
        # NOTE(review): `result` is a file-like object here, so this
        # membership test scans response lines for the text 'Error' and the
        # subscription below would fail -- the check was written for a
        # parsed JSON dict (see commented-out simplejson line); confirm.
        if 'Error' in result:
            # An error occurred; raise an exception
            raise TMError, result['Error']
        # return result['routes']['item']
        # NOTE(review): the URL is fetched a second time here; the first
        # response object is discarded.
        return urllib.urlopen(url)
    else:
        xml = urllib2.urlopen(url)
        soup = BeautifulStoneSoup(xml)
        return soup.prettify()
    # NOTE(review): unreachable -- both branches above return.
    return url;
| {
"repo_name": "buddha314/mizmetroweb",
"path": "app/views.py",
"copies": "1",
"size": "1160",
"license": "apache-2.0",
"hash": -4279567766473183700,
"line_mean": 23.6808510638,
"line_max": 60,
"alpha_frac": 0.6336206897,
"autogenerated": false,
"ratio": 3.411764705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4545385395582353,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buec'
import modgrammar
import sys
from pyspeechgrammar import model
class JavaIdentifier(modgrammar.Grammar):
    """Matches a Java-style identifier: [A-Za-z$] followed by [A-Za-z0-9_$]*."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.WORD("A-Za-z$", "A-Za-z0-9_$"))

    def grammar_elem_init(self, session_data):
        # Expose the matched text as .value for enclosing grammars.
        self.value = self[0].string
class Package(modgrammar.Grammar):
    """Dot-separated sequence of Java identifiers, e.g. ``com.example.app``."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.LIST_OF(JavaIdentifier, sep=".", min=1))

    def grammar_elem_init(self, session_data):
        # .value is the full dotted path exactly as written.
        self.value = self[0].string
class SelfIdentifyingHeader(modgrammar.Grammar):
    """Parses the JSGF header line: ``#JSGF <version> [<encoding> [<locale>]];``."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.LITERAL("#JSGF"), modgrammar.WHITESPACE,
           modgrammar.WORD("A-Za-z0-9._\-"),
           modgrammar.OPTIONAL(modgrammar.WHITESPACE, modgrammar.WORD("A-Za-z0-9._\-")),
           modgrammar.OPTIONAL(modgrammar.WHITESPACE, modgrammar.WORD("A-Za-z0-9._\-")),
           modgrammar.LITERAL(";"), modgrammar.LITERAL('\n'))

    def grammar_elem_init(self, session_data):
        # Element 2 is the mandatory version; elements 3 and 4 are optional
        # (WHITESPACE, WORD) pairs for encoding and locale, or None if absent.
        self.version = self[2].string
        self.encoding = self[3][1].string if self[3] is not None else None
        self.locale = self[4][1].string if self[4] is not None else None
class NameDeclaration(modgrammar.Grammar):
    """Parses the grammar-name declaration: ``grammar [pkg.path.]Name;``."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.LITERAL("grammar"), modgrammar.WHITESPACE,
           modgrammar.OPTIONAL(Package, modgrammar.LITERAL('.')),
           JavaIdentifier,
           modgrammar.LITERAL(";"))

    def grammar_elem_init(self, session_data):
        # Element 3 is the simple name; element 2 is the optional
        # (Package, '.') prefix, or None when no package was given.
        self.name = self[3].value
        self.package = None if self[2] is None else self[2][0].value
class ImportStatement(modgrammar.Grammar):
    """Parses 'import <package.Rule>;' or 'import <package.*>;'."""
    grammar_whitespace_mode = 'explicit'
    grammar = (
        modgrammar.LITERAL("import"), modgrammar.WHITESPACE, modgrammar.LITERAL("<"),
        Package,
        modgrammar.LITERAL('.'),
        JavaIdentifier | modgrammar.LITERAL('*'),
        modgrammar.LITERAL(">"),
        modgrammar.LITERAL(";"), modgrammar.OPTIONAL(modgrammar.WHITESPACE))
    def grammar_elem_init(self, session_data):
        # Element 3 is the package path, element 5 the rule name or '*'.
        self.package = self[3].value
        self.rule = self[5].string
class Header(modgrammar.Grammar):
    """Complete JSGF header: self-identifying line, name declaration, imports."""
    grammar_whitespace_mode = 'optional'
    grammar = (SelfIdentifyingHeader, NameDeclaration, modgrammar.ZERO_OR_MORE(ImportStatement))
    def grammar_elem_init(self, session_data):
        ident, declaration, import_list = self[0], self[1], self[2]
        self.version = ident.version
        self.encoding = ident.encoding
        self.locale = ident.locale
        self.name = declaration.name
        self.package = declaration.package
        # Collect the raw ImportStatement elements.
        self.imports = [import_list[i] for i in range(len(import_list.elements))]
class PublicModifier(modgrammar.Grammar):
    """Matches the 'public' keyword in front of a rule definition."""
    grammar_whitespace_mode = 'optional'
    grammar = (modgrammar.LITERAL("public"))
class MetaCharacter(modgrammar.Grammar):
    """Any character with special meaning in JSGF; used to delimit bare tokens."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.OR(modgrammar.L(';'), modgrammar.L('='), modgrammar.L('|'), modgrammar.L('*'),
                             modgrammar.L('+'), modgrammar.L('<'), modgrammar.L('>'), modgrammar.L('('),
                             modgrammar.L(')'), modgrammar.L('['), modgrammar.L(']'), modgrammar.L('{'),
                             modgrammar.L('}'), modgrammar.L('/*'), modgrammar.L('*/'), modgrammar.L('//'),
                             modgrammar.L(" "), modgrammar.L('"')))
class Tag(modgrammar.Grammar):
    """A '{...}' tag; the text between the braces becomes the model.Tag name."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.L('{'), modgrammar.ZERO_OR_MORE(modgrammar.ANY_EXCEPT('^}')), modgrammar.L('}'))
    def grammar_elem_init(self, session_data):
        # Element 1 is everything between '{' and '}'.
        self.model = model.Tag(name=self[1].string)
class UnaryOperator(modgrammar.Grammar):
    """Postfix operator after an expansion: '*', '+', or one or more tags."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.OPTIONAL(modgrammar.WHITESPACE),
               modgrammar.L('*') |
               modgrammar.L('+') |
               modgrammar.LIST_OF(Tag, sep=" ", min=1))
    def grammar_elem_init(self, session_data):
        matched = self[1].string
        self.is_kleene_star = matched == '*'
        self.is_plus = matched == '+'
        self.tags = []
        if not (self.is_kleene_star or self.is_plus):
            # LIST_OF interleaves items with separators; step over separators.
            for idx in range(0, len(self[1].elements), 2):
                self.tags.append(self[1][idx].model)
class Token(modgrammar.Grammar):
    """A bare word: one or more characters that are not meta characters."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.ONE_OR_MORE(modgrammar.EXCEPT(modgrammar.ANY(""), MetaCharacter)))
    def grammar_elem_init(self, session_data):
        # Strip surrounding whitespace before building the model token.
        self.model = model.Token(value=self[0].string.strip())
class QuotedToken(modgrammar.Grammar):
    """A double-quoted token that may contain internal whitespace."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.LITERAL('"'), modgrammar.OPTIONAL(Token), modgrammar.ONE_OR_MORE(modgrammar.WHITESPACE, Token),
               modgrammar.LITERAL('"'))
    def grammar_elem_init(self, session_data):
        # Element 1 is an optional leading token, element 2 the rest.
        leading = '' if self[1] is None else self[1].string
        self.model = model.Token(value=(leading + self[2].string).strip())
class RuleReference(modgrammar.Grammar):
    """A '<[package.]name>' reference to another rule."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.L('<'),
               modgrammar.OPTIONAL(Package, modgrammar.L('.')),
               JavaIdentifier,
               modgrammar.L('>'))
    def grammar_elem_init(self, session_data):
        # Element 1 is the optional package prefix, element 2 the rule name.
        prefix = '' if self[1] is None else self[1].string
        self.model = model.RuleReference(prefix + self[2].string)
class Group(modgrammar.Grammar):
    """A parenthesized sub-expansion '( ... )'."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.L('('), modgrammar.OPTIONAL(modgrammar.WHITESPACE),
               modgrammar.REF("RuleExpansion", module=sys.modules[__name__]),
               modgrammar.OPTIONAL(modgrammar.WHITESPACE), modgrammar.L(')'))
    def grammar_elem_init(self, session_data):
        # Element 2 is the inner RuleExpansion (resolved via forward REF).
        self.model = model.Group(self[2].model)
class OptionalGroup(modgrammar.Grammar):
    """A bracketed, optional sub-expansion '[ ... ]'."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.L('['), modgrammar.OPTIONAL(modgrammar.WHITESPACE),
               modgrammar.REF("RuleExpansion", module=sys.modules[__name__]),
               modgrammar.OPTIONAL(modgrammar.WHITESPACE), modgrammar.L(']'))
    def grammar_elem_init(self, session_data):
        # Element 2 is the inner RuleExpansion (resolved via forward REF).
        self.model = model.OptionalGroup(self[2].model)
class SequenceRuleExpansion(modgrammar.Grammar):
    """A single item inside a sequence, with an optional unary operator."""
    grammar_whitespace_mode = 'explicit'
    grammar = (Token |
               QuotedToken |
               RuleReference |
               Group |
               OptionalGroup,
               modgrammar.OPTIONAL(UnaryOperator))
    def grammar_elem_init(self, session_data):
        self.model = self[0].model
        op = self[1]
        if op is not None:
            # '*' means zero-or-more, '+' one-or-more; otherwise keep 1..1.
            if op.is_kleene_star:
                self.model.set_repeat(0, model.Element.INFINITY_REPEAT)
            elif op.is_plus:
                self.model.set_repeat(1, model.Element.INFINITY_REPEAT)
            else:
                self.model.set_repeat(1, 1)
            for tag in op.tags:
                self.model.add_tag(tag)
class Sequence(modgrammar.Grammar):
    """Two or more whitespace-separated expansions in a row."""
    grammar_whitespace_mode = 'explicit'
    grammar = (SequenceRuleExpansion,
               modgrammar.WHITESPACE,
               modgrammar.LIST_OF(SequenceRuleExpansion, sep=modgrammar.WHITESPACE, min=1))
    def grammar_elem_init(self, session_data):
        self.model = model.Sequence()
        self.model.add_element(self[0].model)
        # LIST_OF interleaves items with separators; step over separators.
        for idx in range(0, len(self[2].elements), 2):
            self.model.add_element(self[2][idx].model)
class AlternativeWeight(modgrammar.Grammar):
    """A '/number/ ' weight prefix in front of an alternative."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.L("/"), modgrammar.WORD("0-9.", "0-9.ef"), modgrammar.LITERAL("/"), modgrammar.WHITESPACE)
    def grammar_elem_init(self, session_data):
        # The weight is kept as the raw matched string.
        self.value = self[1].string
class AlternativeSeparator(modgrammar.Grammar):
    """The '|' between alternatives, with optional surrounding whitespace."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.OPTIONAL(modgrammar.WHITESPACE),
               modgrammar.LITERAL("|"),
               modgrammar.OPTIONAL(modgrammar.WHITESPACE))
class AlternativeRuleExpansion(modgrammar.Grammar):
    """A single alternative branch, with an optional unary operator."""
    grammar_whitespace_mode = 'explicit'
    grammar = (Token |
               QuotedToken |
               RuleReference |
               Sequence |
               Group |
               OptionalGroup,
               modgrammar.OPTIONAL(UnaryOperator))
    def grammar_elem_init(self, session_data):
        self.model = self[0].model
        op = self[1]
        if op is not None:
            # '*' means zero-or-more, '+' one-or-more; otherwise keep 1..1.
            if op.is_kleene_star:
                self.model.set_repeat(0, model.Element.INFINITY_REPEAT)
            elif op.is_plus:
                self.model.set_repeat(1, model.Element.INFINITY_REPEAT)
            else:
                self.model.set_repeat(1, 1)
            for tag in op.tags:
                self.model.add_tag(tag)
class Alternatives(modgrammar.Grammar):
    """Two or more '|'-separated alternatives, each optionally weighted."""
    grammar_whitespace_mode = 'explicit'
    grammar = (modgrammar.OPTIONAL(AlternativeWeight),
               AlternativeRuleExpansion,
               modgrammar.ONE_OR_MORE(AlternativeSeparator,
                                      modgrammar.OPTIONAL(AlternativeWeight),
                                      AlternativeRuleExpansion
                                      ))
    def grammar_elem_init(self, session_data):
        self.model = model.Alternatives()
        first = self[1].model
        if self[0] is not None:
            first.weight = self[0].value
        self.model.add_element(first)
        # Each repetition holds (separator, optional weight, expansion).
        for idx in range(len(self[2].elements)):
            part = self[2][idx]
            alternative = part[2].model
            if part[1] is not None:
                alternative.weight = part[1].value
            self.model.add_element(alternative)
class RuleExpansion(modgrammar.Grammar):
    """Any right-hand side of a rule, with an optional unary operator."""
    grammar_whitespace_mode = 'explicit'
    grammar = (Token | QuotedToken | RuleReference | Sequence | Alternatives | Group | OptionalGroup,
               modgrammar.OPTIONAL(UnaryOperator))
    def grammar_elem_init(self, session_data):
        self.model = self[0].model
        op = self[1]
        if op is not None:
            # '*' means zero-or-more, '+' one-or-more; otherwise keep 1..1.
            if op.is_kleene_star:
                self.model.set_repeat(0, model.Element.INFINITY_REPEAT)
            elif op.is_plus:
                self.model.set_repeat(1, model.Element.INFINITY_REPEAT)
            else:
                self.model.set_repeat(1, 1)
            for tag in op.tags:
                self.model.add_tag(tag)
class Rule(modgrammar.Grammar):
    """A JSGF rule definition: '[public] <name> = expansion;'."""
    grammar_whitespace_mode = 'optional'
    grammar = (modgrammar.OPTIONAL(PublicModifier),
               modgrammar.LITERAL("<"),
               JavaIdentifier,
               modgrammar.LITERAL(">"),
               modgrammar.LITERAL("="),
               RuleExpansion,
               modgrammar.LITERAL(";"))
    def grammar_elem_init(self, session_data):
        # A present 'public' modifier makes the rule public, else private.
        scope = model.Rule.SCOPE_PUBLIC if self[0] is not None else model.Rule.SCOPE_PRIVATE
        self.model = model.Rule(name=self[2].value, value=self[5].model, scope=scope)
class Grammar(modgrammar.Grammar):
    """A whole JSGF document: header followed by rule definitions."""
    grammar_whitespace_mode = 'optional'
    grammar = (Header, modgrammar.ZERO_OR_MORE(Rule), modgrammar.OPTIONAL(modgrammar.WHITESPACE), modgrammar.EOI)
    def grammar_elem_init(self, session_data):
        header = self[0]
        self.model = model.Grammar(name=header.name, language=header.locale, encoding=header.encoding)
        for idx in range(len(self[1].elements)):
            rule = self[1][idx].model
            self.model.add_rule(rule)
            # The first public rule becomes the grammar's root rule.
            if self.model.root_rule is None and rule.scope == model.Rule.SCOPE_PUBLIC:
                self.model.root_rule = rule
| {
"repo_name": "ynop/pyspeechgrammar",
"path": "pyspeechgrammar/jsgf/grammars.py",
"copies": "1",
"size": "12226",
"license": "mit",
"hash": -1061808625851100900,
"line_mean": 32.3133514986,
"line_max": 121,
"alpha_frac": 0.5956976934,
"autogenerated": false,
"ratio": 3.9160794362588085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000604987848118568,
"num_lines": 367
} |
__author__ = 'buec'
import re
from pyspeechgrammar import parser
from pyspeechgrammar.jsgf import grammars
class JSGFParser(parser.BaseParser):
    def parse_string(self, data):
        """Parse a JSGF document string and return the resulting grammar model.

        The input is normalized first (newlines around '|' collapsed, spaces
        padded around groups/rule references) before being handed to the
        modgrammar-generated parser; returns the top element's .model.
        """
        p = grammars.Grammar.parser()
        # remove newlines after alternative separator (performance issue)
        jsgf_string = self._remove_newlines_within_alternatives(data)
        # add whitespace before groups, rulerefs (easier and faster in parsing)
        jsgf_string = self._add_space_around_rule_expansions(jsgf_string)
        parsed_jsgf_data = p.parse_string(jsgf_string)
        return parsed_jsgf_data.model
def _remove_newlines_within_alternatives(self, jsgf_string):
regex = re.compile(r"\s*\|\s*")
return regex.sub(" | ", jsgf_string)
def _add_space_around_rule_expansions(self, jsgf_string):
result = str(jsgf_string).replace("(", " (")
result = str(result).replace(")", ") ")
result = str(result).replace("[", " [")
result = str(result).replace("]", "] ")
result = str(result).replace("<", " <")
result = str(result).replace(">", "> ")
return result
def write_string(self, model):
pass | {
"repo_name": "ynop/pyspeechgrammar",
"path": "pyspeechgrammar/jsgf/__init__.py",
"copies": "1",
"size": "1193",
"license": "mit",
"hash": -192487786456882700,
"line_mean": 30.4210526316,
"line_max": 79,
"alpha_frac": 0.6253143336,
"autogenerated": false,
"ratio": 3.5191740412979353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46444883748979354,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buec'
import xml.etree.ElementTree as et
from pyspeechgrammar import model
class SRGSXMLSerializer:
def create_grammar_element(self, grammar):
grammar_element = et.Element('grammar')
for rule in grammar.rules:
rule_element = self.create_rule_element(rule)
grammar_element.append(rule_element)
grammar_element.set('version', '1.0')
grammar_element.set('xmlns', 'http://www.w3.org/2001/06/grammar')
if grammar.language is not None:
grammar_element.set('xml:lang', grammar.language)
else:
grammar_element.set('xml:lang', 'en-US')
if grammar.root_rule is not None:
grammar_element.set('root', grammar.root_rule.name)
elif len(grammar.rules) > 0:
grammar_element.set('root', grammar.rules[0].name)
return grammar_element
    def create_rule_element(self, rule):
        """Serialize one grammar rule as an SRGS <rule> element."""
        rule_element = et.Element("rule")
        rule_element.set('id', rule.name)
        # Only public rules carry an explicit scope attribute.
        if rule.scope == model.Rule.SCOPE_PUBLIC:
            rule_element.set('scope', 'public')
        self.add_model_element_to_xml_element(rule_element, rule.value)
        return rule_element
    def add_model_element_to_xml_element(self, xml_element, model_element):
        """Dispatch on the model element's type and serialize it under *xml_element*.

        Each add_* helper returns the XML element representing the model
        element; repeat attributes and tags are then applied to that element.
        NOTE(review): if *model_element* matches none of the branches,
        the_element stays None and the calls below will fail -- presumably
        every concrete model type is covered; confirm against model.py.
        """
        the_element = None
        if isinstance(model_element, model.Token):
            the_element = self.add_token_to_xml_element(xml_element, model_element)
        elif isinstance(model_element, model.RuleReference):
            the_element = self.add_rule_reference_to_xml_element(xml_element, model_element)
        elif isinstance(model_element, model.Group):
            the_element = self.add_group_to_xml_element(xml_element, model_element)
        elif isinstance(model_element, model.OptionalGroup):
            the_element = self.add_optional_group_to_xml_element(xml_element, model_element)
        elif isinstance(model_element, model.Sequence):
            the_element = self.add_sequence_to_xml_element(xml_element, model_element)
        elif isinstance(model_element, model.Alternatives):
            the_element = self.add_alternatives_to_xml_element(xml_element, model_element)
        self.set_repetitions_on_xml_element(the_element, model_element)
        self.add_tags_to_element(the_element, model_element)
def add_token_to_xml_element(self, xml_element, token):
xml_element.text = token.value
return xml_element
def add_rule_reference_to_xml_element(self, xml_element, rule_reference):
ref_element = et.SubElement(xml_element, 'ruleref')
ref_element.set('uri', '#' + rule_reference.rule_name)
return ref_element
def add_group_to_xml_element(self, xml_element, group):
group_element = et.SubElement(xml_element, 'item')
self.add_model_element_to_xml_element(group_element, group.value)
return group_element
def add_optional_group_to_xml_element(self, xml_element, optional_group):
group_element = et.SubElement(xml_element, 'item')
group_element.set('repeat', '0-1')
self.add_model_element_to_xml_element(group_element, optional_group.value)
return group_element
def add_sequence_to_xml_element(self, xml_element, sequence):
seq_element = et.SubElement(xml_element,'item')
for element in sequence.elements:
item_element = et.SubElement(seq_element, 'item')
self.add_model_element_to_xml_element(item_element, element)
return seq_element
def add_alternatives_to_xml_element(self, xml_element, alternatives):
alt_element = et.SubElement(xml_element,'one-of')
for element in alternatives.elements:
item_element = et.SubElement(alt_element, 'item')
if element.weight != 1:
item_element.set('weight', str(element.weight))
self.add_model_element_to_xml_element(item_element, element)
return alt_element
def set_repetitions_on_xml_element(self, xml_element, model_element):
if model_element.min_repeat == 1 and model_element.max_repeat == 1:
return
if model_element.max_repeat == model.Element.INFINITY_REPEAT:
xml_element.set('repeat', '{}-'.format(model_element.min_repeat))
else:
xml_element.set('repeat', '{}-{}'.format(model_element.min_repeat, model_element.max_repeat))
def add_tags_to_element(self, xml_element, model_element):
for tag in model_element.tags:
tag_element = et.SubElement(xml_element, 'tag')
tag_element.text = tag.name | {
"repo_name": "ynop/pyspeechgrammar",
"path": "pyspeechgrammar/srgs_xml/serialize.py",
"copies": "1",
"size": "4614",
"license": "mit",
"hash": -5587511030674739000,
"line_mean": 39.1304347826,
"line_max": 105,
"alpha_frac": 0.6497615951,
"autogenerated": false,
"ratio": 3.7000801924619084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4849841787561909,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buec'
class Grammar:
    """Top-level speech grammar: a named, ordered collection of rules."""

    def __init__(self, name="", language="en-US", encoding=""):
        self.name = name
        self.language = language
        self.encoding = encoding
        self.rules = []
        self.root_rule = None

    def add_rule(self, rule):
        """Append *rule*; rule names must be unique within the grammar."""
        if self.contains_rule_with_name(rule.name):
            raise ValueError("Grammar already contains a rule with the name '{}'".format(rule.name))
        self.rules.append(rule)

    def remove_rule(self, rule):
        """Remove *rule* from the grammar."""
        self.rules.remove(rule)

    def contains_rule_with_name(self, rule_name):
        """Return True if some rule is named *rule_name*."""
        return any(rule.name == rule_name for rule in self.rules)

    def get_rule_with_name(self, rule_name):
        """Return the rule named *rule_name*, or None if absent."""
        for candidate in self.rules:
            if candidate.name == rule_name:
                return candidate
        return None
class Element:
    """Base class for all grammar model elements.

    Carries repeat bounds (min_repeat/max_repeat), a weight and a list of
    tags.  max_repeat may be Element.INFINITY_REPEAT (-1) for "unbounded".
    """

    INFINITY_REPEAT = -1

    def __init__(self, min_repeat=1, max_repeat=1, weight=1):
        self.min_repeat = 1
        self.max_repeat = 1
        self.tags = []
        self.weight = weight
        # Validate and apply the requested bounds.
        self.set_repeat(min_repeat, max_repeat)

    def set_repeat(self, min_repeat, max_repeat):
        """Set the repeat bounds after validating them.

        Raises ValueError when min_repeat is negative, max_repeat is below
        INFINITY_REPEAT, or min_repeat exceeds a finite max_repeat.
        """
        if min_repeat < 0:
            raise ValueError("Minimal repeat value ({}) has to be greater than or equal 0.".format(min_repeat))
        if max_repeat < Element.INFINITY_REPEAT:
            raise ValueError("Maximal repeat value ({}) has to be greater than or equal 0, or INFINITY_REPEAT.".format(max_repeat))
        if min_repeat > max_repeat and max_repeat != Element.INFINITY_REPEAT:
            # BUG FIX: the original message had the relation inverted
            # ("greater than or equal" instead of "less than or equal").
            raise ValueError(
                "Minimal repeat value ({}) has to be less than or equal to the maximal repeat value ({})".format(min_repeat, max_repeat))
        self.min_repeat = min_repeat
        self.max_repeat = max_repeat

    def add_tag(self, tag):
        """Attach *tag* to this element."""
        self.tags.append(tag)

    def remove_tag(self, tag):
        """Detach *tag* from this element."""
        self.tags.remove(tag)
class ElementContainer:
    """Mixin that holds an ordered list of child elements with index access."""

    def __init__(self):
        self.elements = []

    def add_element(self, element):
        """Append *element* to the container."""
        self.elements.append(element)

    def remove_element(self, element):
        """Remove *element* from the container."""
        self.elements.remove(element)

    def __getitem__(self, index):
        # Allow container[i] as a shortcut for container.elements[i].
        return self.elements[index]
class Rule(Element):
    """A named grammar rule: an expansion (*value*) plus a visibility scope."""

    SCOPE_PRIVATE = 'private'
    SCOPE_PUBLIC = 'public'

    def __init__(self, name="", value=None, scope=SCOPE_PRIVATE, min_repeat=1, max_repeat=1, weight=1):
        super(Rule, self).__init__(min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        self.name = name
        self.value = value
        self.scope = scope
class RuleReference(Element):
    """A reference to another rule by name."""

    def __init__(self, rule_name, min_repeat=1, max_repeat=1, weight=1):
        super(RuleReference, self).__init__(min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        self.rule_name = rule_name
class Tag:
    """A semantic tag attached to a grammar element."""

    def __init__(self, name):
        self.name = name
class Token(Element):
    """A terminal word with a literal string value."""

    def __init__(self, value="", min_repeat=1, max_repeat=1, weight=1):
        super(Token, self).__init__(min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        self.value = value
class Group(Element):
    """A parenthesized sub-expansion wrapping a single inner element."""

    def __init__(self, value, min_repeat=1, max_repeat=1, weight=1):
        super(Group, self).__init__(min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        self.value = value
class OptionalGroup(Element):
    """An optional sub-expansion wrapping a single inner element."""

    def __init__(self, value, min_repeat=1, max_repeat=1, weight=1):
        super(OptionalGroup, self).__init__(min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        self.value = value
class Sequence(Element, ElementContainer):
    """An ordered run of elements spoken one after another."""

    def __init__(self, min_repeat=1, max_repeat=1, weight=1):
        # Initialize both bases explicitly; they have unrelated __init__s.
        Element.__init__(self, min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        ElementContainer.__init__(self)
class Alternatives(Element, ElementContainer):
    """A choice between several elements; exactly one is spoken."""

    def __init__(self, min_repeat=1, max_repeat=1, weight=1):
        # Initialize both bases explicitly; they have unrelated __init__s.
        Element.__init__(self, min_repeat=min_repeat, max_repeat=max_repeat, weight=weight)
        ElementContainer.__init__(self)
| {
"repo_name": "ynop/pyspeechgrammar",
"path": "pyspeechgrammar/model.py",
"copies": "1",
"size": "4095",
"license": "mit",
"hash": 7675188225440731000,
"line_mean": 30.0227272727,
"line_max": 140,
"alpha_frac": 0.6161172161,
"autogenerated": false,
"ratio": 3.6271036315323295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9738816010927662,
"avg_score": 0.0008809673409333099,
"num_lines": 132
} |
__author__ = 'bug85'
#!python
# coding=utf-8
import os, sys, subprocess, hashlib, re, tempfile, binascii, base64
import rsa, requests
import tea
def fromhex(s):
    """Decode a hex string into bytes (portable across Python 2 and 3)."""
    return bytes(bytearray.fromhex(s))
# RSA public key used for password encryption: the modulus is given as a
# hex string, the public exponent is 3 (mirrors the key embedded in the
# reference JavaScript getEncryption() at the bottom of this file).
pubKey=rsa.PublicKey(int(
    'F20CE00BAE5361F8FA3AE9CEFA495362'
    'FF7DA1BA628F64A347F0A8C012BF0B25'
    '4A30CD92ABFFE7A6EE0DC424CB6166F8'
    '819EFA5BCCB20EDFB4AD02E412CCF579'
    'B1CA711D55B8B0B3AEB60153D5E0693A'
    '2A86F3167D7847A0CB8B00004716A909'
    '5D9BADC977CBB804DBDCBA6029A97108'
    '69A453F27DFDDF83C016D928B3CBF4C7',
    16
), 3)
def pwdencode(vcode, uin, pwd):
    """Encode a password for the QQ login protocol.

    Ports the JavaScript getEncryption() routine (commented below in this
    file): MD5 the password, RSA-encrypt that digest, salt a second MD5 with
    the uin bytes, TEA-encrypt the assembled payload and base64 it with a
    URL-safe character substitution.
    NOTE(review): statement order matters here -- do not reorder.
    """
    # uin is the bytes of QQ number stored in unsigned long (8 bytes)
    salt = uin.replace(r'\x', '')
    h1 = hashlib.md5(pwd.encode()).digest()
    s2 = hashlib.md5(h1 + fromhex(salt)).hexdigest().upper()
    rsaH1 = binascii.b2a_hex(rsa.encrypt(h1, pubKey)).decode()
    rsaH1Len = hex(len(rsaH1) // 2)[2:]
    hexVcode = binascii.b2a_hex(vcode.upper().encode()).decode()
    vcodeLen = hex(len(hexVcode) // 2)[2:]
    # Left-pad both length fields to 4 hex digits.
    l = len(vcodeLen)
    if l < 4:
        vcodeLen = '0' * (4 - l) + vcodeLen
    l = len(rsaH1Len)
    if l < 4:
        rsaH1Len = '0' * (4 - l) + rsaH1Len
    pwd1 = rsaH1Len + rsaH1 + salt + vcodeLen + hexVcode
    # TEA-encrypt with the salted digest as key, then base64 with the
    # protocol's '/+=' -> '-*_' substitution.
    saltPwd = base64.b64encode(
        tea.encrypt(fromhex(pwd1), fromhex(s2))
    ).decode().replace('/', '-').replace('+', '*').replace('=', '_')
    return saltPwd
# Demo invocation with hard-coded sample credentials (Python 2 print statement).
print pwdencode('!EMD','\\x00\\x00\\x00\\x00\\x04\\x87\\x4d\\xe4','Stringint123')
# function getEncryption(password, salt, vcode, isMd5) {
# vcode = vcode || "";
# password = password || "";
# var md5Pwd = isMd5 ? password : md5(password)
# , h1 = hexchar2bin(md5Pwd)
# , s2 = md5(h1 + salt)
# , rsaH1 = $pt.RSA.rsa_encrypt(h1)
# , rsaH1Len = (rsaH1.length / 2).toString(16)
# , hexVcode = TEA.strToBytes(vcode.toUpperCase(), true)
# , vcodeLen = Number(hexVcode.length / 2).toString(16);
# while (vcodeLen.length < 4) {
# vcodeLen = "0" + vcodeLen
# }
# while (rsaH1Len.length < 4) {
# rsaH1Len = "0" + rsaH1Len
# }
# TEA.initkey(s2);
# var saltPwd = TEA.enAsBase64(rsaH1Len + rsaH1 + TEA.strToBytes(salt) + vcodeLen + hexVcode);
# TEA.initkey("");
# setTimeout(function() {
# __monitor(488358, 1)
# }
# , 0);
# return saltPwd.replace(/[\/\+=]/g, function(a) {
# return {
# "/": "-",
# "+": "*",
# "=": "_"
# }[a]
# }
# )
# } | {
"repo_name": "azber/QQLib-python",
"path": "qq_lib.py",
"copies": "1",
"size": "2572",
"license": "apache-2.0",
"hash": 4017790604384419000,
"line_mean": 31.5696202532,
"line_max": 102,
"alpha_frac": 0.5754276827,
"autogenerated": false,
"ratio": 2.454198473282443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35296261559824427,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bukun@osgeo.cn'
import tornado.web
from pycate.model.catalog_model import MCatalog
from pycate.module import imgslide_module
from pycate.module import refreshinfo_module
from pycate.module import showjianli_module
ImgSlide = imgslide_module.ImgSlide
RefreshInfo = refreshinfo_module.RefreshInfo
ShowJianli = showjianli_module.ShowJianli
class UserInfo(tornado.web.UIModule):
    """Render the user-info panel for *uinfo* with the operations *uop*."""

    def render(self, uinfo, uop):
        return self.render_string('modules/user_info.html', userinfo=uinfo, userop=uop)
class VipInfo(tornado.web.UIModule):
    """Render the VIP-info panel for *uinfo* with the VIP data *uvip*."""

    def render(self, uinfo, uvip):
        return self.render_string('modules/vip_info.html', userinfo=uinfo, uservip=uvip)
class ToplineModule(tornado.web.UIModule):
    """Render the static top-line bar."""

    def render(self):
        return self.render_string('modules/topline.html')
class BannerModule(tornado.web.UIModule):
    """Render the catalog banner with the top-level category list."""

    # NOTE(review): tornado's UIModule.__init__ normally receives the handler;
    # overriding it with (parentid='') looks incompatible with framework
    # instantiation -- confirm how this module is constructed.
    def __init__(self, parentid=''):
        self.parentid = parentid

    def render(self):
        # A fresh MCatalog is created on every render.
        self.mcat = MCatalog()
        parentlist = self.mcat.get_parent_list()
        kwd = {
            'parentlist': parentlist,
            'parentid': self.parentid,
        }
        return self.render_string('modules/banner.html', kwd=kwd)
class BreadCrumb(tornado.web.UIModule):
    """Render the breadcrumb navigation for *info*."""

    def render(self, info):
        return self.render_string('modules/bread_crumb.html', info=info)
class ContactInfo(tornado.web.UIModule):
    """Render contact info with the visitor's IP last octet masked."""

    def render(self, info):
        # Mask the final octet of the poster's IP, e.g. 1.2.3.4 -> 1.2.3.*
        octets = info['userip'][0].split('.')
        octets[3] = '*'
        kwd = {
            'maskip': '.'.join(octets),
        }
        return self.render_string('modules/contact_info.html', post_info=info, kwd=kwd)
class BreadcrumbPublish(tornado.web.UIModule):
    """Render the publish-page breadcrumb; *sig* selects the current step."""

    def render(self, sig=0):
        return self.render_string('modules/breadcrumb_publish.html', kwd={'sig': sig})
class InfoList:
def renderit(self, info=''):
zhiding_str = ''
tuiguang_str = ''
imgname = 'fixed/zhanwei.png'
if len(info['mymps_img']) > 0:
imgname = info['mymps_img'][0]
if info['def_zhiding'] == 1:
zhiding_str = '<span class="red">(已置顶)</span>'
if info['def_tuiguang'] == 1:
tuiguang_str = '<span class="red">(已推广)</span>'
list_type = info['catid']
kwd = {
'imgname': imgname,
'zhiding': zhiding_str,
'tuiguan': tuiguang_str,
}
return self.render_string('infolist/infolist_{0}.html'.format(list_type),
kwd=kwd,
post_info=info) | {
"repo_name": "jiaxiaolei/pycate",
"path": "core/modules.py",
"copies": "1",
"size": "2642",
"license": "mit",
"hash": 33791322681051800,
"line_mean": 28.4719101124,
"line_max": 90,
"alpha_frac": 0.6003051106,
"autogenerated": false,
"ratio": 3.229064039408867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4329369150008867,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bukun'
# __all__ = ['get_uid', 'md5','get_timestamp', 'get_time_str', 'markit']
import uuid
import hashlib
import time
def get_uid():
    """Return a fresh time-based UUID (uuid1) as a string."""
    return str(uuid.uuid1())
def md5(instr):
    """Return the hex MD5 digest of the text string *instr* (UTF-8 encoded)."""
    digest = hashlib.md5()
    digest.update(instr.encode('utf-8'))
    return digest.hexdigest()
def get_timestamp():
    """Return the current Unix time truncated to an int."""
    return int(time.time())
def get_time_str(timestamp):
    """Format a Unix *timestamp* as 'YYYY-mm-dd HH:MM:SS' in local time."""
    local = time.localtime(timestamp)
    return time.strftime("%Y-%m-%d %H:%M:%S", local)
def mark_it():
    """Print a 20-character '=' divider (debug aid)."""
    print(20 * '=')
def gen_pager(catalog_slug, total_page_num, current_page_num):
    """Build the HTML pager markup for a catalog listing.

    Returns '' when there is only one page; otherwise the concatenation of
    first / previous / numbered / next / last link fragments.  Links that do
    not apply on the current page get the 'hidden' CSS class.
    """
    if total_page_num == 1:
        return ''
    first_link = '''
    <br/>
    <ul class="yiiPager">
    <li class="first {0} ">
    <a href="/{1}/1"><< 首页</a>
    </li>'''.format( 'hidden' if current_page_num <= 1 else '', catalog_slug)
    prev_link = '''
    <li class="previous {0}"><a href="/{1}/{2}" class="previous">< 前页</a>
    </li>
    '''.format('hidden' if current_page_num <= 1 else '', catalog_slug, current_page_num - 1)
    # One numbered entry per page; the current page is marked 'selected'.
    number_links = ''.join(
        '''
        <li class="page {0}"><a href="/{1}/{2}" class="page">{2}</a></li>
        '''.format('selected' if page == current_page_num else '', catalog_slug, page)
        for page in range(1, total_page_num + 1))
    next_link = '''
    <li class="next {0}"><a href="/{1}/{2}" class="page">后页 ></a>
    </li>
    '''.format('hidden' if current_page_num >= total_page_num else '', catalog_slug, current_page_num + 1)
    last_link = '''
    <li class="last {0}"><a href="/{1}/{2}" >末页
    >></a>
    </li></ul>
    '''.format('hidden' if current_page_num >= total_page_num else '', catalog_slug, total_page_num)
    return first_link + prev_link + number_links + next_link + last_link
"repo_name": "jiaxiaolei/pycate",
"path": "libs/tool.py",
"copies": "1",
"size": "2096",
"license": "mit",
"hash": -8400251608777745000,
"line_mean": 31.5806451613,
"line_max": 118,
"alpha_frac": 0.5067307692,
"autogenerated": false,
"ratio": 3.2,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9180859352733437,
"avg_score": 0.005174283293312654,
"num_lines": 62
} |
__author__ = 'bukun'
from torlite.model.mpost import MPost
def get_dic():
    """Load keyword entries from ./keywords_dic.txt.

    Returns a list of whitespace-split token lists, one per non-empty line.
    """
    entries = []
    with open('./keywords_dic.txt') as fi:
        for line in fi:
            line = line.strip()
            if line:
                entries.append(line.split())
    return entries
def do_for_x(rec):
    """Score dictionary keywords against the post *rec*; return the top 8.

    Title occurrences weigh 0.3, body (cnt_md) occurrences 0.2 and the
    keyword's own weight 0.5; only keywords found in the title are kept.
    """
    scores = {}
    for entry in get_dic():
        word = entry[0]
        title_hits = rec.title.count(word)
        body_hits = rec.cnt_md.count(word)
        if title_hits > 0:
            scores[word] = title_hits * .3 + body_hits * .2 + int(entry[1]) * .5
    ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[:8]
if __name__ == '__main__':
    # Batch job: fill in keywords for every post that has none yet; posts
    # with no keyword matches get a fixed default string.
    mpost = MPost()
    uu = mpost.query_keywords_empty()
    for x in uu:
        tt = (do_for_x(x))
        # NOTE(review): the comprehension reuses the name x; this is safe in
        # Python 3 (comprehension scope) but would clobber the loop variable
        # in Python 2.
        vv = [x[0] for x in tt]
        if len(vv) > 0:
            print(','.join(vv))
            mpost.update_keywords(x.uid, ','.join(vv))
        else:
            mpost.update_keywords(x.uid, 'OSGeo中国中心,开放地理空间实验室')
| {
"repo_name": "Geoion/TorCMS",
"path": "update_keywords.py",
"copies": "3",
"size": "1112",
"license": "mit",
"hash": -6203769971211191000,
"line_mean": 22.6818181818,
"line_max": 76,
"alpha_frac": 0.4659300184,
"autogenerated": false,
"ratio": 2.865435356200528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48313653746005275,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bukun'
import pickle
class cNode(object):
    """A trie node; children is lazily created as a dict of char -> (node, is_word_end)."""

    def __init__(self):
        self.children = None
# The encode of word is UTF-8
# The encode of message is UTF-8
class cDfa(object):
    """Trie-based keyword filter.

    Each node's children dict maps one character to a tuple
    (child_node, is_word_end).
    """
    def __init__(self):
        # self.pklfile = 'sdaf.pkl'
        self.root=cNode()
    # The encode of word is UTF-8
    def addWord(self,word):
        """Insert *word* into the trie, character by character."""
        node = self.root
        iEnd=len(word)-1
        for i in range(len(word)):
            if node.children == None:
                # First child of this node: create the dict and the entry.
                node.children = {}
                if i!=iEnd:
                    node.children[word[i]]=(cNode(),False)
                else:
                    node.children[word[i]]=(cNode(),True)
            elif word[i] not in node.children:
                if i!=iEnd:
                    node.children[word[i]]=(cNode(),False)
                else:
                    node.children[word[i]]=(cNode(),True)
            else: #word[i] in node.children:
                if i==iEnd:
                    # The prefix already exists: just mark it as a full word.
                    Next,bWord=node.children[word[i]]
                    node.children[word[i]]=(Next,True)
            node=node.children[word[i]][0]
    def isContain(self,sMsg):
        """Return True if any stored word occurs anywhere in *sMsg*."""
        root=self.root
        iLen=len(sMsg)
        for i in range(iLen):
            # Try to match a stored word starting at offset i.
            p = root
            j = i
            while (j<iLen and p.children!=None and sMsg[j] in p.children):
                (p,bWord) = p.children[sMsg[j]]
                if bWord:
                    return True
                j = j + 1
        return False
    def filter(self,sMsg):
        """Return *sMsg* with every matched word replaced by '*' characters."""
        lNew=[]
        root=self.root
        iLen=len(sMsg)
        i=0
        bContinue=False
        while i<iLen:
            p=root
            j=i
            while (j<iLen and p.children!=None and sMsg[j] in p.children):
                (p,bWord) = p.children[sMsg[j]]
                if bWord:
                    # Replace the matched keyword with asterisks and resume
                    # scanning just past it.
                    lNew.append(u'*'*(j-i+1))
                    i=j+1
                    bContinue=True
                    break
                j=j+1
            if bContinue:
                bContinue=False
                continue
            # No match starting here: keep the character as-is.
            lNew.append(sMsg[i])
            i=i+1
        return ''.join(lNew)
# Build the module-level filter instance at import time from the pickled
# keyword set.  NOTE: the name 'filter' shadows the built-in filter().
filter = cDfa()
kwf = './resource/keywords.pkl'
with open(kwf,'rb') as fi:
    keywords_set = pickle.load(fi)
    for keyword in keywords_set:
        filter.addWord(keyword)
# Free the raw keyword set once it has been loaded into the trie.
del(keywords_set)
def containwhich(msg_str):
    """Debug helper: stop at the first pickled keyword found in *msg_str*.

    Returns None either way; the original printed the match (now removed).
    """
    # BUG FIX: the original opened the pickle file without ever closing it;
    # use a context manager so the handle is always released.
    with open(kwf, 'rb') as tmp_fi:
        tmp_keywords_set = pickle.load(tmp_fi)
    for x in tmp_keywords_set:
        if x in msg_str:
            return
| {
"repo_name": "jiaxiaolei/pycate",
"path": "libs/dfa.py",
"copies": "1",
"size": "2827",
"license": "mit",
"hash": 7184112677823788000,
"line_mean": 26.4545454545,
"line_max": 74,
"alpha_frac": 0.4437344693,
"autogenerated": false,
"ratio": 3.6727509778357237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46164854471357236,
"avg_score": null,
"num_lines": null
} |
__author__ = 'buyvich'
from pprint import pprint
import logging
import functools
import json
import sqlalchemy
from tornado.web import RequestHandler
from weekly_training.settings import TemplateEngine, get_session
from weekly_training.models import *
LOG = logging.getLogger()
def auth(f):
    """Decorator: require the 'user' secure cookie, else redirect to /login/.

    BUG FIX: the original fell through and still invoked the wrapped handler
    after redirecting; now anonymous requests skip the handler entirely.
    """
    @functools.wraps(f)
    def wrapper(s, *args, **kwargs):
        '''
        @type s: RequestHandler
        '''
        user = s.get_secure_cookie('user')
        if not user:
            s.redirect('/login/')
            return None
        return f(s, *args, **kwargs)
    return wrapper
class BaseHandler(RequestHandler):
    """Common base handler: opens a DB session and resolves the current user."""

    def initialize(self):
        # Called by tornado before each request; attach a fresh DB session.
        self.session = get_session()

    def get_current_user(self):
        """Return the User matching the 'user' secure cookie, or None."""
        login = self.get_secure_cookie('user')
        if not login:
            return None
        return self.session.query(User).filter(
            User.login == login).one()
class IndexHandler(BaseHandler):
    """Render the landing page for authenticated users."""

    @auth
    def get(self):
        template = TemplateEngine.get_template('index.html')
        html = template.render_unicode(xsrf=self.xsrf_token,
                                       user=self.get_current_user())
        self.write(html)
class TrainingHandler(BaseHandler):
    """JSON API for the current user's trainings."""

    @auth
    def get(self):
        """Return all trainings of the logged-in user as a JSON list."""
        # Debug pprint of the request (which dumps request data to stdout)
        # and dead commented-out code removed.
        user = self.get_current_user()
        trainings = self.session.query(Training).filter(
            Training.user == user
        ).all()
        result = [training.to_dict() for training in trainings]
        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(result))

    @auth
    def post(self):
        """Create a new training from the JSON request body."""
        user = self.get_current_user()
        data = json.loads(self.request.body)
        tng = Training(name=data['name'], goal=data['goal'],
                       user=user, units=data['units'])
        self.session.add(tng)
        self.session.commit()
        self.write('OK')
class LoginHandler(BaseHandler):
    """Login form: GET renders it, POST validates credentials and sets the cookie."""

    def get(self):
        template = TemplateEngine.get_template('login.html')
        self.write(template.render_unicode(xsrf=self.xsrf_token))

    def post(self):
        login = self.get_argument('username')
        password = self.get_argument('password')
        try:
            self.session.query(User).filter(
                User.login == login,
                User.password == password).one()
        except sqlalchemy.orm.exc.NoResultFound:
            # BUG FIX: return after redirecting; the original fell through
            # and set the auth cookie even when the credentials were wrong.
            self.redirect('/login/')
            return
        self.set_secure_cookie('user', login)
        self.redirect('/')
class LogoutHandler(BaseHandler):
    """Clear the auth cookie and send the user back to the start page."""

    def get(self):
        self.clear_cookie('user')
        self.redirect('/')
"repo_name": "gh0st-dog/weekly-tng",
"path": "weekly_training/handlers.py",
"copies": "1",
"size": "2774",
"license": "mit",
"hash": -2235702650978240300,
"line_mean": 24.6944444444,
"line_max": 73,
"alpha_frac": 0.5937274694,
"autogenerated": false,
"ratio": 4.002886002886003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005413105413105413,
"num_lines": 108
} |
__author__ = 'bwagner'
import os
from ConfigParser import SafeConfigParser
from ScriptLog import log, log2, closeLog, error, warning, info, debug, entry, exit, lopen, handleException
# Module init: read log settings from base.cfg and open the script log.
try:
    config = SafeConfigParser()
    config.read( './Configuration/base.cfg' )
    logFileDir = str( config.get( "base", "logDir" ) )
    logFile = str( config.get( "base", 'dhcpTodnsmasqLogFile') )
    # lopen() opens the log used by entry()/exit()/log2() below.
    lopen( logFile, logFileDir )
    entry( "Entering dhcpTodnsmasq.init" )
    # NOTE(review): exit() here is ScriptLog's log helper, shadowing the builtin.
    exit( "Exiting dhcpTodnsmasq.init" )
except:
    # Bare except is deliberate: record whatever failed, then re-raise.
    handleException( "dhcpTodnsmasq.init" )
    raise
class convertDhcpdTodnsmasqconf:
    # NOTE(review): empty placeholder class — nothing in this module
    # instantiates it; the work happens in processDhcpdConfFile() below.
    def __init__ ( self ):
        pass
def processDhcpdConfFile():
    """Translate ISC dhcpd.conf host entries into dnsmasq dhcp-host lines.

    Reads the dhcpd config named in base.cfg, writes the generated
    dnsmasq dhcp config into the configured output directory.
    Logs via handleException and re-raises on any failure.
    """
    entry("Opening DHCPD Config file")
    try:
        # Locations come from the already-parsed base.cfg
        dhcpConFileLocation = str( config.get("base","dhcpdConfigFile"))
        # FIX: the file was previously opened here a first time and never
        # closed (leaked handle); it is opened once, below, in a 'with'.
        dnsmasqConfDir = str(config.get("base","dnsmasqConfigDir"))
        dnsmasqDHCPFileName = str(config.get("base","dhcpDnsmasqConfFile"))
        # Create the output directory if it doesn't exist yet
        if not os.path.isdir( dnsmasqConfDir ):
            log2( "Log directory %s does not exist" % dnsmasqConfDir)
            try:
                os.mkdir( dnsmasqConfDir )
            except:
                handleException( "Unable to create log directory" )
                raise
        # Double check to make sure the directory exists.
        # FIX: a bare `raise` with no active exception is itself an error;
        # raise a real exception with a message instead.
        if not os.path.isdir( dnsmasqConfDir ):
            log2("%s is not a directory" % dnsmasqConfDir)
            raise RuntimeError("%s is not a directory" % dnsmasqConfDir)
        # It exists — make sure it's writable (logged via log2 for
        # consistency with the branches above; was a bare print).
        if not os.access( dnsmasqConfDir, os.W_OK ):
            log2("Directory %s is not writable" % dnsmasqConfDir)
            raise RuntimeError("Directory %s is not writable" % dnsmasqConfDir)
        # Join the directory and filename so we can open the output file
        dnsmasqDHCPFile = os.path.join( dnsmasqConfDir , dnsmasqDHCPFileName )
        # 'with' closes both files cleanly even if we crash mid-way.
        with open(dnsmasqDHCPFile,mode='w') as dnsmasqConf:
            with open(dhcpConFileLocation,mode='r') as dhcpConFile:
                count = 0
                # FIX: defaults so a malformed "host { }" block without an
                # ethernet/fixed-address line cannot raise NameError.
                name = macaddress = ipaddress = ""
                for line in dhcpConFile:
                    # "host <name> {" opens an entry
                    if "host" in line and "{" in line:
                        splitline = line.split()
                        name = str(splitline[1])
                        count = count + 1
                    # "hardware ethernet <mac>;"
                    if "ethernet" in line:
                        splitline = line.split()
                        macaddress = str(splitline[2].strip(";"))
                    # "fixed-address <ip>;"
                    if "fixed-address" in line:
                        splitline = line.split()
                        ipaddress = str(splitline[1].strip(";"))
                    # The closing "}" finishes the entry: emit it
                    if "}" in line and count > 0:
                        dnsmasqConf.write("dhcp-host=%s,%s,%s\n" % (name,macaddress,ipaddress))
                        count = 0
                    if "default-lease-time" in line:
                        maxLease = line.split()
                        dnsmasqConf.write("dhcp-lease-max=%s\n" % (maxLease[1].strip().strip(";")))
                    if "ntp-servers" in line:
                        ntpServers = line.split()
                        dnsmasqConf.write("dhcp-option=option:ntp-server,%s\n" % (ntpServers[2].strip(";")))
                    if "routes" in line:
                        routes = line.split()
                        dnsmasqConf.write("dhcp-option=121,%s/255.255.255.0" % (routes[2]))
    except:
        handleException("openDhcpdConfFile")
        raise
processDhcpdConfFile()
| {
"repo_name": "wags007/BIND_DHCP_to_dnsmasq",
"path": "dhcpTodnsmasq.py",
"copies": "1",
"size": "4491",
"license": "apache-2.0",
"hash": 3655429254421607000,
"line_mean": 47.8152173913,
"line_max": 112,
"alpha_frac": 0.5315074594,
"autogenerated": false,
"ratio": 4.411591355599215,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014181791400689469,
"num_lines": 92
} |
__author__ = 'bwagner'
import sys, argparse
import json
#from fabric.api import *
# Import-time check for the third-party 'requests' dependency.
# NOTE(review): on failure this only prints instructions and continues —
# later uses of `requests` will then fail with NameError.
try:
    import requests
    from requests.auth import HTTPBasicAuth
    # if int(requests.__version__.split('.')[1]) < 12:
    #     print "You may have to upgrade your python requests version!"
except:
    print "Please install python requests!"
    print "Instructions can be found here:"
    print "http://docs.python-requests.org/en/latest/user/install/"
# Auphonic API url
# NOTE(review): API_URL is unused in this chunk — presumably consumed elsewhere.
API_URL = "https://auphonic.com/api/simple/productions.json"
SERVICES_URL = "https://auphonic.com/api/services.json"
def Get_Services(username,password):
try:
result = requests.get(SERVICES_URL,auth=HTTPBasicAuth(str(username),str(password)))
if result.status_code == 200:
print "We got %s" % str(result.status_code)
#print result.json()
Services = result.json()
return Services[u'data']
else:
print "We got a non-200 response: %s" % result.status_code
raise IOError("Failed to connect to the Auphonic Service")
except:
raise
def main(argv):
argumentParser = argparse.ArgumentParser(description='Script to pull Hangouts from Youtube and push them to Auphonic')
argumentParser.add_argument('--episode-regex',dest='epregex',default='*',
help='If you want to find an episode by regex expression')
argumentParser.add_argument('--outputname',dest='outputname',default='None',
help='The name of the file to be uploaded')
argumentParser.add_argument('--channel-url',dest='channelUrl',default='None',
help='The location of your channel on youtube')
args = argumentParser.parse_args()
print args.epregex
print args.outputname
print args.channelUrl
if __name__ == "__main__":
main(sys.argv[1:])
try:
ServiceList = Get_Services("wags007@gmail.com","69dpRSTsNKlv")
except Exception as e:
raise type(e)(e.message)
ServiceCount = 0
print "Available Services:"
#print " Type : UUID"
while ServiceCount < len(ServiceList):
ServiceDict = ServiceList[ServiceCount]
print (" %s : %s " % (ServiceDict["type"], ServiceDict['uuid']))
ServiceCount = ServiceCount + 1
| {
"repo_name": "wags007/podcastMaster",
"path": "AuphonicsProcessing/processHangout.py",
"copies": "1",
"size": "2423",
"license": "apache-2.0",
"hash": -3741447997403820000,
"line_mean": 36.859375,
"line_max": 126,
"alpha_frac": 0.6108130417,
"autogenerated": false,
"ratio": 4.058626465661642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5169439507361642,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bwall'
import json
import os
import time
import ExtractHosts
import base64
def create_command_to_run(pbot, output_folder=None, proxy=None):
    """Build an ircsnapshot.py command line from one pbot record.

    Returns the command string, or None when the record has no server.
    SECURITY(review): the returned string embeds values straight from the
    input record and is later passed to os.system — shell-injectable.
    """
    if proxy is None:
        proxy = ""
    else:
        proxy = "--proxy {0}".format(proxy)
    password = ""
    if 'pass' in pbot['information']:
        password = "-p '{0}'".format(pbot['information']['pass'])
    server = ""
    if 'server' in pbot['information']:
        server = pbot['information']['server']
        # Servers that don't parse as a domain/IPv4/IPv6 are presumably
        # base64-encoded; best-effort decode, keep as-is on failure.
        if ExtractHosts.extract_domain(server) is None and ExtractHosts.extract_ipv4(
                server) is None and ExtractHosts.extract_ipv6(server) is None:
            try:
                server = base64.b64decode(server)
            except:
                pass
    # NOTE(review): indentation reconstructed — this else is assumed to pair
    # with the `'server' in ...` check (no server -> no command); confirm.
    else:
        return None
    port = ""
    if 'port' in pbot['information']:
        port = pbot['information']['port']
    if output_folder is None:
        output_folder = ""
    else:
        output_folder = "-o {0}".format(output_folder)
    return "python ircsnapshot.py {0} {4} {1} {2} {3}".format(proxy, password, server, port, output_folder)
# Script body: build one ircsnapshot command per bot record in dump.json
# and run each unique command sequentially.
with open("dump.json", "r") as f:
    doc = json.load(f)
cmds = []
for key in doc.keys():
    cmd = create_command_to_run(doc[key], "fbi2")
    if cmd is not None and cmd not in cmds:
        print cmd
        cmds.append(cmd)
        # SECURITY(review): cmd contains fields taken straight from
        # dump.json in a shell string — a crafted record can inject
        # shell commands; prefer subprocess with an argument list.
        os.system(cmd)
"repo_name": "bwall/ircsnapshot",
"path": "ircsnapshot/run_from_json.py",
"copies": "1",
"size": "1196",
"license": "mit",
"hash": -7179033803488863000,
"line_mean": 22.4705882353,
"line_max": 104,
"alpha_frac": 0.6588628763,
"autogenerated": false,
"ratio": 2.9029126213592233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4061775497659223,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bwall'
import markovobfuscate.obfuscation as obf
import logging
import re
import random
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    # Regular expression to split our training files on
    split_regex = r'\n'
    # File/book to read for training the Markov model (will be read into memory)
    training_file = "datasets/lyrics.ts.txt"
    # Obfuscating Markov engine — two independent engines trained on the
    # same shared key so one can obfuscate and the other deobfuscate.
    m1 = obf.MarkovKeyState()
    m2 = obf.MarkovKeyState()
    # Read the shared key into memory
    with open(training_file, "r") as f:
        text = f.read()
    # Split learning data into sentences, in this case, based on periods.
    # (map() is used for its side effect — eager on Python 2.)
    map(m1.learn_sentence, re.split(split_regex, text))
    map(m2.learn_sentence, re.split(split_regex, text))
    try:
        logging.info("Hit CTRL-C to stop testing")
        while True:
            # Round-trip a random byte string through obfuscate/deobfuscate
            rand_string = "".join([chr(random.randint(0, 255)) for k in xrange(random.randint(1, 1024))])
            if rand_string != m2.deobfuscate_string(m1.obfuscate_string(rand_string)):
                print "Failed integrity test"
                # NOTE(review): bare `raise` with no active exception does
                # not re-raise anything — it errors out itself (TypeError on
                # Python 2); an explicit AssertionError would be clearer.
                raise
    except KeyboardInterrupt:
        pass
"repo_name": "bwall/markovobfuscate",
"path": "testing.py",
"copies": "1",
"size": "1182",
"license": "mit",
"hash": 2767963912356903400,
"line_mean": 30.972972973,
"line_max": 105,
"alpha_frac": 0.641285956,
"autogenerated": false,
"ratio": 3.592705167173252,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47339911231732523,
"avg_score": null,
"num_lines": null
} |
__author__ = 'byt3smith'
#
# Generates a dir for carbonblack feeds
# Can also stand up a SimpleHTTPServer to host the feeds
#
#stdlib
from os import chdir, listdir, mkdir, getcwd, path
import http.server
import socketserver
from re import sub, search
from json import dump, loads
from socket import gethostname
#pypi
from colorama import Fore, Back, Style, init
#local
from .feeds import FeedModules
from .tools import regex
from .cb import generate_feed
# Initialize colorama
init(autoreset=True)
def gen_feed_list():
    """Return feed names derived from files in intel/ ending in '_ioc'.

    'foo_ioc' -> 'foo'.  FIX: only the trailing '_ioc' suffix is
    stripped; the old re.sub removed every '_ioc' occurrence, mangling
    names that contain '_ioc' in the middle.
    """
    suffix = '_ioc'
    return [f[:-len(suffix)] for f in listdir('intel') if f.endswith(suffix)]
def run_feed_server():
    """Serve the generated JSON feeds over HTTP on port 8000 (blocking)."""
    #stands up the feed server, points to the CB/json_feeds dir
    # NOTE: chdir is process-wide — every relative path afterwards
    # resolves under data/json_feeds/.
    chdir('data/json_feeds/')
    port = 8000
    handler = http.server.SimpleHTTPRequestHandler
    httpd = socketserver.TCPServer(("", port), handler)
    try:
        print((Fore.GREEN + '\n[+]' + Fore.RESET), end=' ')
        print(('Feed Server listening at http://%s:8000' % gethostname()))
        httpd.serve_forever()
    except:
        # Bare except doubles as the CTRL-C (KeyboardInterrupt) handler.
        print((Fore.RED + '\n[-]' + Fore.RESET), end=' ')
        print("Server exited")
        return
def cb_gen(run_mode):
    """Prepare the cb/ output tree, then dispatch on run_mode:
    'a' -> generate all feeds, 'i' -> interactive single feed.
    Any other value silently does nothing.
    """
    #cbfeed generator
    feed_list = gen_feed_list()
    # Check for data/cb/ dir
    if path.isdir("cb/"):
        pass
    else:
        try:
            mkdir("cb/")
        except:
            print((Fore.RED + '[-] ' + Fore.RESET + 'Could not create data/cb/ directory'))
            exit()
    # Check for feed_meta dir
    if path.isdir("cb/feed_meta/"):
        feedinfo = listdir("cb/feed_meta/")
    else:
        try:
            mkdir('cb/feed_meta')
            feedinfo = listdir("cb/feed_meta/")
        except:
            print((Fore.RED + '[-] ' + Fore.RESET + 'Error creating feed_meta directory, may need to adjust permissions'))
            exit()
    #Check for JSON_feed dir
    if path.isdir("cb/json_feeds/"):
        pass
    else:
        try:
            mkdir('cb/json_feeds')
        except:
            print((Fore.RED + '[-] ' + Fore.RESET + ' Error creating json_feeds directory, may need to adjust permissions'))
            exit()
    ## Run function based on CLI args
    if run_mode == 'a':
        # run all feeds
        generate_all(feed_list, feedinfo)
    elif run_mode == 'i':
        # list all feeds for selection
        generate_one(feed_list, feedinfo)
    return
def create_json_feed(meta, json_path):
    """Render CarbonBlack feed JSON from *meta* and write it to *json_path*.

    Prints progress; exits the process on write failure (historical
    behaviour, preserved).
    """
    #Creating JSON feed using scripts in cbfeeds/
    data = generate_feed.create_feed(meta)
    #print data
    try:
        print((Fore.YELLOW + '[*]' + Fore.RESET), end=' ')
        print('Saving report to: %s' % json_path)
        # FIX: 'with' guarantees the handle is flushed and closed
        # (was an unclosed open(...).write(...) left to the GC).
        with open(json_path, 'w+') as report:
            report.write(data)
    except:
        print((Fore.RED + '[-]' + Fore.RESET), end=' ')
        print('Could not dump report to %s' % json_path)
        exit(0)
    return
def generate_all(feed_list, feedinfo):
    """For every feed: ensure metadata exists (prompting if not), then
    build its JSON feed file.
    """
    # Check for feed metadata
    print((Fore.YELLOW + '[*] ' + Fore.RESET + 'Checking for existing feed metadata necessary to generate feeds...\n'))
    for f in feed_list:
        #check for feed_info files correlating to feed_list
        json_path = 'cb/json_feeds/%s' % f
        if f in feedinfo:
            print(('\n' + f + ': ' + '[' + Fore.GREEN + '  yes  ' + Fore.RESET + ']'))
        else:
            print(('\n' + f + ': ' + '[' + Fore.RED + '  no  ' + Fore.RESET + ']'))
            meta = get_feed_info(f)
        #generate json_feed for feed module
        # NOTE(review): the file handle below is never closed explicitly.
        meta_file = 'cb/feed_meta/%s' % f
        meta = open(meta_file, 'r').read()
        try:
            loads(meta) # checks that meta file is valid JSON string
        except:
            print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
            print(('%s is not valid JSON.\nWould you like to create a valid metadata file?' % meta_file))
            choice = input('> (y/n) ')
            # NOTE(review): these returns abort the WHOLE loop, skipping
            # every remaining feed — presumably intentional; confirm.
            if choice == 'y':
                meta = get_feed_info(f)
                return
            elif choice == 'n':
                print((Fore.YELLOW + '[*] Moving on..'))
                return
            else:
                print((Fore.RED + '[!] Invalid choice. Better luck next time..'))
                exit(0)
        create_json_feed(meta, json_path)
def generate_one(feed_list, feedinfo):
    """Placeholder: will eventually build a single selected feed."""
    banner = Fore.YELLOW + '[*] ' + Fore.RESET + ' soon to be individual feed generation'
    print(banner)
def get_feed_info(f):
    """Interactively gather feed metadata for feed *f*, write it as JSON
    to cb/feed_meta/<f>, and return the dump() result.

    NOTE(review): on write failure the final except prints an error and
    the function implicitly returns None.
    """
    #interactive prompt for gathering and storing feed info data
    feed_dict = {}
    feedpath = 'cb/feed_meta/%s' % f # Path for new feed metadata
    meta_file = open(feedpath, 'w+')
    name = ''.join(e for e in f if e.isalnum())
    host = gethostname()
    ioc_file = 'intel/%s_ioc' % f
    feed_link = 'http://%s/%s' % (host, ioc_file)
    report_name = f + '_report'
    # Find URL in feeds.py
    try:
        feedfile = open('../forager/feeds.py', 'r').readlines()
    except:
        print((Fore.RED + '\n[-]' + Fore.RESET), end=' ')
        print('Could not open file')
        exit(0)
    count = 0
    stat = 0
    for line in feedfile:
        line = line.lower()
        fn = f.lower()
        if fn in line:
            # The URL is expected on the line AFTER the feed's name.
            loc = feedfile[count+1]
            # NOTE(review): regex('URL') yields a str pattern but loc is
            # encoded to bytes here — on Python 3 re.search(str, bytes)
            # raises TypeError; confirm this path actually works.
            searches = search(regex('URL'), loc.encode('utf-8'))
            if searches == None:
                pass
            else:
                result = searches.group(0)
                stat=1
        else:
            # NOTE(review): count only advances on NON-matching lines, so
            # `feedfile[count+1]` drifts once a match occurs — confirm.
            count+=1
    if stat == 0:
        print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
        print('Provider URL for {}:'.format(f))
        provider_url = input('> ')
    else:
        provider_url = result
    # Choose Display Name
    display_name = f
    print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
    print(("Is '%s' okay for Feed Display Name? ([RETURN], or specify new display name)" % display_name))
    choice = input('\r> ')
    if len(choice) == 0:
        pass
    else:
        display_name = choice
    # Choose Summary
    summary = f
    print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
    print(("Is '%s' okay for Feed Summary? ([RETURN], or specify summary)" % summary))
    choice = input('\r> ')
    if len(choice) == 0:
        pass
    else:
        summary = choice
    # Choose Tech Data
    tech_data = 'There are no requirements to share any data to receive this feed.'
    print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
    print(("Is '%s'\n okay for Tech Data? ([RETURN], or specify new display name)" % tech_data))
    choice = input('\r> ')
    if len(choice) == 0:
        pass
    else:
        tech_data = choice
    # Icon
    icon = ''
    print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
    iconic = input('Do you have an icon to upload? (Y/N)\n> ')
    if iconic.lower() == 'y':
        print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
        icon = input('Please provide the full path to the image here:\n> ')
    elif iconic.lower() == 'n':
        pass
    else:
        print((Fore.YELLOW + '\n[*]' + Fore.RESET), end=' ')
        print('[*] Sorry, did not recognize that. You can add an icon later..')
    # Parsing values into the feed dictionary
    # (locals()[i] picks up the variables assigned above by name)
    feed_meta = ['name', 'display_name', 'provider_url', 'summary', 'tech_data', 'icon', 'ioc_file', 'feed_link', 'report_name']
    for i in feed_meta:
        feed_dict[i] = str(locals()[i])
    try:
        json_data = dump(feed_dict, meta_file)
        print((Fore.GREEN + '\n[+] Successfully wrote metadata to %s' % feedpath))
        meta_file.close()
        return json_data
    except:
        print((Fore.RED + '\n[-] Could not write JSON stream to file'))
| {
"repo_name": "byt3smith/Forager",
"path": "forager/cb_tools.py",
"copies": "1",
"size": "7841",
"license": "mit",
"hash": -5407950016755085000,
"line_mean": 29.62890625,
"line_max": 128,
"alpha_frac": 0.544700931,
"autogenerated": false,
"ratio": 3.6217090069284064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46664099379284063,
"avg_score": null,
"num_lines": null
} |
__author__ = 'byt3smith'
#
# Generates a dir for carbonblack feeds
# Can also stand up a SimpleHTTPServer to host the feeds
#
#stdlib
from os import chdir, listdir, mkdir, getcwd, path
import SimpleHTTPServer
import SocketServer
from re import sub, search
from json import dump, loads
from socket import gethostname
#pypi
from colorama import Fore, Back, Style, init
#local
from feeds import FeedModules
from tools import regex
from cb import generate_feed
# Initialize colorama
init(autoreset=True)
def gen_feed_list():
    """Return feed names derived from files in intel/ ending in '_ioc'.

    'foo_ioc' -> 'foo'.  FIX: only the trailing '_ioc' suffix is
    stripped; the old re.sub removed every '_ioc' occurrence, mangling
    names that contain '_ioc' in the middle.
    """
    suffix = '_ioc'
    return [f[:-len(suffix)] for f in listdir('intel') if f.endswith(suffix)]
def run_feed_server():
    """Serve the generated JSON feeds over HTTP on port 8000 (blocking)."""
    #stands up the feed server, points to the CB/json_feeds dir
    # NOTE: chdir is process-wide — every relative path afterwards
    # resolves under ../bin/cb/json_feeds/.
    chdir('../bin/cb/json_feeds/')
    port = 8000
    handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    httpd = SocketServer.TCPServer(("", port), handler)
    try:
        print(Fore.GREEN + '\n[+]' + Fore.RESET),
        print('Feed Server listening at http://%s:8000' % gethostname())
        httpd.serve_forever()
    except:
        # Bare except doubles as the CTRL-C (KeyboardInterrupt) handler.
        print(Fore.RED + '\n[-]' + Fore.RESET),
        print("Server exited")
        return
def CB_gen(run_mode):
    """Prepare the bin/cb/ output tree, then dispatch on run_mode:
    'a' -> generate all feeds, 'i' -> interactive single feed.
    """
    #cbfeed generator
    #
    feed_list = gen_feed_list()
    # Check for feed_meta dir
    if path.isdir("bin/cb/feed_meta/"):
        feedinfo = listdir("bin/cb/feed_meta/")
    else:
        try:
            mkdir('bin/cb/feed_meta')
            feedinfo = listdir("bin/cb/feed_meta/")
        except:
            # NOTE(review): on failure feedinfo stays unbound and the
            # generate_* call below raises NameError — consider exiting here.
            print(Fore.RED + '[-] Error creating feed_meta directory, may need to adjust permissions')
    #Check for JSON_feed dir
    if path.isdir("bin/cb/json_feeds/"):
        pass
    else:
        try:
            mkdir('bin/cb/json_feeds')
        except:
            print(Fore.RED + '[-] Error creating json_feeds directory, may need to adjust permissions')
    ## Run function based on CLI args
    if run_mode == 'a':
        # run all feeds
        generate_all(feed_list, feedinfo)
    elif run_mode == 'i':
        # list all feeds for selection
        generate_one(feed_list, feedinfo)
    return
def create_json_feed(meta, json_path):
#Creating JSON feed using scripts in cbfeeds/
data = generate_feed.create_feed(meta)
#print data
#Saving the data to file in json_feeds/
try:
print(Fore.YELLOW + '[*]' + Fore.RESET),
print 'Saving report to: %s' % json_path
dump_data = open(json_path, 'w+').write(data)
except:
print(Fore.RED + '[-]' + Fore.RESET),
print 'Could not dump report to %s' % json_path
exit(0)
return
def generate_all(feed_list, feedinfo):
    """For every feed: ensure metadata exists (prompting if not), then
    build its JSON feed file.
    """
    # Check for feed metadata
    print(Fore.YELLOW + '[*] Checking for existing feed metadata necessary to generate feeds...\n')
    for f in feed_list:
        #check for feed_info files correlating to feed_list
        json_path = 'bin/cb/json_feeds/%s' % f
        if f in feedinfo:
            print('\n' + f + ': ' + '[' + Fore.GREEN + '  yes  ' + Fore.RESET + ']')
        else:
            print('\n' + f + ': ' + '[' + Fore.RED + '  no  ' + Fore.RESET + ']')
            meta = get_feed_info(f)
        #generate json_feed for feed module
        # NOTE(review): the file handle below is never closed explicitly.
        meta_file = 'bin/cb/feed_meta/%s' % f
        meta = open(meta_file, 'r').read()
        try:
            loads(meta) # checks that meta file is valid JSON string
        except:
            print(Fore.YELLOW + '\n[*]' + Fore.RESET),
            print('%s is not valid JSON.\nWould you like to create a valid metadata file?' % meta_file)
            choice = raw_input('> (y/n) ')
            # NOTE(review): these returns abort the WHOLE loop, skipping
            # every remaining feed — presumably intentional; confirm.
            if choice == 'y':
                meta = get_feed_info(f)
                return
            elif choice == 'n':
                print(Fore.YELLOW + '[*] Moving on..')
                return
            else:
                print(Fore.RED + '[!] Invalid choice. Better luck next time..')
                exit(0)
        create_json_feed(meta, json_path)
def generate_one(feed_list, feedinfo):
    # Placeholder: individual feed generation is not implemented yet.
    print(Fore.YELLOW + '[*] soon to be individual feed generation')
def get_feed_info(f):
    """Interactively gather feed metadata for feed *f*, write it as JSON
    to bin/cb/feed_meta/<f>, and return the dump() result.

    NOTE(review): on write failure the final except prints an error and
    the function implicitly returns None.
    """
    #interactive prompt for gathering and storing feed info data
    feed_dict = {}
    feedpath = 'bin/cb/feed_meta/%s' % f  # Path for new feed metadata
    meta_file = open(feedpath, 'w+')
    name = ''.join(e for e in f if e.isalnum())
    host = gethostname()
    ioc_file = 'intel/%s_ioc' % f
    feed_link = 'http://%s/%s' % (host, ioc_file)
    report_name = f + '_report'
    # Find URL in feeds.py
    try:
        feedfile = open('bin/feeds.py', 'r').readlines()
    except:
        print(Fore.RED + '\n[-]' + Fore.RESET),
        print 'Could not open file'
        exit(0)
    count = 0
    stat = 0
    for line in feedfile:
        line = line.lower()
        fn = f.lower()
        if fn in line:
            # The URL is expected on the line AFTER the feed's name.
            loc = feedfile[count+1]
            searches = search(regex('URL'), loc)
            if searches == None:
                pass
            else:
                result = searches.group(0)
                stat=1
        else:
            # NOTE(review): count only advances on NON-matching lines, so
            # `feedfile[count+1]` drifts once a match occurs — confirm.
            count+=1
    if stat == 0:
        print(Fore.YELLOW + '\n[*]' + Fore.RESET),
        print('Could not locate provider URL in feed module.. please provide it below:')
        provider_url = raw_input('> ')
    else:
        provider_url = result
    # Choose Display Name
    display_name = f
    print(Fore.YELLOW + '\n[*]' + Fore.RESET),
    print("Is '%s' okay for Feed Display Name? ([RETURN], or specify new display name)" % display_name)
    choice = raw_input('\r> ')
    if len(choice) == 0:
        pass
    else:
        display_name = choice
    # Choose Summary
    summary = f
    print(Fore.YELLOW + '\n[*]' + Fore.RESET),
    print("Is '%s' okay for Feed Summary? ([RETURN], or specify summary)" % summary)
    choice = raw_input('\r> ')
    if len(choice) == 0:
        pass
    else:
        summary = choice
    # Choose Tech Data
    tech_data = 'There are no requirements to share any data to receive this feed.'
    print(Fore.YELLOW + '\n[*]' + Fore.RESET),
    print("Is '%s'\n okay for Tech Data? ([RETURN], or specify new display name)" % tech_data)
    choice = raw_input('\r> ')
    if len(choice) == 0:
        pass
    else:
        tech_data = choice
    #Icon
    icon = ''
    print(Fore.YELLOW + '\n[*]' + Fore.RESET),
    iconic = raw_input('Do you have an icon to upload? (Y/N)\n> ')
    if iconic.lower() == 'y':
        print(Fore.YELLOW + '\n[*]' + Fore.RESET),
        icon = raw_input('Please provide the full path to the image here:\n> ')
    elif iconic.lower() == 'n':
        pass
    else:
        print(Fore.YELLOW + '\n[*]' + Fore.RESET),
        print('[*] Sorry, did not recognize that. You can add an icon later..')
    # Collect the variables assigned above, by name, into the dict.
    feed_meta = ['name', 'display_name', 'provider_url', 'summary', 'tech_data', 'icon', 'ioc_file', 'feed_link', 'report_name']
    for i in feed_meta:
        feed_dict[i] = locals()[i]
    try:
        json_data = dump(feed_dict, meta_file)
        print(Fore.GREEN + '\n[+] Successfully wrote metadata to %s' % feedpath)
        meta_file.close()
        return json_data
    except:
        print(Fore.RED + '\n[-] Could not write JSON stream to file')
| {
"repo_name": "sberrydavis/Forager",
"path": "bin/cb_tools.py",
"copies": "1",
"size": "7370",
"license": "mit",
"hash": -5996535434025156000,
"line_mean": 29.2049180328,
"line_max": 128,
"alpha_frac": 0.5598371777,
"autogenerated": false,
"ratio": 3.6074400391581007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9608923801584723,
"avg_score": 0.011670683054675363,
"num_lines": 244
} |
__author__ = 'byt3smith'
#
# Purpose: Import module for pulling and formatting
# all necessary intelligence feeds
#
from tools import *
from re import search
# Module-level compiled patterns shared by every feed method below.
ip_addr = regex('ip')
hostname = regex('domain')
class FeedModules():
    """One <feed>_update method per upstream threat-intel source.

    Every method downloads one or two blocklists and appends the
    extracted IOCs (IPs and/or hostnames) to that feed's *_ioc file.
    The repeated fetch/store step is factored into _grab() so each feed
    reads as a declaration of (output file, URL, pattern).
    """

    def _grab(self, out_file, url, pattern):
        # Fetch *url*, extract everything matching *pattern*, store in *out_file*.
        add2file(out_file, gather(url, pattern))

    ## Malc0de (IP blacklist + BOOT domain list)
    def malc0de_update(self):
        self._grab('malc0de_ioc', 'http://malc0de.com/bl/IP_Blacklist.txt', ip_addr)
        self._grab('malc0de_ioc', 'http://malc0de.com/bl/BOOT', hostname)

    ## Malware Domain List (custom loop: one hostname match per line, deduped)
    def MDL_update(self):
        url = 'http://mirror1.malwaredomains.com/files/domains.txt'
        lines = connect(url)
        sleep(2)
        iocs = []
        for line in lines:
            match = search(hostname, line)
            if match is not None and match.group(0) not in iocs:
                iocs.append(match.group(0))
        add2file('MDL_ioc', iocs)

    ## Feodo Tracker
    def feodo_update(self):
        self._grab('feodo_ioc', 'https://feodotracker.abuse.ch/blocklist/?download=ipblocklist', ip_addr)
        self._grab('feodo_ioc', 'https://feodotracker.abuse.ch/blocklist/?download=domainblocklist', hostname)

    ## reputation.alienvault.com
    def alienvault_update(self):
        self._grab('alienvault_ioc', 'https://reputation.alienvault.com/reputation.generic', ip_addr)

    ## DShield High Pri suspicious domain list
    def dshieldHigh_update(self):
        self._grab('dShieldHigh_ioc', 'http://www.dshield.org/feeds/suspiciousdomains_High.txt', hostname)

    ## Spyeye Tracker (same list scanned for IPs and hostnames)
    def spyeye_update(self):
        self._grab('spyeye_ioc', 'https://spyeyetracker.abuse.ch/blocklist.php?download=hostsdeny', ip_addr)
        self._grab('spyeye_ioc', 'https://spyeyetracker.abuse.ch/blocklist.php?download=hostsdeny', hostname)

    ## Zeus Tracker
    def zeus_update(self):
        self._grab('zeus_ioc', 'https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist', ip_addr)
        self._grab('zeus_ioc', 'https://zeustracker.abuse.ch/blocklist.php?download=domainblocklist', hostname)

    ## Palevo Tracker
    def palevo_tracker_update(self):
        self._grab('palevo_ioc', 'https://palevotracker.abuse.ch/blocklists.php?download=ipblocklist', ip_addr)
        self._grab('palevo_ioc', 'https://palevotracker.abuse.ch/blocklists.php?download=domainblocklist', hostname)

    ## OpenBL
    def openbl_update(self):
        self._grab('openbl_ioc', 'http://www.openbl.org/lists/base.txt', ip_addr)

    ## MalwareDomains
    def malwaredomains_update(self):
        self._grab('malwaredomains_ioc', 'http://mirror1.malwaredomains.com/files/domains.txt', hostname)

    ## NoThink Honeypots -- DNS Traffic
    def nothinkDns_update(self):
        self._grab('nothinkDNS_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_dns.txt', ip_addr)
        # NOTE(review): hostnames go to a differently-capitalised file
        # ('noThinkDNS_ioc') — preserved from the original; likely a typo.
        self._grab('noThinkDNS_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_dns.txt', hostname)

    ## NoThink Honeypots -- HTTP Traffic
    def nothinkHttp_update(self):
        self._grab('nothinkHTTP_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_http.txt', ip_addr)
        self._grab('nothinkHTTP_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_http.txt', hostname)

    ## NoThink Honeypots -- IRC Traffic
    def nothinkIrc_update(self):
        self._grab('nothinkIRC_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_irc.txt', ip_addr)

    ## MalwaredRU Tracker
    def MalwaredRU_update(self):
        self._grab('MalwaredRU_ioc', 'http://malwared.ru/db/fulllist.php', ip_addr)
        self._grab('MalwaredRU_ioc', 'http://malwared.ru/db/fulllist.php', hostname)

    ## ET-Open BOTCC
    def ETOpenBotCC_update(self):
        self._grab('ETOpenBotCC_ioc', 'http://rules.emergingthreats.net/blockrules/emerging-botcc.rules', ip_addr)

    ## ET-Open Emerging CIarmy
    def ETOpenCIArmy_update(self):
        self._grab('ETOpenCIArmy_ioc', 'http://rules.emergingthreats.net/blockrules/emerging-ciarmy.rules', ip_addr)

    ## ET-Open Compromised
    def ETOpenCompd_update(self):
        self._grab('ETOpenCompd_ioc', 'http://rules.emergingthreats.net/blockrules/emerging-compromised-BLOCK.rules', ip_addr)
| {
"repo_name": "sberrydavis/Forager",
"path": "bin/feeds.py",
"copies": "1",
"size": "5061",
"license": "mit",
"hash": -8740538772429036000,
"line_mean": 31.6516129032,
"line_max": 110,
"alpha_frac": 0.6342620036,
"autogenerated": false,
"ratio": 3.050632911392405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4184894914992405,
"avg_score": null,
"num_lines": null
} |
__author__ = 'byt3smith'
#
# Purpose: Import module for pulling and formatting
# all necessary intelligence feeds
#
from .tools import *
from re import search
# Module-level compiled patterns shared by every feed method below.
ip_addr = regex('ip')
hostname = regex('domain')
class FeedModules():
    """One <feed>_update method per upstream threat-intel source.

    Every method downloads one or two blocklists and appends the
    extracted IOCs (IPs and/or hostnames) to that feed's *_ioc file.
    The repeated fetch/store step is factored into _grab() so each feed
    reads as a declaration of (output file, URL, pattern).
    """

    def _grab(self, out_file, url, pattern):
        # Fetch *url*, extract everything matching *pattern*, store in *out_file*.
        add2file(out_file, gather(url, pattern))

    ## Malc0de (IP blacklist + BOOT domain list)
    def malc0de_update(self):
        self._grab('malc0de_ioc', 'http://malc0de.com/bl/IP_Blacklist.txt', ip_addr)
        self._grab('malc0de_ioc', 'http://malc0de.com/bl/BOOT', hostname)

    ## Malware Domain List (custom loop: one hostname match per line, deduped)
    def MDL_update(self):
        url = 'http://mirror1.malwaredomains.com/files/domains.txt'
        lines = connect(url)
        sleep(2)
        iocs = []
        for line in lines:
            match = search(hostname, line)
            if match is not None and match.group(0) not in iocs:
                iocs.append(match.group(0))
        add2file('MDL_ioc', iocs)

    ## Ransomware Tracker
    def ransomware_update(self):
        self._grab('ransomware_ioc', 'https://ransomwaretracker.abuse.ch/downloads/RW_DOMBL.txt', hostname)

    ## Feodo Tracker
    def feodo_update(self):
        self._grab('feodo_ioc', 'https://feodotracker.abuse.ch/blocklist/?download=ipblocklist', ip_addr)
        self._grab('feodo_ioc', 'https://feodotracker.abuse.ch/blocklist/?download=domainblocklist', hostname)

    ## reputation.alienvault.com
    def alienvault_update(self):
        self._grab('alienvault_ioc', 'https://reputation.alienvault.com/reputation.generic', ip_addr)

    ## DShield High Pri suspicious domain list
    def dshieldHigh_update(self):
        self._grab('dShieldHigh_ioc', 'http://www.dshield.org/feeds/suspiciousdomains_High.txt', hostname)

    ## Spyeye Tracker (same list scanned for IPs and hostnames)
    def spyeye_update(self):
        self._grab('spyeye_ioc', 'https://spyeyetracker.abuse.ch/blocklist.php?download=hostsdeny', ip_addr)
        self._grab('spyeye_ioc', 'https://spyeyetracker.abuse.ch/blocklist.php?download=hostsdeny', hostname)

    ## Zeus Tracker
    def zeus_update(self):
        self._grab('zeus_ioc', 'https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist', ip_addr)
        self._grab('zeus_ioc', 'https://zeustracker.abuse.ch/blocklist.php?download=domainblocklist', hostname)

    ## Palevo Tracker
    def palevo_tracker_update(self):
        self._grab('palevo_ioc', 'https://palevotracker.abuse.ch/blocklists.php?download=ipblocklist', ip_addr)
        self._grab('palevo_ioc', 'https://palevotracker.abuse.ch/blocklists.php?download=domainblocklist', hostname)

    ## OpenBL
    def openbl_update(self):
        self._grab('openbl_ioc', 'http://www.openbl.org/lists/base.txt', ip_addr)

    ## MalwareDomains
    def malwaredomains_update(self):
        self._grab('malwaredomains_ioc', 'http://mirror1.malwaredomains.com/files/domains.txt', hostname)

    ## NoThink Honeypots -- DNS Traffic
    def nothinkDns_update(self):
        self._grab('nothinkDNS_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_dns.txt', ip_addr)
        # NOTE(review): hostnames go to a differently-capitalised file
        # ('noThinkDNS_ioc') — preserved from the original; likely a typo.
        self._grab('noThinkDNS_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_dns.txt', hostname)

    ## NoThink Honeypots -- HTTP Traffic
    def nothinkHttp_update(self):
        self._grab('nothinkHTTP_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_http.txt', ip_addr)
        self._grab('nothinkHTTP_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_http.txt', hostname)

    ## NoThink Honeypots -- IRC Traffic
    def nothinkIrc_update(self):
        self._grab('nothinkIRC_ioc', 'http://www.nothink.org/blacklist/blacklist_malware_irc.txt', ip_addr)

    ## MalwaredRU Tracker
    def MalwaredRU_update(self):
        self._grab('MalwaredRU_ioc', 'http://malwared.ru/db/fulllist.php', ip_addr)
        self._grab('MalwaredRU_ioc', 'http://malwared.ru/db/fulllist.php', hostname)

    ## ET-Open BOTCC
    def ETOpenBotCC_update(self):
        self._grab('ETOpenBotCC_ioc', 'http://rules.emergingthreats.net/blockrules/emerging-botcc.rules', ip_addr)

    ## ET-Open Emerging CIarmy
    def ETOpenCIArmy_update(self):
        self._grab('ETOpenCIArmy_ioc', 'http://rules.emergingthreats.net/blockrules/emerging-ciarmy.rules', ip_addr)

    ## ET-Open Compromised
    def ETOpenCompd_update(self):
        self._grab('ETOpenCompd_ioc', 'http://rules.emergingthreats.net/blockrules/emerging-compromised-BLOCK.rules', ip_addr)
| {
"repo_name": "byt3smith/Forager",
"path": "forager/feeds.py",
"copies": "1",
"size": "5266",
"license": "mit",
"hash": -2307119241185062000,
"line_mean": 31.5061728395,
"line_max": 110,
"alpha_frac": 0.6355867831,
"autogenerated": false,
"ratio": 3.0474537037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9113512035564288,
"avg_score": 0.013905690247883167,
"num_lines": 162
} |
__author__ = 'byt3smith'
#
# Purpose: Tools for gathering IP addresses, domain names, URL's, etc..
#
from time import sleep
from os import chdir, path
from xlrd import open_workbook, sheet
import re
import sys
import urllib2
import pdfConverter
import unicodedata
from colorama import Fore, Back, Style, init
init(autoreset=True) ## Initialize colorama
def connect(url):
    """Fetch *url* and return the response body as a list of lines.

    Exits the process when the feed is unreachable.  The original bare
    ``except`` swallowed every exception (even KeyboardInterrupt) and
    exited with a *success* status; we now catch only URL errors, report
    the failing feed on stderr, and exit non-zero.
    """
    try:
        return urllib2.urlopen(url).readlines()
    except urllib2.URLError:
        sys.stderr.write('[!] Could not connect to: %s\n' % url)
        sys.exit(1)
def regex(ioc_type):
    """Return a compiled regex for the requested IOC type.

    Valid types: ip, domain, md5, sha1, sha256, email, URL, yara.
    The 'ip' and 'domain' patterns also match defanged ('[.]') forms.
    Prints an error and exits for an unknown type.
    """
    ioc_patts = {
        "ip":"((?:(?:[12]\d?\d?|[1-9]\d|[1-9])(?:\[\.\]|\.)){3}(?:[12]\d?\d?|[\d+]{1,2}))",
        "domain":"([A-Za-z0-9]+(?:[\-|\.][A-Za-z0-9]+)*(?:\[\.\]|\.)(?:com|net|edu|ru|org|de|uk|jp|br|pl|info|fr|it|cn|in|su|pw|biz|co|eu|nl|kr|me))",
        "md5":"\W([A-Fa-f0-9]{32})(?:\W|$)",
        "sha1":"\W([A-Fa-f0-9]{40})(?:\W|$)",
        "sha256":"\W([A-Fa-f0-9]{64})(?:\W|$)",
        "email":"[a-zA-Z0-9_]+(?:\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?!([a-zA-Z0-9]*\.[a-zA-Z0-9]*\.[a-zA-Z0-9]*\.))(?:[A-Za-z0-9](?:[a-zA-Z0-9-]*[A-Za-z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?",
        "URL":"((?:http|ftp|https)\:\/\/(?:[\w+?\.\w+])+[a-zA-Z0-9\~\!\@\#\$\%\^\&\*\(\)_\-\=\+\\\/\?\.\:\;]+)",
        "yara":"(rule\s[\w\W]{,30}\{[\w\W\s]*\})"
    }
    try:
        pattern = re.compile(ioc_patts[ioc_type])
    except KeyError:
        # Bug fix: an unknown type raises KeyError from the dict lookup,
        # not re.error, so the friendly message was previously dead code.
        # Diagnostics now go to stderr.
        sys.stderr.write('[!] Invalid type specified.\n')
        sys.exit(0)
    return pattern
def gather(url, rex):
    """Download *url* and return the unique IOCs matched by *rex*.

    *rex* is a compiled pattern (see ``regex``).  Matches are returned
    in first-seen order with duplicates removed.  Feed comment lines
    (starting with '/', '#') and blank lines are skipped.
    """
    ioc_list = []
    seen = set()  # O(1) membership test; the old list scan was O(n^2)
    f = connect(url)
    sleep(2)  # be polite to the feed host between requests
    for line in f:
        if line.startswith(('/', '#', '\n')):
            continue
        for ioc in rex.findall(line):
            if ioc not in seen:
                seen.add(ioc)
                ioc_list.append(ioc)
    return ioc_list
def add2file(filename, ioc_list):
    """Write *ioc_list* to *filename*, one IOC per line.

    IP-address lists rewrite the file ('w+'); any other IOC type is
    appended ('a+') so hostname results can follow the IP block.
    An empty list is a no-op.
    """
    if not ioc_list:
        return
    # Sniff the first entry to decide between rewrite and append.
    mode = 'w+' if regex('ip').match(ioc_list[0]) else 'a+'
    # 'with' guarantees the handle is closed even if a write fails.
    with open(filename, mode) as f:
        for ioc in ioc_list:
            f.write(ioc + '\n')
def extract(filename):
### Determine filetype to define how IOCs are processed
if filename[-3:] == 'pdf':
f = pdfConverter.convert_pdf_to_txt(filename)
elif filename[-3:] == 'xls' or filename[-4:] == 'xlsx':
f = open_workbook(filename)
datalist = []
vallist = []
asciilist = []
sheet = f.sheet_by_index(0)
cols = sheet.ncols
for i in range(cols):
collist = sheet.col(i)
datalist = collist + datalist
for cell in datalist:
val = cell.value
if len(val) < 2:
pass
else:
vallist.append(val)
for item in vallist:
ascii_val = unicodedata.normalize('NFKD', item).encode('ascii', 'ignore')
asciilist.append(ascii_val)
f = ', '.join(asciilist)
else:
f = open(filename, "r").read()
### Setup patterns for extraction
ip_patt = regex('ip')
host_patt = regex('domain')
md5_patt = regex('md5')
sha1_patt = regex('sha1')
sha256_patt = regex('sha256')
yara_patt = regex('yara')
### Declare temp list vars to store IOCs
ip_list = []
domain_list = []
md5_list = []
sha1_list = []
sha256_list = []
yara_list = []
### Iterate over lists of matched IOCs
ipaddr = ip_patt.findall(f)
for i in ipaddr:
# Remove brackets if defanged
i = re.sub('\[\.\]', '.', i)
if i in ip_list:
pass
else:
ip_list.append(i)
domains = host_patt.findall(f)
for i in domains:
# Remove brackets if defanged
i = re.sub('\[\.\]', '.', i)
if i in domain_list:
pass
else:
domain_list.append(i)
md5_hash = md5_patt.findall(f)
for i in md5_hash:
if i in md5_list:
pass
else:
md5_list.append(i)
sha1_hash = sha1_patt.findall(f)
for i in sha1_hash:
if i in sha1_list:
pass
else:
sha1_list.append(i)
sha256_hash = sha256_patt.findall(f)
for i in sha256_hash:
if i in sha1_list:
pass
else:
sha256_list.append(i)
yara_rules = yara_patt.findall(f)
for i in yara_rules:
if i in yara_list:
pass
else:
yara_list.append(i)
### Create _ioc file
chdir('intel/')
base = path.basename(filename)
base_noext = path.splitext(base)[0]
banner = '''
+-------------------+
| RESULTS |
+-------------------+'''
print banner
### Write IOCs to files
with open(base_noext + '_ioc', 'w+') as f:
for i in ip_list:
f.write(i + '\n')
f.write("\n")
print 'IPv4 Addresses [' + (Fore.GREEN + '%d' % (len(ip_list)) + Fore.RESET if len(ip_list) > 0 else Fore.RED + '%d' % (len(ip_list)) + Fore.RESET) + ']'
for d in domain_list:
f.write(d + '\n')
f.write("\n")
print 'Domain Names [' + (Fore.GREEN + '%d' % (len(domain_list)) + Fore.RESET if len(domain_list) > 0 else Fore.RED + '%d' % (len(domain_list)) + Fore.RESET) + ']'
for m in md5_list:
f.write(m + '\n')
f.write("\n")
print 'MD5 Hashes [' + (Fore.GREEN + '%d' % (len(md5_list)) + Fore.RESET if len(md5_list) > 0 else Fore.RED + '%d' % (len(md5_list)) + Fore.RESET) + ']'
for y in yara_list:
f.write(y + '\n')
f.write("\n")
print 'YARA Rules [' + (Fore.GREEN + '%d' % (len(yara_list)) + Fore.RESET if len(yara_list) > 0 else Fore.RED + '%d' % (len(yara_list)) + Fore.RESET) + ']'
for s1 in sha1_list:
f.write(s1 + '\n')
f.write("\n")
print 'SHA1 Hashes [' + (Fore.GREEN + '%d' % (len(sha1_list)) + Fore.RESET if len(sha1_list) > 0 else Fore.RED + '%d' % (len(sha1_list)) + Fore.RESET) + ']'
for s2 in sha256_list:
f.write(s2 + '\n')
f.write("\n")
print 'SHA256 Hashes [' + (Fore.GREEN + '%d' % (len(sha256_list)) + Fore.RESET if len(sha256_list) > 0 else Fore.RED + '%d' % (len(sha256_list)) + Fore.RESET) + ']'
print Fore.GREEN + "\n[+]" + Fore.RESET + " IOCs written to %s" % base_noext + '_ioc!'
def update_progress(progress):
    """Render an in-place textual progress bar for *progress* (0.0-1.0).

    Integers are coerced to float; non-numeric input, negative values,
    and completion each set an explanatory status suffix.
    """
    bar_len = 20  # width of the bar in characters
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = Fore.RED + "Halt!\r\n"
    if progress >= .999:
        progress = 1
        status = Fore.GREEN + "Complete!\r\n"
    filled = int(round(bar_len * progress))
    bar = "#" * filled + "-" * (bar_len - filled)
    sys.stdout.write("\r[*] Progress: [{0}] {1}% {2}".format(bar, progress * 100, status))
    sys.stdout.flush()
| {
"repo_name": "sberrydavis/Forager",
"path": "bin/tools.py",
"copies": "1",
"size": "7402",
"license": "mit",
"hash": -6466015792744461000,
"line_mean": 28.967611336,
"line_max": 206,
"alpha_frac": 0.4970278303,
"autogenerated": false,
"ratio": 3.03734099302421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8983248366031145,
"avg_score": 0.010224091458613022,
"num_lines": 247
} |
__author__ = 'byt3smith'
#
# When called, will search through Intel directory for each
# indicator in provided CSV or New-line formatted file.
#
from . import tools
import sys
import re
import os
from time import sleep
def search_file(ioc):
    """Hunt for every indicator listed in file *ioc* across the intel dir.

    *ioc* may be a CSV or a newline-delimited file; IPv4 indicators are
    extracted with the shared regex and each one is searched through
    every file under data/intel/.  Matches are written to matches.txt.
    """
    os.chdir('../')
    patt = tools.regex('ip')
    if ioc[-3:] == 'csv':
        print('[*] Pulling indicators as CSV values')
    else:
        print('[*] Assuming new-line formatted file')
    try:
        # 'with' closes the input handle; the old code leaked it.
        with open(ioc, 'r') as src:
            lines = src.readlines()
    except (IOError, OSError):
        # Bug fix: the bare 'except' hid the real error, and exit(0)
        # reported success on failure.
        sys.stderr.write("[!] Cannot locate file: %s. "
                         "Please provide the full path." % ioc)
        sys.exit(1)
    ioc_list = []
    for line in lines:
        ioc_list.extend(patt.findall(line))
    sleep(2)
    os.chdir('data/intel/')
    intel_files = os.listdir('.')
    total = float(len(ioc_list))
    print('[*] Found %d indicators in %s' % (total, ioc))
    if not ioc_list:
        # Guard: avoids the ZeroDivisionError the original hit on 1.0/0.
        print('[!] Nothing to search for. Exiting..')
        sys.exit(0)
    frac = 1.0 / total
    prog = 0.0
    matches = 0  # bug fix: was never initialised -> NameError on first match
    with open('../matches.txt', 'w+') as matched:
        for item in ioc_list:
            for name in intel_files:
                with open(name, 'r') as intel:
                    for line in intel:
                        if item in line:
                            matched.write(item + ' --> ' + name + '\n')
                            matches += 1
            prog += frac
            tools.update_progress(prog)
    print('[+] Search complete.')
    print('%d matches found and stored in matches.txt' % matches)
def single_search(ioc):
    """Search every intel file in the current directory for one indicator.

    Each matching line is written to ../matches.txt together with the
    file it was found in, and a progress bar tracks the scan.
    """
    dirs = os.listdir('.')
    if len(dirs) == 0:
        sys.stderr.write("[!] Cannot complete search, no files in intel directory. Exiting..\n")
        sys.exit(0)
    total = float(len(dirs))
    print('There are %d intelligence files' % total)
    frac = 1.0 / total
    prog = 0.0
    matches = 0
    # Bug fix: matches.txt was opened but never closed; 'with' closes
    # both it and each intel file handle.
    with open('../matches.txt', 'w+') as matched:
        for i_file in dirs:
            with open(i_file, 'r') as f2:
                for line in f2:
                    if ioc in line:
                        matched.write(line.rstrip('\n') + ' --> ' + i_file + '\n')
                        matches += 1
            prog += frac
            tools.update_progress(prog)
    print('[+] Search complete.')
    print('\n[*] %d matches found and stored in matches.txt' % matches)
| {
"repo_name": "byt3smith/Forager",
"path": "forager/hunt.py",
"copies": "1",
"size": "2380",
"license": "mit",
"hash": -1021396705119809000,
"line_mean": 23.7916666667,
"line_max": 96,
"alpha_frac": 0.5163865546,
"autogenerated": false,
"ratio": 3.689922480620155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4706309035220155,
"avg_score": null,
"num_lines": null
} |
__author__ = 'byt3smith'
#
# When called, will search through Intel directory for each
# indicator in provided CSV or New-line formatted file.
#
import tools
import sys
import re
import os
from time import sleep
def search_file(ioc):
os.chdir('../')
patt = tools.regex('ip')
if ioc[-3:] == 'csv':
print '[*] Pulling indicators as CSV values'
else:
print '[*] Assuming new-line formatted file'
try:
f = open(ioc, 'r').readlines()
except:
sys.stderr.write("[!] Cannot locate file: %s.\
Please provide the full path." % ioc)
exit(0)
ioc_list = []
for line in f:
for match in patt.findall(line):
ioc_list.append(match)
sleep(2)
os.chdir('intel')
dir = os.listdir('.')
total = float(len(ioc_list))
print '[*] Found %d indicators in %s' % (total, ioc)
frac = 1.0/total
prog = 0.0
matched = open('../matches.txt', 'w+')
for item in ioc_list:
for i in dir:
f2 = open(i, 'r')
contents = f2.readlines()
for line in contents:
if item in line:
info = item + ' --> ' + i + '\n'
matched.write(info)
matches += 1
else:
pass
f2.close()
prog += frac
tools.update_progress(prog)
print '[+] Search complete.'
print '%d matches found and stored in matches.txt' % matches
def single_search(ioc):
os.chdir('../intel')
dirs = os.listdir('.')
if len(dirs) == 0:
sys.stderr.write("[!] Cannot complete search, no files in intel directory. Exiting..\n")
sys.exit(0)
total = float(len(dirs))
print 'There are %d files in Intel dir' % total
frac = 1.0/total
prog = 0.0
matched = open('../matches.txt', 'w+')
matches = 0
for i_file in dirs:
f2 = open(i_file, 'r')
contents = f2.readlines()
for line in contents:
if ioc in line:
info = line.rstrip('\n') + ' --> ' + i_file + '\n'
matched.write(info)
matches += 1
else:
pass
f2.close()
prog += frac
tools.update_progress(prog)
print '[+] Search complete.'
print '%d matches found and stored in matches.txt' % matches
| {
"repo_name": "sberrydavis/Forager",
"path": "bin/hunt.py",
"copies": "1",
"size": "2380",
"license": "mit",
"hash": -6957330248932108000,
"line_mean": 23.0404040404,
"line_max": 96,
"alpha_frac": 0.5168067227,
"autogenerated": false,
"ratio": 3.707165109034268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9664911216206924,
"avg_score": 0.011812123105468935,
"num_lines": 99
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.