seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
14557064993 |
import sys
import cv2
import numpy
import copy
import scipy.misc
import itertools
from PIL import Image, ImageOps, ImageDraw
from scipy.ndimage import morphology, label
from copy import deepcopy
from operator import itemgetter
from statistics import median, mean
from math import sqrt
from random import randint
from scipy.misc import toimage
#sketch analysis
computer_vision_size_threshold = 50  # minimum connected-component size in pixels kept by the vision step
threshold_block_width = 5 # minimum block width in pixels
threshold_block_height = 5 # minimum block height in pixels
#Scaling method
scaling_method = int(sys.argv[2]) # 0=Max 1=Min 2=MidRange 3=Mean 4=Median
#rules when picking a block type
check_overlap = 1 # Check that no blocks overlap
height_error_allowed_overlap = 0.03 # prevents rounding errors and gives a bit of flexibility
check_local_stability = 0 # Check that the structure is locally stable after each added block
check_global_stability = 1 # Check that the structure is globally stable after each added block
check_global_stability_method = 2 # 1 is enforce global stability only for blocks currently added (doesn't take into account future blocks)
                                  # 2 is use both blocks added so far and sketch blocks for those not yet added
check_all_supporters = 1 # Check that all supporters for a block are present (could possibly not be required if global stability checking is enforced)
required_support_amount = 0.01
check_groups = 1 # Check that group height requirements are enforced
average_single_block_groups_heights = 1 # average the height of all single blocks in groups with other single blocks (helps with very poor drawings...)
height_error_allowed_groups = 0.05
use_similarity_grouping = 1
average_same_block_groups_heights = 1
error_percentage_shape = 0.05
check_era_relations = 0 # Check that ERA relations hold
check_composite_block_stability = 1 # check that composite blocks are stable (local)
shift_blocks_sideways = 1 # Makes structures more likely to pass but takes longer, Helps with making structures stable/no overlap
moves_to_try = [-0.8,0.7,-0.6,0.5,-0.4,0.3,-0.2,0.1]  # horizontal offsets (world units) tried in this order
# Alternative horizontal distance options:
#-0.4,0.35,-0.3,0.25,-0.2,0.15,-0.1,0.05
#-2.8,2.6,-2.4,2.2,-2.0,1.8,-1.6,1.4,-1.2,1.0,-0.8,0.6,-0.4,0.2
#-1.4,1.3,-1.2,1.1,-1.0,0.9,-0.8,0.7,-0.6,0.5,-0.4,0.3,-0.2,0.1
order_blocks_smart = 1 # re-order blocks into a more logical order based on support graph (rather than simply bottom to top)
#add extra blocks into sketch
add_extra_blocks_to_make_stable = 1 # add extra blocks to sketch to make structure globally stable
push_back_distance = 5 # distance to push extra blocks inwards (in pixels), helps deal with minor imperfections in the sketches / vision
#generate composite blocks
composite_blocks_allowed = 1 # composite blocks are allowed within the structure
rearrange_special_block_order = 1 # also include rearrangements of composite blocks as alternative options
max_composite_block_width = 3 # number of blocks wide that a composite block can be
composite_block_interweaving = 1
composite_block_penalty_picking = 1.5 # error difference when picking block type multiplied by this value if it is composite
composite_block_penalty_end = 0.0 # final error value score multiplied by this times ratio of composite blocks to non-composite blocks (NOT CURRENTLY INCLUDED)
limit_number_block_type_changes = 1 # limit the number of times a block type can change before rolling back a block
max_number_block_type_changes = 20 # increasing will give better final results but dramatically increases generation time, when using composite blocks
# SHOULD ONLY BE USED ON ACCURATE STRUCTURES WITH ORTHOGONAL/RECTILINEAR POLYGONS
corner_splitting_allowed = int(sys.argv[3]) # split polygons into rectangles based on their corners
seperate_vision_corners = 1 # 0 = associate each corner with the MBR that it is within (problem if within two or more MBRs)
                            # 1 = associate each corner with the MBR whose original shape it is closest to
max_distance_allowed = 3000 # maximum distance a corner can be from an MBR (removes dots) ERROR WHEN STRUCTURE HAS HOLE IN IT!!!
corner_detection_quality_threshold = 0.2 # quality of corner required for detection
corner_detection_min_distance = 20 # minimum distance between corners (euclidean)
threshold_corner_amount_x = 10 # make x values for corners same if within this pixel distance
threshold_corner_amount_y = 10 # make y values for corners same if within this pixel distance
add_togethor_similar_x = 1 # combines groups if they share a block
add_togethor_similar_y = 1
GARY_INITIAL = 1
MATTHEW_INITIAL = 1
OTHER_INITIAL = 1
ground = -3.5 # position of the level ground
# blocks number and size: id -> [width, height] in world units
blocks = {'1':[0.84,0.84], '2':[0.85,0.43], '3':[0.43,0.85], '4':[0.43,0.43],
          '5':[0.22,0.22], '6':[0.43,0.22], '7':[0.22,0.43], '8':[0.85,0.22],
          '9':[0.22,0.85], '10':[1.68,0.22], '11':[0.22,1.68],
          '12':[2.06,0.22], '13':[0.22,2.06]}
original_number_blocks = len(blocks)
# blocks number and name
# (blocks 3, 7, 9, 11 and 13) are their respective block names rotated 90 degrees clockwise
block_names = {'1':"SquareHole", '2':"RectFat", '3':"RectFat", '4':"SquareSmall",
               '5':"SquareTiny", '6':"RectTiny", '7':"RectTiny", '8':"RectSmall",
               '9':"RectSmall",'10':"RectMedium",'11':"RectMedium",
               '12':"RectBig",'13':"RectBig"}
# Generic list merging functions
def mergeOrNot(list1,list2):
    """Return True when the two lists share at least one common element."""
    return any(element in list2 for element in list1)
def uniqueList(list1,list2):
    """Return the order-preserving union of list1 and list2.

    Fix: the original did ``result = list1``, which aliased (and therefore
    mutated) the caller's list in place.  Copying first removes that side
    effect; membership is checked against the growing result so duplicates
    inside list2 are still collapsed exactly as before.
    """
    result = list(list1)
    for j in list2:
        if j not in result:
            result.append(j)
    return result
def cleverMergeLists(lists):
    """Repeatedly merge any lists that share at least one element.

    Performs one merge per pass (the first overlapping pair found), rebuilding
    the collection, then recurses until a full pass finds no overlap.
    NOTE(review): this relies on the exact pass/skip order below — once a merge
    is found, remaining lists are passed through untouched for this round.
    """
    anotherLoopRequired=False
    newList = []
    for myList in lists:
        addMyList=True
        if not anotherLoopRequired:
            for myList2 in lists:
                if not anotherLoopRequired:
                    if(myList==myList2):
                        continue
                    if(mergeOrNot(myList,myList2)):
                        # merge the overlapping pair; drop both originals
                        anotherLoopRequired=True
                        addMyList=False
                        newList.append(uniqueList(myList,myList2))
                    else:
                        newList.append(myList2)
        if(addMyList):
            newList.append(myList)
    if anotherLoopRequired:
        return cleverMergeLists(newList)
    else:
        return newList
# COMPUTER VISION ANALYSIS FUNCTIONS
# returns the MBRs for a given image
def boxes(orig):
    """Segment a sketch image and return (binary edge image, MBR list).

    Each MBR entry is [[4 corner (x, y) tuples], (centroid_x, centroid_y)].
    Components smaller than computer_vision_size_threshold pixels are erased.
    """
    img = ImageOps.grayscale(orig)
    im = numpy.array(img)
    # Inner morphological gradient.
    im = morphology.grey_dilation(im, (3, 3)) - im
    # Binarize.
    mean, std = im.mean(), im.std()
    t = mean + std
    # NOTE: the two in-place passes must stay in this order — setting >= t
    # to 1 first would then be zeroed by the < t pass.
    im[im < t] = 0
    im[im >= t] = 1
    # Connected components.
    lbl, numcc = label(im)
    # Size threshold.
    min_size = computer_vision_size_threshold # pixels
    box = []
    for i in range(1, numcc + 1):
        py, px = numpy.nonzero(lbl == i)
        if len(py) < min_size:
            # too small: erase from the output image and skip
            im[lbl == i] = 0
            continue
        xmin, xmax, ymin, ymax = px.min(), px.max(), py.min(), py.max()
        # Four corners and centroid.
        box.append([
            [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)],
            (numpy.mean(px), numpy.mean(py))])
    return im.astype(numpy.uint8) * 255, box
# returns both the MBRs for a given image and the segmented sections of the image
def boxes_sep(orig):
    """Segment a sketch image and return (binary edge image, MBR list,
    per-component pixel lists).

    Same pipeline as boxes(), additionally returning, for every kept
    component, its pixel coordinates as [x, y] pairs in row-major order.

    Fixes: removed a dead per-component ``deepcopy`` of the whole label
    array, and replaced the O(components * H * W) double scan with the
    nonzero coordinates already computed — identical output, one pass.
    """
    img = ImageOps.grayscale(orig)
    im = numpy.array(img)
    # Inner morphological gradient.
    im = morphology.grey_dilation(im, (3, 3)) - im
    # Binarize (pass order matters — see boxes()).
    mean, std = im.mean(), im.std()
    t = mean + std
    im[im < t] = 0
    im[im >= t] = 1
    # Connected components.
    lbl, numcc = label(im)
    # Size threshold.
    min_size = computer_vision_size_threshold # pixels
    box = []
    segmented_images = []
    for i in range(1, numcc + 1):
        py, px = numpy.nonzero(lbl == i)
        if len(py) < min_size:
            im[lbl == i] = 0
            continue
        # pixel coordinates as [x, y], row-major — same order as the
        # original nested image scan produced
        segmented_images.append([[int(x), int(y)] for y, x in zip(py, px)])
        xmin, xmax, ymin, ymax = px.min(), px.max(), py.min(), py.max()
        # Four corners and centroid.
        box.append([
            [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)],
            (numpy.mean(px), numpy.mean(py))])
    return im.astype(numpy.uint8) * 255, box, segmented_images
print("DOING COMPUTER VISION")
# find the corners for the given image (up to 100 Shi-Tomasi corners)
img = cv2.imread(sys.argv[1])
img_orig = copy.copy(img)
grayimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(grayimg,100,corner_detection_quality_threshold,corner_detection_min_distance)
corners = numpy.float32(corners)
# debug image: mark each detected corner
for item in corners:
    x,y = item[0]
    # fix: cv2.circle requires integer pixel coordinates (modern OpenCV
    # raises on floats; older versions rounded internally anyway)
    cv2.circle(img,(int(x),int(y)),5,255,-1)
Image.fromarray(img).save("sketch_corners.jpg")
# flatten to a plain list of [x, y] pairs
new_corners = []
for item in corners:
    x,y = item[0]
    new_corners.append([x,y])
corners = deepcopy(new_corners)
print("Number of corners found:")
print(len(corners))
# find the MBRs for the given image
orig = Image.open(sys.argv[1])
if (seperate_vision_corners == 1) and (corner_splitting_allowed == 1):
    im, box, seg_points = boxes_sep(orig)
else:
    im, box = boxes(orig)
Image.fromarray(im).save("sketch_lines.jpg")
# Draw perfect rectangles and the component centroid.
img = Image.fromarray(im)
visual = img.convert('RGB')
draw = ImageDraw.Draw(visual)
for b, centroid in box:
    draw.line(b + [b[0]], fill='yellow')
    cx, cy = centroid
    draw.ellipse((cx - 2, cy - 2, cx + 2, cy + 2), fill='red')
visual.save("sketch_MBRs.jpg")
# Draw perfect rectangles and the component centroid.
img = Image.fromarray(im)
visual = img.convert('RGB')
draw = ImageDraw.Draw(visual)
for b, centroid in box:
    # b[0]/b[2] are the top-left and bottom-right corners of the MBR
    draw.rectangle([b[0],b[2]], fill='white')
visual.save("sketch_MBRs_filled.jpg")
all_boxes = []
# bounding boxes for all rectangles found
# all boxes is a list of all blocks [x,y,w,h], center points (x,y) and width (w) and height (h)
for b, centroid in box:
    width = float(b[1][0] - b[0][0])
    height = float(b[2][1] - b[0][1])
    center_x = float(b[0][0]+(width/2.0))
    center_y = float(b[0][1]+(height/2.0))
    all_boxes.append([center_x,center_y,width,height])
    #all_boxes.append([centroid[0],centroid[1],width,height])
# remove all boxes with a width or height less than size threshold (wrong)
# already done in computer vision section
# NOTE(review): the loop variable below shadows the earlier `box` MBR list
new_all_boxes = []
for box in all_boxes:
    if (box[2] > threshold_block_width) and (box[3] > threshold_block_height):
        new_all_boxes.append(box)
all_boxes = deepcopy(new_all_boxes)
# remove all boxes that are fully inside other boxes (holes)
to_remove = []
for i in range(len(all_boxes)):
    for j in range(len(all_boxes)):
        if i!=j:
            # box i is strictly inside box j on all four sides
            if ((all_boxes[i][0]-(all_boxes[i][2]/2.0)) > (all_boxes[j][0]-(all_boxes[j][2]/2.0))):
                if ((all_boxes[i][0]+(all_boxes[i][2]/2.0)) < (all_boxes[j][0]+(all_boxes[j][2]/2.0))):
                    if ((all_boxes[i][1]-(all_boxes[i][3]/2.0)) > (all_boxes[j][1]-(all_boxes[j][3]/2.0))):
                        if ((all_boxes[i][1]+(all_boxes[i][3]/2.0)) < (all_boxes[j][1]+(all_boxes[j][3]/2.0))):
                            to_remove.append(i)
new_all_boxes = []
for i in range(len(all_boxes)):
    if i not in to_remove:
        new_all_boxes.append(all_boxes[i])
all_boxes = deepcopy(new_all_boxes)
# keep seg_points aligned with the filtered all_boxes (same indices removed)
if (seperate_vision_corners == 1) and (corner_splitting_allowed == 1):
    new_seg_points = []
    for i in range(len(seg_points)):
        if i not in to_remove:
            new_seg_points.append(seg_points[i])
    seg_points = deepcopy(new_seg_points)
# split non-rectangular orthogonal polygons into rectangles
if (corner_splitting_allowed==1):
    if seperate_vision_corners == 1:
        print("SPLITTING CORNERS")
        # associate each corner with the MBR whose original shape it is closest to
        corner_association = []
        for j in range(len(seg_points)):
            corner_association.append([])
        closest_corners = []
        to_remove = []
        for c in corners:
            # brute-force nearest segmented pixel over every component
            min_distance = 99999999
            closest_seg = -1
            for j in range(len(seg_points)):
                for k in seg_points[j]:
                    x1 = c[0]
                    x2 = k[0]
                    y1 = c[1]
                    y2 = k[1]
                    distance = (sqrt( (x2 - x1)**2 + (y2 - y1)**2 ))
                    if distance < min_distance:
                        min_distance = distance
                        closest_seg = j
            if min_distance > max_distance_allowed:
                # corner too far from any shape (stray dot) — discard it
                to_remove.append(c)
            else:
                closest_corners.append(closest_seg)
        for c in to_remove:
            corners.remove(c)
        # after removal, corners and closest_corners are index-aligned
        for j in range(len(corners)):
            corner_association[closest_corners[j]].append(corners[j])
    else:
        # associate each corner with the MBR that it is within (problem if within two or more MBRs)
        corner_association = []
        for i in range(len(all_boxes)):
            corner_association.append([])
        for i in range(len(corners)):
            mbr_within = -1
            extra_give = 5
            found_counter = 0
            for j in range(len(all_boxes)):
                # inside MBR j, padded by extra_give pixels on every side
                if corners[i][0] < all_boxes[j][0]+(all_boxes[j][2]/2.0)+extra_give:
                    if corners[i][0] > all_boxes[j][0]-(all_boxes[j][2]/2.0)-extra_give:
                        if corners[i][1] < all_boxes[j][1]+(all_boxes[j][3]/2.0)+extra_give:
                            if corners[i][1] > all_boxes[j][1]-(all_boxes[j][3]/2.0)-extra_give:
                                mbr_within = j
                                found_counter = found_counter+1
            if mbr_within == -1:
                print("error: no MBR found to associate with")
            if found_counter > 1:
                print("error: too many MBR possibilities")
            corner_association[mbr_within].append(corners[i])
    # split up every shape with more than 5 corners into multiple rectangles
    final_to_remove = []
    final_to_add = []
    for i in range(len(all_boxes)):
        if len(corner_association[i]) > 5:
            # a rectilinear polygon always has an even corner count
            if (len(corner_association[i]) % 2) == 1:
                print("error: odd number of associated corners")
            # make the y values similar: group corners whose y differ by less
            # than threshold_corner_amount_y, then snap each group to its mean
            split_lines_y = []
            split_y = []
            for c in corner_association[i]:
                found = 0
                for y in range(len(split_lines_y)):
                    max_y = max([sublist[1] for sublist in split_lines_y[y]])
                    min_y = min([sublist[1] for sublist in split_lines_y[y]])
                    if min_y < c[1] + threshold_corner_amount_y:
                        if max_y > c[1] - threshold_corner_amount_y:
                            split_lines_y[y].append(c)
                            found = found+1
                if found == 0:
                    split_lines_y.append([c])
                    split_y.append([])
                if found > 1:
                    print("error: multiple y values found")
            if add_togethor_similar_y == 1:
                split_lines_y = cleverMergeLists(split_lines_y)
            for y in range(len(split_lines_y)):
                split_y[y] = 0
                for j in split_lines_y[y]:
                    split_y[y] = split_y[y] + j[1]
                split_y[y] = split_y[y] / len(split_lines_y[y])
            new_cor = []
            for c in range(len(corner_association[i])):
                match = 0
                for j in range(len(split_lines_y)):
                    if corner_association[i][c] in split_lines_y[j]:
                        match = 1
                        new_cor.append([corner_association[i][c][0],split_y[j]])
                if match == 0:
                    print("error: no matching y value found")
            corner_association[i] = deepcopy(new_cor)
            # make the x values similar (same grouping/snapping, x axis)
            split_lines_x = []
            split_x = []
            for c in corner_association[i]:
                found = 0
                for x in range(len(split_lines_x)):
                    max_x = max([sublist[0] for sublist in split_lines_x[x]])
                    min_x = min([sublist[0] for sublist in split_lines_x[x]])
                    if min_x < c[0] + threshold_corner_amount_x:
                        if max_x > c[0] - threshold_corner_amount_x:
                            split_lines_x[x].append(c)
                            found = found+1
                if found == 0:
                    split_lines_x.append([c])
                    split_x.append([])
                if found > 1:
                    print("error: multiple x values found")
            if add_togethor_similar_x == 1:
                split_lines_x = cleverMergeLists(split_lines_x)
            for x in range(len(split_lines_x)):
                split_x[x] = 0
                for j in split_lines_x[x]:
                    split_x[x] = split_x[x] + j[0]
                split_x[x] = split_x[x] / len(split_lines_x[x])
            new_cor = []
            for c in range(len(corner_association[i])):
                match = 0
                for j in range(len(split_lines_x)):
                    if corner_association[i][c] in split_lines_x[j]:
                        match = 1
                        new_cor.append([split_x[j],corner_association[i][c][1]])
                if match == 0:
                    print("error: no matching x value found")
            corner_association[i] = deepcopy(new_cor)
            # find horizontal and vertical edges
            y_orderings = []
            x_orderings = []
            edges_all_x = []
            edges_all_y = []
            # bucket corners by shared x (vertical lines) and shared y (horizontal lines)
            for c in corner_association[i]:
                chosen_x = 0
                chosen_y = 0
                for j in range(len(x_orderings)):
                    if c[0] == x_orderings[j][0][0]:
                        x_orderings[j].append(c)
                        chosen_x = 1
                if chosen_x == 0:
                    x_orderings.append([c])
                for j in range(len(y_orderings)):
                    if c[1] == y_orderings[j][0][1]:
                        y_orderings[j].append(c)
                        chosen_y = 1
                if chosen_y == 0:
                    y_orderings.append([c])
            for xx in range(len(x_orderings)):
                x_orderings[xx] = sorted(x_orderings[xx], key = lambda x: int(x[1]))
            for yy in range(len(y_orderings)):
                y_orderings[yy] = sorted(y_orderings[yy], key = lambda x: int(x[0]))
            # pair up consecutive corners on each line into edges; assumes an
            # even corner count per line (a rectilinear polygon guarantees it)
            connect = True
            for o in range(len(x_orderings)):
                for c in range(len(x_orderings[o])):
                    if c < len(x_orderings[o]):
                        if connect == True:
                            edges_all_x.append([x_orderings[o][c],x_orderings[o][c+1]])
                            connect = False
                        else:
                            connect = True
            for o in range(len(y_orderings)):
                for c in range(len(y_orderings[o])):
                    if c < len(y_orderings[o]):
                        if connect == True:
                            edges_all_y.append([y_orderings[o][c],y_orderings[o][c+1]])
                            connect = False
                        else:
                            connect = True
            # associate edges with each corner
            corner_edges = []
            for c in corner_association[i]:
                edge_ver = []
                edge_hor = []
                for e in edges_all_x:
                    if c in e:
                        edge_ver = e
                for e in edges_all_y:
                    if c in e:
                        edge_hor = e
                corner_edges.append([edge_hor,edge_ver])
            # identify concave and convex corners
            convex_corners = [] # point outside
            concave_corners = [] # point inside (ones that we want/use)
            ori_edges_all_x = deepcopy(edges_all_x)
            for j in range(len(corner_edges)):
                # nudge a test point diagonally into the corner's interior side
                point_to_test = deepcopy(corner_association[i][j])
                shift_amount_corner_test = 0.01
                if corner_edges[j][0][0][0] < corner_edges[j][0][1][0]:
                    if corner_edges[j][0][0][0] == point_to_test[0]:
                        point_to_test[0] = point_to_test[0]-shift_amount_corner_test
                    else:
                        point_to_test[0] = point_to_test[0]+shift_amount_corner_test
                else:
                    if corner_edges[j][0][0][0] == point_to_test[0]:
                        point_to_test[0] = point_to_test[0]+shift_amount_corner_test
                    else:
                        point_to_test[0] = point_to_test[0]-shift_amount_corner_test
                if corner_edges[j][1][0][1] < corner_edges[j][1][1][1]:
                    if corner_edges[j][1][0][1] == point_to_test[1]:
                        point_to_test[1] = point_to_test[1]-shift_amount_corner_test
                    else:
                        point_to_test[1] = point_to_test[1]+shift_amount_corner_test
                else:
                    if corner_edges[j][1][0][1] == point_to_test[1]:
                        point_to_test[1] = point_to_test[1]+shift_amount_corner_test
                    else:
                        point_to_test[1] = point_to_test[1]-shift_amount_corner_test
                # ray-cast left from the test point: odd crossing count of the
                # vertical edges means the point is inside the polygon
                num_line_intersections = 0
                for linex in edges_all_x:
                    if linex[0][1] < linex[1][1]:
                        if point_to_test[1] < linex[1][1]:
                            if point_to_test[1] > linex[0][1]:
                                if point_to_test[0] > linex[0][0]:
                                    num_line_intersections = num_line_intersections + 1
                    else:
                        if point_to_test[1] > linex[1][1]:
                            if point_to_test[1] < linex[0][1]:
                                if point_to_test[0] > linex[0][0]:
                                    num_line_intersections = num_line_intersections + 1
                if (num_line_intersections%2) == 0:
                    convex_corners.append(j)
                else:
                    concave_corners.append(j)
            # identify extra horizontal edges between concave corners
            extra_edges_hor = []
            for j in concave_corners:
                current_point = corner_association[i][j]
                # vertical edges whose y-span covers this corner (with tolerance)
                intersecting_lines = []
                for linex in edges_all_x:
                    if linex[0][0]!=current_point[0]:
                        if linex[0][1] < linex[1][1]:
                            if current_point[1] < linex[1][1]+shift_amount_corner_test:
                                if current_point[1] > linex[0][1]-shift_amount_corner_test:
                                    intersecting_lines.append(linex)
                        else:
                            if current_point[1] > linex[1][1]-shift_amount_corner_test:
                                if current_point[1] < linex[0][1]+shift_amount_corner_test:
                                    intersecting_lines.append(linex)
                # nearest covering vertical edge on each side of the corner
                left_intersecting_closest = []
                left_distance = 99999999
                right_intersecting_closest = []
                right_distance = 99999999
                for line in intersecting_lines:
                    if current_point[0] > line[0][0]:
                        if current_point[0] - line[0][0] < left_distance:
                            left_distance = current_point[0] - line[0][0]
                            left_intersecting_closest = line
                    else:
                        if line[0][0] - current_point[0] < right_distance:
                            right_distance = line[0][0] - current_point[0]
                            right_intersecting_closest = line
                extra_edges_hor.append([current_point,[right_intersecting_closest[0][0],current_point[1]]])
                extra_edges_hor.append([[left_intersecting_closest[0][0],current_point[1]],current_point])
            # identify extra vertical edges between concave corners
            extra_edges_ver = []
            for j in concave_corners:
                current_point = corner_association[i][j]
                # horizontal edges whose x-span covers this corner (with tolerance)
                intersecting_lines = []
                for liney in edges_all_y:
                    if liney[0][1]!=current_point[1]:
                        if liney[0][0] < liney[1][0]:
                            if current_point[0] < liney[1][0]+shift_amount_corner_test:
                                if current_point[0] > liney[0][0]-shift_amount_corner_test:
                                    intersecting_lines.append(liney)
                        else:
                            if current_point[0] > liney[1][0]-shift_amount_corner_test:
                                if current_point[0] < liney[0][0]+shift_amount_corner_test:
                                    intersecting_lines.append(liney)
                # nearest covering horizontal edge above and below the corner
                up_intersecting_closest = []
                up_distance = 99999999
                down_intersecting_closest = []
                down_distance = 99999999
                for line in intersecting_lines:
                    if current_point[1] > line[0][1]:
                        if current_point[1] - line[0][1] < up_distance:
                            up_distance = current_point[1] - line[0][1]
                            up_intersecting_closest = line
                    else:
                        if line[0][1] - current_point[1] < down_distance:
                            down_distance = line[0][1] - current_point[1]
                            down_intersecting_closest = line
                extra_edges_ver.append([current_point,[current_point[0],up_intersecting_closest[0][1]]])
                extra_edges_ver.append([[current_point[0],down_intersecting_closest[0][1]],current_point])
            # remove duplicates
            extra_edges_ver2 = []
            extra_edges_hor2 = []
            for j in extra_edges_ver:
                if j not in extra_edges_ver2:
                    extra_edges_ver2.append(j)
            for j in extra_edges_hor:
                if j not in extra_edges_hor2:
                    extra_edges_hor2.append(j)
            extra_edges_ver = deepcopy(extra_edges_ver2)
            extra_edges_hor = deepcopy(extra_edges_hor2)
            #order edges (left to right, top to bottom)
            for edge_test in range(len(extra_edges_ver)):
                if extra_edges_ver[edge_test][0][1] > extra_edges_ver[edge_test][1][1]:
                    extra_edges_ver[edge_test] = [extra_edges_ver[edge_test][1],extra_edges_ver[edge_test][0]]
            for edge_test in range(len(extra_edges_hor)):
                if extra_edges_hor[edge_test][0][0] > extra_edges_hor[edge_test][1][0]:
                    extra_edges_hor[edge_test] = [extra_edges_hor[edge_test][1],extra_edges_hor[edge_test][0]]
            for edge_test in range(len(edges_all_x)):
                if edges_all_x[edge_test][0][1] > edges_all_x[edge_test][1][1]:
                    edges_all_x[edge_test] = [edges_all_x[edge_test][1],edges_all_x[edge_test][0]]
            for edge_test in range(len(edges_all_y)):
                if edges_all_y[edge_test][0][0] > edges_all_y[edge_test][1][0]:
                    edges_all_y[edge_test] = [edges_all_y[edge_test][1],edges_all_y[edge_test][0]]
            #split extra edges into two if it intersects another extra edge
            # fixed-point loop: split one crossing hor/ver pair per pass
            no_change = 0
            while(no_change==0):
                to_add_hor = []
                to_add_ver = []
                to_remove_hor = []
                to_remove_ver = []
                no_change = 1
                for j in extra_edges_hor:
                    for k in extra_edges_ver:
                        # edge j crosses edge k strictly in their interiors
                        if j[0][0] < k[0][0]:
                            if j[1][0] > k[0][0]:
                                if k[0][1] < j[0][1]:
                                    if k[1][1] > j[0][1]:
                                        to_add_hor.append([j[0],[k[0][0],j[0][1]]])
                                        to_add_hor.append([[k[0][0],j[0][1]],j[1]])
                                        to_remove_hor.append(j)
                                        to_add_ver.append([k[0],[k[0][0],j[0][1]]])
                                        to_add_ver.append([[k[0][0],j[0][1]],k[1]])
                                        to_remove_ver.append(k)
                                        no_change = 0
                if no_change == 0:
                    # apply only the first recorded split, then rescan
                    extra_edges_hor.append(to_add_hor[0])
                    extra_edges_hor.append(to_add_hor[1])
                    extra_edges_hor.remove(to_remove_hor[0])
                    extra_edges_ver.append(to_add_ver[0])
                    extra_edges_ver.append(to_add_ver[1])
                    extra_edges_ver.remove(to_remove_ver[0])
            #get all touching line points for creating small blocks
            all_touching_line_points = []
            for j in corner_association[i]:
                if j not in all_touching_line_points:
                    all_touching_line_points.append(j)
            for j in extra_edges_ver:
                for k in j:
                    if k not in all_touching_line_points:
                        all_touching_line_points.append(k)
            for j in extra_edges_hor:
                for k in j:
                    if k not in all_touching_line_points:
                        all_touching_line_points.append(k)
            # mark extra points that were not already corners
            extra_added_points = []
            for j in all_touching_line_points:
                if j not in corner_association[i]:
                    extra_added_points.append(j)
            #order edges (left to right, top to bottom)
            for edge_test in range(len(extra_edges_ver)):
                if extra_edges_ver[edge_test][0][1] > extra_edges_ver[edge_test][1][1]:
                    extra_edges_ver[edge_test] = [extra_edges_ver[edge_test][1],extra_edges_ver[edge_test][0]]
            for edge_test in range(len(extra_edges_hor)):
                if extra_edges_hor[edge_test][0][0] > extra_edges_hor[edge_test][1][0]:
                    extra_edges_hor[edge_test] = [extra_edges_hor[edge_test][1],extra_edges_hor[edge_test][0]]
            for edge_test in range(len(edges_all_x)):
                if edges_all_x[edge_test][0][1] > edges_all_x[edge_test][1][1]:
                    edges_all_x[edge_test] = [edges_all_x[edge_test][1],edges_all_x[edge_test][0]]
            for edge_test in range(len(edges_all_y)):
                if edges_all_y[edge_test][0][0] > edges_all_y[edge_test][1][0]:
                    edges_all_y[edge_test] = [edges_all_y[edge_test][1],edges_all_y[edge_test][0]]
            #split lines into sub-lines based on extra contact edges added
            # fixed-point loop: one split per pass (x edges first, then y edges)
            no_change_split = 0
            while(no_change_split == 0):
                no_change_split = 1
                to_remove = []
                to_add = []
                for j in edges_all_x:
                    for k in extra_added_points:
                        # point k lies strictly inside vertical edge j
                        if k[1] < j[1][1]:
                            if k[1] > j[0][1]:
                                if k[0] == j[0][0]:
                                    to_remove.append(j)
                                    to_add.append([j[0],k])
                                    to_add.append([k,j[1]])
                                    no_change_split = 0
                if no_change_split == 0:
                    edges_all_x.remove(to_remove[0])
                    edges_all_x.append(to_add[0])
                    edges_all_x.append(to_add[1])
                else:
                    for j in edges_all_y:
                        for k in extra_added_points:
                            # point k lies strictly inside horizontal edge j
                            if k[0] < j[1][0]:
                                if k[0] > j[0][0]:
                                    if k[1] == j[0][1]:
                                        to_remove.append(j)
                                        to_add.append([j[0],k])
                                        to_add.append([k,j[1]])
                                        no_change_split = 0
                    if no_change_split == 0:
                        edges_all_y.remove(to_remove[0])
                        edges_all_y.append(to_add[0])
                        edges_all_y.append(to_add[1])
            # remove duplicates and order
            for new_edge_x in extra_edges_ver:
                if new_edge_x not in edges_all_x:
                    edges_all_x.append(new_edge_x)
            for new_edge_y in extra_edges_hor:
                if new_edge_y not in edges_all_y:
                    edges_all_y.append(new_edge_y)
            small_edges_hor = deepcopy(edges_all_y)
            small_edges_ver = deepcopy(edges_all_x)
            for edge_test in range(len(small_edges_ver)):
                if small_edges_ver[edge_test][0][1] > small_edges_ver[edge_test][1][1]:
                    small_edges_ver[edge_test] = [small_edges_ver[edge_test][1],small_edges_ver[edge_test][0]]
            for edge_test in range(len(small_edges_hor)):
                if small_edges_hor[edge_test][0][0] > small_edges_hor[edge_test][1][0]:
                    small_edges_hor[edge_test] = [small_edges_hor[edge_test][1],small_edges_hor[edge_test][0]]
#get all the small boxes (maximum)
new_boxes = []
for j in small_edges_hor:
for k in small_edges_hor:
above = 0
below = 0
connect_left = []
connect_right = []
if j!=k:
if k[0][0] == j[0][0]:
if k[1][0] == j[1][0]:
if k[0][1] > j[0][1]:
below = 1
else:
above = 1
if below == 1:
for m in small_edges_ver:
if m[0] == j[0]:
if m[1] == k[0]:
connect_left = m
if m[0] == j[1]:
if m[1] == k[1]:
connect_right = m
if above == 1:
for m in small_edges_ver:
if m[0] == k[0]:
if m[1] == j[0]:
conect_left = m
if m[0] == k[1]:
if m[1] == j[1]:
connect_right = m
if (above == 1) and (connect_left != []) and (connect_right != []):
new_boxes.append([k,connect_right,j,connect_left])
if (below == 1) and (connect_left != []) and (connect_right != []):
new_boxes.append([j,connect_right,k,connect_left])
            #convert to correct format
            new_boxes2 = []
            for j in new_boxes:
                # [top, right, bottom, left] edges -> [center_x, center_y, w, h]
                width = j[0][1][0] - j[0][0][0]
                height = j[1][1][1] - j[1][0][1]
                center_x = j[0][0][0] + (width/2.0)
                center_y = j[1][0][1] + (height/2.0)
                new_boxes2.append([center_x,center_y,width,height])
            # remove boxes that are actually holes
            new_boxes3 = []
            for j in new_boxes2:
                # ray-cast the box centre against the ORIGINAL polygon edges;
                # odd crossing count means the centre is inside solid material
                num_line_intersections = 0
                point_to_test = [j[0],j[1]]
                for linex in ori_edges_all_x:
                    if linex[0][1] < linex[1][1]:
                        if point_to_test[1] < linex[1][1]:
                            if point_to_test[1] > linex[0][1]:
                                if point_to_test[0] > linex[0][0]:
                                    num_line_intersections = num_line_intersections + 1
                    else:
                        if point_to_test[1] > linex[1][1]:
                            if point_to_test[1] < linex[0][1]:
                                if point_to_test[0] > linex[0][0]:
                                    num_line_intersections = num_line_intersections + 1
                if (num_line_intersections%2) == 1:
                    new_boxes3.append(j)
            # merge two boxes together if they are horizontally next to each other and have the same height
            new_boxes4 = deepcopy(new_boxes3)
            no_change = 1
            to_merge = [0]
            while(len(to_merge)>0):
                to_merge = []
                no_change = 0
                for j in new_boxes4:
                    for k in new_boxes4:
                        if j != k:
                            # same row, same height, k starts where j ends
                            if abs(j[1] - k[1]) < 0.1:
                                if abs(j[3] - k[3]) < 0.1:
                                    if abs((j[0]+(j[2]/2.0)) - (k[0]-(k[2]/2.0))) < 0.1:
                                        to_merge.append([j,k])
                if len(to_merge)>0:
                    # merge only the first pair found, then rescan
                    j = to_merge[0][0]
                    k = to_merge[0][1]
                    width = j[2]+k[2]
                    height = j[3]
                    center_x = (j[0]-(j[2]/2.0)) + (width/2.0)
                    center_y = j[1]
                    new_boxes4.append([center_x,center_y,width,height])
                    new_boxes4.remove(j)
                    new_boxes4.remove(k)
            # add the new boxes to all_boxes and remove the original
            final_to_remove.append(all_boxes[i])
            for j in new_boxes4:
                final_to_add.append(j)
    for i in final_to_remove:
        all_boxes.remove(i)
    for i in final_to_add:
        all_boxes.append(i)
# convert to stability-checker coordinates: flip the y axis (image y grows
# downwards) and shift so the lowest block bottom sits at y = 100
stab_all_boxes = deepcopy(all_boxes)
for i in range(len(stab_all_boxes)):
    stab_all_boxes[i][1] = (-1*(stab_all_boxes[i][1]))+2000
lowest_y = 99999999
for i in stab_all_boxes:
    if i[1]-(i[3]/2.0) < lowest_y:
        lowest_y = i[1]-(i[3]/2.0)
down_amount = lowest_y - 100.0
for i in range(len(stab_all_boxes)):
    stab_all_boxes[i][1] = stab_all_boxes[i][1] - down_amount
# write one "x bottom_y width height" line per block for the stability checker
# (context manager guarantees the file is closed even if a write fails)
with open("sketch_blocks_data.txt", "w") as f:
    for i in stab_all_boxes:
        f.write('%s %s %s %s\n' % (i[0],i[1]-(i[3]/2.0),i[2],i[3]))
#find the largest and smallest block dimensions:
largest_value = 0
smallest_value = 99999999
largest_width = 0
smallest_width = 99999999
largest_height = 0
smallest_height = 99999999
widths = []
heights = []
areas = []
center_mass_ori_x = 0
center_mass_ori_y = 0
total_mass_ori = 0
for box in all_boxes:
    widths.append(box[2])
    heights.append(box[3])
    areas.append(box[2]*box[3])
    # accumulate area-weighted centre of mass of the sketch
    center_mass_ori_x = center_mass_ori_x + (box[0]*box[2]*box[3])
    center_mass_ori_y = center_mass_ori_y + (box[1]*box[2]*box[3])
    total_mass_ori = total_mass_ori + (box[2]*box[3])
    # largest/smallest over BOTH dimensions combined
    if box[2] > largest_value:
        largest_value = box[2]
    if box[2] < smallest_value:
        smallest_value = box[2]
    if box[3] > largest_value:
        largest_value = box[3]
    if box[3] < smallest_value:
        smallest_value = box[3]
    if box[2] > largest_width:
        largest_width = box[2]
    if box[2] < smallest_width:
        smallest_width = box[2]
    if box[3] > largest_height:
        largest_height = box[3]
    if box[3] < smallest_height:
        smallest_height = box[3]
center_mass_ori_x = center_mass_ori_x / total_mass_ori
center_mass_ori_y = center_mass_ori_y / total_mass_ori
sizes = widths+heights
mean_width = mean(widths)
mean_height = mean(heights)
mean_size = mean(sizes)
mean_area = mean(areas)
median_width = median(widths)
median_height = median(heights)
median_size = median(sizes)
median_area = median(areas)
# widths of the real Angry Birds block types, for scale matching
actual_block_sizes = []
for key,value in blocks.items():
    actual_block_sizes.append(value[0])
actual_block_mean = mean(actual_block_sizes)
actual_block_median = median(actual_block_sizes)
maximum_width_gap_touching = 0 # extra number of pixels to add to a blocks width when determining touching blocks
maximum_height_gap_touching = smallest_value*3 - 1 # extra number of pixels to add to a blocks height when determining touching blocks
# finds all supporters (direct and indirect) for a given block
def get_all_supporters(query):
    """Return every block index that directly or transitively supports *query*.

    Depth-first walk over the module-level graph_supporters adjacency map;
    9999 is treated as the ground sentinel and never expanded.  The query
    itself is excluded from the returned list.
    """
    visited = []
    stack = [query]
    while stack:
        current = stack.pop()
        visited.append(current)
        if current != 9999:
            for supporter in graph_supporters[current]:
                if supporter not in visited and supporter not in stack:
                    stack.append(supporter)
    # defensive order-preserving de-duplication, then drop the query itself
    unique = []
    for item in visited:
        if item not in unique:
            unique.append(item)
    unique.remove(query)
    return unique
# finds all supportees (direct and indirect) for a given block
def get_all_supportees(query):
    """Return every block index that directly or transitively rests on *query*.

    Depth-first walk over the module-level graph_supportees adjacency map;
    the query itself is excluded from the returned list.
    """
    visited = []
    stack = [query]
    while stack:
        current = stack.pop()
        visited.append(current)
        for supportee in graph_supportees[current]:
            if supportee not in visited and supportee not in stack:
                stack.append(supportee)
    # defensive order-preserving de-duplication, then drop the query itself
    unique = []
    for item in visited:
        if item not in unique:
            unique.append(item)
    unique.remove(query)
    return unique
# finds all support paths from start block, upwards to end block
def find_all_paths(graph, start, end, path=None):
    """Return every cycle-free path from start to end in *graph*.

    graph: adjacency mapping node -> list of neighbour nodes.
    Returns a list of paths (each a list of nodes); [] when no path exists.

    Fix: the mutable default argument ``path=[]`` is replaced with a None
    sentinel (the old default was never mutated here, but the idiom is a
    classic trap for future edits).
    """
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        return [path]
    if start not in graph:
        return []
    paths = []
    for node in graph[start]:
        if node not in path:  # skip nodes already on this path (no cycles)
            newpaths = find_all_paths(graph, node, end, path)
            for newpath in newpaths:
                paths.append(newpath)
    return paths
# not used but always good to have in case
def find_shortest_path(graph, start, end, path=None):
    """Return the shortest (fewest-node) path from start to end, or None.

    graph: adjacency mapping node -> list of neighbour nodes.

    Fix: mutable default argument ``path=[]`` replaced with a None sentinel
    (same hazard as in find_all_paths).
    """
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:  # avoid revisiting nodes on this path
            newpath = find_shortest_path(graph, node, end, path)
            if newpath:
                if not shortest or len(newpath) < len(shortest):
                    shortest = newpath
    return shortest
# check if structure has local stability
# NOTE(review): this def shadows the module-level config flag of the same
# name (check_local_stability = 0 above) — confirm nothing reads the flag
# after this point before renaming either one.
def check_local_stability(all_boxes):
    """Print a warning for every block lacking support on its left or right.

    A block is locally stable when at least one supporter's left edge lies
    left of the block's centre AND at least one supporter's right edge lies
    right of it (the ground sentinel 9999 supplies both).  Reads the
    module-level graph_supporters; prints diagnostics, returns None.
    """
    for i in range(len(all_boxes)):
        left_support = 0
        right_support = 0
        box = all_boxes[i]
        for j in graph_supporters[i]:
            if j == 9999:
                # supported by the ground: stable on both sides
                left_support = 1
                right_support = 1
            else:
                box2 = all_boxes[j]
                box2_left = box2[0]-(box2[2]/2.0)
                box2_right = box2[0]+(box2[2]/2.0)
                if box2_left < box[0]:
                    left_support = 1
                if box2_right > box[0]:
                    right_support = 1
        if left_support == 0:
            print("UNSTABLE LOCAL (L) !!!!!")
            print(i)
        if right_support == 0:
            print("UNSTABLE LOCAL (R) !!!!!")
            print(i)
def isCentreSupporter(RAx):
    """True if ERA relation `RAx` provides support under the upper block's centre."""
    centre_relations = (
        "ERA.MOST_START_I", "ERA.LESS_START_I", "ERA.MOST_FINISH_I",
        "ERA.LESS_FINISH_I", "ERA.CENTRE_DURING", "ERA.CENTRE_DURING_I",
        "ERA.LEFT_DURING_I", "ERA.RIGHT_DURING_I", "ERA.MOST_START",
        "ERA.MOST_FINISH", "ERA.MOST_OVERLAP_MOST", "ERA.LESS_OVERLAP_MOST",
        "ERA.MOST_OVERLAP_MOST_I", "ERA.MOST_OVERLAP_LESS_I", "ERA.EQUAL",
    )
    return RAx in centre_relations
def isLeftSupporter(RAx):
    """True if ERA relation `RAx` provides support under the upper block's left side."""
    left_relations = (
        "ERA.LESS_OVERLAP_LESS", "ERA.MOST_OVERLAP_LESS",
        "ERA.LESS_START", "ERA.LEFT_DURING",
    )
    return RAx in left_relations
def isRightSupporter(RAx):
    """True if ERA relation `RAx` provides support under the upper block's right side."""
    right_relations = (
        "ERA.LESS_OVERLAP_MOST_I", "ERA.LESS_OVERLAP_LESS_I",
        "ERA.LESS_FINISH", "ERA.RIGHT_DURING",
    )
    return RAx in right_relations
# Calculate the ERA relations based on touching blocks
def find_era_relation(touching_line):
    """Classify a single supporter/supportee horizontal overlap as an ERA relation.

    `touching_line` is [bottom_left, bottom_right, top_left, top_right]
    (edge x-coordinates of the lower block then the upper block).
    Returns a single-element list with the relation name, "unknown" if no
    branch matches.  The branch order matters: earlier matches win.
    NOTE(review): compare with find_era_relations() below — same rules but
    a pixel-scale threshold (5) instead of this world-scale 0.06.
    """
    ERA_relations = []
    ERA_threshold = 0.06
    s1 = touching_line[0]
    s2 = touching_line[2] # these are in the wrong order (should be 2,0,3,1) but the RAx rules are also wrong (filpped)
    e1 = touching_line[1]
    e2 = touching_line[3]
    RA = "unknown"
    if (s2 - e1 >=ERA_threshold):
        RA ="ERA.BEFORE"
    elif (s1 - e2 >=ERA_threshold):
        RA ="ERA.AFTER"
    elif (s2 - e1 <ERA_threshold and s2 - e1 >= 0 and s1 < e2):
        RA ="ERA.MEET"
    elif (s1 - e2 <ERA_threshold and s1 - e2 >= 0 and s2 < e1):
        RA ="ERA.MEET_I"
    elif (s1 == s2 and e2 - e1 >= 0 and (e2 - s2) / 2 < e1 - s1):
        RA ="ERA.MOST_START"
    elif (s1 == s2 and e1 - e2 > 0 and e2 - s2 > (e1 - s1) / 2):
        RA ="ERA.MOST_START_I"
    elif (s1 == s2 and e2 - e1 > 0 and (e2 - s2) / 2 >= e1 - s1):
        RA ="ERA.LESS_START"
    elif (s1 == s2 and e1 - e2 > 0 and e2 - s2 <= (e1 - s1) / 2):
        RA ="ERA.LESS_START_I"
    elif (s1 - s2 > 0 and e2 - e1 > 0 and e1 <= (s2 + e2) / 2):
        RA ="ERA.LEFT_DURING"
    elif (s2 - s1 > 0 and e1 - e2 > 0 and e2 <= (s1 + e1) / 2):
        RA ="ERA.LEFT_DURING_I"
    elif (s1 - s2 > 0 and e2 - e1 > 0 and s1 >= (s2 + e2) / 2):
        RA ="ERA.RIGHT_DURING"
    elif (s2 - s1 > 0 and e1 - e2 > 0 and s2 >= (s1 + e1) / 2):
        RA ="ERA.RIGHT_DURING_I"
    elif (s1 - s2 > 0 and e2 - e1 > 0 and s1 < (s2 + e2) / 2 and e1 > (s2 + e2) / 2):
        RA ="ERA.CENTRE_DURING"
    elif (s2 - s1 > 0 and e1 - e2 > 0 and s2 < (s1 + e1) / 2 and e2 > (s1 + e1) / 2):
        RA ="ERA.CENTRE_DURING_I"
    elif (s1 - s2 > 0 and e1 == e2 and (e2 - s2) / 2 < e1 - s1):
        RA ="ERA.MOST_FINISH"
    elif (s2 - s1 > 0 and e1 == e2 and e2 - s2 > (e1 - s1) / 2):
        RA ="ERA.MOST_FINISH_I"
    elif (s1 - s2 > 0 and e1 == e2 and (e2 - s2) / 2 >= e1 - s1):
        RA ="ERA.LESS_FINISH"
    elif (s2 - s1 > 0 and e1 == e2 and e2 - s2 <= (e1 - s1) / 2):
        RA ="ERA.LESS_FINISH_I"
    elif (abs(s1 - s2) <ERA_threshold and abs(e1 - e2) <ERA_threshold):
        RA ="ERA.EQUAL"
    elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 >= s2 - s1 and e1 - s2 >= e2 - e1):
        RA ="ERA.MOST_OVERLAP_MOST"
    elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 < s2 - s1 and e1 - s2 >= e2 - e1):
        RA ="ERA.LESS_OVERLAP_MOST"
    elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 >= s2 - s1 and e1 - s2 < e2 - e1):
        RA ="ERA.MOST_OVERLAP_LESS"
    elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 < s2 - s1 and e1 - s2 < e2 - e1):
        RA ="ERA.LESS_OVERLAP_LESS"
    # NOTE(review): the third term below is `e1 - s2` while the sibling _I
    # branches use `e2 - s1` — presumably a typo, though it appears redundant
    # whenever the remaining terms hold; confirm before changing.
    elif (s1 - s2 > 0 and e1 - e2 > 0 and e1 - s2 > 0 and e2 - s1 >= s1 - s2 and e2 - s1 >= e1 - e2):
        RA ="ERA.MOST_OVERLAP_MOST_I"
    elif (s1 - s2 > 0 and e1 - e2 > 0 and e2 - s1 > 0 and e2 - s1 < s1 - s2 and e2 - s1 >= e1 - e2):
        RA ="ERA.LESS_OVERLAP_MOST_I"
    elif (s1 - s2 > 0 and e1 - e2 > 0 and e2 - s1 > 0 and e2 - s1 >= s1 - s2 and e2 - s1 < e1 - e2):
        RA ="ERA.MOST_OVERLAP_LESS_I"
    elif (s1 - s2 > 0 and e1 - e2 > 0 and e2 - s1 > 0 and e2 - s1 < s1 - s2 and e2 - s1 < e1 - e2):
        RA ="ERA.LESS_OVERLAP_LESS_I"
    ERA_relations.append(RA)
    return ERA_relations
# Calculate the ERA relations based on touching blocks
def find_era_relations():
    """Classify every pair in the module-level `touching_lines` list.

    Near-duplicate of find_era_relation() but operating on all pairs at
    once and using a pixel-scale threshold (5) rather than 0.06.
    Returns a list of relation names parallel to `touching_lines`
    (and hence to `touching_blocks`); "unknown" when no branch matches.
    """
    ERA_relations = []
    ERA_threshold = 5
    for i in range(len(touching_lines)):
        s1 = touching_lines[i][0]
        s2 = touching_lines[i][2] # these are in the wrong order (should be 2,0,3,1) but the RAx rules are also wrong (filpped)
        e1 = touching_lines[i][1]
        e2 = touching_lines[i][3]
        RA = "unknown"
        if (s2 - e1 >=ERA_threshold):
            RA ="ERA.BEFORE"
        elif (s1 - e2 >=ERA_threshold):
            RA ="ERA.AFTER"
        elif (s2 - e1 <ERA_threshold and s2 - e1 >= 0 and s1 < e2):
            RA ="ERA.MEET"
        elif (s1 - e2 <ERA_threshold and s1 - e2 >= 0 and s2 < e1):
            RA ="ERA.MEET_I"
        elif (s1 == s2 and e2 - e1 >= 0 and (e2 - s2) / 2 < e1 - s1):
            RA ="ERA.MOST_START"
        elif (s1 == s2 and e1 - e2 > 0 and e2 - s2 > (e1 - s1) / 2):
            RA ="ERA.MOST_START_I"
        elif (s1 == s2 and e2 - e1 > 0 and (e2 - s2) / 2 >= e1 - s1):
            RA ="ERA.LESS_START"
        elif (s1 == s2 and e1 - e2 > 0 and e2 - s2 <= (e1 - s1) / 2):
            RA ="ERA.LESS_START_I"
        elif (s1 - s2 > 0 and e2 - e1 > 0 and e1 <= (s2 + e2) / 2):
            RA ="ERA.LEFT_DURING"
        elif (s2 - s1 > 0 and e1 - e2 > 0 and e2 <= (s1 + e1) / 2):
            RA ="ERA.LEFT_DURING_I"
        elif (s1 - s2 > 0 and e2 - e1 > 0 and s1 >= (s2 + e2) / 2):
            RA ="ERA.RIGHT_DURING"
        elif (s2 - s1 > 0 and e1 - e2 > 0 and s2 >= (s1 + e1) / 2):
            RA ="ERA.RIGHT_DURING_I"
        elif (s1 - s2 > 0 and e2 - e1 > 0 and s1 < (s2 + e2) / 2 and e1 > (s2 + e2) / 2):
            RA ="ERA.CENTRE_DURING"
        elif (s2 - s1 > 0 and e1 - e2 > 0 and s2 < (s1 + e1) / 2 and e2 > (s1 + e1) / 2):
            RA ="ERA.CENTRE_DURING_I"
        elif (s1 - s2 > 0 and e1 == e2 and (e2 - s2) / 2 < e1 - s1):
            RA ="ERA.MOST_FINISH"
        elif (s2 - s1 > 0 and e1 == e2 and e2 - s2 > (e1 - s1) / 2):
            RA ="ERA.MOST_FINISH_I"
        elif (s1 - s2 > 0 and e1 == e2 and (e2 - s2) / 2 >= e1 - s1):
            RA ="ERA.LESS_FINISH"
        elif (s2 - s1 > 0 and e1 == e2 and e2 - s2 <= (e1 - s1) / 2):
            RA ="ERA.LESS_FINISH_I"
        elif (abs(s1 - s2) <ERA_threshold and abs(e1 - e2) <ERA_threshold):
            RA ="ERA.EQUAL"
        elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 >= s2 - s1 and e1 - s2 >= e2 - e1):
            RA ="ERA.MOST_OVERLAP_MOST"
        elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 < s2 - s1 and e1 - s2 >= e2 - e1):
            RA ="ERA.LESS_OVERLAP_MOST"
        elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 >= s2 - s1 and e1 - s2 < e2 - e1):
            RA ="ERA.MOST_OVERLAP_LESS"
        elif (s2 - s1 > 0 and e2 - e1 > 0 and e1 - s2 > 0 and e1 - s2 < s2 - s1 and e1 - s2 < e2 - e1):
            RA ="ERA.LESS_OVERLAP_LESS"
        # NOTE(review): `e1 - s2` below mirrors the suspected typo in
        # find_era_relation() (siblings use `e2 - s1`) — confirm.
        elif (s1 - s2 > 0 and e1 - e2 > 0 and e1 - s2 > 0 and e2 - s1 >= s1 - s2 and e2 - s1 >= e1 - e2):
            RA ="ERA.MOST_OVERLAP_MOST_I"
        elif (s1 - s2 > 0 and e1 - e2 > 0 and e2 - s1 > 0 and e2 - s1 < s1 - s2 and e2 - s1 >= e1 - e2):
            RA ="ERA.LESS_OVERLAP_MOST_I"
        elif (s1 - s2 > 0 and e1 - e2 > 0 and e2 - s1 > 0 and e2 - s1 >= s1 - s2 and e2 - s1 < e1 - e2):
            RA ="ERA.MOST_OVERLAP_LESS_I"
        elif (s1 - s2 > 0 and e1 - e2 > 0 and e2 - s1 > 0 and e2 - s1 < s1 - s2 and e2 - s1 < e1 - e2):
            RA ="ERA.LESS_OVERLAP_LESS_I"
        ERA_relations.append(RA)
    return ERA_relations
# Use the ERA rules to determine is the sketch drawing is stable (qualitative LOCAL)
def calc_era_stability(all_boxes, ERA_relations,touching_blocks):
    """Fixed-point ERA stability check; prints "UNSTABLE ERA" per unstable block.

    A block is stable when grounded, or when its already-stable supporters
    provide either (left AND right) support or centre support according to
    the ERA relation between the pair.  Iterates until no flag changes.
    Uses the module-level `graph_supporters`.
    """
    ERA_stable = [0 for _ in range(len(all_boxes))]
    no_change_ERA = 0
    while no_change_ERA == 0:
        old_ERA_stable = deepcopy(ERA_stable)
        for i in range(len(all_boxes)):
            if graph_supporters[i] == [9999]:
                ERA_stable[i] = 1
                continue
            leftSupporter = False
            rightSupporter = False
            centreSupporter = False
            # single pass over stable supporters sets all three flags at once
            # (the original used three identical scans; flag-setting is
            # order-independent so the result is the same)
            for sup in graph_supporters[i]:
                if ERA_stable[sup] == 1:
                    for rel_idx in range(len(ERA_relations)):
                        if touching_blocks[rel_idx] == [sup, i]:
                            relation = ERA_relations[rel_idx]
                            if isLeftSupporter(relation):
                                leftSupporter = True
                            if isRightSupporter(relation):
                                rightSupporter = True
                            if isCentreSupporter(relation):
                                centreSupporter = True
            if (leftSupporter and rightSupporter) or centreSupporter:
                ERA_stable[i] = 1
        # flags only ever go 0 -> 1, so comparing sorted copies detects
        # "no new block became stable this round"
        if sorted(ERA_stable) == sorted(old_ERA_stable):
            no_change_ERA = 1
    for flag in ERA_stable:
        if flag == 0:
            print("UNSTABLE ERA")
# Analyse global stability of the sketch drawing (qualitative GARY)
def calc_gary_stability(all_boxes):
    """Global stability test: each block's supported-mass centre must fall
    inside the horizontal span of its supporters.

    Boxes are [centre_x, centre_y, width, height]; uses the module-level
    `graph_supporters` and `get_all_supportees`.  Returns 1 if every block
    passes, else prints a warning plus the flag list and returns 0.
    """
    global_stability = []
    for q in range(len(all_boxes)):
        supporters_list = deepcopy(graph_supporters[q])
        supportees_list = get_all_supportees(q)
        # PERF: the original recomputed get_all_supportees(q) inside the
        # inner loop below; the support graph is not modified here, so the
        # hoisted list is identical every time.
        for i in supportees_list:
            for j in range(len(all_boxes)):
                if (j in graph_supporters[i]) and (j not in supportees_list) and j!=q:
                    supporters_list.append(j)
        supportees_list.append(q)
        # centre of mass of q plus everything it supports (mass = area)
        center_mass_x = 0
        total_mass = 0
        for k in supportees_list:
            center_mass_x = center_mass_x + (all_boxes[k][0]*all_boxes[k][2]*all_boxes[k][3])
            total_mass = total_mass + (all_boxes[k][2]*all_boxes[k][3])
        center_mass_x = center_mass_x / total_mass
        # horizontal extent of the supporting blocks (ground = unbounded)
        leftmost_support = 99999999
        rightmost_support = -99999999
        for m in supporters_list:
            if m == 9999:
                leftmost_support = -99999999
                rightmost_support = 99999999
            else:
                if all_boxes[m][0]-(all_boxes[m][2]/2.0) < leftmost_support:
                    leftmost_support = all_boxes[m][0]-(all_boxes[m][2]/2.0)
                if all_boxes[m][0]+(all_boxes[m][2]/2.0) > rightmost_support:
                    rightmost_support = all_boxes[m][0]+(all_boxes[m][2]/2.0)
        if (center_mass_x > leftmost_support) and (center_mass_x < rightmost_support):
            global_stability.append(1)
        else:
            global_stability.append(0)
    for s in global_stability:
        if s == 0:
            print("UNSTABLE GLOBAL GARY !!!!!")
            print(global_stability)
            return 0
    return 1
# checks if point (vp,current_y) interescts a box in all boxes, and that this block is below the current one (b)
# returns the box that does intersect the point
def get_point_in_block(vp,current_y,all_boxes,b):
    """Return the index of the first box containing (vp, current_y), provided
    that box is either all_boxes[b] itself or sits below it; None otherwise.

    Boxes are [centre_x, centre_y, width, height]; larger y means lower on
    the sketch (image coordinates).
    """
    ref_box = all_boxes[b]
    ref_bottom = ref_box[1]+(ref_box[3]/2.0)
    ref_top = ref_box[1]-(ref_box[3]/2.0)
    for idx in range(len(all_boxes)):
        cand = all_boxes[idx]
        inside_x = cand[0]-(cand[2]/2.0) <= vp <= cand[0]+(cand[2]/2.0)
        inside_y = cand[1]-(cand[3]/2.0) <= current_y <= cand[1]+(cand[3]/2.0)
        if inside_x and inside_y:
            if cand == ref_box:
                return idx
            # candidate's centre must be below ref's bottom edge, and its top
            # edge below ref's top edge, to count as "below"
            if cand[1] > ref_bottom and (cand[1]-(cand[3]/2.0)) > ref_top:
                return idx
    return None
# checks if point (vp,current_y) is within a box in all boxes, and that this block is below the current one (b)
def check_point_in_block(vp,current_y,all_boxes,b):
    """True if (vp, current_y) lies inside all_boxes[b] itself or inside any
    box positioned below it; False otherwise.

    Boxes are [centre_x, centre_y, width, height]; larger y means lower on
    the sketch (image coordinates).
    """
    ref_box = all_boxes[b]
    ref_bottom = ref_box[1]+(ref_box[3]/2.0)
    ref_top = ref_box[1]-(ref_box[3]/2.0)
    for cand in all_boxes:
        inside_x = cand[0]-(cand[2]/2.0) <= vp <= cand[0]+(cand[2]/2.0)
        inside_y = cand[1]-(cand[3]/2.0) <= current_y <= cand[1]+(cand[3]/2.0)
        if inside_x and inside_y:
            if cand == ref_box:
                return True
            # candidate counts only when it sits below the reference block
            if cand[1] > ref_bottom and (cand[1]-(cand[3]/2.0)) > ref_top:
                return True
    return False
#MORE COMPLex METHOD
def calc_matthew_stability(all_boxes,valid_supportees):
    """Global stability check restricted to blocks in `valid_supportees`.

    For each valid block, compare its supported-mass centre against the
    "safe area" spanned by its direct supporters; when the centre falls
    outside, attempt to counterbalance using supportee blocks on the far
    side of the pivot before declaring instability.  Returns 1 if every
    checked block is stable, else 0.  Uses module globals
    `graph_supporters`, `graph_supportees`, `get_all_supporters`,
    `get_all_supportees`.
    NOTE(review): several suspicious spots are flagged inline — confirm
    each before changing behaviour.
    """
    global_stability = []
    all_boxes_ori = deepcopy(all_boxes)
    # just make safe area formed by direct supporters of the block:
    safe_areas2 = []
    for b in range(len(all_boxes)):
        if b in valid_supportees:
            leftmost = 99999999
            rightmost = -99999999
            for gg in graph_supporters[b]:
                if gg == 9999:
                    leftmost = -99999999
                    rightmost = 99999999
                else:
                    if (all_boxes[gg][0]-(all_boxes[gg][2]/2.0)) < leftmost:
                        leftmost = (all_boxes[gg][0]-(all_boxes[gg][2]/2.0))
                    if (all_boxes[gg][0]+(all_boxes[gg][2]/2.0)) > rightmost:
                        rightmost = (all_boxes[gg][0]+(all_boxes[gg][2]/2.0))
            # NOTE(review): appended only for valid blocks, yet indexed by
            # raw block number (safe_areas2[b]) below — the indices drift
            # apart when an earlier block is not in valid_supportees; verify.
            safe_areas2.append([leftmost,rightmost])
    all_boxes = deepcopy(all_boxes_ori)
    for b in range(len(all_boxes)):
        if b in valid_supportees:
            new_stable_check = 1
            z = []
            z2 = []
            eligible_supporters = get_all_supporters(b)
            bb = []  # NOTE(review): unused local
            for cc in get_all_supportees(b):
                if cc in valid_supportees:
                    eligible_supporters.append(cc)
            # z = b plus supportees whose entire support chain is eligible
            for x in get_all_supportees(b):
                if x in valid_supportees:
                    invalid = 0
                    for y in get_all_supporters(x):
                        if y != b:
                            if y not in eligible_supporters:
                                invalid = 1
                    if invalid == 0:
                        z.append(x)
            z.append(b)
            # centre of mass of the z set (mass = width * height)
            center_mass_x = 0
            total_mass = 0
            for k in z:
                center_mass_x = center_mass_x + (all_boxes[k][0]*all_boxes[k][2]*all_boxes[k][3])
                total_mass = total_mass + (all_boxes[k][2]*all_boxes[k][3])
            center_mass_x = center_mass_x / total_mass
            if (center_mass_x < safe_areas2[b][0]) or (center_mass_x > safe_areas2[b][1]):
                # centre of mass is outside the safe area: pick the pivot
                # edge and the side that could rebalance the block
                if (center_mass_x < safe_areas2[b][0]):
                    pivot_point = safe_areas2[b][0]
                    good_side = "right"
                else:
                    pivot_point = safe_areas2[b][1]
                    good_side = "left"
                for k in get_all_supportees(b):
                    if k in valid_supportees:
                        if k not in z:
                            d = []
                            for n in get_all_supporters(k):
                                block_on_good_side = 0
                                if n in graph_supportees[b]:
                                    if n in valid_supportees:
                                        if good_side == "right":
                                            if (all_boxes[n][0]+(all_boxes[n][2]/2.0)) > pivot_point:
                                                block_on_good_side = 1
                                        if good_side == "left":
                                            if (all_boxes[n][0]-(all_boxes[n][2]/2.0)) < pivot_point:
                                                block_on_good_side = 1
                                        if block_on_good_side == 1:
                                            # NOTE(review): m iterates the box
                                            # lists while get_all_supporters /
                                            # get_all_supportees return index
                                            # ints, so these membership tests
                                            # presumably never match — was
                                            # `for m in range(len(all_boxes))`
                                            # intended?  Verify.
                                            for m in all_boxes:
                                                if m in get_all_supporters(k):
                                                    if m in get_all_supportees(n):
                                                        if m in valid_supportees:
                                                            if good_side == "right":
                                                                if all_boxes[m][0] > pivot_point:
                                                                    d.append(m)
                                                            if good_side == "left":
                                                                if all_boxes[m][0] < pivot_point:
                                                                    d.append(m)
                                            if good_side == "right":
                                                if all_boxes[k][0] > pivot_point:
                                                    d.append(k)
                                                if all_boxes[n][0] > pivot_point:
                                                    d.append(n)
                                            if good_side == "left":
                                                if all_boxes[k][0] < pivot_point:
                                                    d.append(k)
                                                # NOTE(review): `>` here while the
                                                # "left" case above uses `<` —
                                                # looks like a copy-paste slip;
                                                # confirm intended direction.
                                                if all_boxes[n][0] > pivot_point:
                                                    d.append(n)
                            if d != []:
                                max_distance = -99999999
                                best_com = d[0]
                                for ii in range(len(d)):
                                    # NOTE(review): indexes all_boxes[ii] with a
                                    # position into d — presumably
                                    # all_boxes[d[ii]] was intended; verify.
                                    if abs(all_boxes[ii][0]-pivot_point) > max_distance:
                                        max_distance = abs(all_boxes[ii][0]-pivot_point)
                                        best_com = d[ii]
                                new_block = [all_boxes[best_com][0],0,all_boxes[k][2],all_boxes[k][3]]
                                z2.append(new_block)
                for jj in z:
                    z2.append(all_boxes[jj])
                # recompute centre of mass with the counterweight blocks added
                center_mass_x = 0
                total_mass = 0
                for bob in z2:
                    center_mass_x = center_mass_x + (bob[0]*bob[2]*bob[3])
                    total_mass = total_mass + (bob[2]*bob[3])
                center_mass_x = center_mass_x / total_mass
                if good_side == "right":
                    if center_mass_x < pivot_point:
                        new_stable_check = 0
                if good_side == "left":
                    if center_mass_x > pivot_point:
                        new_stable_check = 0
            global_stability.append(new_stable_check)
    for s in global_stability:
        if s == 0:
            return 0
    return 1
def calc_matthew_stability_ori(all_boxes):
    """Original (all-blocks) variant of the Matthew global-stability test.

    Returns [global_stability, pivot_points]: a 0/1 flag per block plus the
    side ("left"/"right", or None when the block's z-set centre of mass
    stayed inside its safe area) that extra support would need to come from.
    NOTE(review): reads the module global `all_boxes_ori` (assigned later
    in the script's main loop before this is called) — confirm call order.
    Uses `graph_supporters`, `get_all_supporters`, `get_all_supportees`.
    """
    global_stability = []
    pivot_points = []
    # horizontal span of each block's direct supporters (ground = unbounded)
    safe_areas2 = []
    for b in range(len(all_boxes)):
        leftmost = 99999999
        rightmost = -99999999
        for gg in graph_supporters[b]:
            if gg == 9999:
                leftmost = -99999999
                rightmost = 99999999
            else:
                if (all_boxes[gg][0]-(all_boxes[gg][2]/2.0)) < leftmost:
                    leftmost = (all_boxes[gg][0]-(all_boxes[gg][2]/2.0))
                if (all_boxes[gg][0]+(all_boxes[gg][2]/2.0)) > rightmost:
                    rightmost = (all_boxes[gg][0]+(all_boxes[gg][2]/2.0))
        safe_areas2.append([leftmost,rightmost])
    all_boxes = deepcopy(all_boxes_ori)
    for b in range(len(all_boxes)):
        new_stable_check = 1
        z = []
        z2 = []
        good_side = None
        eligible_supporters = get_all_supporters(b)+get_all_supportees(b)
        # z = b plus supportees whose whole support chain stays eligible
        for x in get_all_supportees(b):
            invalid = 0
            for y in get_all_supporters(x):
                if y != b:
                    if y not in eligible_supporters:
                        invalid = 1
            if invalid == 0:
                z.append(x)
        z.append(b)
        # centre of mass of the z set (mass = width * height)
        center_mass_x = 0
        total_mass = 0
        for k in z:
            center_mass_x = center_mass_x + (all_boxes[k][0]*all_boxes[k][2]*all_boxes[k][3])
            total_mass = total_mass + (all_boxes[k][2]*all_boxes[k][3])
        center_mass_x = center_mass_x / total_mass
        if (center_mass_x < safe_areas2[b][0]) or (center_mass_x > safe_areas2[b][1]):
            if (center_mass_x < safe_areas2[b][0]):
                pivot_point = safe_areas2[b][0]
                good_side = "right"
            else:
                pivot_point = safe_areas2[b][1]
                good_side = "left"
            # add supportees whose far edge crosses the pivot to the other side
            for k in get_all_supportees(b):
                if k not in z:
                    if good_side == "right":
                        if all_boxes[k][0]+(all_boxes[k][2]/2.0) > pivot_point:
                            z2.append(k)
                    if good_side == "left":
                        if all_boxes[k][0]-(all_boxes[k][2]/2.0) < pivot_point:
                            z2.append(k)
            for jj in z:
                z2.append(jj)
            supporters_list = deepcopy(graph_supporters[b])
            supportees_list = z2
            # widen the supporter set with external supporters of the z2 blocks
            for i in supportees_list:
                for j in range(len(all_boxes)):
                    if (j in graph_supporters[i]) and (j not in get_all_supportees(b)) and j!=b:
                        supporters_list.append(j)
            center_mass_x = 0
            total_mass = 0
            for k in supportees_list:
                center_mass_x = center_mass_x + (all_boxes[k][0]*all_boxes[k][2]*all_boxes[k][3])
                total_mass = total_mass + (all_boxes[k][2]*all_boxes[k][3])
            center_mass_x = center_mass_x / total_mass
            leftmost_support = 99999999
            rightmost_support = -99999999
            for m in supporters_list:
                if m == 9999:
                    leftmost_support = -99999999
                    rightmost_support = 99999999
                else:
                    if all_boxes[m][0]-(all_boxes[m][2]/2.0) < leftmost_support:
                        leftmost_support = all_boxes[m][0]-(all_boxes[m][2]/2.0)
                    if all_boxes[m][0]+(all_boxes[m][2]/2.0) > rightmost_support:
                        rightmost_support = all_boxes[m][0]+(all_boxes[m][2]/2.0)
            if (center_mass_x > leftmost_support) and (center_mass_x < rightmost_support):
                new_stable_check = 1
            else:
                new_stable_check = 0
        pivot_points.append(good_side)
        global_stability.append(new_stable_check)
    return [global_stability,pivot_points]
def add_extra_supports(all_boxes,pivot_points,chosen_block):
    """Append a thin (width 1) support column under `chosen_block`.

    The column is placed near the edge of the block on the side indicated by
    pivot_points[chosen_block] (pulled inward by the module global
    `push_back_distance`), and extends from the block's bottom edge down to
    the first box it would hit (via check_point_in_block) or the lowest
    point of the structure.  Mutates and returns `all_boxes`.
    Boxes are [centre_x, centre_y, width, height]; larger y = lower.
    """
    # Cleanup vs original: removed dead locals (`new_all_boxes` deepcopy that
    # was never used, `point_to_check`, and a dead `added_block = []` init).
    right_side = 0
    if pivot_points[chosen_block] == "left":
        right_side = 1
    x_position = 0
    if right_side == 1:
        x_position = all_boxes[chosen_block][0]+(all_boxes[chosen_block][2]/2.0) - push_back_distance
    else:
        x_position = all_boxes[chosen_block][0]-(all_boxes[chosen_block][2]/2.0) + push_back_distance
    y_position_top = all_boxes[chosen_block][1]+(all_boxes[chosen_block][3]/2.0)
    # lowest extent of the whole structure (fallback bottom for the column)
    lowest_point = 0
    for i in range(len(all_boxes)):
        if (all_boxes[i][1]+(all_boxes[i][3]/2.0)) > lowest_point:
            lowest_point = (all_boxes[i][1]+(all_boxes[i][3]/2.0))
    # top edges of every box below the chosen block, highest-first
    to_check_hit = []
    for ii in range(len(all_boxes)):
        if all_boxes[ii][1] > all_boxes[chosen_block][1]:
            if ii != 9999:  # leftover ground-sentinel guard; kept for safety
                to_check_hit.append(all_boxes[ii][1]-(all_boxes[ii][3]/2.0))
    to_check_hit = sorted(to_check_hit,reverse=True)
    new_to_check_hit = []
    for ppp in range(len(to_check_hit)):
        if to_check_hit[ppp] > (all_boxes[chosen_block][1]+(all_boxes[chosen_block][3]/2.0)):
            new_to_check_hit.append(to_check_hit[ppp])
    to_check_hit = new_to_check_hit
    y_position_bottom = lowest_point
    found = 0
    # scan candidate heights top-down; the first hit fixes the column bottom
    while (len(to_check_hit))>0:
        if check_point_in_block(x_position,to_check_hit[-1],all_boxes,chosen_block):
            if found == 0:
                y_position_bottom = to_check_hit[-1]
                found = 1
        to_check_hit.pop()
    added_block_x = x_position
    added_block_width = 1
    added_block_height = y_position_bottom-y_position_top
    added_block_y = y_position_top+(added_block_height/2.0)
    added_block = [added_block_x,added_block_y,added_block_width,added_block_height]
    all_boxes.append(added_block)
    print("ADDED BLOCK:")
    print(added_block)
    return(all_boxes)
def find_below_blocks(all_boxes, box):
    """Return entries of the module-level `complete_locations` overlapping `box`.

    NOTE(review): the `all_boxes` parameter is unused — the search runs over
    the globals `complete_locations` and `blocks` (neither visible in this
    section).  Each entry presumably is [block_type, x, y]; `box` appears to
    be an edge rectangle [left, right, top, bottom] and `block2[2]<box[2]`
    looks like a "candidate above" filter — confirm against the definitions
    earlier in the file.  Rounding to 10 dp guards against float noise.
    """
    below_blocks = []
    for block2 in complete_locations:
        if block2[2]<box[2]:
            if ( (round(box[0],10) <= round((block2[1]+(blocks[str(block2[0])][0]/2)),10))
            and (round(box[1],10) >= round((block2[1]-(blocks[str(block2[0])][0]/2)),10))
            and (round(box[2],10) <= round((block2[2]+(blocks[str(block2[0])][1]/2)),10))
            and (round(box[3],10) >= round((block2[2]-(blocks[str(block2[0])][1]/2)),10)) ):
                below_blocks.append(block2)
    return below_blocks
#currently doesn't work if multiple structures in image, need to test each structure separately
def calc_other_stability(all_boxes):
    """Top-down global stability test: grow a set of blocks from the highest
    down and require each partial set's centre of mass to stay inside its
    supporters' span.  Prints STABLE!/NOT STABLE! and returns 1/0.

    Uses the module-level `graph_supporters`; larger y = lower block.
    """
    structure_stable = True
    # checks the global stability of level by testing the stability of every block (as peak block)
    highest_block = -1
    highest_com = 99999999
    for block in range(len(all_boxes)):
        if all_boxes[block][1] < highest_com:
            highest_com = all_boxes[block][1]
            highest_block = block
    current_box = [highest_block]
    hit_ground = 0
    # NOTE(review): `block` here is the leaked loop variable (the LAST index),
    # not `highest_block` — presumably a bug; confirm intended behaviour.
    if graph_supporters[block] == [9999]:
        hit_ground = 1
    while hit_ground == 0: # while not at bottom of structure
        support_area = [99999999,0]
        current_com = 0
        total_mass = 0
        # supporters of the current set that are outside the set
        supo = []
        for jj in current_box:
            for kk in graph_supporters[jj]:
                if kk not in current_box:
                    supo.append(kk)
        for jj in current_box:
            current_com = current_com + all_boxes[jj][0]*(all_boxes[jj][2]*all_boxes[jj][3])
            total_mass = total_mass + (all_boxes[jj][2]*all_boxes[jj][3])
        current_com = current_com / total_mass
        for jj in supo:
            if all_boxes[jj][0] - (all_boxes[jj][2]/2.0) < support_area[0]:
                support_area[0] = all_boxes[jj][0] - (all_boxes[jj][2]/2.0)
            if all_boxes[jj][0] + (all_boxes[jj][2]/2.0) > support_area[1]:
                support_area[1] = all_boxes[jj][0] + (all_boxes[jj][2]/2.0)
        if (current_com >= support_area[1]) or (current_com <= support_area[0]):
            structure_stable = False
        # extend the set with the highest block not yet included
        to_add = []
        highest_block = -1
        highest_com = 99999999
        for block in range(len(all_boxes)):
            if block not in current_box:
                if all_boxes[block][1] < highest_com:
                    highest_com = all_boxes[block][1]
                    highest_block = block
        to_add.append(highest_block)
        current_box = current_box + to_add
        if graph_supporters[current_box[-1]] == [9999]:
            hit_ground = 1
    if structure_stable:
        print("STABLE!")
        return 1
    else:
        print("NOT STABLE!")
        return 0
# Main stabilisation loop: rebuild the support graph, run every stability
# analysis, and (optionally) insert extra support blocks until the sketch
# is globally stable.
all_boxes_ori_very = deepcopy(all_boxes)
all_stable = 0
while all_stable == 0:
    all_boxes = sorted(all_boxes, key=itemgetter(1), reverse=True) # sort boxes from bottom to top
    all_boxes_ori = deepcopy(all_boxes)
    #find blocks that touch each other (above and below)
    touching_blocks = []
    touching_lines = []
    width_extra = maximum_width_gap_touching
    height_extra = maximum_height_gap_touching
    for i in range(len(all_boxes)):
        current_box = all_boxes[i]
        for j in range(len(all_boxes)):
            box2 = all_boxes[j]
            # expanded-rectangle overlap test between block i and block j
            if ( (current_box[0]-((current_box[2]+width_extra)/2.0) < box2[0]+(box2[2]/2.0)) and
                 (current_box[0]+((current_box[2]+width_extra)/2.0) > box2[0]-(box2[2]/2.0)) and
                 (current_box[1]+((current_box[3]+height_extra)/2.0) > box2[1]-(box2[3]/2.0)) and
                 (current_box[1]-((current_box[3]+height_extra)/2.0) < box2[1]+(box2[3]/2.0)) ):
                if (i != j):
                    if ((current_box[1]) > (box2[1]+(box2[3]/2.0))): #below block must have center point below top blocks bottom
                        if ((current_box[1]-(current_box[3]/2.0)) > (box2[1]-(box2[3]/2.0))): #below block must have top point below top blocks top point
                            touching_blocks.append([i,j]) #first box supports the second
                            touching_lines.append([current_box[0]-(current_box[2]/2.0),
                                                   current_box[0]+(current_box[2]/2.0),
                                                   box2[0]-(box2[2]/2.0),
                                                   box2[0]+(box2[2]/2.0)]) #bottom block first then top
    # prune transitive edges: if i->j and j->k both exist, drop the i->k edge
    # NOTE(review): new_touching_blocks / new_touching_lines are never used.
    new_touching_blocks = []
    new_touching_lines = []
    for i in range(len(all_boxes)):
        for j in range(len(all_boxes)):
            for k in range(len(all_boxes)):
                if [i,j] in touching_blocks:
                    if [i,k] in touching_blocks:
                        if [j,k] in touching_blocks:
                            posie = touching_blocks.index([i,k])
                            touching_blocks.pop(posie)
                            touching_lines.pop(posie)
    # finds the supportees and supporters (direct) for each block
    all_boxes = deepcopy(all_boxes_ori)
    graph_supportees = {}
    graph_supporters = {}
    for i in range(len(all_boxes)):
        graph_supportees[i] = []
        for support in touching_blocks:
            if support[0] == i:
                graph_supportees[i].append(support[1])
    for i in range(len(all_boxes)):
        graph_supporters[i] = []
        for support in touching_blocks:
            if support[1] == i:
                graph_supporters[i].append(support[0])
        if (graph_supporters[i] == []):
            graph_supporters[i] = [9999] # the ground is represented as block number 9999
    # run each stability analysis on a fresh copy of the boxes
    all_boxes = deepcopy(all_boxes_ori)
    check_local_stability(all_boxes)
    all_boxes = deepcopy(all_boxes_ori)
    ERA_relations = find_era_relations()
    all_boxes = deepcopy(all_boxes_ori)
    calc_era_stability(all_boxes, ERA_relations, touching_blocks)
    all_boxes = deepcopy(all_boxes_ori)
    testg = calc_gary_stability(all_boxes)
    if testg == 0:
        # NOTE(review): GARY_INITIAL / OTHER_INITIAL / MATTHEW_INITIAL are
        # only assigned on failure here — presumably initialised earlier in
        # the file; confirm.
        GARY_INITIAL = 0
    all_boxes = deepcopy(all_boxes_ori)
    testg = calc_other_stability(all_boxes)
    if testg == 0:
        OTHER_INITIAL = 0
    # Analyse global stability of the sketch drawing (new qualitative method)
    all_boxes = deepcopy(all_boxes_ori)
    chosen_block = 99999999
    global_stable_sketch = 1
    both = calc_matthew_stability_ori(all_boxes)
    global_stability = both[0]
    pivot_points = both[1]
    for s in global_stability:
        if s == 0:
            global_stable_sketch = 0
            print("GLOBALLY UNSTABLE MATTHEW")
            MATTHEW_INITIAL = 0
            print(both)
    if (global_stable_sketch == 0):
        # add a support under the LAST unstable block and loop again
        for j in range(len(global_stability)):
            if global_stability[j] == 0:
                chosen_block = j
        all_boxes = add_extra_supports(all_boxes,pivot_points,chosen_block)
    else:
        all_stable = 1
    # config switch: if not allowed to add blocks, accept the sketch as-is
    if add_extra_blocks_to_make_stable == 0:
        all_stable = 1
    else:
        all_boxes_ori = deepcopy(all_boxes)
def merge_groups(groupings):
    """Return the FIRST pair of group indices [[g1, g2]] sharing a member.

    Scans ordered pairs of distinct groups and returns as soon as any member
    appears in both; returns [] when no two groups overlap.  (Only the first
    overlapping pair is reported — callers merge and re-scan.)
    """
    for idx_a in range(len(groupings)):
        for idx_b in range(len(groupings)):
            if idx_a == idx_b:
                continue
            for member_a in groupings[idx_a]:
                for member_b in groupings[idx_b]:
                    if member_a == member_b:
                        return [[idx_a, idx_b]]
    return []
def remove_groupings(groupings):
    """Return the FIRST duplicate position [[group_idx, later_idx]] found.

    Scans each group for two equal members and reports the index of the
    later occurrence as soon as one is found; returns [] when every group
    is duplicate-free.  (Callers delete that entry and re-scan.)
    """
    for group_idx in range(len(groupings)):
        group = groupings[group_idx]
        for first in range(len(group)):
            for second in range(first + 1, len(group)):
                if group[first] == group[second]:
                    return [[group_idx, second]]
    return []
# splits block sets into groupings that must have the same height
# (two blocks that share a direct supporter AND a common supportee must have
# equal total support-path heights between them)
all_boxes = deepcopy(all_boxes_ori)
groupings = []
no_change1 = 0
# NOTE(review): `no_change1` is set but never looped on here — the comment at
# the merge step suggests this was once the body of a while loop; confirm.
if check_groups==1:
    no_change1 = 1
    old_groupings = deepcopy(groupings)
    for i in range(len(all_boxes)):
        for j in range(len(all_boxes)):
            if (i < j):
                # checks if i and j share a direct supporter
                direct_supporter = 0
                for b1 in graph_supporters[i]:
                    for b2 in graph_supporters[j]:
                        if (b1==b2):
                            direct_supporter = 1
                if (direct_supporter == 1): # check if i and j share a supportee
                    for k in range(len(all_boxes)):
                        if len(find_all_paths(graph_supportees,i,k)) > 0:
                            if len(find_all_paths(graph_supportees,j,k)) > 0:
                                # record both sets of paths (minus the shared
                                # endpoint k) as one group
                                groupings.append([])
                                for aa in find_all_paths(graph_supportees,i,k):
                                    aa.pop()
                                    groupings[-1].append(aa)
                                for bb in find_all_paths(graph_supportees,j,k):
                                    bb.pop()
                                    groupings[-1].append(bb)
    # merge groups togethor (originally the same indentation level as the above paragraph of code)
    # NOTE(review): cleverMergeLists is defined elsewhere in the file;
    # presumably merges overlapping groups in place — confirm.
    cleverMergeLists(groupings)
    #remove duplicates
    no_change3 = 0
    while (no_change3 == 0):
        to_remove = remove_groupings(groupings)
        if len(to_remove) == 0:
            no_change3 = 1
        else:
            del groupings[to_remove[0][0]][to_remove[0][1]]
    if sorted(old_groupings) != sorted(groupings):
        no_change1=0
# make all single blocks in groups the average height of all single blocks in same group
all_boxes = deepcopy(all_boxes_ori)
if (average_single_block_groups_heights==1):
    for g in groupings:
        to_average = []
        average_height = 0
        total_height = 0
        for block_set in g:
            if len(block_set)==1:
                to_average.append(block_set[0])
        if len(to_average)>0:
            for b in to_average:
                total_height = total_height+all_boxes[b][3]
            average_height = total_height/float(len(to_average))
            for b in to_average:
                all_boxes[b][3] = average_height
# Group blocks that look like duplicates of each other: similar shape,
# spatially close, and with no third block sitting between them.
# `use_similarity_grouping`, `largest_value` and `error_percentage_shape`
# are defined earlier in the file.
if (use_similarity_grouping == 1):
    close_distance = largest_value*2
    blocks_same = []
    for i in range(len(all_boxes)):
        for j in range(len(all_boxes)):
            same_shape = 0
            close = 0
            no_inbetween = 1
            if i != j:
                # same shape if x (or else y) position, width and height all
                # match within the relative error tolerance
                if all_boxes[i][0] < (all_boxes[j][0] + all_boxes[j][0]*error_percentage_shape):
                    if all_boxes[i][0] > (all_boxes[j][0] - all_boxes[j][0]*error_percentage_shape):
                        if all_boxes[i][2] < (all_boxes[j][2] + all_boxes[j][2]*error_percentage_shape):
                            if all_boxes[i][2] > (all_boxes[j][2] - all_boxes[j][2]*error_percentage_shape):
                                if all_boxes[i][3] < (all_boxes[j][3] + all_boxes[j][3]*error_percentage_shape):
                                    if all_boxes[i][3] > (all_boxes[j][3] - all_boxes[j][3]*error_percentage_shape):
                                        same_shape = 1
                elif all_boxes[i][1] < (all_boxes[j][1] + all_boxes[j][1]*error_percentage_shape):
                    if all_boxes[i][1] > (all_boxes[j][1] - all_boxes[j][1]*error_percentage_shape):
                        if all_boxes[i][2] < (all_boxes[j][2] + all_boxes[j][2]*error_percentage_shape):
                            if all_boxes[i][2] > (all_boxes[j][2] - all_boxes[j][2]*error_percentage_shape):
                                if all_boxes[i][3] < (all_boxes[j][3] + all_boxes[j][3]*error_percentage_shape):
                                    if all_boxes[i][3] > (all_boxes[j][3] - all_boxes[j][3]*error_percentage_shape):
                                        same_shape = 1
                # NOTE(review): any one of these sets `close`, so the test is
                # effectively "within close_distance on x OR y in either
                # direction" — confirm this is the intended looseness.
                if all_boxes[i][0] < (all_boxes[j][0] + close_distance):
                    close = 1
                if all_boxes[i][0] > (all_boxes[j][0] - close_distance):
                    close = 1
                if all_boxes[i][1] < (all_boxes[j][1] + close_distance):
                    close = 1
                if all_boxes[i][1] > (all_boxes[j][1] - close_distance):
                    close = 1
                # reject the pair if a third block k lies horizontally or
                # vertically between i and j
                for k in range(len(all_boxes)):
                    k_top = all_boxes[k][1] + (all_boxes[k][3]/2.0)
                    k_bottom = all_boxes[k][1] - (all_boxes[k][3]/2.0)
                    k_left = all_boxes[k][0] - (all_boxes[k][2]/2.0)
                    k_right = all_boxes[k][0] + (all_boxes[k][2]/2.0)
                    i_top = all_boxes[i][1] + (all_boxes[i][3]/2.0)
                    i_bottom = all_boxes[i][1] - (all_boxes[i][3]/2.0)
                    i_left = all_boxes[i][0] - (all_boxes[i][2]/2.0)
                    i_right = all_boxes[i][0] + (all_boxes[i][2]/2.0)
                    j_top = all_boxes[j][1] + (all_boxes[j][3]/2.0)
                    j_bottom = all_boxes[j][1] - (all_boxes[j][3]/2.0)
                    j_left = all_boxes[j][0] - (all_boxes[j][2]/2.0)
                    j_right = all_boxes[j][0] + (all_boxes[j][2]/2.0)
                    if (k_top > i_bottom) and (k_top > j_bottom) and (k_bottom < i_top) and (k_bottom < j_top) and (all_boxes[k][0] > all_boxes[i][0]) and (all_boxes[k][0] < all_boxes[j][0]):
                        no_inbetween = 0
                    if (k_right > i_left) and (k_right > j_left) and (k_left < i_right) and (k_left < j_right) and (all_boxes[k][1] > all_boxes[i][1]) and (all_boxes[k][1] < all_boxes[j][1]):
                        no_inbetween = 0
                if (no_inbetween==1 and close==1 and same_shape==1):
                    blocks_same.append([i,j])
# Merge the similar-block pairs into connected groups, deduplicate each
# group, and force every block in a group to the group's average height.
# `average_same_block_groups_heights` is defined earlier in the file.
if ((average_same_block_groups_heights==1) and (use_similarity_grouping == 1)):
    blocks_same2 = deepcopy(blocks_same)
    no_change2 = 0
    while(no_change2 == 0):
        # find any two groups sharing a block and concatenate them
        to_merge = []
        for g1 in range(len(blocks_same2)):
            for g1_block in blocks_same2[g1]:
                for g2 in range(len(blocks_same2)):
                    for g2_block in blocks_same2[g2]:
                        if (g1 != g2):
                            if g1_block == g2_block:
                                to_merge.append([g1,g2])
        if len(to_merge) == 0:
            no_change2 = 1
        else:
            blocks_same2[to_merge[0][0]] = blocks_same2[to_merge[0][0]]+blocks_same2[to_merge[0][1]]
            blocks_same2.pop(to_merge[0][1])
    #remove duplicates
    no_change3 = 0
    while (no_change3 == 0):
        to_remove = []
        no_change3=1
        for g1 in range(len(blocks_same2)):
            for g2 in range(len(blocks_same2[g1])):
                for g3 in range(len(blocks_same2[g1])):
                    if (g2<g3):
                        if blocks_same2[g1][g2] == blocks_same2[g1][g3]:
                            no_change3=0
                            to_remove.append([g1,g3])
        if (no_change3 == 0):
            del blocks_same2[to_remove[0][0]][to_remove[0][1]]
    # make same average height
    for g in blocks_same2:
        to_average = []
        average_height = 0
        total_height = 0
        for blockz in g:
            to_average.append(blockz)
        if len(to_average)>0:
            for b in to_average:
                total_height = total_height+all_boxes[b][3]
            average_height = total_height/float(len(to_average))
            for b in to_average:
                all_boxes[b][3] = average_height
# adds composite blocks to set of possible block types, made up of multiple smaller blocks
# can also rearrange the ordering of these sub-blocks to create even more possible options
# specials[key] = [num_big_blocks, end_block_type, num_layers, end_block_position]
if composite_blocks_allowed == 1:
specials = {}
horizontal = [5,6,8,10,12]
counter = 14
for i in range(max_composite_block_width):
for j in horizontal:
# width = i big blocks (2.06 each) plus one block of type j
new_block_width = (2.06*i) + blocks[str(j)][0]
height_counter = 0.22
height_num = 1
# grow the composite upwards in 0.22 layers until twice as tall as wide
while height_counter < new_block_width*2.0:
pos_j = deepcopy(i)
if rearrange_special_block_order == 1:
while pos_j >= 0:
blocks[str(counter)] = [round(new_block_width,2),round(height_counter,2)]
block_names[str(counter)] = "special"
specials[str(counter)] = [i,j,height_num,pos_j]
counter = counter + 1
pos_j = pos_j - 1
height_counter = round(height_counter + 0.22,2)
height_num = height_num+1
else:
blocks[str(counter)] = [round(new_block_width,2),round(height_counter,2)]
block_names[str(counter)] = "special"
specials[str(counter)] = [i,j,height_num,pos_j]
counter = counter + 1
height_counter = round(height_counter + 0.22,2)
height_num = height_num+1
# divide the size and position of all blocks by the scale factor
# scaling_method selects which sketch statistic is mapped onto a real block size
scale_factor = 1
if scaling_method == 0:
scale_factor = largest_value/2.06 # BIG APPROACH
if scaling_method == 1:
scale_factor = smallest_value/0.22 # SMALL APPROACH
if scaling_method == 2:
middle_block_size = (largest_value+smallest_value)/2.0
scale_factor = middle_block_size/1.14 # MIDDLE APPROACH
if scaling_method == 3:
scale_factor = mean_size/actual_block_mean # MEAN APPROACH (0.667)
if scaling_method == 4:
scale_factor = median_size/actual_block_median # MEDIAN APPROACH (0.43)
# all_boxes2 holds [x, y, w, h] in game units
all_boxes2 = []
for box in all_boxes:
box2 = []
box2.append(box[0]/scale_factor)
box2.append(box[1]/scale_factor)
box2.append(box[2]/scale_factor)
box2.append(box[3]/scale_factor)
all_boxes2.append(box2)
# default placement order: sketch index order
block_order= []
for i in range(len(all_boxes2)):
block_order.append(i)
# re-order list so that blocks are placed straight after their direct supporters (or as close as possible to straight after, lower blocks get priority)
# re-orders blocks from being supporters before supportees (closer together) rather than top to bottom
# add very bottom block to list
# add supporter of block to list (only if all supporters of this block are present)
# if not all supporters are present, then add all supportees of this block to the list
if order_blocks_smart == 1:
block_order = [0]
block_order2 = [0]
while(len(block_order) < len(all_boxes2)):
added_block = 0
for i in reversed(block_order):
for j in graph_supportees[i]:
if j not in block_order:
if j not in block_order2:
all_supporters = 1
for k in graph_supporters[j]:
if k not in block_order:
all_supporters = 0
# walk the supporter chain of the missing block (9999 = ground)
check_order = []
to_check = [k]
while(len(to_check)>0):
value_checking = to_check.pop()
check_order.append(value_checking)
for yup in graph_supporters[value_checking]:
if yup != 9999:
to_check.append(yup)
for kk in check_order:
if (kk not in block_order2) and (added_block == 0):
add_me = 1
for gg in graph_supporters[kk]:
if (gg not in block_order2) and gg!=9999:
add_me = 0
if add_me == 1:
block_order2.append(kk)
added_block = 1
if (all_supporters == 1) and (added_block == 0):
block_order2.append(j)
added_block = 1
# no progress: append any leftovers so the loop terminates
if (block_order == block_order2):
for rem in range(len(all_boxes2)):
if rem not in block_order2:
block_order2.append(rem)
block_order = deepcopy(block_order2)
# find block type with most similar size to each block
# main generation loop: pick a type for each block in block_order, backtracking
# via already_tried when the structural checks below reject the choice.
block_keys = numpy.empty((len(all_boxes2), 0)).tolist()
already_tried = [[]]
count_loops = 0
ori_blocks3 = deepcopy(all_boxes2)
all_done = 0
while (all_done == 0) and (count_loops < 10000):
# current_index = number of blocks already assigned a type
current_index = 0
for qqq in block_keys:
if qqq != []:
current_index = current_index + 1
current_box = block_order[current_index]
box = ori_blocks3[current_box]
count_loops = count_loops+1
if count_loops % 1000 == 0:
print("generating...") # prints every 1000 loops
# choose a block type for the next block to be added
# based on the sum of the squared differences between their widths and heights (width_dif^2 + height_dif^2)
width = box[2]
height = box[3]
best_difference = 99999999
best_name = ""
for key,value in blocks.items():
width_difference = abs(width-value[0])
height_difference = abs(height-value[1])
total_difference = width_difference**2 + height_difference**2
if int(key) > original_number_blocks:
total_difference = total_difference*composite_block_penalty_picking
if (best_difference > total_difference):
if (key not in already_tried[-1]):
best_difference = total_difference
best_name = key
block_keys[current_box] = best_name
already_tried[-1].append(best_name)
# move block to correct height (based on supporting block height)
if graph_supporters[current_box] == [9999]:
new = []
new.append(ori_blocks3[current_box][0])
new.append(ground+(blocks[block_keys[current_box]][1]/2.0))
new.append(blocks[block_keys[current_box]][0])
new.append(blocks[block_keys[current_box]][1])
else:
new = []
new.append(ori_blocks3[current_box][0])
new.append(all_boxes2[graph_supporters[current_box][0]][1]+
(blocks[block_keys[graph_supporters[current_box][0]]][1]/2.0)+ # error might happen here if structure not possible
(blocks[block_keys[current_box]][1]/2.0))
new.append(blocks[block_keys[current_box]][0])
new.append(blocks[block_keys[current_box]][1])
all_boxes2[current_box] = new
# CHECK THAT BLOCK JUST ADDED TO BLOCK KEYS DOESNT VIOLATE ANY RULES
# if it does then pop the key off block_keys
# do iteratively, removing previous block if no block types for the current block are possible
must_pop = 0
# similarity grouping: paired blocks must use the same type
if use_similarity_grouping:
for tim in blocks_same:
if tim[0] == current_box:
if block_keys[tim[1]] != []:
if block_keys[tim[0]] != block_keys[tim[1]] :
must_pop = 1
if tim[1] == current_box:
if block_keys[tim[0]] != []:
if block_keys[tim[0]] != block_keys[tim[1]] :
must_pop = 1
# ensures that chosen block type is the right height to fulfil all grouping requirements
# outside of the horizontal movement shift option as that won't help correct this
if (check_groups == 1) and must_pop==0:
for g in groupings:
height_set = 0
for block_set1 in g:
valid = 1
for n in block_set1:
if (block_keys[n]==[]):
valid = 0
if valid == 1:
height_set2 = 0
for nn in block_set1:
height_set2 += blocks[block_keys[nn]][1]
if height_set == 0:
height_set = height_set2
else:
if abs(height_set - height_set2) > height_error_allowed_groups:
must_pop = 1
# Check if composite block is locally stable (all blocks that make it up are supported)
if (check_composite_block_stability == 1) and (composite_blocks_allowed == 1) and must_pop==0:
block_num_special = block_keys[current_box]
i = block_keys[current_box]
j = all_boxes2[current_box]
if int(block_num_special) > original_number_blocks:
# reconstruct the sub-block layout of the composite's bottom layer
info = specials[i]
total_width = round((2.06*info[0])+blocks[str(info[1])][0],2)
total_height = info[2]*0.22
positions_long = []
position_extra = []
added_j = 0
current_pos = j[0]-(total_width/2.0)
y_pos = j[1] - (total_height/2.0) + 0.11
for a in range(info[0]):
if a == info[3]:
added_j = 1
current_pos = current_pos + (blocks[str(info[1])][0]/2.0)
position_extra = current_pos
current_pos = current_pos + (blocks[str(info[1])][0]/2.0)
current_pos = current_pos + 1.03
positions_long.append(current_pos)
current_pos = current_pos + 1.03
if added_j == 0:
current_pos = current_pos + (blocks[str(info[1])][0]/2.0)
position_extra = current_pos
all_boxes_special = []
block_keys_special = []
for iii in range(len(positions_long)):
all_boxes_special.append([positions_long[iii],y_pos,2.06,0.22])
block_keys_special.append(12)
all_boxes_special.append([position_extra,y_pos,blocks[str(info[1])][0],0.22])
block_keys_special.append(info[1])
# check local stability
width_error_allowed_local_composite = 0.0
for ii in range(len(all_boxes_special)):
left_support = 0
right_support = 0
box = all_boxes_special[ii]
for jj in graph_supporters[current_box]:
if jj == 9999:
left_support = 1
right_support = 1
else:
box2 = all_boxes2[jj]
box2_left = box2[0]-((blocks[block_keys[jj]][0])/2.0)
box2_right = box2[0]+((blocks[block_keys[jj]][0])/2.0)
if box2_left < box[0] + width_error_allowed_local_composite:
if box2_right > (box[0] - (box[2]/2.0)):
left_support = 1
if box2_right > box[0] - width_error_allowed_local_composite:
if box2_left < (box[0] + (box[2]/2.0)):
right_support = 1
if left_support == 0:
must_pop = 1
if right_support == 0:
must_pop = 1
# Remaining structural checks; if a check fails, optionally shift the block
# sideways (moves_to_try) and re-check before rejecting the chosen type.
if must_pop == 0:
tried_all_moving = 0
while (tried_all_moving==0):
must_pop = 0
# ensures the chosen block does not overlap any other already chosen blocks
if (check_overlap == 1) and must_pop==0:
width_error_allowed_overlap = 0.0
for i in range(len(all_boxes2)):
if (block_keys[i]!=[]) and (i!=current_box):
box_width = blocks[best_name][0]-width_error_allowed_overlap
box_height = blocks[best_name][1]-height_error_allowed_overlap
box2 = all_boxes2[i]
box2_width = blocks[block_keys[i]][0]-width_error_allowed_overlap
box2_height = blocks[block_keys[i]][1]-height_error_allowed_overlap
if ( (all_boxes2[current_box][0]-(box_width/2.0) < box2[0]+(box2_width/2.0)) and
(all_boxes2[current_box][0]+(box_width/2.0) > box2[0]-(box2_width/2.0)) and
(all_boxes2[current_box][1]+(box_height/2.0) > box2[1]-(box2_height/2.0)) and
(all_boxes2[current_box][1]-(box_height/2.0) < box2[1]+(box2_height/2.0)) ):
must_pop = 1
# ensures that chosen block type is wide enough to be supported by all direct supporter blocks
if (check_all_supporters == 1) and must_pop==0:
for i in graph_supporters[current_box]:
if (i < 9999):
test_box = all_boxes2[i]
if (all_boxes2[current_box][0]-(blocks[best_name][0]/2.0) + required_support_amount) > (test_box[0]+(blocks[block_keys[i]][0]/2.0)):
must_pop = 1
if (all_boxes2[current_box][0]+(blocks[best_name][0]/2.0) - required_support_amount) < (test_box[0]-(blocks[block_keys[i]][0]/2.0)):
must_pop = 1
# CHECK ERA RELATIONS (OPTIONAL) NOT SURE IF WORKS 100% BUT SHOULDN'T BE USED ANYWAY AS PREVENTS STABILITY CORRECTION AND VERY RESTRICTIVE
if (check_era_relations == 1) and must_pop==0:
width_extra_era = 0.06
height_extra_era = 0.02
touching_blocks2 = []
touching_lines2 = []
era_relations2 = []
for i in range(len(all_boxes2)):
if block_keys[i] != []:
current_box2 = all_boxes2[i]
current_box2[2] = current_box2[2]+width_extra_era
current_box2[3] = current_box2[3]+height_extra_era
for j in range(len(all_boxes2)):
if block_keys[j] != []:
box2 = all_boxes2[j]
if ( (current_box2[0]-(current_box2[2]/2.0) < box2[0]+(box2[2]/2.0)) and
(current_box2[0]+(current_box2[2]/2.0) > box2[0]-(box2[2]/2.0)) and
(current_box2[1]+(current_box2[3]/2.0) > box2[1]-(box2[3]/2.0)) and
(current_box2[1]-(current_box2[3]/2.0) < box2[1]+(box2[3]/2.0)) ):
if (i != j):
if ((current_box2[1]) > (box2[1]+(box2[3]/2.0))):
if ((current_box2[1]-(current_box2[3]/2.0)) > (box2[1]-(box2[3]/2.0))):
touching_blocks2.append([j,i])
touching_lines2.append([box2[0]-(box2[2]/2.0),
box2[0]+(box2[2]/2.0),
current_box2[0]-(current_box2[2]/2.0),
current_box2[0]+(current_box2[2]/2.0)])
for pairin in range(len(touching_blocks)):
if block_keys[touching_blocks[pairin][0]] != []:
if block_keys[touching_blocks[pairin][1]] != []:
if touching_blocks[pairin] not in touching_blocks2:
must_pop=1
for line2 in touching_lines2:
era_relations2.append(find_era_relation(line2))
for ori1 in range(len(touching_blocks)):
if block_keys[touching_blocks[ori1][0]]!=[]:
first_block = touching_blocks[ori1][0]
if block_keys[touching_blocks[ori1][1]]!=[]:
second_block = touching_blocks[ori1][1]
correct_index_new = 99999999
for new1 in range(len(touching_blocks2)):
if touching_blocks2[new1] == [first_block,second_block]:
correct_index_new = new1
if correct_index_new < 99999999:
if ERA_relations[ori1] != era_relations2[correct_index_new][0]:
must_pop = 1
# check if structure has local stability
# BETTER TO CHECK GLOBAL STABILITY UNLESS TRYING TO BE FAST
if (check_local_stability == 1) and must_pop==0:
width_error_allowed_local = 0.0
for i in range(len(all_boxes2)):
if (block_keys[i]!=[]):
left_support = 0
right_support = 0
box = all_boxes2[i]
for j in graph_supporters[i]:
if j == 9999:
left_support = 1
right_support = 1
else:
box2 = all_boxes2[j]
box2_left = box2[0]-((blocks[block_keys[j]][0])/2.0)
box2_right = box2[0]+((blocks[block_keys[j]][0])/2.0)
if box2_left < box[0] + width_error_allowed_local:
left_support = 1
if box2_right > box[0] - width_error_allowed_local:
right_support = 1
if left_support == 0:
must_pop = 1
if right_support == 0:
must_pop = 1
# check if structure has global stability
if (check_global_stability == 1) and must_pop==0:
stable_global = 0
valid_supportees = []
new_joint_all_boxes = []
if check_global_stability_method == 1:
new_joint_all_boxes = deepcopy(all_boxes2)
for k in range(len(block_keys)):
if block_keys[k] != []:
valid_supportees.append(k)
elif check_global_stability_method == 2:
# mix chosen blocks with the raw sketch boxes for blocks not yet typed
for k in range(len(all_boxes2)):
valid_supportees.append(k)
if block_keys[k] != []:
new_joint_all_boxes.append(all_boxes2[k])
else:
new_joint_all_boxes.append(ori_blocks3[k])
else:
print ("ERROR!! WRONG CHECK GLOBAL STABILITY METHOD")
stable_global = calc_matthew_stability(new_joint_all_boxes,valid_supportees)
if stable_global == 0:
must_pop = 1
# move sideways if selected as viable response option
if must_pop == 1:
if len(moves_to_try)==0:
tried_all_moving=1
elif shift_blocks_sideways==0:
tried_all_moving=1
else:
all_boxes2[current_box][0] = all_boxes2[current_box][0]+moves_to_try[-1]
moves_to_try.pop()
else:
tried_all_moving = 1
# block fails one or more requirements so remove it and try again
if must_pop == 1:
block_keys[current_box]=[]
# if already tried all block types then remove the block AND the previous block
if (limit_number_block_type_changes == 1) and (len(blocks) > max_number_block_type_changes):
while len(already_tried[-1]) == max_number_block_type_changes:
current_index = current_index-1
block_keys[block_order[current_index]]=[]
already_tried.pop()
else:
while len(already_tried[-1]) == len(blocks):
current_index = current_index-1
block_keys[block_order[current_index]]=[]
already_tried.pop()
else:
already_tried.append([])
all_done=1
for qqq in block_keys:
if qqq == []:
all_done=0
if (count_loops >= 10000):
print("generating structure took too long, suggest trying a different scale_calculation_option")
# calculate measure of difference between the original sketch and the generated structure (average percentage ratio difference)
avg_ratio_error_score = 0
for i in range(len(all_boxes_ori)):
ratio_ori = all_boxes_ori[i][3]/all_boxes_ori[i][2]
ratio_new = blocks[block_keys[i]][1]/blocks[block_keys[i]][0]
avg_ratio_error_score = avg_ratio_error_score + (abs(ratio_ori-ratio_new)/((ratio_ori+ratio_new)/2.0))
avg_ratio_error_score = avg_ratio_error_score/len(all_boxes_ori)
print("AVG RATIO ERROR:")
print(avg_ratio_error_score)
# mean-area error: compare per-block areas after normalising each set by its mean area
avg_mean_error_score = 0
old_mean_area = deepcopy(mean_area)
new_mean_area = 0
for i in range(len(all_boxes_ori)):
new_mean_area = new_mean_area + (blocks[block_keys[i]][1]*blocks[block_keys[i]][0])
new_mean_area = new_mean_area / len(all_boxes_ori)
old_scale = old_mean_area/100.0
new_scale = new_mean_area/100.0
for i in range(len(all_boxes_ori)):
area_old = all_boxes_ori[i][3]*all_boxes_ori[i][2]
area_new = ((blocks[block_keys[i]][1]*blocks[block_keys[i]][0]))
area_old = area_old / old_scale
area_new = area_new / new_scale
avg_mean_error_score = avg_mean_error_score + abs((area_old)-(area_new))
avg_mean_error_score = avg_mean_error_score
avg_mean_error_score = avg_mean_error_score/len(all_boxes_ori)
print("AVG MEAN AREA ERROR:")
print(avg_mean_error_score)
# location error: distance of each block from its structure's centre of mass,
# compared between sketch and generated structure
avg_location_error_score = 0
old_mean_area = deepcopy(mean_area)
new_mean_area = 0
for i in range(len(all_boxes_ori)):
new_mean_area = new_mean_area + (blocks[block_keys[i]][1]*blocks[block_keys[i]][0])
new_mean_area = new_mean_area / len(all_boxes_ori)
old_scale = sqrt(old_mean_area)/100.0
new_scale = sqrt(new_mean_area)/100.0
center_mass_new_x = 0
center_mass_new_y = 0
total_mass_new = 0
for i in range(len(all_boxes_ori)):
box = all_boxes2[i]
center_mass_new_x = center_mass_new_x + (box[0]*box[2]*box[3])
center_mass_new_y = center_mass_new_y + (box[1]*box[2]*box[3])
total_mass_new = total_mass_new + (box[2]*box[3])
center_mass_new_x = center_mass_new_x / total_mass_new
center_mass_new_y = center_mass_new_y / total_mass_new
for i in range(len(all_boxes_ori)):
position_old_x = abs(all_boxes_ori[i][0]-center_mass_ori_x)
position_old_y = abs(all_boxes_ori[i][1]-center_mass_ori_y)
position_new_x = abs(all_boxes2[i][0]-center_mass_new_x)
position_new_y = abs(all_boxes2[i][1]-center_mass_new_y)
position_old_x = position_old_x / (old_scale)
position_old_y = position_old_y / (old_scale)
position_new_x = position_new_x / (new_scale)
position_new_y = position_new_y / (new_scale)
distance = sqrt( (abs(position_old_x-position_new_x)*abs(position_old_x-position_new_x)) + (abs(position_old_y-position_new_y)*abs(position_old_y-position_new_y)) )
avg_location_error_score = avg_location_error_score + distance
avg_location_error_score = avg_location_error_score
avg_location_error_score = avg_location_error_score/len(all_boxes_ori)
print("AVG LOCATION AREA ERROR:")
print(avg_location_error_score)
# penalty for how many chosen block types are composites
penalty_composite = 0.0
number_composite = 0.0
total_number_blocks = len(block_keys)
for i in range(len(block_keys)):
if int(block_keys[i]) > original_number_blocks:
number_composite = number_composite + 1
ratio_composite = number_composite/total_number_blocks
penalty_composite = composite_block_penalty_end*ratio_composite
print("PENALTY COMPOSITE:")
print(penalty_composite)
# penalty for blocks added beyond what the original sketch contained
penalty_extra = 0
penalty_weight = 1.0
for i in range(len(all_boxes2)):
if i >= len(all_boxes_ori_very):
penalty_extra = penalty_extra + ((blocks[block_keys[i]][1]*blocks[block_keys[i]][0]) / (new_scale))
print("PENALTY EXTRA:")
print(penalty_extra)
print("FINAL ERROR SCORE:")
print((avg_ratio_error_score*avg_mean_error_score*avg_location_error_score)+penalty_composite+penalty_extra) # Not normalised
# flip y_axis direction (upwards is now positive rather than negative)
all_boxes3 = []
need_move_up = 0
for i in all_boxes2:
new = []
new.append(i[0])
new.append(i[1]*-1)
new.append(i[2])
new.append(i[3])
all_boxes3.append(new)
# move blocks to correct height (needs to be done again after flipping y-axis)
# ground-supported blocks (supporter 9999) sit on the ground line; others
# stack on top of their first supporter
for i in range(len(all_boxes3)):
if graph_supporters[i] == [9999]:
new = []
new.append(all_boxes3[i][0])
new.append(ground+(blocks[block_keys[i]][1]/2.0))
new.append(all_boxes3[i][2])
new.append(all_boxes3[i][3])
else:
new = []
new.append(all_boxes3[i][0])
new.append(all_boxes3[graph_supporters[i][0]][1]+
(blocks[block_keys[graph_supporters[i][0]]][1]/2.0)+
(blocks[block_keys[i]][1]/2.0))
new.append(all_boxes3[i][2])
new.append(all_boxes3[i][3])
all_boxes3[i] = new
all_boxes4 = all_boxes3
# write XML
# emits a Science Birds level file; composite ("special") blocks are expanded
# back into their constituent RectBig + end-block rows, layer by layer
number_birds=3
f = open("level-4.xml", "w")
f.write('<?xml version="1.0" encoding="utf-16"?>\n')
f.write('<Level width ="2">\n')
f.write('<Camera x="0" y="2" minWidth="20" maxWidth="30">\n')
f.write('<Birds>\n')
for i in range(number_birds):
f.write('<Bird type="BirdRed"/>\n')
f.write('</Birds>\n')
f.write('<Slingshot x="-8" y="-2.5">\n')
f.write('<GameObjects>\n')
for index in range(len(all_boxes4)):
i = block_keys[index]
j = all_boxes4[index]
if int(i) > original_number_blocks:
rotation = 0
info = specials[i]
total_width = round((2.06*info[0])+blocks[str(info[1])][0],2)
total_height = info[2]*0.22
y_pos = j[1] - (total_height/2.0) + 0.11
pos_j = info[3]
for jj in range(info[2]):
positions_long = []
position_extra = []
added_j = 0
current_pos = j[0]-(total_width/2.0)
for a in range(info[0]):
if a == pos_j:
added_j = 1
current_pos = current_pos + (blocks[str(info[1])][0]/2.0)
position_extra = current_pos
current_pos = current_pos + (blocks[str(info[1])][0]/2.0)
current_pos = current_pos + 1.03
positions_long.append(current_pos)
current_pos = current_pos + 1.03
if added_j == 0:
current_pos = current_pos + (blocks[str(info[1])][0]/2.0)
position_extra = current_pos
for aa in range(len(positions_long)):
f.write('<Block type="RectBig" material="stone" x="%s" y="%s" rotation="0" />\n' % (str(positions_long[aa]), str(y_pos)))
f.write('<Block type="%s" material="stone" x="%s" y="%s" rotation="0" />\n' % (str(block_names[str(info[1])]),str(position_extra), str(y_pos)))
y_pos = y_pos + 0.22
# interweaving shifts the end block's position each layer (brick-laying)
if composite_block_interweaving == 1:
pos_j = pos_j + 1
if pos_j > info[0]:
pos_j = 0
else:
rotation = 0
# these type ids are the 90-degree rotated variants
if (int(i) in (3,7,9,11,13)):
rotation = 90
f.write('<Block type="%s" material="stone" x="%s" y="%s" rotation="%s" />\n' % (block_names[str(i)],str(j[0]), str(j[1]), str(rotation)))
f.write('</GameObjects>\n')
f.write('</Level>\n')
f.close()
| stepmat/ScienceBirds_sketch_generation | generate_sketch.py | generate_sketch.py | py | 114,707 | python | en | code | 4 | github-code | 36 |
8635132151 | # TODO :
# ElasticSearch : 검색기능 구현(DB)
# DRF Swagger(ysag) : API 문서화 작업용
# Celery(+Redis, + Naver SENS) : 문자인증을 위한 Naver SENS API 비동기 작동
# POSTMAN 설치 후 사용(DRF API Check)
import json
import os
import random
import datetime
import calendar
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import generics, status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.throttling import AnonRateThrottle
from rest_framework.views import APIView
import requests
from tables.models import TableLog
from .serializers import UserSerializer, PhoneNumberVerificationSerializer, CheckUniqueIDSerializer, \
SocialAuthTokenSerializer
User = get_user_model()
class SignupView(generics.CreateAPIView):
    """Signup API.

    Requires the four fields 'username', 'password', 'name' and
    'phone_number'; after type validation the account is created.
    Any other field falls back to its default or null value.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def perform_create(self, serializer):
        # Save first, then replace the raw password with its hash
        # (set_password only mutates the in-memory instance, so a second
        # save is needed to persist the hash).
        instance = serializer.save()
        instance.set_password(instance.password)
        instance.save()
        # Snapshot "today" once so the month range stays consistent even if
        # the request straddles midnight (the old code called today() 3 times).
        today = datetime.date.today()
        days_in_month = calendar.monthrange(today.year, today.month)[1]
        date_range = [
            today.replace(day=1) + datetime.timedelta(i)
            for i in range(days_in_month)
        ]
        # Pre-create one empty TableLog per day/meal for the current month.
        for date in date_range:
            for time in ['Breakfast', 'Lunch', 'Dinner', 'Snack']:
                # Use the saved instance directly instead of re-fetching it
                # from the database (removes a redundant query per log row).
                TableLog.objects.get_or_create(user=instance, date=date, time=time)
class CheckUniqueIDView(APIView):
    """Checks whether a requested user ID is still available.

    Serializer validation fails when the ID already exists, so the
    response simply reports "unique_id": True/False (always HTTP 200).
    """

    def post(self, request):
        serializer = CheckUniqueIDSerializer(data=request.data)
        if not serializer.is_valid():
            return Response({
                "unique_id": False,
                "message": "이미 존재하는 아이디입니다."
            }, status=status.HTTP_200_OK)
        return Response({
            "unique_id": True,
            "message": "사용 가능한 아이디입니다."
        }, status=status.HTTP_200_OK)
# Resolve the project root (three levels up from this file) and load the
# NAVER SENS credentials from .secrets/base.json.
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
secrets = json.load(open(os.path.join(os.path.join(root_dir, '.secrets'), 'base.json')))
class PhoneNumberVerificationView(APIView):
    """Sends an SMS verification code through the NAVER SENS API.

    The submitted phone number is validated by the serializer (a 10-11
    digit string, no dashes).  On success a random code between 1000 and
    9999 is texted to the number and echoed back to the caller; if SENS
    reports a failure the response carries verification False.
    """
    # AnonRateThrottle: limit how often anonymous users may request a code.
    throttle_classes = (AnonRateThrottle,)

    def post(self, request):
        serializer = PhoneNumberVerificationSerializer(data=request.data)
        if not serializer.is_valid():
            # BUG FIX: the previous implementation fell through and returned
            # None for invalid input, which makes DRF raise an internal error.
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        service_id = secrets['SENS_SERVICE_ID']
        random_num = str(random.randrange(1000, 9999))
        send_url = f'https://api-sens.ncloud.com/v1/sms/services/{service_id}/messages'
        headers = {
            "Content-Type": "application/json; charset=utf-8",
            "X-NCP-auth-key": secrets['X-NCP-AUTH-KEY'],
            "X-NCP-service-secret": secrets['X-NCP-SERVICE-SECRET']
        }
        body = {
            "type": "SMS",
            "from": secrets['FROM_PHONE_NUMBER'],
            "to": [
                serializer.data['phone_number']
            ],
            "content": "인증번호는 " + random_num + "입니다."
        }
        res = requests.post(send_url, headers=headers, data=json.dumps(body))
        if not res.json()['status'] == '200':
            return Response({"verification": False, "verificationNumber": "", "message": "인증번호 발송에 실패했습니다."},
                            status=status.HTTP_400_BAD_REQUEST)
        return Response({"verification": True, "verificationNumber": random_num, "message": "인증번호가 발송되었습니다."},
                        status=status.HTTP_202_ACCEPTED)
class AuthTokenView(APIView):
    """Login endpoint.

    Receives username/password via POST; when AuthTokenSerializer
    authenticates the user, the token bound to that account is returned
    (created on first login).  Validation failures come back as HTTP 400.
    """

    def post(self, request):
        serializer = AuthTokenSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        account = serializer.validated_data['user']
        token, _ = Token.objects.get_or_create(user=account)
        return Response({"token": token.key}, status=status.HTTP_200_OK)
class SocialAuthTokenView(APIView):
    """Social login endpoint.

    Receives the user_id obtained via the iOS SNS API; an existing
    account gets its stored token back, otherwise a new token is created.
    """

    def post(self, request):
        serializer = SocialAuthTokenSerializer(data=request.data)
        if serializer.is_valid():
            # BUG FIX: get_or_create() returns a (Token, created) tuple; the
            # previous code indexed [0] first and then tried to unpack the
            # Token instance itself, raising TypeError on every login.
            token, created = Token.objects.get_or_create(user=serializer.user)
            return Response({"token": token.key}, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
    """Logout view: deletes the token assigned to the requesting user.

    Must be requested with the DELETE method and is only reachable by
    authenticated (token-holding) users.
    """
    permission_classes = (IsAuthenticated,)

    def delete(self, request):
        try:
            request.user.auth_token.delete()
        except (AttributeError, ObjectDoesNotExist):
            # Token already gone: treat the request as an idempotent no-op.
            return Response({"logout": True, "message": "이미 로그아웃 처리되었습니다."},
                            status=status.HTTP_204_NO_CONTENT)
        else:
            return Response({"logout": True}, status=status.HTTP_200_OK)
class UserProfileView(generics.RetrieveUpdateAPIView):
    """Retrieve/update the profile of the requesting user.

    The route (members/profile/) carries no pk, so when the lookup kwarg
    is absent the authenticated request.user is returned instead.
    """
    permission_classes = (IsAuthenticated,)
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get_object(self):
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        if lookup_url_kwarg not in self.kwargs:
            return self.request.user
        # BUG FIX: this branch previously fell through and implicitly
        # returned None, breaking any URL that does supply the lookup kwarg;
        # defer to DRF's default pk-based lookup instead.
        return super().get_object()

    # Re-hash the password after saving (set_password only mutates the
    # in-memory instance, so a second save is required to persist the hash).
    def perform_update(self, serializer):
        super(UserProfileView, self).perform_update(serializer)
        instance = serializer.save()
        instance.set_password(instance.password)
        instance.save()
| hanoul1124/healthcare2 | app/members/apis.py | apis.py | py | 7,757 | python | ko | code | 0 | github-code | 36 |
14265869744 | import os
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
import cv2
import shutil # To copy the file
import sys
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import Dataset
import torch.nn.functional as F
import torchvision
from tqdm import tqdm
import urllib.request
import py7zr
import backbone as bb
# Folder layout of the AITEX fabric-defect dataset on disk.
aitex_folder = './datasets/AITEX/'
aitex_train_dir = aitex_folder + 'trainset/'
aitex_test_dir = aitex_folder + 'testset/'
aitex_mask_dir = aitex_folder + 'Mask_images/'
aitex_config_file = aitex_folder + 'config'
Defect_path = aitex_folder + 'Defect_images/'
NODefect_path = aitex_folder + 'NODefect_images/'
# Extra patches kept past the detected fabric edge when cropping (see resizeAitex).
CUT_PATCHES = 1
# Fabric class identifiers (one folder per class).
AITEX_CLASS_NAMES = ['00', '01', '02', '03', '04', '05', '06']
PATCH_SIZE = 256 # patch size
STRIDE = PATCH_SIZE # stride of patch
ANOMALY_THRESHOLD = 0 # threshold to consider a patch as anomalous
class AitexDataSet(Dataset):
    """AITEX fabric dataset: yields (image, defective_flag, mask) triples.

    Images come from the train/test split folder of the requested fabric
    class; a ground-truth mask is loaded when one exists, otherwise an
    all-zero mask is returned and the sample is labelled non-defective.
    """

    def __init__(self, class_name='03', resize=256, cropsize=224, is_train=True):
        self.is_train = is_train
        self.class_name = class_name
        self.resize = resize
        self.cropsize = cropsize
        # ImageNet normalisation to match a pretrained backbone.
        self.transform = transforms.Compose([
            transforms.Resize(resize, Image.ANTIALIAS),
            transforms.CenterCrop(cropsize),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        # Masks are resized with nearest neighbour so labels stay binary.
        self.transform_mask = transforms.Compose([
            transforms.Resize(resize, Image.NEAREST),
            transforms.CenterCrop(cropsize),
            transforms.ToTensor(),
        ])
        split_root = aitex_train_dir if self.is_train else aitex_test_dir
        self.main_dir = split_root + self.class_name + '/'
        self.all_imgs = sorted(os.listdir(self.main_dir))
        self.mask_dir = aitex_mask_dir
        if not self.is_train:
            self.all_mask = sorted(os.listdir(self.mask_dir))

    def __len__(self):
        return len(self.all_imgs)

    def __getitem__(self, idx):
        image_path = os.path.join(self.main_dir, self.all_imgs[idx])
        tensor_image = self.transform(Image.open(image_path).convert('RGB'))
        mask_name = self.all_imgs[idx].replace('.png', '_mask.png')
        if os.path.isfile(self.mask_dir + '/' + mask_name):
            mask = Image.open(os.path.join(self.mask_dir, mask_name)).convert('L')
            tensor_mask = self.transform_mask(mask)
        else:
            tensor_mask = torch.zeros([1, self.cropsize, self.cropsize])
        # A sample counts as defective once its mask sum exceeds the threshold.
        defective = 1 if int(torch.sum(tensor_mask)) > ANOMALY_THRESHOLD else 0
        return tensor_image, defective, tensor_mask

    def getName(self, idx, mask=False):
        name = self.all_imgs[idx]
        return name.replace('.png', '_mask.png') if mask else name
def resizeAitex(dataset, original_width=4096, original_height=256):
    """Crop the empty left margin off an AITEX fabric strip tensor.

    Canny edge detection locates the column where fabric content begins;
    the image is cropped ``CUT_PATCHES`` patches past that column,
    rounded to a multiple of ``PATCH_SIZE``.

    Returns (cropped_tensor, new_width, new_height).
    """
    img_ = dataset.squeeze(0).numpy()
    img_ = cv2.normalize(img_, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    img_ = img_.astype(np.uint8)
    # Blur the image for better edge detection.
    img_blur = cv2.GaussianBlur(img_, (3, 3), sigmaX=0, sigmaY=0)
    # Canny Edge Detection.
    edges = cv2.Canny(image=img_blur, threshold1=100, threshold2=200)
    # Column-wise edge energy, vectorised (replaces the former O(W*H) Python loop).
    vector = edges.sum(axis=0, dtype=np.float64)
    derivative = np.gradient(vector)
    # Renamed from `max`, which shadowed the builtin.
    peak_col = np.argmax(derivative)
    cut = (int(peak_col / PATCH_SIZE) + CUT_PATCHES) * PATCH_SIZE
    crop_img = transforms.functional.crop(dataset, top=0, left=cut, height=original_height, width=(original_width - cut))
    new_widths = crop_img.shape[2]
    new_heights = crop_img.shape[1]
    return crop_img, new_widths, new_heights
# --------------- Functions for patches ---------------
def DivideInPatches(img, size, stride):
    """Split a (C, H, W) tensor into square patches of shape (N, C, size, size)."""
    tiles = img.unfold(1, size, stride).unfold(2, size, stride)
    channels = tiles.size(0)
    # Flatten the patch grid, then move the patch index in front of the channels.
    return tiles.contiguous().view(channels, -1, size, size).permute(1, 0, 2, 3)
def countAitexAnomalies():
    """Scan every mask image and report (defect_count, per-mask flag list)."""
    to_tensor = transforms.ToTensor()
    defective = []
    for mask_name in sorted(os.listdir(aitex_mask_dir)):
        mask = Image.open(os.path.join(aitex_mask_dir, mask_name)).convert('L')
        tensor_mask = to_tensor(mask)
        # A mask counts as defective once its pixel sum exceeds the threshold.
        defective.append(int(torch.sum(tensor_mask)) > ANOMALY_THRESHOLD)
    number_of_defects = sum(1 for flag in defective if flag)
    return number_of_defects, defective
# --------------- Functions to create Aitex Dataset ---------------
def Reformat_Image(ImageFilePath, new_width, new_height, color, offset):
    """Pad the image at ImageFilePath onto a new_width x new_height canvas.

    color: 'white' or 'black' background.  offset: 'center', 'right'
    (paste at the origin) or 'left' (paste flush with the far corner).
    The padded image overwrites the original file.
    """
    image = Image.open(ImageFilePath, 'r')
    width, height = image.size
    if color == 'white':
        color = (255, 255, 255, 255)
    elif color == 'black':
        color = (0, 0, 0, 255)
    if offset == 'center':
        offset = (int(round(((new_width - width) / 2), 0)), int(round(((new_height - height) / 2), 0)))
    elif offset == 'right':
        offset = (0, 0)
    elif offset == 'left':
        offset = ((new_width - width), (new_height - height))
    canvas = Image.new('RGBA', (new_width, new_height), color)
    canvas.paste(image, offset)
    canvas.save(ImageFilePath)
def DeleteFolder(path):
    """Recursively remove *path* and everything below it."""
    shutil.rmtree(path)
def MergeMasks(name):
    """Merge <name>_mask1.png and <name>_mask2.png into <name>_mask.png.

    The two single-defect masks are combined with a saturating add: the
    previous plain uint8 `np.add` wrapped around (e.g. 128 + 128 -> 0),
    which could erase defect pixels before BinarizeMasks thresholds at
    128. The partial mask files are deleted afterwards.
    """
    # Widen to uint16 so the sum cannot overflow, then clamp back to uint8.
    mask1 = np.array(Image.open(name+'_mask1.png').convert('L'), dtype=np.uint16)
    mask2 = np.array(Image.open(name+'_mask2.png').convert('L'), dtype=np.uint16)
    merged = np.clip(mask1 + mask2, 0, 255).astype(np.uint8)
    Image.fromarray(merged).save(name+'_mask.png',"png")
    os.remove(name+'_mask1.png')
    os.remove(name+'_mask2.png')
def BinarizeMasks(Mask_path):
    """Threshold every mask under *Mask_path* to strict black/white.

    Pixels brighter than 128 become 255, everything else 0; each file
    is rewritten in place.
    """
    threshold_value, max_value = 128, 255
    for fname in sorted(os.listdir(Mask_path)):
        gray = np.array(Image.open(Mask_path + fname).convert('L'))
        binary = (gray > threshold_value) * max_value
        Image.fromarray(np.uint8(binary)).save(Mask_path + fname)
def RenameFolder(oldname, newname):
    """Rename (move) *oldname* to *newname*."""
    os.rename(oldname, newname)
def FlipImage(filename):
    """Mirror the image at *filename* horizontally, overwriting it."""
    flipped = np.fliplr(Image.open(filename))
    Image.fromarray(np.uint8(flipped)).save(filename)
def CreateAitexDataset(resize, log_file):
    """Build the patched AITEX train/test/mask folder layout on disk.

    Assumes the raw archives have already been unpacked by download().
    Fixes known dataset quirks, splits every image into PATCH_SIZE
    patches, and writes them under aitex_train_dir / aitex_test_dir /
    aitex_mask_dir. On any failure the whole aitex_folder is removed
    and the process exits.

    Args:
        resize: when True, images are cropped with resizeAitex before patching.
        log_file: log destination passed to bb.myPrint.
    """
    try:
        bb.myPrint("Preparing the AITEX dataset...", log_file)
        # Maps the original fabric folder names to the short ids used on disk.
        NODefect_subdirectories = {
            '2311694-2040n7u': '00',
            '2608691-202020u': '01',
            '2306894-210033u': '02',
            '2311694-1930c7u': '03',
            '2311517-195063u': '04',
            '2306881-210020u': '05',
            '2311980-185026u': '06'
        }
        os.makedirs(aitex_train_dir, exist_ok=True)
        os.makedirs(aitex_test_dir, exist_ok=True)
        # Rename each fabric folder to its short id and create the matching
        # train/test output folders.
        for i in range(len(NODefect_subdirectories)):
            RenameFolder(NODefect_path+list(NODefect_subdirectories.keys())[i], NODefect_path+list(NODefect_subdirectories.values())[i])
            os.makedirs(aitex_train_dir+list(NODefect_subdirectories.values())[i], exist_ok=True)
            os.makedirs(aitex_test_dir+list(NODefect_subdirectories.values())[i], exist_ok=True)
        # Known dataset quirks: two images ship with two partial masks each.
        MergeMasks(aitex_mask_dir+'0044_019_04') # Merge and delete 0044_019_04.png masks
        MergeMasks(aitex_mask_dir+'0097_030_03') # Merge and delete 0097_030_03.png masks
        BinarizeMasks(aitex_mask_dir)
        # 0094_027_05 is narrower than 4096px: pad it (and its mask), then
        # mirror both so the padding ends up on the other side.
        Reformat_Image(Defect_path + '0094_027_05.png', 4096, 256, 'white', 'right')
        Reformat_Image(aitex_mask_dir + '0094_027_05_mask.png', 4096, 256, 'black', 'right')
        # 0100_025_08 is dropped from the dataset entirely.
        os.remove(Defect_path + '0100_025_08.png')
        FlipImage(Defect_path + '0094_027_05.png')
        FlipImage(aitex_mask_dir + '0094_027_05_mask.png')
        defect_images = os.listdir(Defect_path)
        nodefect_images = []
        for i in range(len(NODefect_subdirectories)):
            for j in os.listdir(NODefect_path + list(NODefect_subdirectories.values())[i]):
                nodefect_images.append(list(NODefect_subdirectories.keys())[i] + '/' + j)
        # Sort defect images into per-fabric subfolders by filename suffix.
        for i in range(len(NODefect_subdirectories)):
            new_folder = Defect_path+list(NODefect_subdirectories.values())[i] + '/'
            os.makedirs(new_folder, exist_ok=True)
            for img in defect_images:
                if list(NODefect_subdirectories.values())[i]+'.png' in img:
                    shutil.move(Defect_path + img, new_folder + img)
        # Keep the full-size masks aside while patch masks are generated.
        Mask_path_temp = aitex_folder + '/Mask_images_temp/'
        RenameFolder(aitex_mask_dir, Mask_path_temp)
        os.makedirs(aitex_mask_dir, exist_ok=True)
        # Move the last defect-free image of each fabric into the test set.
        for i in range(len(NODefect_subdirectories)):
            last_image = os.listdir(NODefect_path+list(NODefect_subdirectories.values())[i] + '/')[-1]
            new_folder = Defect_path+list(NODefect_subdirectories.values())[i] + '/' + last_image
            old_folder = NODefect_path+list(NODefect_subdirectories.values())[i] + '/' + last_image
            shutil.move(old_folder, new_folder)
        transform = transforms.Compose([
            transforms.ToTensor()
        ])
        for i in range(len(NODefect_subdirectories)):
            # Patch every defect-free (train) image.
            train_folder_temp = NODefect_path+list(NODefect_subdirectories.values())[i] + '/'
            all_train_imgs = sorted(os.listdir(train_folder_temp))
            for img in all_train_imgs:
                img_loc = os.path.join(train_folder_temp, img)
                image = Image.open(img_loc).convert('L')
                tensor_image = transform(image)
                if resize:
                    tensor_image, _, _ = resizeAitex(tensor_image)
                train_patches = DivideInPatches(tensor_image, PATCH_SIZE, STRIDE)
                for idx, patch in enumerate(train_patches):
                    name = img.replace('.png', '_'+str(idx)+'.png')
                    name = os.path.join(aitex_train_dir+list(NODefect_subdirectories.values())[i] + '/', name)
                    torchvision.utils.save_image(patch, name)
            # Patch every defect (test) image and its mask.
            test_folder_temp = Defect_path+list(NODefect_subdirectories.values())[i] + '/'
            all_test_imgs = sorted(os.listdir(test_folder_temp))
            for img in all_test_imgs:
                img_loc = os.path.join(test_folder_temp, img)
                image = Image.open(img_loc).convert('L')
                tensor_image = transform(image)
                if resize:
                    tensor_image, new_widths, _ = resizeAitex(tensor_image)
                test_patches = DivideInPatches(tensor_image, PATCH_SIZE, STRIDE)
                for idx, patch in enumerate(test_patches):
                    name = img.replace('.png', '_'+str(idx)+'.png')
                    name = os.path.join(aitex_test_dir+list(NODefect_subdirectories.values())[i] + '/', name)
                    torchvision.utils.save_image(patch, name)
                mask_name = img.replace('.png', '_mask.png')
                if os.path.isfile(Mask_path_temp + mask_name):
                    mask_loc = os.path.join(Mask_path_temp, mask_name)
                    mask = Image.open(mask_loc).convert('L')
                    tensor_mask = transform(mask)
                else:
                    # Images without a stored mask get an all-zero mask.
                    tensor_mask = torch.zeros([1, 256, 4096])
                if resize:
                    # Crop the mask to match the resized image width.
                    # NOTE(review): reuses new_widths from the image branch
                    # above; assumes the resize branch always runs first.
                    tensor_mask = transforms.functional.crop(tensor_mask, top=0, left=(4096-new_widths), height=256, width=new_widths)
                test_masks = DivideInPatches(tensor_mask, PATCH_SIZE, STRIDE)
                for idx, patch in enumerate(test_masks):
                    name = mask_name.replace('_mask.png', '_'+str(idx)+'_mask.png')
                    name = os.path.join(aitex_mask_dir, name)
                    torchvision.utils.save_image(patch, name)
        # Clean up intermediate folders and record the resize flag so
        # prepareAitex can detect a settings mismatch later.
        DeleteFolder(Defect_path)
        DeleteFolder(NODefect_path)
        DeleteFolder(Mask_path_temp)
        f = open(aitex_config_file, "a")
        f.write(str(resize))
        f.close()
    except Exception as e:
        bb.myPrint(e, log_file)
        bb.myPrint("Error in CreateAitexDataset function!", log_file)
        DeleteFolder(aitex_folder)
        sys.exit(-1)
def prepareAitex(resize, log_file):
    """Ensure the AITEX dataset exists on disk with the requested settings.

    If the dataset is already prepared and the resize flag stored in its
    config file matches *resize*, nothing is done. Otherwise (missing
    folders or a settings mismatch) any stale folder is removed and the
    dataset is downloaded and rebuilt.

    Args:
        resize: whether images should be resized when (re)building.
        log_file: log destination forwarded to download/CreateAitexDataset.
    """
    if os.path.isdir(aitex_folder):
        if (os.path.isdir(aitex_train_dir) and os.path.isdir(aitex_test_dir)
                and os.path.isdir(aitex_mask_dir)):
            # The config file's first line stores the resize flag used at
            # creation time (written as str(resize) by CreateAitexDataset).
            with open(aitex_config_file, "r") as f:
                stored_resize = f.readline() == "True"
            if resize == stored_resize:
                return  # already prepared with the right settings
        # Incomplete dataset or settings mismatch: rebuild from scratch.
        DeleteFolder(aitex_folder)
    download(log_file)
    CreateAitexDataset(resize, log_file)
# --------------- Functions to download Aitex Dataset ---------------
URL = 'https://www.aitex.es/wp-content/uploads/2019/07/'
ARCHIVES = [
'Defect_images.7z',
'NODefect_images.7z',
'Mask_images.7z'
]
def download(log_file):
    """Fetch and unpack the three AITEX archives into aitex_folder.

    Each .7z archive is downloaded only if not already present, then
    extracted and removed. On any failure the error is logged and the
    process exits with status -1.
    """
    bb.myPrint("Download AITEX dataset...", log_file)
    os.makedirs(aitex_folder, exist_ok=True)
    try:
        for archive in ARCHIVES:
            archive_path = aitex_folder + archive
            if not os.path.isfile(archive_path):
                download_url(URL + archive, archive_path)
            with py7zr.SevenZipFile(archive_path, mode='r') as z:
                z.extractall(path=aitex_folder)
            os.remove(archive_path)
        return
    except Exception as e:
        bb.myPrint(str(e), log_file)
        bb.myPrint("Can't download AITEX dataset. Retry later.", log_file)
        sys.exit(-1)
class DownloadProgressBar(tqdm):
    """tqdm subclass exposing an urlretrieve-compatible reporthook."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """Advance the bar to block *b* of size *bsize*; *tsize* is the total size."""
        if tsize is not None:
            self.total = tsize
        downloaded = b * bsize
        # tqdm.update takes a delta, so subtract what was already counted.
        self.update(downloaded - self.n)
def download_url(url, output_path):
    """Download *url* to *output_path* while showing a tqdm progress bar."""
    description = url.split('/')[-1]
    with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=description) as bar:
        urllib.request.urlretrieve(url, filename=output_path, reporthook=bar.update_to)
73579291943 | from lscore.midi.midifile import MIDIFile
from lscore.midi.midiinstruments import *
from lscore.lsystem.lsystem import *
from musicalinterpretation import MusicalInterpretation
import scales
""" Schenkerian Rendering - see
Growing Music: musical interpretations of L-Systems by
Peter Worth and Susan Stepney
"""
class SchenkerianRendering(MusicalInterpretation):
    """Render an L-System string as MIDI using a Schenkerian interpretation.

    Based on "Growing Music: musical interpretations of L-Systems" by
    Peter Worth and Susan Stepney.
    """

    def __init__(self, scale,
                 tempo=120,
                 instrument_name=DISTORTION_GUITAR,
                 multiplier=4,
                 output_name='output.mid'):
        MusicalInterpretation.__init__(self, scale, tempo, output_name, instrument_name, multiplier)

    def create_score(self, string):
        """Translate *string* into notes and write the MIDI file.

        Symbols: 'F' lengthens the current note, '+'/'-' move one scale
        step up/down, '[' pushes the current note/length onto a stack,
        ']' emits any pending note and pops the saved state. The final
        pending note is flushed when the string is exhausted.
        """
        currentNote = 0
        # Consume the string front-to-back like a queue.
        strStack = list(string)
        noteStack = list()
        timeStack = list()
        midi = MIDIFile(self.output_name)
        midi.text(string)
        mpqn = 60000000 / self.tempo  # microseconds per quarter note
        midi.setTempo(mpqn)
        midi.timeSignature(4, 2, 24, 8)
        midi.patchChange(0, self.instrument_name)
        noteLength = 0
        while True:
            try:
                tok = strStack.pop(0)
                # BUG FIX: tokens were previously compared with `is`, which
                # only works by accident for interned one-char strings (and
                # `noteLength is not 0` relied on small-int caching). Use
                # value equality instead.
                if tok == 'F':
                    noteLength += 1
                elif tok == '+':
                    currentNote += 1
                elif tok == '-':
                    currentNote -= 1
                elif tok == '[':
                    noteStack.append(currentNote)
                    timeStack.append(noteLength)
                    noteLength = 0
                elif tok == ']':
                    if noteLength != 0:
                        note = self.scale.getMIDINote(currentNote)
                        midi.noteOn(0, note, 127)
                        ticks = (midi.timeDivision / self.multiplier) * noteLength
                        midi.noteOff(0, note, 127, ticks)
                    currentNote = noteStack.pop()
                    noteLength = timeStack.pop()
            except IndexError:
                # String exhausted: flush the final pending note.
                note = self.scale.getMIDINote(currentNote)
                midi.noteOn(0, note, 127)
                ticks = (midi.timeDivision / self.multiplier) * noteLength
                midi.noteOff(0, note, 127, ticks)
                break
        midi.eof()
        midi.write()
if __name__ == "__main__":
major = scales.Scale(scales.MAJOR)
s = SchenkerianRendering(major)
l = LSystem("X")
l.addProduction("X","F[+X][-X]FX")
l.addProduction("F","FF")
s.create_score(l.generateString(3))
| bflourenco/lscore | src/lscore/interpretation/schenkerianrendering.py | schenkerianrendering.py | py | 2,082 | python | en | code | 0 | github-code | 36 |
21620206001 | from __future__ import absolute_import
from concurrent.futures import ThreadPoolExecutor
import grpc
from apache_beam.portability.api import beam_runner_api_pb2_grpc
from apache_beam.portability.api.beam_runner_api_pb2_grpc import TestStreamServiceServicer
class TestStreamServiceController(TestStreamServiceServicer):
  """A TestStreamService that replays a fixed, in-memory list of events."""

  def __init__(self, events, endpoint=None):
    self._server = grpc.server(ThreadPoolExecutor(max_workers=10))
    if endpoint:
      # Bind to the caller-provided address.
      self.endpoint = endpoint
      self._server.add_insecure_port(self.endpoint)
    else:
      # Bind to an OS-chosen free port and record the resulting address.
      chosen_port = self._server.add_insecure_port('[::]:0')
      self.endpoint = '[::]:{}'.format(chosen_port)
    beam_runner_api_pb2_grpc.add_TestStreamServiceServicer_to_server(
        self, self._server)
    self._events = events

  def start(self):
    """Starts the gRPC server."""
    self._server.start()

  def stop(self):
    """Stops the server immediately and waits for shutdown to complete."""
    self._server.stop(0)
    self._server.wait_for_termination()

  def Events(self, request, context):
    """Streams back all of the events from the streaming cache."""
    for event in self._events:
      yield event
| a0x8o/kafka | sdks/python/apache_beam/testing/test_stream_service.py | test_stream_service.py | py | 1,048 | python | en | code | 59 | github-code | 36 |
21619670691 | import unittest
from mock import Mock
from apache_beam.metrics.cells import DistributionData
from apache_beam.runners.google_cloud_dataflow.dataflow_runner import DataflowRunner
from apache_beam.runners.google_cloud_dataflow.internal import apiclient
from apache_beam.runners.google_cloud_dataflow.internal.clients import dataflow
from apache_beam.utils.pipeline_options import PipelineOptions
class UtilTest(unittest.TestCase):
  """Tests for the Dataflow apiclient helper utilities."""

  @unittest.skip("Enable once BEAM-1080 is fixed.")
  def test_create_application_client(self):
    # Smoke test: constructing the client should not raise.
    pipeline_options = PipelineOptions()
    apiclient.DataflowApplicationClient(
        pipeline_options,
        DataflowRunner.BATCH_ENVIRONMENT_MAJOR_VERSION)

  def test_default_job_name(self):
    # Default names look like beamapp-<...>-<10-digit date>-<6-digit time>.
    job_name = apiclient.Job.default_job_name(None)
    regexp = 'beamapp-.*-[0-9]{10}-[0-9]{6}'
    self.assertRegexpMatches(job_name, regexp)

  def test_split_int(self):
    # Small values must land entirely in the low 32 bits...
    number = 12345
    split_number = apiclient.to_split_int(number)
    self.assertEqual((split_number.lowBits, split_number.highBits),
                     (number, 0))
    # ...while values shifted left by 32 land entirely in the high bits.
    shift_number = number << 32
    split_number = apiclient.to_split_int(shift_number)
    self.assertEqual((split_number.lowBits, split_number.highBits),
                     (0, number))

  def test_translate_distribution(self):
    # Each DistributionData field must map onto the corresponding
    # CounterUpdate.distribution field.
    metric_update = dataflow.CounterUpdate()
    distribution_update = DistributionData(16, 2, 1, 15)
    apiclient.translate_distribution(distribution_update, metric_update)
    self.assertEqual(metric_update.distribution.min.lowBits,
                     distribution_update.min)
    self.assertEqual(metric_update.distribution.max.lowBits,
                     distribution_update.max)
    self.assertEqual(metric_update.distribution.sum.lowBits,
                     distribution_update.sum)
    self.assertEqual(metric_update.distribution.count.lowBits,
                     distribution_update.count)

  def test_translate_means(self):
    metric_update = dataflow.CounterUpdate()
    accumulator = Mock()
    accumulator.sum = 16
    accumulator.count = 2
    # Integer mean accumulators populate integerMean...
    apiclient.MetricUpdateTranslators.translate_scalar_mean_int(accumulator,
                                                                metric_update)
    self.assertEqual(metric_update.integerMean.sum.lowBits, accumulator.sum)
    self.assertEqual(metric_update.integerMean.count.lowBits, accumulator.count)
    # ...and float mean accumulators populate floatingPointMean.
    accumulator.sum = 16.0
    accumulator.count = 2
    apiclient.MetricUpdateTranslators.translate_scalar_mean_float(accumulator,
                                                                  metric_update)
    self.assertEqual(metric_update.floatingPointMean.sum, accumulator.sum)
    self.assertEqual(
        metric_update.floatingPointMean.count.lowBits, accumulator.count)
if __name__ == '__main__':
unittest.main()
| a0x8o/kafka | sdks/python/apache_beam/runners/google_cloud_dataflow/internal/apiclient_test.py | apiclient_test.py | py | 2,815 | python | en | code | 59 | github-code | 36 |
24684070432 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 12:48:55 2019
@author: sudesh.amarnath
"""
import boto3
import os
import glob
import findspark
findspark.init('/home/ubuntu/spark-2.1.1-bin-hadoop2.7')
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('test').getOrCreate()
from pyspark.sql.functions import concat_ws,concat,lit,Column,regexp_replace
def upload_file(file_name, bucket, object_name=None):
    """Upload a local file to S3.

    Args:
        file_name: local path of the file to upload.
        bucket: destination bucket name.
        object_name: S3 key to write; defaults to *file_name*.
    """
    key = file_name if object_name is None else object_name
    boto3.client('s3').upload_file(file_name, bucket, key)
# Scan the bucket for *.json objects, flatten each with Spark into a single
# CSV, and upload the result back to S3 under processed/.
s3 = boto3.resource('s3')
my_bucket = s3.Bucket('sudeshrandom')
for s3_object in my_bucket.objects.all():
    if ".json" not in s3_object.key:
        continue
    fname = s3_object.key.replace('new/', '')
    wfile_path = '/home/ubuntu/processed/' + fname.replace('.json', '')
    # Copy the object down to a local JSON file for Spark to read.
    with open(fname, 'w') as local_json:
        body = my_bucket.Object(s3_object.key).get()['Body']
        local_json.write(body.read().decode('utf-8'))
    df = spark.read.option("multiLine", "true").json(fname)
    # Flatten the nested JSON layout into tabular columns.
    flat = df.select(
        concat_ws("", df['info.seed']).alias("id"),
        concat(concat_ws("", df['results.name.first']), lit(' '),
               concat_ws("", df['results.name.last'])).alias('Full_Name'),
        concat_ws("", df['results.gender']).alias("Gender"),
        concat_ws("", df['results.dob.date']).astype('date').alias("DoB"),
        concat_ws("", df['results.email']).alias("Email"),
        concat_ws("", df['results.phone']).alias("home_phone"),
        concat_ws("", df['results.cell']).alias("cell_phone"),
        concat_ws("", df['results.location.street']).alias("Street"),
        concat_ws("", df['results.location.city']).alias("City"),
        concat_ws("", df['results.location.state']).alias("State"),
        concat_ws("", df['results.nat']).alias("Country"),
        concat_ws("", df['results.location.postcode']).astype('int').alias("Postcode"),
        concat_ws("", df['results.location.coordinates.latitude']).alias("Latitude"),
        concat_ws("", df['results.location.coordinates.longitude']).alias("Longitude"),
    )
    # coalesce(1) -> a single output part file, written with a header row.
    flat.coalesce(1).write \
        .option("mapreduce.fileoutputcommitter.marksuccessfuljobs", "false") \
        .option("header", "true") \
        .csv(wfile_path)
    os.remove(fname)
    # Push the generated CSV part file(s) back to S3 under processed/.
    for csv_part in glob.glob(wfile_path + '/*.csv'):
        upload_file(csv_part, 'sudeshrandom', 'processed/' + fname.replace('.json', '.csv'))
| sudeshg46/Phoenix | json_csv_extractor.py | json_csv_extractor.py | py | 2,733 | python | en | code | 0 | github-code | 36 |
37353981095 | """Re-export of some bazel rules with repository-wide defaults."""
load("@npm//@angular/bazel:index.bzl", _ng_module = "ng_module", _ng_package = "ng_package")
load("@build_bazel_rules_nodejs//:index.bzl", _pkg_npm = "pkg_npm")
load("@npm//@bazel/jasmine:index.bzl", _jasmine_node_test = "jasmine_node_test")
load("@npm//@bazel/esbuild:index.bzl", "esbuild")
load(
"@npm//@bazel/concatjs:index.bzl",
_ts_library = "ts_library",
)
DEFAULT_TSCONFIG_BUILD = "//modules:bazel-tsconfig-build.json"
DEFAULT_TSCONFIG_TEST = "//modules:bazel-tsconfig-test"
def _getDefaultTsConfig(testonly):
    """Returns the test tsconfig when testonly is set, else the build one."""
    return DEFAULT_TSCONFIG_TEST if testonly else DEFAULT_TSCONFIG_BUILD
def ts_library(
        tsconfig = None,
        testonly = False,
        deps = [],
        devmode_module = None,
        **kwargs):
    """ts_library wrapper that injects repo-wide defaults.

    Always adds tslib and the node types; test targets additionally get
    the jasmine types. Targets compile to ES2022 in both modes.
    """
    all_deps = deps + ["@npm//tslib", "@npm//@types/node"]
    if testonly:
        all_deps = all_deps + ["@npm//@types/jasmine"]
    _ts_library(
        tsconfig = tsconfig or _getDefaultTsConfig(testonly),
        testonly = testonly,
        devmode_module = devmode_module or "commonjs",
        devmode_target = "es2022",
        prodmode_target = "es2022",
        deps = all_deps,
        **kwargs
    )
EXPRESS_VERSION = "^4.15.2"
EXPRESS_TYPES_VERSION = "^4.17.0"
NGUNIVERSAL_SCOPED_PACKAGES = ["@nguniversal/%s" % p for p in [
"builders",
"common",
"express-engine",
]]
PKG_GROUP_REPLACEMENTS = {
"\"NG_UPDATE_PACKAGE_GROUP\"": """[
%s
]""" % ",\n ".join(["\"%s\"" % s for s in NGUNIVERSAL_SCOPED_PACKAGES]),
"EXPRESS_VERSION": EXPRESS_VERSION,
"EXPRESS_TYPES_VERSION": EXPRESS_TYPES_VERSION,
}
def ng_module(name, package_name, module_name = None, tsconfig = None, testonly = False, deps = [], **kwargs):
    """ng_module wrapper that injects repo-wide defaults.

    module_name falls back to package_name when not provided.
    """
    deps = deps + ["@npm//tslib", "@npm//@types/node"]
    if not tsconfig:
        tsconfig = _getDefaultTsConfig(testonly)
    if not module_name:
        module_name = package_name
    _ng_module(
        name = name,
        # BUG FIX: the computed module_name was previously ignored and
        # package_name was always passed here instead.
        module_name = module_name,
        package_name = package_name,
        flat_module_out_file = name,
        tsconfig = tsconfig,
        testonly = testonly,
        deps = deps,
        **kwargs
    )
def jasmine_node_test(deps = [], **kwargs):
    """jasmine_node_test wrapper that always adds source-map-support."""
    _jasmine_node_test(
        deps = ["@npm//source-map-support"] + deps,
        configuration_env_vars = ["compile"],
        **kwargs
    )
def ng_test_library(name, entry_point = None, deps = [], tsconfig = None, **kwargs):
    """Compiles an Angular spec with ts_library and bundles it via esbuild."""
    local_deps = [
        # We declare "@angular/core" as default dependencies because
        # all Angular component unit tests use the `TestBed` and `Component` exports.
        "@npm//@angular/core",
    ] + deps
    if not tsconfig:
        tsconfig = _getDefaultTsConfig(1)
    ts_library_name = name + "_ts_library"
    ts_library(
        name = ts_library_name,
        testonly = 1,
        tsconfig = tsconfig,
        deps = local_deps,
        **kwargs
    )
    # Bundle the compiled spec into a single IIFE for the test runner.
    esbuild(
        name,
        testonly = 1,
        args = {
            "keepNames": True,
            # ensure that esbuild prefers .mjs to .js if both are available
            # since ts_library produces both
            "resolveExtensions": [
                ".mjs",
                ".js",
            ],
        },
        output = name + "_spec.js",
        entry_point = entry_point,
        format = "iife",
        # We cannot use `ES2017` or higher as that would result in `async/await` not being downleveled.
        # ZoneJS needs to be able to intercept these as otherwise change detection would not work properly.
        target = "es2016",
        platform = "node",
        deps = [":" + ts_library_name],
    )
def ng_package(deps = [], **kwargs):
    """ng_package preconfigured with externals and version substitutions.

    The version placeholder becomes 0.0.0 by default, or the stamped
    project version when built with //:stamp.
    """
    base = dict(kwargs.pop("substitutions", {}), **PKG_GROUP_REPLACEMENTS)
    release = dict(base, **{"0.0.0-PLACEHOLDER": "0.0.0"})
    stamped = dict(base, **{"0.0.0-PLACEHOLDER": "{STABLE_PROJECT_VERSION}"})
    _ng_package(
        deps = deps,
        externals = [
            "domino",
            "xhr2",
            "jsdom",
            "critters",
            "express-engine",
            "express",
        ],
        substitutions = select({
            "//:stamp": stamped,
            "//conditions:default": release,
        }),
        **kwargs
    )
def pkg_npm(name, **kwargs):
    """pkg_npm preconfigured with the repo's version/package-group substitutions.

    Mirrors ng_package: 0.0.0 by default, the stamped project version
    when built with //:stamp.
    """
    base = dict(kwargs.pop("substitutions", {}), **PKG_GROUP_REPLACEMENTS)
    release = dict(base, **{"0.0.0-PLACEHOLDER": "0.0.0"})
    stamped = dict(base, **{"0.0.0-PLACEHOLDER": "{STABLE_PROJECT_VERSION}"})
    _pkg_npm(
        name = name,
        substitutions = select({
            "//:stamp": stamped,
            "//conditions:default": release,
        }),
        **kwargs
    )
| angular/universal | tools/defaults.bzl | defaults.bzl | bzl | 5,099 | python | en | code | 4,017 | github-code | 36 |
5217081023 | #!/usr/bin/env python3
import pandas as pd
def best_record_company(path='src/UK-top40-1964-1-2.tsv'):
    """Return the rows belonging to the publisher(s) with the most weeks on chart.

    Args:
        path: TSV chart file with at least 'Publisher' and 'WoC' columns
              (defaults to the bundled 1964 UK top-40 data).

    Returns:
        DataFrame with every row of the best publisher(s); ties keep all.
    """
    df = pd.read_csv(path, sep='\t')
    pubs = df.groupby('Publisher')
    best = pubs['WoC'].sum().max()
    # Keep the groups whose total weeks-on-chart equals the best total.
    # (The previous `.max()` on the per-group scalar sum was redundant.)
    return pubs.filter(lambda g: g['WoC'].sum() == best)
def main():
    """Entry point: print the rows of the best record company."""
    print(best_record_company())
if __name__ == "__main__":
main()
| lawrencetheabhorrence/Data-Analysis-2020 | hy-data-analysis-with-python-2020/part05-e05_best_record_company/src/best_record_company.py | best_record_company.py | py | 357 | python | en | code | 0 | github-code | 36 |
9959080371 | import numpy as np
import itertools
import math
from math import comb
import random
import numpy as np
import itertools
plural_dictionary = {
"fruit": "fruits",
"apple": "apples",
"orange": "oranges",
"banana": "bananas",
"strawberry": "strawberries",
"grape": "grapes",
"vegetable": "vegetables",
"carrot": "carrots",
"broccoli": "broccoli",
"tomato": "tomatoes",
"potato": "potatoes",
"cabbage": "cabbages",
"animal": "animals",
"dog": "dogs",
"cat": "cats",
"elephant": "elephants",
"giraffe": "giraffes",
"dolphin": "dolphins",
}
object_dictionary = {
"fruit": {
"items": [
"apple",
"orange",
"banana",
"strawberry",
"grape",
],
"range": [1, 5]
},
"vegetable": {
"items": [
"carrot",
"broccoli",
"tomato",
"potato",
"cabbage"
],
"range": [1, 3]
},
"animal":{
"items": [
"dog",
"cat",
"elephant",
"giraffe",
"dolphin"
],
"range": [1, 3]
}
}
def get_category(sampled_items):
    """Return the set of object_dictionary categories the items belong to.

    Each item contributes the first category whose "items" list contains it.
    """
    matched = set()
    for item in sampled_items:
        for category, details in object_dictionary.items():
            if item in details["items"]:
                matched.add(category)
                break  # first matching category wins for this item
    return matched
def factorial(n):
    """Return n! for a non-negative integer *n*.

    Delegates to math.factorial (math is imported at module level),
    which is iterative — the previous recursive version hit the
    recursion limit for large n and recursed forever on negative input;
    this raises ValueError for negatives instead.
    """
    return math.factorial(n)
| GVS-007/MLLM_Reasoning | common_utils.py | common_utils.py | py | 1,585 | python | en | code | 0 | github-code | 36 |
70955306664 | """ Tests for scramble generation background tasks. """
from unittest.mock import Mock, patch, call
import pytest
from huey.exceptions import TaskException
from cubersio.tasks import huey
from cubersio.tasks.scramble_generation import check_scramble_pool, ScramblePoolTopOffInfo, top_off_scramble_pool
from cubersio.util.events.resources import EVENT_3x3, EVENT_10x10, EVENT_COLL, EVENT_FTO, EVENT_REX
# Put Huey in immediate mode so the tasks execute synchronously
huey.immediate = True
def _setup_mock(**kwargs):
""" Utility function for setting up a mocked Event or EventDefinition. Need to use Mock::configure because Events
and EventDefinitions have a `name` attribute which we need to override, and `name` is usually a special reserved
attribute for Mock. """
mock_event = Mock()
mock_event.configure_mock(**kwargs)
return mock_event
@patch('cubersio.tasks.scramble_generation.get_all_events')
@patch('cubersio.tasks.scramble_generation.top_off_scramble_pool')
def test_check_scramble_pool(mock_top_off_scramble_pool, mock_get_all_events):
    """ Test that the scrambler pool checker task makes the appropriate calls to top_off_scramble_pool based on the
    number of remaining scrambles for each event. """
    # 3x3 and FTO need scrambles, they are below the 2x weekly scrambles threshold.
    # 10x10 has enough scrambles, and COLL doesn't have its scrambles pre-generated.
    mock_get_all_events.return_value = [
        _setup_mock(name=EVENT_3x3.name, id=1, scramble_pool=list(range(5)), totalSolves=5),
        _setup_mock(name=EVENT_10x10.name, id=2, scramble_pool=list(range(5)), totalSolves=1),
        _setup_mock(name=EVENT_COLL.name, id=3, scramble_pool=list(range(5)), totalSolves=5),
        _setup_mock(name=EVENT_FTO.name, id=4, scramble_pool=list(), totalSolves=5),
    ]
    check_scramble_pool()
    mock_get_all_events.assert_called_once()
    # Only 3x3 (5 scrambles short) and FTO (10 short) should be topped off.
    assert mock_top_off_scramble_pool.call_count == 2
    mock_top_off_scramble_pool.assert_has_calls([
        call(ScramblePoolTopOffInfo(1, EVENT_3x3.name, 5)),
        call(ScramblePoolTopOffInfo(4, EVENT_FTO.name, 10))
    ])
@pytest.mark.parametrize('top_off_info', [
    ScramblePoolTopOffInfo(event_id=10, event_name=EVENT_REX.name, num_scrambles=5),
    ScramblePoolTopOffInfo(event_id=42, event_name=EVENT_FTO.name, num_scrambles=15),
])
@patch('cubersio.tasks.scramble_generation.add_scramble_to_scramble_pool')
@patch('cubersio.tasks.scramble_generation.get_event_definition_for_name')
def test_top_off_scramble_pool_multi_scramble_puzzles(mock_get_event_definition_for_name,
                                                      mock_add_scramble_to_scramble_pool,
                                                      top_off_info: ScramblePoolTopOffInfo):
    """ Test that top_off_scramble_pool calls the event resource scrambler correctly for those events where scrambles
    are generated in bulk because it's faster. """
    # Bulk path: one get_multiple_scrambles call, then one pool insert per scramble.
    scrambles = list(range(top_off_info.num_scrambles))
    mock_event_def = _setup_mock(name=top_off_info.event_name)
    mock_event_def.get_multiple_scrambles.return_value = scrambles
    mock_get_event_definition_for_name.return_value = mock_event_def
    top_off_scramble_pool(top_off_info)
    mock_get_event_definition_for_name.assert_called_once_with(top_off_info.event_name)
    mock_event_def.get_multiple_scrambles.assert_called_once_with(top_off_info.num_scrambles)
    assert mock_add_scramble_to_scramble_pool.call_count == top_off_info.num_scrambles
    expected_calls = [call(scramble, top_off_info.event_id) for scramble in scrambles]
    mock_add_scramble_to_scramble_pool.assert_has_calls(expected_calls)
@patch('cubersio.tasks.scramble_generation.add_scramble_to_scramble_pool')
@patch('cubersio.tasks.scramble_generation.get_event_definition_for_name')
def test_top_off_scramble_pool_single_scramble_puzzles(mock_get_event_definition_for_name,
                                                       mock_add_scramble_to_scramble_pool):
    """ Test that top_off_scramble_pool calls the event resource scrambler correctly for those events where scrambles
    are generated one at a time. """
    # Single path: get_scramble is invoked once per needed scramble.
    top_off_info = ScramblePoolTopOffInfo(event_id=11, event_name=EVENT_3x3.name, num_scrambles=5)
    scrambles = list(range(top_off_info.num_scrambles))
    mock_event_def = _setup_mock(name=top_off_info.event_name)
    mock_event_def.get_scramble.side_effect = scrambles
    mock_get_event_definition_for_name.return_value = mock_event_def
    top_off_scramble_pool(top_off_info)
    mock_get_event_definition_for_name.assert_called_once_with(top_off_info.event_name)
    assert mock_event_def.get_scramble.call_count == top_off_info.num_scrambles
    assert mock_add_scramble_to_scramble_pool.call_count == top_off_info.num_scrambles
    expected_calls = [call(scramble, top_off_info.event_id) for scramble in scrambles]
    mock_add_scramble_to_scramble_pool.assert_has_calls(expected_calls)
@patch('cubersio.tasks.scramble_generation.get_event_definition_for_name')
def test_top_off_scramble_pool_raises_for_nonexistent_event(mock_get_event_definition_for_name):
    """ Test that top_off_scramble_pool raises RuntimeError for a bogus event. """
    mock_get_event_definition_for_name.return_value = None
    # Huey wraps task errors in TaskException; the original message lands in metadata.
    with pytest.raises(TaskException) as te:
        top_off_scramble_pool(ScramblePoolTopOffInfo(event_id=1, event_name="blah", num_scrambles=5)).get()
    assert f"Can't find an EventResource for event blah" in te.value.metadata['error']
| euphwes/cubers.io | tst/tasks/test_scramble_generation.py | test_scramble_generation.py | py | 5,529 | python | en | code | 27 | github-code | 36 |
74552228582 | """ Test File """
from flask import Flask
from redis import Redis
app = Flask(__name__)
redis_client = Redis(
host='redis_db',
port=6379
)
@app.route('/')
def hello():
    """
    Main app route: greets with the number of previous visits.

    Uses the atomic Redis INCR instead of the previous separate GET/SET
    pair, which was a read-modify-write race under concurrent requests.
    """
    # INCR returns the value *after* incrementing (creating the key at 0
    # if missing); subtract 1 so the response still shows the count
    # before this request, matching the previous behavior.
    count = redis_client.incr('count') - 1
    return f'Hello World {count}'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int('5000'), debug=True)
| ZacharyATanenbaum/docker_dev_build_system | examples/docker_compose_services/python_docker/index.py | index.py | py | 489 | python | en | code | 0 | github-code | 36 |
26947568079 | import tidypolars as tp
from tidypolars import col
import polars as pl
from tidypolars.utils import _repeat
def test_arrange1():
    """Can arrange ascending"""
    df = tp.Tibble(x = ['a', 'a', 'b'], y = [2, 1, 3])
    actual = df.arrange('y')
    expected = tp.Tibble(x = ['a', 'a', 'b'], y = [1, 2, 3])
    assert actual.frame_equal(expected), "arrange ascending failed"
    # arrange must return the tidypolars subclass, not a bare polars frame
    assert type(actual) == tp.Tibble, "arrange didn't return a Tibble"
def test_arrange2():
    """Can arrange descending"""
    df = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [2, 1, 3]})
    actual = df.arrange(tp.desc('x'), 'y')
    # descending on x, ascending on y within ties
    expected = tp.Tibble({'x': ['b', 'a', 'a'], 'y': [3, 1, 2]})
    assert actual.frame_equal(expected), "arrange descending failed"
def test_arrange_across():
    """Can arrange across"""
    df = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [1, 2, 3], 'z': [1, 2, 3]})
    actual = df.arrange(
        tp.across(['x']),
        # y and z are both sorted descending via tp.desc applied across
        tp.across(['y', 'z'], tp.desc)
    )
    expected = tp.Tibble(x = ['a', 'a', 'b'], y = [2, 1, 3], z = [2, 1, 3])
    assert actual.frame_equal(expected), "arrange across failed"
def test_bind_cols_single():
    """Can bind_cols"""
    df1 = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [1, 2, 3]})
    df2 = tp.Tibble({'z': [4, 4, 4]})
    actual = df1.bind_cols(df2)
    expected = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [1, 2, 3], 'z':[4, 4, 4]})
    assert actual.frame_equal(expected), "bind_cols failed"
    assert type(actual) == tp.Tibble, "bind_cols didn't return a Tibble"
def test_bind_cols_multiple():
    """Can bind_cols multiple"""
    df1 = tp.Tibble(x = range(3))
    df2 = tp.Tibble(y = range(3))
    df3 = tp.Tibble(z = range(3))
    # bind_cols accepts more than one frame at a time
    actual = df1.bind_cols(df2, df3)
    expected = tp.Tibble(x = range(3), y = range(3), z = range(3))
    assert actual.frame_equal(expected), "multiple bind_cols failed"
def test_bind_rows_single():
    """Can bind rows"""
    df1 = tp.Tibble({'x': ['a', 'a'], 'y': [2, 1]})
    df2 = tp.Tibble({'x': ['b'], 'y': [3]})
    actual = df1.bind_rows(df2)
    expected = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [2, 1, 3]})
    assert actual.frame_equal(expected), "bind_rows failed"
    assert type(actual) == tp.Tibble, "bind_rows didn't return a Tibble"
def test_bind_rows_auto_align():
    """Can bind rows"""
    df1 = tp.Tibble(x = ['a', 'a'], y = [2, 1])
    # columns given in a different order must be aligned by name
    df2 = tp.Tibble(y = [3], x = ['b'])
    actual = df1.bind_rows(df2)
    expected = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [2, 1, 3]})
    assert actual.frame_equal(expected), "bind_rows auto-align failed"
def test_bind_rows_multiple():
    """Can bind rows (multiple)"""
    df1 = tp.Tibble({'x': ['a', 'a'], 'y': [2, 1]})
    df2 = tp.Tibble({'x': ['b'], 'y': [3]})
    df3 = tp.Tibble({'x': ['b'], 'y': [3]})
    actual = df1.bind_rows(df2, df3)
    expected = tp.Tibble({'x': ['a', 'a', 'b', 'b'], 'y': [2, 1, 3, 3]})
    assert actual.frame_equal(expected), "bind_rows multiple failed"
def test_clone():
    # clone must preserve the Tibble subclass
    df = tp.Tibble(x = range(3), y = range(3))
    actual = df.clone()
    assert type(actual) == tp.Tibble, "clone didn't return a Tibble"
def test_count_no_args():
    """Can count rows (no args)"""
    df = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [1, 1, 1]})
    actual = df.count()
    # with no grouping columns, count returns a single total-row count 'n'
    expected = tp.Tibble({'n': [3]})
    assert actual.frame_equal(expected), "count with no args failed"
def test_count_one_arg():
    """Can count rows (one arg)"""
    df = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [1, 1, 1]})
    # sort = True orders the result by descending n
    actual = df.count('x', sort = True)
    expected = tp.Tibble({'x': ['a', 'b'], 'n': [2, 1]})
    assert actual.frame_equal(expected), "count with one arg failed"
def test_distinct_empty():
    """Can distinct columns"""
    df = tp.Tibble({'x': ['a', 'a', 'b'], 'y': ['a', 'a', 'b']})
    actual = df.distinct()
    expected = tp.Tibble({'x': ['a', 'b'], 'y': ['a', 'b']})
    assert actual.frame_equal(expected), "empty distinct failed"
    assert type(actual) == tp.Tibble, "distinct didn't return a Tibble"
def test_distinct_select():
    """Can distinct columns"""
    df = tp.Tibble({'x': ['a', 'a', 'b'], 'y': [2, 1, 3]})
    # selecting a column restricts the output to that column's unique values
    actual = df.distinct('x')
    expected = tp.Tibble({'x': ['a', 'b']})
    assert actual.frame_equal(expected), "distinct with select failed"
def test_drop():
    """Can drop columns"""
    df = tp.Tibble(x = range(3), y = range(3))
    actual = df.drop('x')
    expected = tp.Tibble(y = range(3))
    assert actual.frame_equal(expected), "drop failed"
    assert type(actual) == tp.Tibble, "drop didn't return a Tibble"
def test_drop_null_empty():
    """Can drop nulls from all cols"""
    df = tp.Tibble(x = [1, None, 3], y = [None, 2, 3], z = range(1, 4))
    actual = df.drop_null()
    # only the row with no nulls in any column survives
    expected = tp.Tibble(x = [3], y = [3], z = [3])
    assert actual.frame_equal(expected), "empty drop_null failed"
    assert type(actual) == tp.Tibble, "drop_null didn't return a Tibble"
def test_drop_null_select():
    """Can drop nulls with selection"""
    df = tp.Tibble(x = [1, None, 3], y = [None, 2, 3], z = range(1, 4))
    # only rows where the selected column is null are dropped;
    # nulls in other columns are kept (hence null_equal below)
    actual = df.drop_null('x')
    expected = tp.Tibble(x = [1, 3], y = [None, 3], z = [1, 3])
    assert actual.frame_equal(expected, null_equal = True), "drop_null with selection failed"
def test_fill():
    """Can fill"""
    df = tp.Tibble({'chr': ['a', None], 'int': [1, None]})
    # fill propagates the previous non-null value downward in each column
    actual = df.fill('chr', 'int')
    expected = tp.Tibble({'chr': ['a', 'a'], 'int': [1, 1]})
    assert actual.frame_equal(expected), "fill failed"
    assert type(actual) == tp.Tibble, "fill didn't return a Tibble"
# --- filter() / joins ---
def test_filter():
    """Can filter multiple conditions"""
    df = tp.Tibble({'x': range(10), 'y': range(10)})
    actual = df.filter(col('x') <= 3, col('y') < 2)
    expected = tp.Tibble({'x': range(2), 'y': range(2)})
    assert actual.frame_equal(expected), "filter failed"
    assert type(actual) == tp.Tibble, "filter didn't return a Tibble"
def test_filter_grouped():
    """Can filter with a grouped (per-group) condition via `by`"""
    df = tp.Tibble(x = range(3), y = ['a', 'a', 'b'])
    actual = df.filter(col('x') <= col('x').mean(), by = 'y').arrange('y')
    expected = tp.Tibble(x = [0, 2], y = ['a', 'b'])
    assert actual.frame_equal(expected), "grouped filter failed"
    assert type(actual) == tp.Tibble, "grouped filter didn't return a Tibble"
def test_full_join():
    """Can perform a full join"""
    df1 = tp.Tibble(x = ['a', 'a', 'b'], y = range(3))
    df2 = tp.Tibble(x = ['a'], z = range(1))
    actual = df1.full_join(df2)
    expected = tp.Tibble(x = ['a', 'a', 'b'], y = [0, 1, 2], z = [0, 0, None])
    assert actual.frame_equal(expected, null_equal = True), "full_join failed"
    assert type(actual) == tp.Tibble, "full_join didn't return a Tibble"
def test_inner_join():
    """Can perform a inner join"""
    df1 = tp.Tibble(x = ['a', 'a', 'b'], y = range(3))
    df2 = tp.Tibble(x = ['a'], z = range(1))
    actual = df1.inner_join(df2)
    expected = tp.Tibble(x = ['a', 'a'], y = [0, 1], z = [0, 0])
    assert actual.frame_equal(expected), "inner_join failed"
    assert type(actual) == tp.Tibble, "inner_join didn't return a Tibble"
def test_left_join():
    """Can perform a left join"""
    df1 = tp.Tibble(x = ['a', 'a', 'b'], y = range(3))
    df2 = tp.Tibble(x = ['a', 'b'], z = range(2))
    actual = df1.left_join(df2)
    expected = tp.Tibble(x = ['a', 'a', 'b'], y = range(3), z = [0, 0 ,1])
    assert actual.frame_equal(expected), "left_join failed"
    assert type(actual) == tp.Tibble, "left_join didn't return a Tibble"
# --- mutate() / basic properties ---
def test_mutate():
    """Can edit existing columns and can add columns"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    actual = df.mutate(double_x = col('x') * 2,
                       y = col('y') + 10,
                       y_plus_3 = col('y') + 3)
    # note: y_plus_3 uses the *updated* y (12 + 3 = 15), i.e. mutate is sequential
    expected = tp.Tibble(
        x = _repeat(1, 3),
        y = _repeat(12, 3),
        double_x = _repeat(2, 3),
        y_plus_3 = _repeat(15, 3)
    )
    assert actual.frame_equal(expected), "mutate failed"
    assert type(actual) == tp.Tibble, "mutate didn't return a Tibble"
def test_mutate_across():
    """Can mutate multiple columns simultaneously"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    actual = df.mutate(tp.across(tp.Int64, lambda x: x * 2),
                       x_plus_y = col('x') + col('y'))
    expected = tp.Tibble(
        {'x': _repeat(2, 3),
        'y': _repeat(4, 3),
        'x_plus_y': _repeat(6, 3)}
    )
    assert actual.frame_equal(expected), "mutate across failed"
def test_mutate_constant():
    """Can add a constant value without tp.lit"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    actual = df.mutate(z = "z")
    expected = tp.Tibble(
        x = _repeat(1, 3),
        y = _repeat(2, 3),
        z = _repeat('z', 3)
    )
    assert actual.frame_equal(expected), "mutate failed"
def test_names():
    """Can get column names"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    assert df.names == ['x', 'y'], "names failed"
def test_ncol():
    """Can get the number of columns"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    assert df.ncol == 2, "ncol failed"
def test_nrow():
    """Can get the number of rows"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    assert df.nrow == 3, "nrow failed"
# --- pivot_longer() / pivot_wider() ---
def test_pivot_longer1():
    """Can pivot all (unspecified) cols to long"""
    df = tp.Tibble({'x': [1, 2], 'y': [3, 4]})
    actual = df.pivot_longer()
    expected = tp.Tibble({'name': ['x', 'x', 'y', 'y'], 'value': range(1, 5)})
    assert actual.frame_equal(expected), "unspecified pivot_longer failed"
    assert type(actual) == tp.Tibble, "pivot_longer didn't return a Tibble"
def test_pivot_longer2():
    """Can pivot all (specified) cols to long"""
    df = tp.Tibble({'x': [1, 2], 'y': [3, 4]})
    actual = df.pivot_longer(['x', 'y'])
    expected = tp.Tibble({'name': ['x', 'x', 'y', 'y'], 'value': range(1, 5)})
    assert actual.frame_equal(expected), "specified pivot_longer failed"
def test_pivot_wider1():
    """Can pivot all cols to wide"""
    df = tp.Tibble({'label': ['x', 'y', 'z'], 'val': range(1, 4)})
    actual = df.pivot_wider(names_from = 'label', values_from = 'val').select('x', 'y', 'z')
    expected = tp.Tibble({'x': [1], 'y': [2], 'z': [3]})
    assert actual.frame_equal(expected), "pivot_wider all cols failed"
    assert type(actual) == tp.Tibble, "pivot_wider didn't return a Tibble"
def test_pivot_wider2():
    """Can pivot cols to wide with id col"""
    df = tp.Tibble({'id': _repeat(1, 3), 'label': ['x', 'y', 'z'], 'val': range(1, 4)})
    actual = df.pivot_wider(names_from = 'label', values_from = 'val').select('id', 'x', 'y', 'z')
    expected = tp.Tibble({'id': [1], 'x': [1], 'y': [2], 'z': [3]})
    assert actual.frame_equal(expected), "pivot_wider with id failed"
def test_pivot_wider3():
    """Can pivot cols to wide with values filled"""
    df = tp.Tibble({'id': _repeat(1, 3), 'label': ['x', 'y', 'z'], 'val': range(1, 4)})
    actual = (
        df.pivot_wider(names_from = 'label', values_from = 'id', values_fill = 0)
        .select('val', 'x', 'y', 'z').arrange('val')
    )
    expected = tp.Tibble({'val': [1, 2, 3], 'x': [1, 0, 0], 'y': [0, 1, 0], 'z': [0, 0, 1]})
    assert actual.frame_equal(expected), "pivot_wider with values filled failed"
def test_pivot_wider4():
    """Can pivot cols to wide with values filled - doesn't affect id col"""
    df = tp.Tibble(id = [None, 2], var = ["x", "y"], val = [1, 2])
    actual = (
        df.pivot_wider(names_from = "var", values_from = "val", values_fill = 0)
        .select('id', 'x', 'y')
        .arrange('y')
    )
    # the null in the id column must stay null, only value cells get filled
    expected = tp.Tibble({'id': [None, 2], 'x': [1, 0], 'y': [0, 2]})
    assert actual.frame_equal(expected), "pivot_wider with values filled failed"
# --- print / pull / relocate / rename / replace_null / set_names ---
def test_print():
    """Printing doesn't alter class of df"""
    df = tp.Tibble(x = range(3), y = range(3))
    repr(df)
    print(df)
    assert isinstance(df, tp.Tibble), "Printing failed"
def test_pull():
    """Can use pull to extract a single column as a polars Series"""
    df = tp.Tibble({'x': _repeat(1, 3), 'y': _repeat(2, 3)})
    actual = df.pull('x')
    expected = df.to_polars().get_column('x')
    assert actual.series_equal(expected), "pull failed"
def test_relocate_before():
    """Can relocate before columns"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.relocate('y', 'z', before = 'x')
    expected = df.select('y', 'z', 'x')
    assert actual.frame_equal(expected), "relocate before failed"
    assert type(actual) == tp.Tibble, "relocate didn't return a Tibble"
def test_relocate_after():
    """Can relocate after columns"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.relocate('z', 'y', after = 'x')
    expected = df.select('x', 'z', 'y')
    assert actual.frame_equal(expected), "relocate after failed"
def test_relocate_empty():
    """Can relocate to the beginning (no before/after given)"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.relocate('z', 'y')
    expected = df.select('z', 'y', 'x')
    assert actual.frame_equal(expected), "relocate to the beginning failed"
def test_rename_dplyr_kwargs():
    """Can rename - dplyr interface (kwargs: new_name = 'old_name')"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.rename(new_x = 'x', new_y = 'y')
    expected = tp.Tibble({'new_x': range(3), 'new_y': range(3), 'z': range(3)})
    assert actual.frame_equal(expected), "dplyr rename failed"
    assert type(actual) == tp.Tibble, "rename didn't return a Tibble"
def test_rename_dplyr_strings():
    """Can rename - dplyr interface (alternating new/old name strings)"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.rename('new_x', 'x', 'new_y', 'y')
    expected = tp.Tibble({'new_x': range(3), 'new_y': range(3), 'z': range(3)})
    assert actual.frame_equal(expected), "dplyr rename failed"
def test_rename_pandas():
    """Can rename - pandas interface ({'old': 'new'} mapping)"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.rename({'x': 'new_x', 'y': 'new_y'})
    expected = tp.Tibble({'new_x': range(3), 'new_y': range(3), 'z': range(3)})
    assert actual.frame_equal(expected), "pandas rename failed"
def test_replace_null():
    """Can replace nulls with a per-column mapping"""
    df = tp.Tibble(x = [0, None], y = [None, None])
    actual = df.replace_null(dict(x = 1, y = 2))
    expected = tp.Tibble(x = [0, 1], y = [2, 2])
    assert actual.frame_equal(expected), "replace_null method failed"
    assert type(actual) == tp.Tibble, "replace_null didn't return a Tibble"
def test_set_names():
    """Can set_names to replace all column names positionally"""
    df = tp.Tibble(x = range(3), y = range(3))
    actual = df.set_names(['a', 'b'])
    expected = tp.Tibble(a = range(3), b = range(3))
    assert actual.frame_equal(expected), "set_names failed"
    assert type(actual) == tp.Tibble, "set_names didn't return a Tibble"
# --- select / separate / slice ---
def test_select():
    """Can select columns"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.select('x', 'z')
    expected = df[['x', 'z']]
    assert actual.frame_equal(expected), "select failed"
    assert type(actual) == tp.Tibble, "select didn't return a Tibble"
def test_separate():
    """Can separate one column into several on a delimiter"""
    df = tp.Tibble(x = ['a_a', 'b_b', 'c_c'])
    actual = df.separate('x', into = ['left', 'right']).arrange('left')
    expected = tp.Tibble(left = ['a', 'b', 'c'], right = ['a', 'b', 'c'])
    assert actual.frame_equal(expected), "separate failed"
    assert type(actual) == tp.Tibble, "separate didn't return a Tibble"
def test_slice():
    """Can slice by explicit row indices"""
    df = tp.Tibble({'x': range(3), 'y': ['a', 'a', 'b']})
    actual = df.slice(0, 2)
    expected = tp.Tibble({'x': [0, 2], 'y': ['a', 'b']})
    assert actual.frame_equal(expected), "slice failed"
    assert type(actual) == tp.Tibble, "slice didn't return a Tibble"
def test_slice_head():
    """Can slice_head"""
    df = tp.Tibble({'x': range(3), 'y': ['a', 'a', 'b']})
    actual = df.slice_head(2)
    expected = tp.Tibble({'x': [0, 1], 'y': ['a', 'a']})
    assert actual.frame_equal(expected), "slice_head failed"
    assert type(actual) == tp.Tibble, "slice_head didn't return a Tibble"
def test_slice_tail():
    """Can slice_tail"""
    df = tp.Tibble({'x': range(3), 'y': ['a', 'a', 'b']})
    actual = df.slice_tail(2)
    expected = tp.Tibble({'x': [1, 2], 'y': ['a', 'b']})
    assert actual.frame_equal(expected), "slice_tail failed"
    assert type(actual) == tp.Tibble, "slice_tail didn't return a Tibble"
# --- summarize / conversions / unite ---
def test_summarise():
    """Can use summarise alias"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.summarise(avg_x = col('x').mean())
    expected = tp.Tibble({'avg_x': [1]})
    assert actual.frame_equal(expected), "summarise failed"
def test_summarize():
    """Can use summarize"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    actual = df.summarize(avg_x = col('x').mean())
    expected = tp.Tibble({'avg_x': [1]})
    assert actual.frame_equal(expected), "ungrouped summarize failed"
    assert type(actual) == tp.Tibble, "summarize didn't return a Tibble"
def test_summarize_grouped():
    """Can use summarize by group"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': ['a', 'a', 'b']})
    actual = df.summarize(avg_x = col('x').mean(), by = 'z').arrange('z')
    expected = tp.Tibble(z = ['a', 'b'], avg_x = [.5, 2])
    assert actual.frame_equal(expected), "grouped summarize failed"
def test_summarize_across():
    """Can use summarize with across (plus a regular aggregation)"""
    df = tp.Tibble(x = range(3), y = range(3), z = range(3))
    actual = df.summarize(tp.across(['x', 'y'], tp.max, names_prefix = "max_"),
                          avg_x = col('x').mean())
    expected = tp.Tibble({'max_x': [2], 'max_y': [2], 'avg_x': [1]})
    assert actual.frame_equal(expected), "ungrouped summarize across failed"
def test_to_dict():
    """Can convert to a dictionary"""
    df = tp.Tibble({'x': range(3), 'y': range(3)})
    assert type(df.to_dict()) == dict
def test_to_polars():
    """Can convert to a polars DataFrame"""
    df = tp.Tibble({'x': range(3), 'y': range(3), 'z': range(3)})
    assert isinstance(df.to_polars(), pl.DataFrame), "to_polars failed"
def test_unite():
    """Can unite columns into a single delimiter-joined column"""
    df = tp.Tibble(a = ["a", "a", "a"], b = ["b", "b", "b"], c = range(3))
    actual = df.unite("new_col", ["a", "b"])
    expected = tp.Tibble(new_col = ["a_b"] * 3, c = range(3))
    assert actual.frame_equal(expected), "unite failed"
    assert type(actual) == tp.Tibble, "unite didn't return a Tibble"
def test_funs_in_a_row():
    """Tests if shallow copy is working properly"""
    # Results are intentionally discarded: the test passes if none of the
    # chained-style calls raises, i.e. presumably each method leaves `df`
    # itself usable by the next call - confirm against Tibble's copy semantics.
    df = tp.Tibble(x = range(3), y = range(3), z = range(3))
    df.distinct()
    df.drop('x')
    df.drop_null()
    df.filter(col('x') < 7)
    df.head()
    df.mutate(col('x') * 2)
    df.relocate('y', before = 'x')
    df.rename({'x': 'new_x'})
    df.select('x', 'y')
    df.slice(1)
    df.slice_head()
    df.slice_tail()
    df.tail()
    df.arrange('x', 'y')
    assert True, "Functions in a row failed"
| markfairbanks/tidypolars | tests/test_tibble.py | test_tibble.py | py | 19,053 | python | en | code | 275 | github-code | 36 |
15131025008 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def mergeTwoLists(self, l1, l2):
        """
        Merge two sorted linked lists into a single sorted list.

        Splices the existing nodes together instead of allocating a copy
        of every node (the previous version built O(n) new ListNodes).

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # dummy head removes the "empty result" special case
        dummy = cur = ListNode(0)
        # link the smaller head node first, advancing through both lists
        while l1 and l2:
            if l1.val <= l2.val:
                cur.next = l1
                l1 = l1.next
            else:
                cur.next = l2
                l2 = l2.next
            cur = cur.next
        # at most one list still has nodes left; it is already sorted,
        # so splice the whole remainder in one step
        cur.next = l1 or l2
        return dummy.next
16039010135 | import csv
import numpy as np
from random import sample
class Data(object):
    """
    Abstraction for the training data for classification.

    Assumes the sample pools hold the same number of rows for every class
    and that rows of the same class are contiguous, e.g.
    [ClassA1, ClassA2, ..., ClassB1, ClassB2, ..., ClassC1, ClassC2, ...]
    """
    def __init__(self, classes, input_dimension):
        """
        Initialize the Data object.
        Arguments:
            `classes` : an array of all possible classes in the data
            `input_dimension`: an integer indicating the number of input dimensions
                               of training a classification NN
        """
        self.num_classes = len(classes)
        self.class_dict = classes
        self.input_dimension = input_dimension
    def read_n_process(self, filename):
        """
        Read a .csv file and set the following attributes:
        1. `self.samples` : a (n x k) float array where n is the number of
                            data points and k is `self.input_dimension`
        2. `self.labels`  : a (n x num_classes) array of one-hot encoding
                            vectors for the label of each data point
        3. `self.d`       : a (n x m) array of the raw csv rows
                            (NOTE: all elements in self.d are strings)
        Each row is categorized in the following way:
        [sepal-length, sepal-width, petal-length, petal-width, class]
        where `class` is one-hot encoded over the three categories:
        1. Iris Setosa
        2. Iris Versicolour
        3. Iris Virginica
        Ex) [1, 0, 0] is the encoding for Iris Setosa
        Arguments:
            `filename` : string
        """
        data = []
        with open(filename) as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                data.append(line)
        self.d = np.array(data)
        self.num_samples = self.d.shape[0]
        self.samples_per_class = self.num_samples // self.num_classes
        # Pool of all available lookup indices for every class
        self.index_pool = set(range(self.samples_per_class))
        # Input is the first k columns of the data.
        # NOTE: the np.float alias was removed from NumPy (1.24+); the
        # builtin float is the equivalent 64-bit dtype.
        self.samples = np.array(self.d[:, :self.input_dimension], dtype=float)
        # The last column is the label
        self.labels = np.array([self.one_hot_encode(s) \
                                for s in self.d[:, self.input_dimension:]])
    def one_hot_encode(self, s):
        """
        Given the string of a class, return the one-hot encoding of that
        class (all zeros if `s` matches no known class).
        """
        v = [0] * self.num_classes
        for index, class_name in enumerate(self.class_dict):
            if s == class_name:
                v[index] = 1
        return v
    def encoding_to_class(self, v):
        """
        Return the class associated with the one-hot encoding vector
        """
        index = np.argmax(v)
        return self.class_dict[index]
    def split_samples(self, training_size, validation_size):
        """
        Randomly split the samples into training, validation and testing
        sets with no overlap, assuming a uniform distribution of samples
        across all classes.
        Arguments:
            `training_size`   : number of training samples per class
            `validation_size` : number of samples used for validation
                                (NOTE: the remaining samples are for testing)
        """
        assert training_size + validation_size <= self.samples_per_class, \
            "Training size + validation size cannot exceed total number of samples"
        # random.sample() requires a sequence (sampling from a set was
        # removed in Python 3.11); sort for a deterministic population order
        training_indices = sample(sorted(self.index_pool), training_size)
        nontraining_indices = self.index_pool.difference(training_indices)
        validation_indices = list(sample(sorted(nontraining_indices), validation_size))
        testing_indices = list(nontraining_indices.difference(validation_indices))
        self.training_samples, self.training_labels = \
            self.select_samples_n_labels_from_classes(training_indices, self.samples, self.labels)
        self.validation_samples, self.validation_labels = \
            self.select_samples_n_labels_from_classes(validation_indices, self.samples, self.labels)
        self.testing_samples, self.testing_labels = \
            self.select_samples_n_labels_from_classes(testing_indices, self.samples, self.labels)
        # A test set with str labels. For printing purpose
        self.labeled_testing_set = self.select_raw_data_with_labels(testing_indices)
    def resample(self, samples_per_class, sample_pool, label_pool):
        """
        Randomly select a batch of samples from the pool with equal
        distribution across all classes (see the class docstring for the
        assumed pool layout).
        Arguments:
            `samples_per_class` : samples to draw per class
            `sample_pool` : a (n x k) array of samples to randomly select from
            `label_pool`  : a (n x k) array of sample labels
        """
        index_pool = range(sample_pool.shape[0] // self.num_classes)
        batch_indices = sample(index_pool, samples_per_class)
        return self.select_samples_n_labels_from_classes(batch_indices, sample_pool, label_pool)
    def select_samples_n_labels_from_classes(self, indices, sample_pool, label_pool=None):
        """
        Select, for every class, the element at each index in `indices`
        (see the class docstring for the assumed pool layout).
        Arguments:
            `indices`     : an array of per-class indices into the pool
            `sample_pool` : an array of samples
            `label_pool`  : an array of sample labels (same size as `sample_pool`)
        Returns a (samples, labels) tuple of stacked rows.
        """
        samples_per_class = sample_pool.shape[0] // self.num_classes
        assert len(indices) <= samples_per_class, \
            "Number of selected samples per class cannot exceed total number of samples per class"
        sample_batch = np.array([])
        label_batch = np.array([])
        for i in range(len(indices)):
            for j in range(self.num_classes):
                # contiguous class layout: class j starts at j * samples_per_class
                sample_index = indices[i] + j * samples_per_class
                sample_batch = np.vstack((sample_batch, sample_pool[sample_index])) \
                    if sample_batch.size else sample_pool[sample_index]
                label_batch = np.vstack((label_batch, label_pool[sample_index])) \
                    if label_batch.size else label_pool[sample_index]
        return (sample_batch, label_batch)
    def select_raw_data_with_labels(self, indices):
        """
        Select the element from the raw input set (self.d) at each
        per-class index; rows keep their original string labels.
        """
        samples_per_class = self.d.shape[0] // self.num_classes
        data_batch = np.array([])
        for i in range(len(indices)):
            for j in range(self.num_classes):
                data_index = indices[i] + j * samples_per_class
                data_batch = np.vstack((data_batch, self.d[data_index])) \
                    if data_batch.size else self.d[data_index]
        return data_batch
23708455692 | #!/usr/bin/env python
from __future__ import print_function
import sys
import math
import re
import string
from operator import itemgetter
from ConfigBundle import *
from urlparse import *
import simplejson
## @package Document
# Provides operations on pieces of text/documents
#import global variable defined in ConfigBundle module
global applicationConfig
## This class provides an object that unifies all extra data needed for document parsing
#
# This object contains any punctuation marks that will be stripped from articles, along
# with any common words that should be prevented from appearing as keywords (due to their frequency)
class DocumentParsingSettings:
    ## Initializes a new object and loads the data from the supplied files
    # @param punctuationMarksFilename The file to load the punctuation marks from
    # @param ignoredWordsFilename The file to load the words to ignore from
    # @param minLength Minimum acceptable length for a word
    # NOTE(review): relies on the module-level `applicationConfig` global
    # (star-imported from ConfigBundle) for the debugOutput flag, and on the
    # sibling Document class for file loading.
    def __init__(self, punctuationMarksFilename, ignoredWordsFilename, minLength):
        ## @var minLength
        # Minimum length for a word to take it into account
        self.minLength = minLength
        ## @var punctuationMarks
        # Punctuation marks to strip from content
        self.punctuationMarks = []
        ## @var ignoredWords
        # Words to explicitly not take into account
        self.ignoredWords = []
        #if additional debug output is requested, print the filenames
        if applicationConfig.debugOutput is True:
            print("Will load default parsing settings from {0}, {1}".format(punctuationMarksFilename, ignoredWordsFilename))
        #read the data from the files supplied; both files are whitespace-separated word lists
        try:
            defaultWordsTxt = Document.FromFile(ignoredWordsFilename)
            self.ignoredWords = defaultWordsTxt.text.split()
            defaultPunctuationMarks = Document.FromFile(punctuationMarksFilename)
            self.punctuationMarks = defaultPunctuationMarks.text.split()
        #things that can go wrong here are filesystem-related (very rarely device-related)
        except IOError as exc:
            print("Error reading from file: {0}".format(exc.strerror))
            raise
        #if additional debug output is requested, print the punctuation marks and the words to ignore
        if applicationConfig.debugOutput is True:
            print("The following words will not be counted:", end=" ")
            for word in self.ignoredWords:
                print(word, end=" ")
            print("\nThe following punctuation marks will be stripped:", end=" ")
            for punctuationMark in self.punctuationMarks:
                print(punctuationMark, end=" ")
            print("\n")
## Common text(file) related operations
#
# This implements text file loading along with TF metrics.
class Document:
    ## Initialize a new Document
    # @param text The text
    def __init__(self, text):
        ## @var text
        # The loaded text
        self.text = text
        ## @var words
        # The text splitted in words (filled in by CalculateTF)
        self.words = []
        ## @var tf
        # Frequency of appearance for each word (filled in by CalculateTF)
        self.tf = []
    ## Method for loading text from a file
    # @param filename The name of the file to load text from
    # @return a new Document wrapping the file's contents
    @staticmethod
    def FromFile(filename):
        text = ""
        try:
            # "with" guarantees the handle is closed; the previous
            # try/finally raised a NameError (masking the IOError) when
            # open() itself failed, because the handle was never bound.
            # NOTE(review): "rU" matches the Python 2 runtime this module
            # targets (see the urlparse import); it was removed in 3.11.
            with open(filename, "rU") as source:
                for line in source:
                    text += line
        except IOError as exc:
            print("Error reading from file: ", exc.strerror)
            raise
        #after we have successfully loaded text from a file, create a new Document object
        newDoc = Document(text)
        return newDoc
    ## Calculate term-frequency table
    # @param parsingSettings Parse the document with these settings
    # @param returnPart Boolean variable indicating whether to return a part of the calculated table or the complete table
    # @param termsWanted If a partial table is requested, size of the table
    # @return a list of [word, count] pairs sorted by count, descending
    def CalculateTF(self, parsingSettings, returnPart, termsWanted):
        rawWords = self.text.lower().split()  # split the text into words
        frequencies = {}
        for word in rawWords:
            for punctuationMark in parsingSettings.punctuationMarks:  # strip characters
                if punctuationMark in word:
                    word = word.replace(punctuationMark, "")
            # skip (common) words to ignore and words that are too short
            if word not in parsingSettings.ignoredWords and len(word) >= parsingSettings.minLength:
                self.words.append(word)
                if word in frequencies:
                    frequencies[word] += 1  # increment occurrences
                else:
                    frequencies[word] = 1  # add to dictionary
        # convert the frequency dict into an array of [word, count] pairs
        for word, count in frequencies.items():
            tmp = [word, count]
            self.tf.append(tmp)
        sortedTF = sorted(self.tf, key=lambda tup: tup[1], reverse=True)
        if returnPart is True:  # if a partial table is requested
            return sortedTF[:termsWanted]
        else:
            return sortedTF
# Nothing to run directly from here - this module only provides classes.
if __name__ == "__main__":
    print("Nothing to run here! Execute the main application file instead!")
| achalatsis/News-Spread-Analyzer | Document.py | Document.py | py | 5,540 | python | en | code | 0 | github-code | 36 |
23988137844 | from fastapi import APIRouter
import traceback
from .nl_to_sql_utils import get_similar_severities, get_most_relevant_severity, get_sql_query
from .nl_to_sql_prompts import tables_info
from pydantic import BaseModel
router = APIRouter()
class NLtoSQL(BaseModel):
    """Request body for the /nl_to_sql endpoint."""
    query: str
@router.post("/nl_to_sql")
async def nl_to_sql(body: NLtoSQL):
"""
Args:
query (str): user query for which we want to find attack techniques
Returns:
json object with following fields
query(str),
most_relevant_severity(str),
sql_query(str)
"""
try:
query = body.query
similar_severities = get_similar_severities(query)
inputs = {
"user_query":query,
"severities":similar_severities
}
most_relevant_severity = await get_most_relevant_severity(inputs)
inputs = {
"tables_info":tables_info,
"severity_value":most_relevant_severity
}
sql_query = await get_sql_query(inputs)
return {
"query": query,
"most_relevant_severity": most_relevant_severity,
"sql_query": sql_query
}
except Exception as e:
traceback.print_exc() | yadneshSalvi/cybersec_genai | src/nl_to_sql/nl_to_sql_routes.py | nl_to_sql_routes.py | py | 1,288 | python | en | code | 0 | github-code | 36 |
14934819007 | import pytest
from dbt.tests.adapter.utils.data_types.test_type_bigint import BaseTypeBigInt
from dbt.tests.adapter.utils.data_types.test_type_bigint import (
models__actual_sql as bigint_model,
)
from dbt.tests.adapter.utils.data_types.test_type_bigint import (
models__expected_sql as bigint_expected,
)
from dbt.tests.adapter.utils.data_types.test_type_boolean import (
BaseTypeBoolean,
)
from dbt.tests.adapter.utils.data_types.test_type_boolean import (
models__actual_sql as bool_model,
)
from dbt.tests.adapter.utils.data_types.test_type_float import BaseTypeFloat
from dbt.tests.adapter.utils.data_types.test_type_float import (
models__actual_sql as float_model,
)
from dbt.tests.adapter.utils.data_types.test_type_int import BaseTypeInt
from dbt.tests.adapter.utils.data_types.test_type_int import (
models__actual_sql as int_model,
)
from dbt.tests.adapter.utils.data_types.test_type_numeric import (
BaseTypeNumeric,
)
from dbt.tests.adapter.utils.data_types.test_type_numeric import (
models__actual_sql as num_model,
)
from dbt.tests.adapter.utils.data_types.test_type_string import BaseTypeString
from dbt.tests.adapter.utils.data_types.test_type_string import (
models__actual_sql as string_model,
)
from dbt.tests.adapter.utils.data_types.test_type_timestamp import (
BaseTypeTimestamp,
)
from dbt.tests.adapter.utils.data_types.test_type_timestamp import (
models__actual_sql as ts_model,
)
from firebolt import __version__ as sdk_version
schema_actual_table_yml = """
version: 2
models:
- name: actual
config:
materialized: table
"""
schema_expected_table_yml = """
version: 2
models:
- name: expected
config:
materialized: table
"""
# Overrides the inherited fixture to pair each model with the
# table-materialization schema patch defined above.
class TestTypeBigInt(BaseTypeBigInt):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'expected.yml': schema_expected_table_yml,
            'expected.sql': bigint_expected,
            'actual.yml': schema_actual_table_yml,
            'actual.sql': self.interpolate_macro_namespace(bigint_model, 'type_bigint'),
        }
# Same pattern as TestTypeBigInt: only the `actual` model needs a patch here.
class TestTypeFloat(BaseTypeFloat):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'actual.sql': self.interpolate_macro_namespace(float_model, 'type_float'),
            'actual.yml': schema_actual_table_yml,
        }
class TestTypeInt(BaseTypeInt):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'actual.sql': self.interpolate_macro_namespace(int_model, 'type_int'),
            'actual.yml': schema_actual_table_yml,
        }
# NOTE(review): this compares version strings lexicographically, which
# misorders e.g. '0.100.0' vs '0.15.0' - consider packaging.version.parse.
@pytest.mark.skipif(
    sdk_version <= '0.15.0', reason='Decimal type implemented in firebolt-sdk>0.15.0'
)
class TestTypeNumeric(BaseTypeNumeric):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'actual.sql': self.interpolate_macro_namespace(num_model, 'type_numeric'),
            'actual.yml': schema_actual_table_yml,
        }
# String and timestamp variants of the same fixture override.
class TestTypeString(BaseTypeString):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'actual.sql': self.interpolate_macro_namespace(string_model, 'type_string'),
            'actual.yml': schema_actual_table_yml,
        }
class TestTypeTimestamp(BaseTypeTimestamp):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'actual.sql': self.interpolate_macro_namespace(ts_model, 'type_timestamp'),
            'actual.yml': schema_actual_table_yml,
        }
# Kept for when the boolean feature flag is enabled; skipped until then.
@pytest.mark.skip('True boolean is feature-flagged')
class TestTypeBoolean(BaseTypeBoolean):
    @pytest.fixture(scope='class')
    def models(self):
        return {
            'actual.sql': self.interpolate_macro_namespace(bool_model, 'type_boolean'),
            'actual.yml': schema_actual_table_yml,
        }
| firebolt-db/dbt-firebolt | tests/functional/adapter/utils/test_data_types.py | test_data_types.py | py | 3,823 | python | en | code | 26 | github-code | 36 |
31008233432 | # Given a string of words, you need to find the highest scoring word.
# Each letter of a word scores points according to its position in the alphabet: a = 1, b = 2, c = 3 etc.
# For example, the score of abad is 8 (1 + 2 + 1 + 4).
# You need to return the highest scoring word as a string.
# If two words score the same, return the word that appears earliest in the original string.
# All letters will be lowercase and all inputs will be valid.
def high(x):
    """Return the highest-scoring word in x (a=1 ... z=26 per letter).

    Ties go to the word that appears first; an empty/whitespace-only
    string yields ''.
    """
    best_word = ''
    best_score = 0
    for candidate in x.split():
        # ord('a') == 97, so ord(ch) - 96 maps a->1 ... z->26
        candidate_score = sum(ord(ch) - 96 for ch in candidate)
        # strict '>' keeps the earliest word on a tie
        if candidate_score > best_score:
            best_score = candidate_score
            best_word = candidate
    return best_word
# Ad-hoc smoke checks; expected outputs (by letter-score):
# taxi, volcano, semynak, aa, b, bb, d, aaa
print(high('man i need a taxi up to ubud'))
print(high('what time are we climbing up the volcano'))
print(high('take me to semynak'))
print(high('aa b'))
print(high('b aa'))
print(high('bb d'))
print(high('d bb'))
print(high("aaa b"))
# def high(x):
# return max(x.split(), key=lambda k: sum(ord(c) - 96 for c in k))
# def high(x):
# words=x.split(' ')
# list = []
# for i in words:
# scores = [sum([ord(char) - 96 for char in i])]
# list.append(scores)
# return words[list.index(max(list))]
# def high(x):
# highest_score = 0
# for word in x.split(' '):
# score = sum(ord(c)-96 for c in word)
# if score > highest_score:
# highest_score = score
# highest_word = word
# return highest_word | jschoellkopf/My-Deployed-Code | code_wars/highest_scoring_word.py | highest_scoring_word.py | py | 1,769 | python | en | code | 0 | github-code | 36 |
15733574091 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
# C++ reserved words plus preprocessor directive names; presumably used to
# reject generated identifiers that would collide - confirm at call sites.
# NOTE(review): a few entries ('if', 'else', 'define'-era directives) appear
# twice; harmless for membership tests.
CPP_KEYWORDS = [
    'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
    'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
    'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
    'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
    'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
    'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
    'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
    'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
    'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
    'private', 'protected', 'public', 'register', 'reinterpret_cast',
    'requires', 'return', 'short', 'signed', 'sizeof', 'static',
    'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
    'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
    'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
    'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
    'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
    'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
    'line', 'error', 'pragma',
]
def sanitize_load(s):
    """Parse a YAML document from the string *s* with MACE-specific fixups.

    Two sanitizations are applied before parsing:

    1. Bare ``ON``/``OFF`` values at line end are quoted so YAML does not
       coerce them to booleans (``re.M`` is required here: without it the
       ``$`` anchor only matched the final line of the document).
    2. ``${VAR}`` references are substituted from the environment
       (raises :exc:`KeyError` if the variable is unset).

    Uses ``yaml.safe_load`` instead of ``yaml.load`` without a Loader,
    which is unsafe on untrusted input and a ``TypeError`` in PyYAML >= 6.
    """
    # do not let yaml parse ON/OFF to boolean
    for w in ["ON", "OFF", "on", "off"]:
        s = re.sub(r":\s+" + w + "$", r": '" + w + "'", s, flags=re.M)
    # sub ${} to env value
    s = re.sub(r"\${(\w+)}", lambda x: os.environ[x.group(1)], s)
    return yaml.safe_load(s)
def parse(path):
    """Read the file at *path* and return its sanitized YAML content."""
    with open(path) as config_file:
        raw_text = config_file.read()
    return sanitize_load(raw_text)
def parse_device_info(path):
    """Return the ``devices`` section of the YAML file at *path*."""
    device_conf = parse(path)
    return device_conf["devices"]
class ModelKeys(object):
    """String keys used to index MACE model YAML configuration dicts."""
    platform = "platform"
    runtime = "runtime"
    models = 'models'
    graph_optimize_options = "graph_optimize_options"
    # Per-input-tensor description keys.
    input_tensors = "input_tensors"
    input_shapes = "input_shapes"
    input_data_types = "input_data_types"
    input_data_formats = "input_data_formats"
    input_ranges = "input_ranges"
    # Per-output-tensor description keys.
    output_tensors = "output_tensors"
    output_shapes = "output_shapes"
    output_data_types = "output_data_types"
    output_data_formats = "output_data_formats"
    # Validation ("check") tensor keys.
    check_tensors = "check_tensors"
    check_shapes = "check_shapes"
    # Model/weight file locations and checksums.
    model_file_path = "model_file_path"
    model_sha256_checksum = "model_sha256_checksum"
    weight_file_path = "weight_file_path"
    weight_sha256_checksum = "weight_sha256_checksum"
    # Quantization-related options.
    quantize_range_file = "quantize_range_file"
    quantize = "quantize"
    quantize_schema = "quantize_schema"
    quantize_large_weights = "quantize_large_weights"
    quantize_stat = "quantize_stat"
    change_concat_ranges = "change_concat_ranges"
    winograd = "winograd"
    cl_mem_type = "cl_mem_type"
    data_type = "data_type"
    subgraphs = "subgraphs"
    validation_inputs_data = "validation_inputs_data"
class DataFormat(Enum):
    """Tensor memory layouts understood by the converter."""
    NONE = 0
    NHWC = 1
    NCHW = 2
    # 1xx values: filter/weight layouts (H=height, W=width, I=in, O=out).
    HWIO = 100
    OIHW = 101
    HWOI = 102
    OHWI = 103
    # AUTO: layout resolved elsewhere -- semantics not defined in this module.
    AUTO = 1000
def parse_data_format(str):
    """Convert a case-insensitive layout name into a DataFormat member."""
    # NOTE(review): the parameter shadows the builtin `str`; kept for
    # backward compatibility with keyword callers.
    str = str.upper()
    valid_names = set(DataFormat.__members__)
    mace_check(str in valid_names,
               "unknown data format %s" % str)
    return DataFormat[str]
class DeviceType(Enum):
    """Compute runtimes a model can target (YAML names in DEVICE_MAP)."""
    CPU = 0
    GPU = 2
    HEXAGON = 3
    HTA = 4
    APU = 5
    # "cpu+gpu" in YAML (see DEVICE_MAP).
    CPU_GPU = 100
# Maps the lower-case runtime names accepted in YAML to DeviceType members.
# "dsp" is an alias for "hexagon".
DEVICE_MAP = {
    "cpu": DeviceType.CPU,
    "gpu": DeviceType.GPU,
    "hexagon": DeviceType.HEXAGON,
    "dsp": DeviceType.HEXAGON,
    "hta": DeviceType.HTA,
    "apu": DeviceType.APU,
    "cpu+gpu": DeviceType.CPU_GPU
}
def parse_device_type(str):
    """Map a runtime name from YAML (e.g. ``"cpu"``, ``"dsp"``) to a DeviceType."""
    is_known = str in DEVICE_MAP
    mace_check(is_known, "unknown device %s" % str)
    return DEVICE_MAP[str]
class Platform(Enum):
    """Source deep-learning frameworks whose models can be converted."""
    TENSORFLOW = 0
    CAFFE = 1
    ONNX = 2
    MEGENGINE = 3
    KERAS = 4
    PYTORCH = 5
def parse_platform(str):
    """Convert a case-insensitive framework name into a Platform member."""
    str = str.upper()
    known_platforms = {member.name for member in Platform}
    mace_check(str in known_platforms,
               "unknown platform %s" % str)
    return Platform[str]
# External data-type names to protobuf data-type enums.
# NOTE(review): parse_data_type below performs its own mapping (and also
# accepts "float16"); this table appears unused within this module --
# confirm external callers before removing.
DATA_TYPE_MAP = {
    'float32': mace_pb2.DT_FLOAT,
    'int32': mace_pb2.DT_INT32,
}
def parse_data_type(str):
    """Map an external data-type name ("float32", "float16", "int32") to the
    corresponding mace_pb2 data type; unknown names fail via mace_check."""
    known_types = {
        "float32": mace_pb2.DT_FLOAT,
        "float16": mace_pb2.DT_FLOAT16,
        "int32": mace_pb2.DT_INT32,
    }
    if str in known_types:
        return known_types[str]
    mace_check(False, "data type %s not supported" % str)
def parse_internal_data_type(str):
    """Map an internal compute-precision name to a mace_pb2 data type.

    Any unrecognized value falls back to DT_HALF, matching the historical
    default.
    """
    precision_map = {
        'fp32_fp32': mace_pb2.DT_FLOAT,
        'bf16_fp32': mace_pb2.DT_BFLOAT16,
        'fp16_fp16': mace_pb2.DT_FLOAT16,
    }
    return precision_map.get(str, mace_pb2.DT_HALF)
def to_list(x):
    """Return *x* unchanged if it is already a list, else wrap it in one."""
    return x if isinstance(x, list) else [x]
def parse_int_array(xs):
    """Parse a comma-separated string of ints (e.g. ``"1,224,224,3"``).

    An empty string yields the default shape ``[1]``.
    """
    # BUG FIX: the original used `len(xs) is 0`, an identity comparison on
    # an int literal (a SyntaxWarning and implementation-dependent).
    if not xs:
        return [1]
    return [int(x) for x in xs.split(",")]
def parse_float_array(xs):
    """Parse a comma-separated string of floats (e.g. ``"-1.0,1.0"``)."""
    return list(map(float, xs.split(",")))
def normalize_input_data_types(conf, input_count):
    """Fill conf[ModelKeys.input_data_types] with one entry per input tensor.

    Missing entries fall back to the model-wide data type (DT_HALF is mapped
    to DT_FLOAT for compatibility); a single declared type is broadcast
    across all inputs.
    """
    fallback_dt = conf[ModelKeys.data_type]
    if fallback_dt == mace_pb2.DT_HALF:
        fallback_dt = mace_pb2.DT_FLOAT  # Compatible with old version
    raw_dts = to_list(conf.get(ModelKeys.input_data_types, []))
    if raw_dts:
        dtypes = [parse_data_type(dt) for dt in raw_dts]
    else:
        dtypes = [fallback_dt]
    if input_count > 1 and len(dtypes) == 1:
        dtypes = dtypes * input_count
    mace_check(len(dtypes) == input_count,
               "the number of input_data_types should be "
               "the same as input tensors")
    conf[ModelKeys.input_data_types] = dtypes
def normalize_output_data_types(conf, output_count):
    """Fill conf[ModelKeys.output_data_types] with one entry per output tensor.

    Mirrors normalize_input_data_types: falls back to the model-wide data
    type (DT_HALF mapped to DT_FLOAT) and broadcasts a single declared type.
    """
    fallback_dt = conf[ModelKeys.data_type]
    if fallback_dt == mace_pb2.DT_HALF:
        fallback_dt = mace_pb2.DT_FLOAT  # Compatible with old version
    raw_dts = to_list(conf.get(ModelKeys.output_data_types, []))
    if raw_dts:
        dtypes = [parse_data_type(dt) for dt in raw_dts]
    else:
        dtypes = [fallback_dt]
    if output_count > 1 and len(dtypes) == 1:
        dtypes = dtypes * output_count
    mace_check(len(dtypes) == output_count,
               "the number of output_data_types should be "
               "the same as output tensors")
    conf[ModelKeys.output_data_types] = dtypes
def normalize_model_config(conf):
    """Return a normalized copy of a model configuration dict.

    Flattens the first subgraph into the top level, parses enum-like string
    fields (platform, runtime, data types and formats), and expands the
    per-tensor lists (shapes, data types, data formats, ranges) so every
    input/output tensor has exactly one entry of each.
    """
    conf = copy.deepcopy(conf)
    if ModelKeys.subgraphs in conf:
        subgraph = conf[ModelKeys.subgraphs][0]
        del conf[ModelKeys.subgraphs]
        conf.update(subgraph)
    conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
    conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])
    # Quantized models always use float32 as the external data type.
    if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
        conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
    else:
        if ModelKeys.data_type in conf:
            conf[ModelKeys.data_type] = parse_internal_data_type(
                conf[ModelKeys.data_type])
        else:
            conf[ModelKeys.data_type] = mace_pb2.DT_HALF

    # parse input
    conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_tensors] = [str(i) for i in
                                     conf[ModelKeys.input_tensors]]
    input_count = len(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in
                                    to_list(conf[ModelKeys.input_shapes])]
    mace_check(
        len(conf[ModelKeys.input_shapes]) == input_count,
        "input node count and shape count do not match")

    normalize_input_data_types(conf, input_count)

    input_data_formats = [parse_data_format(df) for df in
                          to_list(conf.get(ModelKeys.input_data_formats,
                                           ["NHWC"]))]
    if len(input_data_formats) == 1 and input_count > 1:
        input_data_formats = [input_data_formats[0]] * input_count
    mace_check(len(input_data_formats) == input_count,
               "the number of input_data_formats should be "
               "the same as input tensors")
    conf[ModelKeys.input_data_formats] = input_data_formats

    input_ranges = [parse_float_array(r) for r in
                    to_list(conf.get(ModelKeys.input_ranges,
                                     ["-1.0,1.0"]))]
    if len(input_ranges) == 1 and input_count > 1:
        input_ranges = [input_ranges[0]] * input_count
    mace_check(len(input_ranges) == input_count,
               "the number of input_ranges should be "
               "the same as input tensors")
    conf[ModelKeys.input_ranges] = input_ranges

    # parse output
    conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_tensors] = [str(i) for i in
                                      conf[ModelKeys.output_tensors]]
    output_count = len(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in
                                     to_list(conf[ModelKeys.output_shapes])]
    # BUG FIX: the original compared len(output_tensors) with output_count
    # (its own length -- always true); compare the shape count instead,
    # mirroring the input check above.
    mace_check(len(conf[ModelKeys.output_shapes]) == output_count,
               "output node count and shape count do not match")

    normalize_output_data_types(conf, output_count)

    output_data_formats = [parse_data_format(df) for df in
                           to_list(conf.get(ModelKeys.output_data_formats,
                                            ["NHWC"]))]
    if len(output_data_formats) == 1 and output_count > 1:
        output_data_formats = [output_data_formats[0]] * output_count
    mace_check(len(output_data_formats) == output_count,
               "the number of output_data_formats should be "
               "the same as output tensors")
    conf[ModelKeys.output_data_formats] = output_data_formats

    if ModelKeys.check_tensors in conf:
        conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
        conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in
                                        to_list(conf[ModelKeys.check_shapes])]
        mace_check(len(conf[ModelKeys.check_tensors]) == len(
            conf[ModelKeys.check_shapes]),
            "check tensors count and shape count do not match.")

    MaceLogger.summary(conf)
    return conf
14838409343 | import json
import random
import re
import pubchempy as pcp
from csv_to_json import formulate_code
# -------- \begin constants ----------------------
# Element names 1-118 in atomic-number order. They are stored padded with a
# single space on each side so that substring searches against text match
# whole words only.
_ELEMENT_NAMES = (
    "Hydrogen", "Helium", "Lithium", "Beryllium", "Boron", "Carbon",
    "Nitrogen", "Oxygen", "Fluorine", "Neon", "Sodium", "Magnesium",
    "Aluminium", "Silicon", "Phosphorus", "Sulfur", "Chlorine", "Argon",
    "Potassium", "Calcium", "Scandium", "Titanium", "Vanadium", "Chromium",
    "Manganese", "Iron", "Cobalt", "Nickel", "Copper", "Zinc", "Gallium",
    "Germanium", "Arsenic", "Selenium", "Bromine", "Krypton", "Rubidium",
    "Strontium", "Yttrium", "Zirconium", "Niobium", "Molybdenum",
    "Technetium", "Ruthenium", "Rhodium", "Palladium", "Silver", "Cadmium",
    "Indium", "Tin", "Antimony", "Tellurium", "Iodine", "Xenon", "Cesium",
    "Barium", "Lanthanum", "Cerium", "Praseodymium", "Neodymium",
    "Promethium", "Samarium", "Europium", "Gadolinium", "Terbium",
    "Dysprosium", "Holmium", "Erbium", "Thulium", "Ytterbium", "Lutetium",
    "Hafnium", "Tantalum", "Tungsten", "Rhenium", "Osmium", "Iridium",
    "Platinum", "Gold", "Mercury", "Thallium", "Lead", "Bismuth",
    "Polonium", "Astatine", "Radon", "Francium", "Radium", "Actinium",
    "Thorium", "Protactinium", "Uranium", "Neptunium", "Plutonium",
    "Americium", "Curium", "Berkelium", "Californium", "Einsteinium",
    "Fermium", "Mendelevium", "Nobelium", "Lawrencium", "Rutherfordium",
    "Dubnium", "Seaborgium", "Bohrium", "Hassium", "Meitnerium",
    "Darmstadtium", "Roentgenium", "Copernicium", "Nihonium", "Flerovium",
    "Moscovium", "Livermorium", "Tennessine", "Oganesson",
)
atoms_list = [" " + name + " " for name in _ELEMENT_NAMES]
# Short English words that regex extraction sometimes mistakes for compound
# names; used to skip PubChem lookups that would be meaningless.
not_a_compound_list = ["the", "was", "and", "get", "of", "in", "as", "an"]
# -------- \end constants ----------------------
# -------- \begin question types ----------------------
def if_the_temprature_passes(row):
    """Extract the pieces of an "If the temperature passes ..." side note.

    Returns (temp_threshold, loss, heat_duration, reactant_name), each a
    list of matched strings, or four Nones when the context contains no
    such note.
    """
    context = row["contexts"]
    if re.search("If the temperature passes", context) is None:
        return None, None, None, None
    note = re.search(
        "If the temperature passes \d+ degrees when heating .* for more than \d+ seconds .*",
        context).group()
    thresholds = re.findall("\d+ degrees", note)
    durations = re.findall("\d+ seconds", note)
    losses = re.findall("loss of (\D*\d+\.?\d* [grams|milligrams|\%]+)", note)
    names = re.findall(" when heating (.*) for more", note)
    return thresholds, losses, durations, names
def We_discovered_that_if_the_amount_of(row):
    """Extract a "We discovered that if the amount of ..." side note.

    Returns (temp_threshold, loss, quantity_threshold, compound_name), each
    a list of matched strings. On a miss it now returns four Nones to match
    the four-value success tuple (it previously returned only two Nones,
    inconsistent with both the success path and the caller's unpacking).
    """
    if re.search("We discovered that if the amount of", row["contexts"]) is None:
        return None, None, None, None
    else:
        additional_info = re.search(
            "We discovered that if the amount of .*", row["contexts"])
        quantity_threshold = re.findall("above (\D*\d+\.?\d* [grams|milligrams|\%]+)", additional_info.group())
        threshold_comp_name = re.findall("the amount of (.*) in", additional_info.group())
        temp_threshold_str = re.findall("the temperature is less than (\D*\d+\.?\d* degrees)", additional_info.group())
        loss_str = re.findall("the product of the process decreases by (\d+\%)", additional_info.group())
        return temp_threshold_str, loss_str, quantity_threshold, threshold_comp_name
def overheat_the(row):
    """Extract an "Overheat the ..." side note.

    Returns (temp_threshold, loss, decrease_ratio, product_name), each a
    list of matched strings. On a miss it now returns four Nones to match
    the four-value success tuple (it previously returned only three).
    """
    if re.search("Overheat the .*", row["contexts"]) is None:
        return None, None, None, None
    else:
        additional_info = re.search(
            "Overheat the .*", row["contexts"])
        temp_threshold_str = re.findall("the temperature is above (\D*\d+\.?\d* degrees)", additional_info.group())
        decrease_ratio_str = re.findall("for each (\d+\.\d+ [second|hour]+)", additional_info.group())
        loss_str = re.findall("a loss of (\D*\d+\.?\d* [grams|milligrams|ml|ML\%]+)", additional_info.group())
        product_name = re.findall("Overheat the (.*) will result", additional_info.group())
        return temp_threshold_str, loss_str, decrease_ratio_str, product_name
def if_we_heat(row):
    """Extract an "If we heat ..." side note.

    Returns (temp_threshold, loss, name), each a list of matched strings.
    On a miss it now returns three Nones to match the three-value success
    tuple (it previously returned only two).
    """
    if re.search("If we heat .*", row["contexts"]) is None:
        return None, None, None
    else:
        additional_info = re.search(
            "If we heat .*", row["contexts"])
        temp_threshold_str = re.findall("to temperature higher than (\D*\d+\.?\d* degrees)", additional_info.group())
        loss_str = re.findall("at a rate of (\D*\d+\.?\d* [grams|milligrams|milliliters|\%]+) per minute.",
                              additional_info.group())
        name = re.findall("If we heat (.*) to tem", additional_info.group())
        return temp_threshold_str, loss_str, name
def stirring_the_mixture_longer(row):
    """Extract a "stirring the mixture longer ..." side note.

    Returns (loss, decrease_ratio, name), each a list of strings. On a miss
    it now returns three Nones to match the three-value success tuple
    (it previously returned only two).
    """
    if re.search("stirring the mixture longer.*", row["contexts"]) is None:
        return None, None, None
    else:
        additional_info = re.search(
            "stirring the mixture longer.*", row["contexts"])
        loss_str = re.findall("will cause a loss of (\D*\d+\.?\d* [grams|milligrams|milliliters|\%]+)",
                              additional_info.group())
        decrease_ratio_str = re.findall("for each (minute|hour) above the original time",
                                        additional_info.group())
        # This pattern never names a specific compound, so a fixed name is used.
        name = ["the mixture"]
        return loss_str, decrease_ratio_str, name
def if_the_temperature_exceed(row):
    """Extract an "If the temperature exceed ..." side note.

    Returns (temp_threshold, decrease_ratio, name), each a list of strings.
    On a miss it now returns three Nones to match the three-value success
    tuple (it previously returned only two).
    """
    if re.search(" If the temperature exceed .*", row["contexts"]) is None:
        return None, None, None
    else:
        additional_info = re.search(
            "If the temperature exceed .*", row["contexts"])
        temp_threshold_str = re.findall("If the temperature exceed (\D*\d+\.?\d* degrees)",
                                        additional_info.group())
        decrease_ratio_str = re.findall("it will result in (\d+\% decrease) in the final products",
                                        additional_info.group())
        name = re.findall("when heating (.*) it will result", additional_info.group())
        return temp_threshold_str, decrease_ratio_str, name
def if_we_cool_the_mixture(row):
    """Extract an "If we cool the mixture ..." side note.

    Returns (temp_threshold, decrease_ratio, name), each a list of strings.
    On a miss it now returns three Nones to match the three-value success
    tuple (it previously returned only two).
    """
    if re.search("If we cool the mixture.*", row["contexts"]) is None:
        return None, None, None
    else:
        additional_info = re.search(
            "If we cool the mixture.*", row["contexts"])
        temp_threshold_str = re.findall("below (\D*\d+\.?\d* degrees)",
                                        additional_info.group())
        decrease_ratio_str = re.findall("the product of the process decreases by (\d+\%)",
                                        additional_info.group())
        # This pattern never names a specific compound, so a fixed name is used.
        name = ["the mixture"]
        return temp_threshold_str, decrease_ratio_str, name
# -------- \end question types ----------------------
# --------- \begin function for generating the question -------
def randomize_product_amount(vars_and_vals):
    """Return a randomized amount string derived from the described product.

    *vars_and_vals* is the (components, product_amount) pair returned by
    get_reactors_and_output; only the product amount string (e.g. "5.0 g")
    is used. The quantity is scaled by a uniform factor in [0, 2) and the
    unit is re-drawn within its own family (weight or volume).
    """
    weight_units = ['gr', 'mg', 'kg']
    volume_units = ['mL', 'L']
    product_amount = vars_and_vals[1]
    amount_unit_split = product_amount.split()
    amount_unit_split[0] = float(amount_unit_split[0]) * (random.uniform(0, 2))
    if amount_unit_split[1] in weight_units:
        amount_unit_split[1] = random.choice(weight_units)
    elif amount_unit_split[1] in volume_units:
        # BUG FIX: volume units previously re-drew from weight_units,
        # turning e.g. "mL" into "kg".
        amount_unit_split[1] = random.choice(volume_units)
    return "{:.2f}".format(amount_unit_split[0]) + " " + amount_unit_split[1]
def generate_reactors(components):
    """Build a textual list of available reactors plus a structured list.

    *components* is a list of ``[name, "amount unit"]`` pairs. With
    probability 0.2 per component the amount is rescaled by uniform(0, 2)
    and the unit re-drawn within its own family. Returns
    (description_string, reactors_list).

    Fixes two defects: volume units previously re-drew from the weight
    family, and the randomized branch appended ``[amount, unit]`` (dropping
    the compound name) while the other branch appended ``[quantity, name]``;
    both branches now emit the consistent ``[quantity, name]`` shape the
    generated code expects.
    """
    weight_units = ['gr', 'mg', 'kg']
    volume_units = ['mL', 'L']
    available_reactors = ""
    reactors_list = []
    for comp in components:
        if comp[0].startswith("("):
            comp[0] = comp[0][1:]
        if random.random() < 0.2:  # w.p. 20% change the units
            amount_unit_split = comp[1].split()
            if amount_unit_split[1] == 'g':
                amount_unit_split[1] = 'gr'
            if amount_unit_split[1] in weight_units:
                amount_unit_split[1] = random.choice(weight_units)
            elif amount_unit_split[1] in volume_units:
                # BUG FIX: was random.choice(weight_units) for volume units.
                amount_unit_split[1] = random.choice(volume_units)
            amount_unit_split[0] = str(float(amount_unit_split[0]) * (random.uniform(0, 2)))
            quantity = amount_unit_split[0] + " " + amount_unit_split[1]
            available_reactors += quantity + " of " + comp[0] + ", "
            reactors_list.append([quantity, comp[0]])
        else:
            available_reactors += comp[1] + " of " + comp[0] + ", "
            reactors_list.append([comp[1], comp[0]])
    return available_reactors, reactors_list
def get_vars_from_question(q_vars):
    """Render a question fragment like "5.0 gr of water, 2 mg of salt, ..."
    as a string literal of (name, amount) tuples for generated code.

    The final comma-separated piece (the question tail) is discarded.
    """
    pieces = q_vars.split(", ")
    rendered = "["
    for piece in pieces[:-1]:
        parts = piece.split(" of ")
        name = parts[1].replace("'", "")
        amount = parts[0].replace("'", "")
        rendered += f"( ' {name} ' , ' {amount} ' ) ,"
    return rendered + "]"
def get_reactors_and_output(row):
    """Parse a reaction description into ([[name, amount], ...], product_amount).

    The last pair of the returned list describes the final product. Returns
    (None, None) when no amount can be located in the sentences near the
    end of the text.

    Fixes: the "( mmol, weight )" reorder regex was missing "|" separators
    between four of its five alternatives (only the first ever matched);
    ``is not ''`` used identity instead of equality; and items were removed
    from a list while iterating it, which skips elements.
    """
    context = row["contexts"]
    # Normalize "(X mmol, Y <unit>)" annotations to "( Y <unit>, X mmol )".
    check2switch = ("\(\d+\.?\d* mmol, \d+\.?\d* mg\)"
                    "|\(\d+\.?\d* mmol, \d+\.?\d* g\)"
                    "|\(\d+\.?\d* mmol, \d+\.?\d* mL\)"
                    "|\(\d+\.?\d* mmol, \d+\.?\d* gr\)"
                    "|\(\d+\.?\d* mmol, \d+\.?\d* ml\)")
    while re.search(check2switch, context) is not None:
        cc = re.search(check2switch, context)
        split_to_reorder = re.split("\(|,|\)", cc.group())
        context = context.replace(cc.group(), "( " + split_to_reorder[2] + ", " + split_to_reorder[1] + " )")
    split_by_sentence = context.split(". ")
    # Drop the trailing side-note sentence from the text used for parsing.
    if len(split_by_sentence[-1]) == 0:
        dropped_additional = context.replace(split_by_sentence[-2], '')
        start_search = -3
    else:
        dropped_additional = context.replace(split_by_sentence[-1], '')
        start_search = -2
    # Walk backwards through the sentences until one mentions an amount.
    succeed = False
    while not succeed:
        try:
            product_amount = re.search("(\d+\.?\d* g)|(\d+\.?\d* mg)|(\d+\.?\d* mL)|(\d+\.?\d* ml)",
                                       split_by_sentence[start_search]).group()
        except (AttributeError, IndexError):
            # AttributeError: no amount in this sentence; IndexError: ran
            # out of sentences. Give up after scanning ~20 sentences back.
            if start_search < -20:
                return None, None
            start_search -= 1
            continue
        succeed = True
    vars_and_vals_list = re.split("(\d+\.?\d* g)|(\d+\.?\d* mg)|(\d+\.?\d* mL)|(\d+\.?\d* ml)", dropped_additional)
    # re.split with capture groups interleaves None placeholders and may
    # produce empty strings; drop both (equality, not identity, comparison).
    vars_and_vals_list = [i for i in vars_and_vals_list if i is not None and i != '']
    # Drop fragments without any lowercase letter. The original called
    # list.remove while iterating, which skips the element after each
    # removal; filtering into a new list removes them all.
    vars_and_vals_list = [item for item in vars_and_vals_list
                          if re.search("[a-z]+", item) is not None]
    vars_and_vals = []
    for i in range(len(vars_and_vals_list) // 2):
        if re.search("(\d+\.?\d* g)|(\d+\.?\d* mg)|(\d+\.?\d* mL)|(\d+\.?\d* ml)",
                     vars_and_vals_list[2 * i]) is not None:
            # Even slot already holds an amount: the name follows it.
            prev_sentence = vars_and_vals_list[2 * i + 1].split()
            vars_and_vals.append(
                [prev_sentence[0] if len(prev_sentence) == 1 else (
                    prev_sentence[1][1:] if prev_sentence[1].startswith("(") else prev_sentence[1]),
                 vars_and_vals_list[2 * i]])
        else:
            # Even slot is prose: take the trailing word(s) as the name.
            idx = -1
            comp_name = ""
            prev_parts = vars_and_vals_list[2 * i].split()
            while re.search("[a-z|0-9]+", comp_name) is None:
                comp_name += prev_parts[idx]
                idx -= 1
            vars_and_vals.append(
                [comp_name[1:] if comp_name.startswith("(") else comp_name, vars_and_vals_list[2 * i + 1]])
    return vars_and_vals, product_amount
def get_time_from_question(question):
    """Extract the duration phrase of a generated question: the text between
    ", for " and the last following comma (greedy match)."""
    matches = re.findall(", for (.*),", question)
    return matches[0]
def generate_duration(row, desired_output):
    """Build a randomized heating/cooling scenario question from whichever
    side-note pattern appears in row["contexts"].

    Returns (generated_temp, temp_threshold_str, loss_str, duration_string,
    heat_duration_str, name, question). When no pattern matches, returns
    seven Nones; the original fell through and raised TypeError trying to
    concatenate the unset duration unit.
    """
    generated_temp, temp_threshold_str, loss_str, generated_duration, heat_duration_str, name = None, None, None, None, None, None
    unit = None
    question = None
    if None not in if_the_temprature_passes(row):
        temp_threshold_str, loss_str, heat_duration_str, name = if_the_temprature_passes(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = float(re.findall("(\d+)", heat_duration_str[0])[0]) + random.randint(0, 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if we heat to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in We_discovered_that_if_the_amount_of(row):
        temp_threshold_str, loss_str, quantity_threshold, threshold_comp_name = We_discovered_that_if_the_amount_of(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = random.randint(0, 10)
        generate_quantity = float(quantity_threshold[0].split()[0]) + random.uniform(
            float(quantity_threshold[0].split()[0]), 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if the {threshold_comp_name[0]} was over {generate_quantity}, we cool the mixture to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in overheat_the(row):
        temp_threshold_str, loss_str, decrease_ratio_str, product_name = overheat_the(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = random.randint(0, 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if we heat the {product_name[0]} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in if_we_heat(row):
        temp_threshold_str, loss_str, name = if_we_heat(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = random.randint(0, 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if we heat the {name[0]} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in stirring_the_mixture_longer(row):
        loss_str, _, name = stirring_the_mixture_longer(row)
        name = name[0]
        generated_temp = random.randint(-100, 100)
        unit = random.choice(["minutes", "hours"])
        generated_duration = random.randint(1, 10) if unit == "hours" else random.choice([30 * i for i in range(20)])
        question = f"if we heat the {name} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in if_the_temperature_exceed(row):
        temp_threshold_str, loss_str, name = if_the_temperature_exceed(row)
        unit = random.choice(["minutes", "hours"])
        generated_duration = random.randint(1, 10) if unit == "hours" else random.choice([30 * i for i in range(20)])
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        # BUG FIX: {name} interpolated the raw list (e.g. "['the soup']").
        question = f"if we heat the {name[0]} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in if_we_cool_the_mixture(row):
        temp_threshold_str, loss_str, name = if_we_cool_the_mixture(row)
        unit = random.choice(["minutes", "hours"])
        generated_duration = random.randint(1, 10) if unit == "hours" else random.choice([30 * i for i in range(20)])
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-50, 50)
        # BUG FIX: {name} interpolated the raw list (e.g. "['the mixture']").
        question = f"if we cool the {name[0]} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if unit is None:
        # None of the known side-note patterns matched.
        return None, None, None, None, None, None, None
    return generated_temp, temp_threshold_str, loss_str, str(
        generated_duration) + " " + unit, heat_duration_str, name, question
# --------- \end function for generating the question -------
def generate_question_type5(row):
    """Generate a "how many moles does the process yield" question plus the
    code string that computes the answer via a PubChem molar-mass lookup.

    Returns ("", "") when the parsed product name is not a resolvable
    compound (or parsing failed entirely).
    """
    parsed = get_reactors_and_output(row)
    question = "how many moles of the product does the process yield ?"
    try:
        product_name = parsed[0][-1][0]
        product_quantity = parsed[0][-1][1]
        if product_name in not_a_compound_list:
            raise NameError
        # Probe PubChem so obviously bogus names are rejected up front.
        pcp.get_compounds(product_name, 'name')[0].exact_mass
    except:
        return "", ""
    code = f"molar_mass = pcp.get_compounds( \"{product_name}\", 'name')[0].exact_mass [EOL]" \
           f"molar_mass = float ( molar_mass ) [EOL]" \
           f"yielded_grams = to_gr(\" {product_quantity} \") [EOL]" \
           f"return yielded_grams / molar_mass [EOL]"
    return question, code
def generate_question_type6(row):
    """Generate a "how many moles of <reactor> do we need" question and the
    code string computing the answer.

    BUG FIX: the PubChem validity probe previously passed the *set*
    ``{reactor_name}`` instead of the name string to ``pcp.get_compounds``
    (unlike types 5 and 7), so the lookup failed and the function always
    returned ("", "").
    """
    vars_and_vals = get_reactors_and_output(row)
    product_quantity = vars_and_vals[0][-1]
    desired_output = randomize_product_amount(vars_and_vals)
    try:
        reactor_chosen = random.choice(vars_and_vals[0][:-1])
    except:
        # No reactors parsed (or parsing failed): nothing to ask about.
        return "", ""
    reactor_name = reactor_chosen[0]
    reactor_weight = reactor_chosen[1]
    try:
        if reactor_name in not_a_compound_list:
            raise NameError
        validity_check = pcp.get_compounds(reactor_name, 'name')[0].exact_mass
    except:
        return "", ""
    question = f"how many moles of {reactor_name} do we need to get {desired_output} of the product ?"
    code = f"desired_product = to_gr( ' {desired_output} ' ) [EOL]" \
           f"product_described = to_gr( ' {product_quantity[1]} ' ) [EOL]" \
           f"described_component = to_gr( ' {reactor_weight} ') [EOL]" \
           f"needed_reactor = desired_product / product_described * described_component [EOL]" \
           f"reactor_molar_weight = pcp.get_compounds( \"{reactor_name}\" , 'name')[0].exact_mass [EOL]" \
           f"return ( needed_reactor / float( reactor_molar_weight ) ) [EOL]"
    return question, code
def generate_question_type7(row):
    """Generate an "is element X present in the product" question plus the
    code string answering it via PubChem element lists.

    Returns ("", "") when the product name cannot be resolved on PubChem.
    """
    parsed = get_reactors_and_output(row)
    chosen_atom = random.choice(atoms_list).strip()
    compound_name = parsed[0][-1][0].replace('.', '')
    try:
        if compound_name in not_a_compound_list:
            raise NameError
        # Probe PubChem so unresolvable names are rejected up front.
        pcp.get_compounds(compound_name, 'name')[0].elements
    except:
        return "", ""
    print("detected compound : ", compound_name)
    question = f"Is {chosen_atom} present in the product ?"
    code = f"chosen_atom = pcp.get_compounds( \" {chosen_atom} \" , 'name')[0].molecular_formula [EOL]" \
           f"product_elements = pcp.get_compounds( \"{compound_name}\" , 'name')[0].elements [EOL]" \
           f"return chosen_atom in product_elements [EOL]"
    return question, code
def generate_question_type1(row):
    """Generate "how much of each reactor for a desired product amount".

    BUG FIX in the generated code: ``portions_needed`` divided the desired
    amount by a hard-coded 100 while the computed ``product_described``
    went unused; the scale factor is now desired_product / product_described
    (matching the per-reactor scaling used by question type 6).
    """
    vars_and_vals = get_reactors_and_output(row)
    desired_output = randomize_product_amount(vars_and_vals)
    question_1 = f"how much do we need from each of the reactors to get {desired_output} of the final product ?"  # TODO V2 : add an environmental condtion
    code_1 = f"desired_product = to_gr( \" {desired_output} \" )[EOL]" \
             f"components = {vars_and_vals[0][:-1]} [EOL]" \
             f"product_described = to_gr( \" {vars_and_vals[1]} \" )[EOL]" \
             f"portions_needed = desired_product / product_described [EOL]" \
             f"needed_reactors = [[reactor [ 0 ] , to_gr( reactor [ 1 ] ) * portions_needed] for reactor in components] [EOL]" \
             f"return needed_reactors [EOL]"
    code_1 = formulate_code(code_1)
    return question_1, code_1
def generate_question_type2(row):
    """Generate a "how can we optimize the process" question together with
    the code string that computes the leftover reactors after maximal use.

    Returns ("", "") when no reactors were parsed from the context.
    """
    parsed = get_reactors_and_output(row)
    reactors = parsed[0][:-1]
    reactors_text, have_list = generate_reactors(reactors)
    have_list = [[entry[1], entry[0]] for entry in have_list]
    if len(reactors) < 1:
        return "", ""
    question2 = f"we have {reactors_text}, how can we optimize the process?"  # TODO V2 : add an environmental conditions
    code_2 = f"components = {reactors} [EOL]" \
             f"have_components = {have_list} [EOL]" \
             f"min_portion = float( 'inf' ) [EOL]" \
             f"for component, needed in zip ( components , have_components ) : [EOL]" \
             f"[TAB]portions = to_gr( component [ 1 ] ) / to_gr( needed [ 1 ] ) [EOL]" \
             "[TAB]if portions < min_portion : [EOL]" \
             "[TAB][TAB]min_portion = portions [EOL]" \
             "optimized = [] [EOL]" \
             "for need, have in zip ( components , have_components ) : [EOL]" \
             "[TAB]optimized.append( [ have[0] , to_gr( have [1] ) - to_gr ( need [1] ) * min_portion ] ) [EOL]" \
             "return optimized [EOL]"
    code_2 = formulate_code(code_2)
    return question2, code_2
def generate_question_type3(row):
    """Generate a "how much product can we create from what we have"
    question plus the code string computing the limiting-reactor yield."""
    parsed = get_reactors_and_output(row)
    reactors_text, _unused_list = generate_reactors(parsed[0][:-1])
    question_3 = f"we have {reactors_text} how much can we create of the final product?"  # TODO V2 : add an environmental conditions
    code_3 = f"available_reactors = {get_vars_from_question(reactors_text)} [EOL]" \
             f"components = {parsed[0][:-1]} [EOL]" \
             f"product_described = to_gr( \" {parsed[1]} \" ) [EOL]" \
             f"minimal_product_portion = float( 'inf' ) [EOL]" \
             f"for needed, have in zip ( components , available_reactors ): [EOL]" \
             f"[TAB]tmp_min_portion = to_gr( have [ 1 ] ) / to_gr( needed[1] ) [EOL]" \
             f"[TAB]if tmp_min_portion < minimal_product_portion : [EOL]" \
             f"[TAB][TAB]minimal_product_portion = tmp_min_portion [EOL]" \
             f"return minimal_product_portion * product_described [EOL]"
    code_3 = formulate_code(code_3)
    return question_3, code_3
def generate_question_type4(row):
    """Generate a heating/cooling-scenario question and its answer code.

    Fixes three defects: the dangling ``answe_4 = execute`` line
    (``execute`` is undefined, so every call raised NameError), and the
    threshold duration/temperature which were embedded into the generated
    code as Python *list* reprs (e.g. ``['30 seconds']``) instead of the
    scalar values the generated comparisons need.
    """
    vars_and_vals = get_reactors_and_output(row)
    desired_output = randomize_product_amount(vars_and_vals)
    generated_temp, temp_threshold_str, loss_str, generated_duration, heat_duration_str, name, question_4 = generate_duration(
        row, desired_output)
    if heat_duration_str is None:
        return None, None
    # Numeric threshold, e.g. "100 degrees" -> 100.0.
    threshold_temp = float(temp_threshold_str[0].split()[0])
    # NOTE(review): the generated `compensate_for_loss( loss= loss[0], ...)`
    # passes the first character of the loss string; the semantics of
    # compensate_for_loss are not visible here -- confirm against its
    # implementation.
    code_4 = f"time = to_minute( \" {get_time_from_question(question_4)} \" ) [EOL]" \
             f"loss = \'{loss_str[0]}\' [EOL]" \
             f"components = {vars_and_vals[0][:-1]} [EOL]" \
             f"described_product_amount = to_gr( \" {desired_output} \" ) [EOL]" \
             f"threshold_duration = to_minute( \" {heat_duration_str[0]} \" ) [EOL]" \
             f"temprature = {generated_temp} [EOL]" \
             f"threshold_temp = {threshold_temp} [EOL]" \
             f"final_product_amount = described_product_amount [EOL]" \
             f"for t in range( time ): [EOL]" \
             f"[TAB]if t > threshold_duration and temprature > threshold_temp: [EOL]" \
             f"[TAB][TAB]final_product_amount = compensate_for_loss( loss= loss[0], current_value= final_product_amount) [EOL]" \
             f"portions = final_product_amount / described_product_amount [EOL]" \
             f"return [[component[0], to_gr(component[1]) * portions] for component in components] [EOL]"
    code_4 = formulate_code(code_4)
    return question_4, code_4
# Script entry: generate question/code pairs for every parsed USPTO entry.
# BUG FIX: json.load(open(...)) leaked the file handle; use a context
# manager so the file is closed deterministically.
with open("gpt4-parsed-uspto.json", "r", encoding="utf-8") as _data_file:
    data = json.load(_data_file)
for idx, entry in enumerate(data):
    q1, c1 = generate_question_type1(row=entry)
    q2, c2 = generate_question_type2(row=entry)
    q3, c3 = generate_question_type3(row=entry)
    q4, c4 = generate_question_type4(row=entry)
    print(c4)
| arrafmousa/generate_code | generate_questiontion_with_chempy.py | generate_questiontion_with_chempy.py | py | 25,754 | python | en | code | 0 | github-code | 36 |
74831593702 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""This module corresponds to the bspline directory in idlutils.
This is Aaron C. Rizzuto's version, with corrected handling of the
Cholesky band fails and maskpoints logic to more close match the idl
version. Also changed all relative imports from pydl to normal imports
"""
import numpy as np
class bspline(object):
"""Bspline class.
Functions in the bspline library are implemented as methods on this
class.
Parameters
----------
x : :class:`numpy.ndarray`
The data.
nord : :class:`int`, optional
To be documented.
npoly : :class:`int`, optional
To be documented.
bkpt : :class:`numpy.ndarray`, optional
To be documented.
bkspread : :class:`float`, optional
To be documented.
verbose : :class:`bool`, optional.
If ``True`` print extra information.
Attributes
----------
breakpoints
To be documented.
nord
To be documented.
npoly
To be documented.
mask
To be documented.
coeff
To be documented.
icoeff
To be documented.
xmin
To be documented.
xmax
To be documented.
funcname
To be documented.
"""
def __init__(self, x, nord=4, npoly=1, bkpt=None, bkspread=1.0,
verbose=False, **kwargs):
"""Init creates an object whose attributes are similar to the
structure returned by the create_bspline function.
"""
#
# Set the breakpoints.
#
if bkpt is None:
startx = x.min()
rangex = x.max() - startx
if 'placed' in kwargs:
w = ((kwargs['placed'] >= startx) &
(kwargs['placed'] <= startx+rangex))
if w.sum() < 2:
bkpt = np.arange(2, dtype='f') * rangex + startx
else:
bkpt = kwargs['placed'][w]
elif 'bkspace' in kwargs:
nbkpts = int(rangex/kwargs['bkspace']) + 1
if nbkpts < 2:
nbkpts = 2
tempbkspace = rangex/float(nbkpts-1)
bkpt = np.arange(nbkpts, dtype='f')*tempbkspace + startx
elif 'nbkpts' in kwargs:
nbkpts = kwargs['nbkpts']
if nbkpts < 2:
nbkpts = 2
tempbkspace = rangex/float(nbkpts-1)
bkpt = np.arange(nbkpts, dtype='f') * tempbkspace + startx
elif 'everyn' in kwargs:
npkpts = max(nx/kwargs['everyn'], 1)
if nbkpts == 1:
xspot = [0]
else:
xspot = int(nx/(nbkpts-1)) * np.arange(nbkpts, dtype='i4')
bkpt = x[xspot].astype('f')
else:
raise ValueError('No information for bkpts.')
imin = bkpt.argmin()
imax = bkpt.argmax()
if x.min() < bkpt[imin]:
if verbose:
print('Lowest breakpoint does not cover lowest x value: changing.')
bkpt[imin] = x.min()
if x.max() > bkpt[imax]:
if verbose:
print('Highest breakpoint does not cover highest x value: changing.')
bkpt[imax] = x.max()
nshortbkpt = bkpt.size
fullbkpt = bkpt.copy()
if nshortbkpt == 1:
bkspace = np.float32(bkspread)
else:
bkspace = (bkpt[1] - bkpt[0]) * np.float32(bkspread)
for i in np.arange(1, nord, dtype=np.float32):
fullbkpt = np.insert(fullbkpt, 0, bkpt[0]-bkspace*i)
fullbkpt = np.insert(fullbkpt, fullbkpt.shape[0],
bkpt[nshortbkpt-1] + bkspace*i)
#
# Set the attributes
#
nc = fullbkpt.size - nord
self.breakpoints = fullbkpt
self.nord = nord
self.npoly = npoly
self.mask = np.ones((fullbkpt.size,), dtype='bool')
if npoly > 1:
self.coeff = np.zeros((npoly, nc), dtype='d')
self.icoeff = np.zeros((npoly, nc), dtype='d')
else:
self.coeff = np.zeros((nc,), dtype='d')
self.icoeff = np.zeros((nc,), dtype='d')
self.xmin = 0.0
self.xmax = 1.0
self.funcname = 'legendre'
return
    def fit(self, xdata, ydata, invvar, x2=None):
        """Calculate a B-spline in the least-squares sense.
        Fit is based on two variables: x which is sorted and spans a large range
        where bkpts are required y which can be described with a low order
        polynomial.
        Parameters
        ----------
        xdata : :class:`numpy.ndarray`
            Independent variable.
        ydata : :class:`numpy.ndarray`
            Dependent variable.
        invvar : :class:`numpy.ndarray`
            Inverse variance of `ydata`.
        x2 : :class:`numpy.ndarray`, optional
            Orthogonal dependent variable for 2d fits.
        Returns
        -------
        :func:`tuple`
            A tuple containing an integer error code, and the evaluation of the
            b-spline at the input values. An error code of -2 is a failure,
            -1 indicates dropped breakpoints, 0 is success, and positive
            integers indicate ill-conditioned breakpoints.
        """
        # Good (unmasked) breakpoints beyond the first nord; their count sets
        # the number of usable coefficients.
        goodbk = self.mask[self.nord:]
        nn = goodbk.sum()
        if nn < self.nord:
            # Too few good breakpoints to support a fit of this order.
            yfit = np.zeros(ydata.shape, dtype='f')
            return (-2, yfit)
        nfull = nn * self.npoly
        bw = self.npoly * self.nord  # bandwidth of the banded normal matrix
        # a1: banded b-spline basis ("action") matrix; a2: a1 weighted by the
        # inverse variance (foo tiles invvar across the bw columns).
        a1, lower, upper = self.action(xdata, x2=x2)
        foo = np.tile(invvar, bw).reshape(bw, invvar.size).transpose()
        a2 = a1 * foo
        # alpha/beta accumulate the banded normal equations A^T W A and A^T W y.
        alpha = np.zeros((bw, nfull+bw), dtype='d')
        beta = np.zeros((nfull+bw,), dtype='d')
        # bi/bo map the upper triangle of each bw x bw outer-product block into
        # the flat banded storage of alpha.
        bi = np.arange(bw, dtype='i4')
        bo = np.arange(bw, dtype='i4')
        for k in range(1, bw):
            bi = np.append(bi, np.arange(bw-k, dtype='i4')+(bw+1)*k)
            bo = np.append(bo, np.arange(bw-k, dtype='i4')+bw*k)
        # Accumulate contributions one breakpoint segment at a time;
        # lower[k]..upper[k] bound the pixels influenced by segment k.
        for k in range(nn-self.nord+1):
            itop = k*self.npoly
            ibottom = min(itop, nfull) + bw - 1
            ict = upper[k] - lower[k] + 1
            if ict > 0:
                work = np.dot(a1[lower[k]:upper[k]+1, :].T, a2[lower[k]:upper[k]+1, :])
                wb = np.dot(ydata[lower[k]:upper[k]+1], a2[lower[k]:upper[k]+1, :])
                alpha.T.flat[bo+itop*bw] += work.flat[bi]
                beta[itop:ibottom+1] += wb
        # Diagonal entries below this threshold are treated as singular.
        min_influence = 1.0e-10 * invvar.sum() / nfull
        errb = cholesky_band(alpha, mininf=min_influence)  # ,verbose=True)
        if isinstance(errb[0], int) and errb[0] == -1:
            a = errb[1]
        else:
            # Decomposition failed: mask the offending breakpoints and report.
            # A scalar failure index is wrapped in an array for maskpoints.
            if type(errb[0]) == type(0): errb = (np.array([errb[0]]),errb[1])
            yfit, foo = self.value(xdata, x2=x2, action=a1, upper=upper, lower=lower)
            return (self.maskpoints(errb[0]), yfit)
        errs = cholesky_solve(a, beta)
        if isinstance(errs[0], int) and errs[0] == -1:
            sol = errs[1]
        else:
            #
            # It is not possible for this to get called, because cholesky_solve
            # has only one return statement, & that statement guarantees that
            # errs[0] == -1
            #
            yfit, foo = self.value(xdata, x2=x2, action=a1, upper=upper, lower=lower)
            return (self.maskpoints(errs[0]), yfit)
        # Store the solved coefficients (and the Cholesky diagonal entries as
        # icoeff) at the positions of the good breakpoints.
        if self.npoly > 1:
            self.icoeff[:, goodbk] = np.array(a[0, 0:nfull].reshape(self.npoly, nn), dtype=a.dtype)
            self.coeff[:, goodbk] = np.array(sol[0:nfull].reshape(self.npoly, nn), dtype=sol.dtype)
        else:
            self.icoeff[goodbk] = np.array(a[0, 0:nfull], dtype=a.dtype)
            self.coeff[goodbk] = np.array(sol[0:nfull], dtype=sol.dtype)
        yfit, foo = self.value(xdata, x2=x2, action=a1, upper=upper, lower=lower)
        return (0, yfit)
    def action(self, x, x2=None):
        """Construct banded bspline matrix, with dimensions [ndata, bandwidth].
        Parameters
        ----------
        x : :class:`numpy.ndarray`
            Independent variable.
        x2 : :class:`numpy.ndarray`, optional
            Orthogonal dependent variable for 2d fits.
        Returns
        -------
        :func:`tuple`
            A tuple containing the b-spline action matrix; the 'lower' parameter,
            a list of pixel positions, each corresponding to the first
            occurence of position greater than breakpoint indx; and 'upper',
            Same as lower, but denotes the upper pixel positions.
        """
        from pydl import uniq
        from pydl.goddard.math import flegendre
        from pydl.pydlutils.trace import fchebyshev
        nx = x.size
        nbkpt = self.mask.sum()
        if nbkpt < 2*self.nord:
            # NOTE(review): the failure case returns plain ints rather than
            # arrays; callers unpacking (action, lower, upper) must check this.
            return (-2, 0, 0)
        n = nbkpt - self.nord
        gb = self.breakpoints[self.mask]
        bw = self.npoly*self.nord
        # lower/upper will hold, per breakpoint segment, the first and last
        # pixel indices falling in that segment (upper stays -1 when empty).
        lower = np.zeros((n - self.nord + 1,), dtype='i4')
        upper = np.zeros((n - self.nord + 1,), dtype='i4') - 1
        indx = self.intrv(x)
        bf1 = self.bsplvn(x, indx)
        action = bf1
        # uniq on the (sorted) segment indices marks each segment's last pixel;
        # applying it to the reversed array yields each segment's first pixel.
        aa = uniq(indx, np.arange(indx.size, dtype='i4'))
        upper[indx[aa]-self.nord+1] = aa
        rindx = indx[::-1]
        bb = uniq(rindx, np.arange(rindx.size, dtype='i4'))
        lower[rindx[bb]-self.nord+1] = nx - bb - 1
        if x2 is not None:
            if x2.size != nx:
                raise ValueError('Dimensions of x and x2 do not match.')
            # Map x2 onto [-1, 1] and evaluate the requested polynomial basis.
            x2norm = 2.0 * (x2 - self.xmin) / (self.xmax - self.xmin) - 1.0
            if self.funcname == 'poly':
                temppoly = np.ones((nx, self.npoly), dtype='f')
                for i in range(1, self.npoly):
                    temppoly[:, i] = temppoly[:, i-1] * x2norm
            elif self.funcname == 'poly1':
                temppoly = np.tile(x2norm, self.npoly).reshape(nx, self.npoly)
                for i in range(1, self.npoly):
                    temppoly[:, i] = temppoly[:, i-1] * x2norm
            elif self.funcname == 'chebyshev':
                temppoly = fchebyshev(x2norm, self.npoly)
            elif self.funcname == 'legendre':
                temppoly = flegendre(x2norm, self.npoly)
            else:
                raise ValueError('Unknown value of funcname.')
            # For 2-d fits, expand the 1-d spline basis by the x2 basis.
            action = np.zeros((nx, bw), dtype='d')
            counter = -1
            for ii in range(self.nord):
                for jj in range(self.npoly):
                    counter += 1
                    action[:, counter] = bf1[:, ii]*temppoly[:, jj]
        return (action, lower, upper)
def intrv(self, x):
"""Find the segment between breakpoints which contain each value in the array x.
The minimum breakpoint is nbkptord -1, and the maximum
is nbkpt - nbkptord - 1.
Parameters
----------
x : :class:`numpy.ndarray`
Data values, assumed to be monotonically increasing.
Returns
-------
:class:`numpy.ndarray`
Position of array elements with respect to breakpoints.
"""
gb = self.breakpoints[self.mask]
n = gb.size - self.nord
indx = np.zeros((x.size,), dtype='i4')
ileft = self.nord - 1
for i in range(x.size):
while x[i] > gb[ileft+1] and ileft < n - 1:
ileft += 1
indx[i] = ileft
return indx
    def bsplvn(self, x, ileft):
        """Evaluate the nord b-spline basis functions that can be non-zero at x.

        Uses the stable de Boor-style recursion: starting from the order-1
        (indicator) splines, each pass of the outer loop raises the spline
        order by one.
        Parameters
        ----------
        x : :class:`numpy.ndarray`
            Independent variable.
        ileft : :class:`numpy.ndarray`
            Breakpoint-segment index for each element of `x`, as returned by
            :meth:`intrv`.
        Returns
        -------
        :class:`numpy.ndarray`
            Array of shape (x.size, nord) containing the values of the
            possibly non-zero basis splines at each point.
        """
        bkpt = self.breakpoints[self.mask]
        vnikx = np.zeros((x.size, self.nord), dtype=x.dtype)
        deltap = vnikx.copy()  # distances from x up to the breakpoints above
        deltam = vnikx.copy()  # distances from the breakpoints below up to x
        j = 0
        # Order-1 splines are the indicator functions of each segment.
        vnikx[:, 0] = 1.0
        while j < self.nord - 1:
            ipj = ileft+j+1
            deltap[:, j] = bkpt[ipj] - x
            imj = ileft-j
            deltam[:, j] = x - bkpt[imj]
            vmprev = 0.0
            # Combine adjacent lower-order splines into the next order.
            for l in range(j+1):
                vm = vnikx[:, l]/(deltap[:, l] + deltam[:, j-l])
                vnikx[:, l] = vm*deltap[:, l] + vmprev
                vmprev = vm*deltam[:, j-l]
            j += 1
            vnikx[:, j] = vmprev
        return vnikx
    def value(self, x, x2=None, action=None, lower=None, upper=None):
        """Evaluate a bspline at specified values.
        Parameters
        ----------
        x : :class:`numpy.ndarray`
            Independent variable.
        x2 : :class:`numpy.ndarray`, optional
            Orthogonal dependent variable for 2d fits.
        action : :class:`numpy.ndarray`, optional
            Action matrix to use. If not supplied it is calculated.
        lower : :class:`numpy.ndarray`, optional
            If the action parameter is supplied, this parameter must also
            be supplied.
        upper : :class:`numpy.ndarray`, optional
            If the action parameter is supplied, this parameter must also
            be supplied.
        Returns
        -------
        :func:`tuple`
            A tuple containing the results of the bspline evaluation and a
            mask indicating where the evaluation was good.
        """
        # Work in sorted-x order; results are scattered back at the end.
        xsort = x.argsort()
        xwork = x[xsort]
        if x2 is not None:
            x2work = x2[xsort]
        else:
            x2work = None
        if action is not None:
            if lower is None or upper is None:
                raise ValueError('Must specify lower and upper if action is set.')
        else:
            action, lower, upper = self.action(xwork, x2=x2work)
        yfit = np.zeros(x.shape, dtype=x.dtype)
        bw = self.npoly * self.nord
        spot = np.arange(bw, dtype='i4')
        goodbk = self.mask.nonzero()[0]
        coeffbk = self.mask[self.nord:].nonzero()[0]
        n = self.mask.sum() - self.nord
        if self.npoly > 1:
            goodcoeff = self.coeff[:, coeffbk]
        else:
            goodcoeff = self.coeff[coeffbk]
        # maskthis = np.zeros(xwork.shape,dtype=xwork.dtype)
        # Evaluate segment by segment: pixels lower[i]..upper[i] use the bw
        # coefficients starting at i*npoly.
        for i in range(n-self.nord+1):
            ict = upper[i] - lower[i] + 1
            if ict > 0:
                yfit[lower[i]:upper[i]+1] = np.dot(
                    action[lower[i]:upper[i]+1, :], goodcoeff[i*self.npoly+spot])
        # Scatter the sorted-order fit back to the original ordering of x.
        yy = yfit.copy()
        yy[xsort] = yfit
        mask = np.ones(x.shape, dtype='bool')
        gb = self.breakpoints[goodbk]
        # Points outside the supported breakpoint range are flagged bad...
        outside = ((x < gb[self.nord-1]) | (x > gb[n]))
        if outside.any():
            mask[outside] = False
        # ...as are points falling inside any gap of masked-out breakpoints.
        hmm = ((np.diff(goodbk) > 2).nonzero())[0]
        for jj in range(hmm.size):
            inside = ((x >= self.breakpoints[goodbk[hmm[jj]]]) &
                      (x <= self.breakpoints[goodbk[hmm[jj]+1]-1]))
            if inside.any():
                mask[inside] = False
        return (yy, mask)
    def maskpoints(self, err):
        """Perform simple logic of which breakpoints to mask.
        Parameters
        ----------
        err : :class:`numpy.ndarray`
            The list of indexes returned by the cholesky routines.
        Returns
        -------
        :class:`int`
            An integer indicating the results of the masking. -1 indicates
            that the error points were successfully masked. -2 indicates
            failure; the calculation should be aborted.
        Notes
        -----
        The mask attribute is modified, assuming it is possible to create the
        mask.
        """
        nbkpt = self.mask.sum()
        if nbkpt <= 2*self.nord:
            # Masking any more would leave too few breakpoints to fit.
            return -2
        #hmm = err[np.unique(err/self.npoly)]/self.npoly
        # NOTE(review): under Python 3, `err/self.npoly` is true division, so
        # `hmm` is float-valued; recent NumPy raises IndexError when a float
        # array is used to index `test` below.  Confirm whether integer
        # division (//) was intended.  Also note the value is divided by
        # npoly twice here.
        hmm = np.unique(err/self.npoly)/self.npoly ##acr change, original version (above) was incorrect use of np.unique
        n = nbkpt - self.nord
        if np.any(hmm >= n):
            return -2
        test = np.zeros(nbkpt, dtype='bool')
        # Mark a window of breakpoints around each offending index, clipped
        # to the valid coefficient range.
        for jj in range(-np.ceil(self.nord/2.0).astype(int), int(self.nord/2.0)):
            foo = np.where((hmm+jj) > 0, hmm+jj, np.zeros(hmm.shape, dtype=hmm.dtype))
            inside = np.where((foo+self.nord) < n-1, foo+self.nord, np.zeros(hmm.shape, dtype=hmm.dtype)+n-1)
            test[inside] = True
        if test.any():
            reality = np.where(test == True)[0]
            if self.mask[reality].any():
                self.mask[reality] = False
                return -1
            else:
                # All flagged breakpoints were already masked.
                return -2
        else:
            return -2
def cholesky_band(l, mininf=0.0, verbose=False):
    """Compute Cholesky decomposition of banded matrix.
    Parameters
    ----------
    l : :class:`numpy.ndarray`
        A matrix on which to perform the Cholesky decomposition.  Stored in
        lower-banded form: row 0 holds the diagonal, row k the k-th
        subdiagonal, with shape (bandwidth, n + bandwidth).
    mininf : :class:`float`, optional
        Entries in the `l` matrix are considered negative if they are less
        than this value (default 0.0).
    verbose : :class:`bool`, optional
        If set to ``True``, print some debugging information.
    Returns
    -------
    :func:`tuple`
        If problems were detected, the first item will be the index or
        indexes where the problem was detected, and the second item will simply
        be the input matrix. If no problems were detected, the first item
        will be -1, and the second item will be the Cholesky decomposition.
    """
    from warnings import warn
    from pydl.pydlutils import PydlutilsUserWarning
    lower = l.copy()
    bw, nn = lower.shape
    n = nn - bw
    # Reject non-positive or non-finite diagonal entries up front.
    negative = lower[0, 0:n] <= mininf
    if negative.any() or not np.all(np.isfinite(lower)):
        warn('Bad entries: ' + str(negative.nonzero()[0]), PydlutilsUserWarning)
        return (negative.nonzero()[0], l)
    kn = bw - 1
    spot = np.arange(kn, dtype='i4') + 1
    # bi holds flat indices of the upper triangle of a kn x kn block; it is
    # used to subtract the outer-product update within the banded storage.
    bi = np.arange(kn, dtype='i4')
    for i in range(1, kn):
        bi = np.append(bi, np.arange(kn-i, dtype='i4') + (kn+1)*i)
    for j in range(n):
        # Standard Cholesky column step, expressed in banded storage.
        lower[0, j] = np.sqrt(lower[0, j])
        lower[spot, j] /= lower[0, j]
        x = lower[spot, j]
        if not np.all(np.isfinite(x)):
            warn('NaN found in cholesky_band.', PydlutilsUserWarning)
            return (j, l)
        hmm = np.outer(x, x)
        here = bi+(j+1)*bw
        lower.T.flat[here] -= hmm.flat[bi]
    return (-1, lower)
def cholesky_solve(a, bb):
    """Solve the equation Ax=b where A is a Cholesky-banded matrix.

    Performs forward substitution with the banded Cholesky factor, then
    back substitution, without ever forming the full matrix.
    Parameters
    ----------
    a : :class:`numpy.ndarray`
        :math:`A` in :math:`A x = b`, in lower-banded Cholesky storage
        (as returned by ``cholesky_band``).
    bb : :class:`numpy.ndarray`
        :math:`b` in :math:`A x = b`.
    Returns
    -------
    :func:`tuple`
        A tuple containing the status and the result of the solution.  The
        status is always -1.
    """
    sol = bb.copy()
    bandwidth = a.shape[0]
    ndiag = bandwidth - 1
    n = sol.shape[0] - bandwidth
    # Forward substitution: sweep down the columns, eliminating the
    # subdiagonal contributions as we go.
    below = np.arange(ndiag, dtype='i4') + 1
    for j in range(n):
        sol[j] /= a[0, j]
        sol[j + below] -= sol[j] * a[below, j]
    # Back substitution with the (implicit) transposed factor.
    above = ndiag - np.arange(ndiag, dtype='i4')
    for j in reversed(range(n)):
        sol[j] = (sol[j] - np.sum(a[above, j] * sol[j + above])) / a[0, j]
    return (-1, sol)
def iterfit(xdata, ydata, invvar=None, upper=5, lower=5, x2=None,
            maxiter=10, **kwargs):
    """Iteratively fit a b-spline set to data, with rejection.
    Parameters
    ----------
    xdata : :class:`numpy.ndarray`
        Independent variable.
    ydata : :class:`numpy.ndarray`
        Dependent variable.
    invvar : :class:`numpy.ndarray`
        Inverse variance of `ydata`.  If not set, it will be calculated based
        on the standard deviation.
    upper : :class:`int` or :class:`float`
        Upper rejection threshold in units of sigma, defaults to 5 sigma.
    lower : :class:`int` or :class:`float`
        Lower rejection threshold in units of sigma, defaults to 5 sigma.
    x2 : :class:`numpy.ndarray`, optional
        Orthogonal dependent variable for 2d fits.
    maxiter : :class:`int`, optional
        Maximum number of rejection iterations, default 10. Set this to
        zero to disable rejection.
    Returns
    -------
    :func:`tuple`
        A tuple containing the fitted bspline object and an output mask.
    """
    from pydl.pydlutils.math import djs_reject
    nx = xdata.size
    if ydata.size != nx:
        raise ValueError('Dimensions of xdata and ydata do not agree.')
    if invvar is not None:
        if invvar.size != nx:
            raise ValueError('Dimensions of xdata and invvar do not agree.')
    else:
        #
        # This correction to the variance makes it the same
        # as IDL's variance()
        #
        var = ydata.var()*(float(nx)/float(nx-1))
        if var == 0:
            var = 1.0
        invvar = np.ones(ydata.shape, dtype=ydata.dtype)/var
    if x2 is not None:
        if x2.size != nx:
            raise ValueError('Dimensions of xdata and x2 do not agree.')
    yfit = np.zeros(ydata.shape)
    if invvar.size == 1:
        # NOTE(review): a scalar outmask will fail at `outmask[xsort]` below;
        # confirm whether invvar can legitimately be a scalar here.
        outmask = True
    else:
        outmask = np.ones(invvar.shape, dtype='bool')
    # Work in sorted-x order throughout the rejection loop.
    xsort = xdata.argsort()
    maskwork = (outmask & (invvar > 0))[xsort]
    if 'oldset' in kwargs:
        # Re-use a previously constructed bspline object, resetting its state.
        sset = kwargs['oldset']
        sset.mask = True
        sset.coeff = 0
    else:
        if not maskwork.any():
            raise ValueError('No valid data points.')
            # return (None,None)
        if 'fullbkpt' in kwargs:
            # NOTE(review): this branch never creates `sset`, so the fit loop
            # below would raise NameError; confirm intended fullbkpt support.
            fullbkpt = kwargs['fullbkpt']
        else:
            sset = bspline(xdata[xsort[maskwork]], **kwargs)
            if maskwork.sum() < sset.nord:
                print('Number of good data points fewer than nord.')
                return (sset, outmask)
            if x2 is not None:
                # Record the x2 range used for basis normalization.
                if 'xmin' in kwargs:
                    xmin = kwargs['xmin']
                else:
                    xmin = x2.min()
                if 'xmax' in kwargs:
                    xmax = kwargs['xmax']
                else:
                    xmax = x2.max()
                if xmin == xmax:
                    xmax = xmin + 1
                sset.xmin = xmin
                sset.xmax = xmax
                if 'funcname' in kwargs:
                    sset.funcname = kwargs['funcname']
    xwork = xdata[xsort]
    ywork = ydata[xsort]
    invwork = invvar[xsort]
    if x2 is not None:
        x2work = x2[xsort]
    else:
        x2work = None
    iiter = 0
    error = 0
    qdone = False
    # Iterate fit + outlier rejection until no more points are rejected or
    # the iteration limit is reached.
    while (error != 0 or qdone == False) and iiter <= maxiter:
        # print iiter
        goodbk = sset.mask.nonzero()[0]
        if maskwork.sum() <= 1 or not sset.mask.any():
            sset.coeff = 0
            iiter = maxiter + 1
        else:
            if 'requiren' in kwargs:
                # Drop breakpoints whose segment holds fewer than `requiren`
                # good data points.
                i = 0
                while xwork[i] < sset.breakpoints[goodbk[sset.nord]] and i < nx-1:
                    i += 1
                ct = 0
                for ileft in range(sset.nord, sset.mask.sum()-sset.nord+1):
                    while (xwork[i] >= sset.breakpoints[goodbk[ileft]] and
                           xwork[i] < sset.breakpoints[goodbk[ileft+1]] and
                           i < nx-1):
                        ct += invwork[i]*maskwork[i] > 0
                        i += 1
                    if ct >= kwargs['requiren']:
                        ct = 0
                    else:
                        sset.mask[goodbk[ileft]] = False
            error, yfit = sset.fit(xwork, ywork, invwork*maskwork,
                                   x2=x2work)
        iiter += 1
        inmask = maskwork
        if error == -2:  ##all breaks dropped out
            return (sset, outmask)
        elif error == 0:
            maskwork, qdone = djs_reject(ywork, yfit, invvar=invwork,
                                         inmask=inmask, outmask=maskwork,
                                         upper=upper, lower=lower)
        else:
            pass
    outmask[xsort] = maskwork
    # Scatter the sorted-order fit back to the input ordering.  Fixed: take a
    # copy first — the previous `temp = yfit` aliased the array being
    # assigned, making `yfit[xsort] = temp` an overlapping fancy assignment
    # with undefined results.
    temp = yfit.copy()
    yfit[xsort] = temp
    return (sset, outmask)
| tofflemire/saphires | saphires/extras/bspline_acr.py | bspline_acr.py | py | 24,850 | python | en | code | 8 | github-code | 36 |
35689287977 | from utils.database import db
from utils.database import Product as ProductDB, ProductSize as ProductSizes, ProductColor as ProductColors, SubCategories as SubCategoriesDB, Categories as CategoriesDB
def get_products(id:int=None, search_string:str=None, category_item:str=None, subcategory_item:str=None) -> list:
    """Return product data from the database.

    When ``id`` is None, returns summary dicts for every product, optionally
    filtered by ``search_string`` (case-insensitive substring match against
    name, category or subcategory) or — only when no search string is given —
    by the exact ``category_item``/``subcategory_item`` pair.  When ``id`` is
    given, returns a single-element list with the full detail dict (including
    sizes and colors) for that product.
    """
    if id is not None:
        # Detail view: one product together with its size/colour variants.
        product = db.session.query(ProductDB).get(id)
        detail = {'id': product.id,
                  'name': product.name,
                  'image': product.image,
                  'description': product.description,
                  'stocks': product.stocks,
                  'price': product.price,
                  'material': product.material,
                  'composition': product.composition,
                  'care': product.care,
                  'exchange': product.exchange,
                  'country': product.country,
                  'sizes': [size.size for size in product.sizes],
                  'colors': [color.color for color in product.colors]}
        return [detail]

    # Summary view: every product with its category/subcategory names.
    # NOTE(review): one subcategory + one category lookup per product
    # (N+1 query pattern) — confirm whether a join would be acceptable here.
    summaries = []
    for product in db.session.query(ProductDB).all():
        subcategory = db.session.query(SubCategoriesDB).get(product.subcategory_id)
        category = db.session.query(CategoriesDB).get(subcategory.category_id)
        summaries.append({'id': product.id,
                          'name': product.name,
                          'image': product.image,
                          'description': product.description,
                          'category_name': category.name,
                          'subcategory_name': subcategory.name})
    if search_string and summaries:
        # Free-text search across name, category and subcategory.
        needle = search_string.lower()
        summaries = [item for item in summaries
                     if needle in item['name'].lower()
                     or needle in item['category_name'].lower()
                     or needle in item['subcategory_name'].lower()]
    elif subcategory_item and summaries:
        # Category browsing: require an exact category + subcategory match.
        summaries = [item for item in summaries
                     if item['subcategory_name'] == subcategory_item
                     and item['category_name'] == category_item]
    return summaries
def add_products(products):
    """Insert the given product dicts (with their sizes and colors) into the
    database.

    :param products: iterable of dicts holding the ProductDB column values
                     plus 'sizes' and 'colors' lists.
    :return: True on success, False if any insert failed.
    """
    try:
        for entry in products:
            # Build the main product row.
            record = ProductDB(name=entry['name'],
                               image=entry['image'],
                               description=entry['description'],
                               stocks=entry['stocks'],
                               price=entry['price'],
                               material=entry['material'],
                               composition=entry['composition'],
                               care=entry['care'],
                               exchange=entry['exchange'],
                               country=entry['country'],
                               subcategory_id=entry['subcategory_id'])
            # Attach one child row per size and per colour variant.
            record.sizes.extend(ProductSizes(size=s) for s in entry['sizes'])
            record.colors.extend(ProductColors(color=c) for c in entry['colors'])
            db.session.add(record)
            db.session.commit()  # committed per product, as before
    except Exception as e:
        print(e)
        return False
    finally:
        db.session.close()
    return True
def delete_products(product_ids:list, is_delete_all:bool=False):
    """Delete products (and their size/colour rows) from the database.

    :param product_ids: ids of the products to remove; ignored when
                        ``is_delete_all`` is True.
    :param is_delete_all: when True, wipe every product, size and colour row.
    :return: True on success; False on failure, or when ``product_ids`` is
             empty and ``is_delete_all`` is False.
    """
    if not is_delete_all:
        if not len(product_ids): return False
        products = db.session.query(ProductDB).filter(ProductDB.id.in_(product_ids)).all()
        try:
            for product in products:
                # Remove dependent variant rows before the product itself.
                ProductSizes.query.filter(ProductSizes.product_id == product.id).delete()
                ProductColors.query.filter(ProductColors.product_id == product.id).delete()
                db.session.delete(product)
            db.session.commit()
        except Exception as e:
            print(e)
            return False
        finally:
            db.session.close()
    else:
        try:
            ProductSizes.query.delete()
            ProductColors.query.delete()
            counts = db.session.query(ProductDB).delete()
            db.session.commit()
            print(f'Deleted {counts} entries.')
        except Exception as e:
            print(e)
            return False
        finally:
            db.session.close()
    return True
class YelpCandidateGen:
    """Generates candidate Yelp businesses for a textual mention.

    Candidates come from an Elasticsearch name search, optionally augmented
    by an acronym -> business lookup table loaded from a TSV file.
    """

    def __init__(self, elasticsearch, biz_acronyms_file, index_name='yelp', biz_doc_type='biz'):
        self.es = elasticsearch
        self.index_name = index_name
        self.biz_doc_type = biz_doc_type
        if biz_acronyms_file:
            self.acronym_biz_dict = YelpCandidateGen.__load_biz_acronyms(biz_acronyms_file)
        else:
            self.acronym_biz_dict = dict()

    def gen_candidates(self, mention, rev_biz_city, rev_text):
        """Return (business_id, score) candidates for a mention; acronym hits
        are appended with a fixed score of 1.0."""
        candidates = self.gen_candidates_es(mention, rev_biz_city, rev_text)
        abbr = mention.name_str.replace('.', '')
        if ' ' not in abbr and abbr.isupper():
            # Short (<= 2 letter) acronyms are too ambiguous: only accept
            # them when the business city matches the review's city.
            for biz_id, biz_city in self.acronym_biz_dict.get(abbr, ()):
                if len(abbr) > 2 or biz_city == rev_biz_city:
                    candidates.append((biz_id, 1.0))
        return candidates

    def gen_candidates_es(self, mention, rev_biz_city, rev_text):
        """Query Elasticsearch for the mention and filter the raw hits."""
        possessive = None
        if mention.endpos + 1 < len(rev_text) and rev_text[mention.endpos:mention.endpos + 2] == "'s":
            # The mention is followed by "'s"; also try the possessive form.
            possessive = mention.name_str + "'s"
        hits = self.__match_biz_es(rev_biz_city, mention.name_str, possessive)
        return YelpCandidateGen.__filter_es_candidates(hits, mention)

    def __match_biz_es(self, rev_biz_city, query_str0, query_str1):
        """Run the boosted name match (city match is a soft preference)."""
        if query_str1:
            name_clause = {
                "bool": {
                    "should": [
                        {"match": {"name": {"query": query_str0, "boost": 5}}},
                        {"match": {"name": {"query": query_str1, "boost": 5}}}
                    ]
                }
            }
        else:
            name_clause = {"match": {"name": {"query": query_str0, "boost": 5}}}
        body = {
            "query": {
                "bool": {
                    "must": name_clause,
                    "should": {"match": {"city": rev_biz_city}}
                }
            }
        }
        response = self.es.search(index=self.index_name, body=body, size=30)
        return response['hits']['hits']

    @staticmethod
    def __load_biz_acronyms(biz_acronyms_file):
        """Load acronym -> [(business_id, city), ...] from a TSV file."""
        acronym_biz_dict = dict()
        with open(biz_acronyms_file, 'r') as fin:
            for line in fin:
                fields = line.strip().split('\t')
                if len(fields) < 3:
                    continue
                acronym, biz_id, biz_city = fields
                # NOTE(review): .decode('utf-8') implies Python 2 byte
                # strings here — confirm the target interpreter.
                acronym_biz_dict.setdefault(acronym, list()).append(
                    (biz_id, biz_city.decode('utf-8')))
        return acronym_biz_dict

    @staticmethod
    def __filter_es_candidates(hits, mention):
        """Keep hits whose name contains every word of a single-word mention;
        multi-word mentions are kept unconditionally."""
        return [(hit['_source']['business_id'], hit['_score'])
                for hit in hits
                if ' ' in mention.name_str
                or YelpCandidateGen.__all_words_in(mention.name_str, hit['_source']['name'])]

    @staticmethod
    def __all_words_in(s0, s1):
        """True if every space-separated word of s0 appears in s1 (case-insensitive)."""
        target = s1.lower()
        return all(word in target for word in s0.lower().split(' '))
| hldai/labelel | yelp/yelpcandidategen.py | yelpcandidategen.py | py | 3,598 | python | en | code | 0 | github-code | 36 |
15509801064 | import numpy as np
import geopandas
import shapely
class SparseGrid:
    def __init__(self, x_lim, y_lim, n_cols=10, n_rows=10, tag_prefix = ''):
        '''
        General class to define a spatial frame composed of regular polygons,
        based on a grid of size n_cols x n_rows
        :param x_lim: Minimum and Maximum values in the horizontal axis.
                      Tupple of floats.
        :param y_lim: Minimum and Maximum values in the vertical axis.
                      Tupple of floats.
        :param n_cols: Number of columns in which the horizontal axis is divided.
                       Integer.
        :param n_rows: Number of rows in which the vertical axis is divided.
                       Integer
        :param tag_prefix: Prefix to use as id of the polygons in the grid.
                           String.
        '''
        assert len(x_lim) == 2 and np.diff(x_lim) > 0
        assert len(y_lim) == 2 and np.diff(y_lim) > 0
        assert isinstance(n_cols, int) and n_cols > 0
        assert isinstance(n_rows, int) and n_cols > 0
        assert isinstance(tag_prefix, str)
        self.x_lim = x_lim
        self.y_lim = y_lim
        # Cell sizes and the left/bottom edge coordinate of each cell.
        self.dx = (x_lim[1] - x_lim[0]) / n_cols
        self.dy = (y_lim[1] - y_lim[0]) / n_rows
        self.x_grid = np.linspace(x_lim[0], x_lim[1] - self.dx, n_cols)
        self.y_grid = np.linspace(y_lim[0], y_lim[1] - self.dy, n_rows)
        self.n_cols = n_cols
        self.n_rows = n_rows
        # Zero-padded ids, wide enough for the largest cell index.
        n_cells = self.n_cols * self.n_rows
        id_size = len(str(n_cells - 1))
        self.tag_prefix = tag_prefix
        self.tags = [self.tag_prefix + '0' * (id_size - len(str(f'{i}'))) + f'{i}' for i in range(n_cols * n_rows) ]
        self.sparse_frame = geopandas.GeoDataFrame({'id' :[], 'geometry' :None})

    def get_row(self, y):
        '''
        Get the row in the grid to which a value y corresponds
        :param y: Coordinate in the vertical axis
                  Float
        :return: Row number (Integer), or None when y is outside y_lim
        '''
        # Fixed: the bounds test previously used `or`, which is always true,
        # so out-of-range coordinates fell through to a wrong (wrapped) index.
        if y >= self.y_lim[0] and y <= self.y_lim[1]:
            return sum(self.y_grid <= y) - 1

    def get_col(self, x):
        '''
        Get the column in the grid to which a value x corresponds
        :param x: Coordinate in the horizontal axis
                  Float
        :return: Column number (Integer), or None when x is outside x_lim
        '''
        # Fixed: same `or` -> `and` bounds-test correction as get_row.
        if x >= self.x_lim[0] and x <= self.x_lim[1]:
            return sum(self.x_grid <= x) - 1

    def tag_from_ij(self, i, j):
        '''
        Get the tag (or id) of a polygon based on its location within the grid
        :param i: Column number within the grid
                  Integer
        :param j: Row number within the grid
                  Integer
        :return: Tag
                 String
        '''
        ij = str(j * self.n_cols + i)
        # Fixed: pad to the same id width used when building self.tags in
        # __init__ (len(str(n_cells - 1))).  The previous width,
        # len(str(n_cells)), disagreed whenever n_cells is a power of ten,
        # producing tags that ij_from_tag could not find.
        id_size = len(str(self.n_cols * self.n_rows - 1))
        return self.tag_prefix + '0' * (id_size - len(ij)) + ij

    def tag_from_xy(self, x, y):
        '''
        Get the tag (or id) of a polygon based on a pair of coordinates located within it
        :param x: Coordinate in the horizontal axis
                  Float
        :param y: Coordinate in the vertical axis
                  Float
        :return: Tag (String), or None when (x, y) is outside the grid
        '''
        nx = self.get_col(x)
        ny = self.get_row(y)
        if nx is not None and ny is not None:
            return self.tag_from_ij(nx, ny)

    def ij_from_tag(self, tag):
        '''
        Get the location of a polygon within the grid based on its tag (or id)
        :param tag: id of a polygon
                    String
        :return: Location (i, j) of a polygon
                 Tuple of integers
        '''
        ix = self.tags.index(tag)
        ny = ix // self.n_cols
        nx = ix % self.n_cols
        return nx, ny

    def add_polygon_from_tag(self, tag):
        '''
        Incorporate a polygon to the sparse_grid GeoDataFrame
        :param tag: id of a polygon
                    String
        '''
        if tag not in self.sparse_frame.id.tolist():
            nx, ny = self.ij_from_tag(tag)
            x0 = self.x_lim[0] + nx * self.dx
            y0 = self.y_lim[0] + ny * self.dy
            # Cell corners, counter-clockwise from the lower-left.
            sq = [(x0, y0), (x0, y0 + self.dy), (x0 + self.dx, y0 + self.dy), (x0 + self.dx, y0)]
            ngeo = geopandas.GeoDataFrame({'id': [tag],
                                           'geometry': shapely.geometry.Polygon(sq)})
            # NOTE(review): GeoDataFrame.append is removed in recent
            # pandas/geopandas — confirm the pinned versions.
            self.sparse_frame = self.sparse_frame.append(ngeo)
            self.sparse_frame.reset_index(inplace=True, drop=True)

    def add_polygon_from_xy(self, X):
        '''
        Incorporate a polygon to the sparse_grid GeoDataFrame
        :param X: Points within the grid
                  Numpy array of dimensions (n, 2)
        '''
        assert isinstance(X, np.ndarray)
        assert X.shape[1] == 2
        for xi in X:
            tagi = self.tag_from_xy(*xi)
            self.add_polygon_from_tag(tagi)

    def get_simplified(self, tolerance=1e-4):
        '''
        Simplify adjacent polygons in sparse_grid
        :param tolerance: Points in a simplified geometry will be no more than `tolerance` distance from the original.
                          (see geopandas.GeoDataFrame.simplify).
                          float
        :return: Simplified polygons object.
                 GeoDataFrame
        '''
        assert tolerance > 0
        # NOTE(review): shapely.geometry.multipolygon.asMultiPolygon was
        # removed in Shapely 2.0 — confirm the pinned shapely version.
        mpolyg = shapely.geometry.multipolygon.asMultiPolygon(self.sparse_frame.geometry)
        mpolyg = mpolyg.simplify(tolerance=tolerance, preserve_topology=False)
        return geopandas.GeoDataFrame({'id': list(range(len(mpolyg))), 'geometry': mpolyg})
7595447308 | from .base.dynamic_symbol import DynamicSymbolLexeme
from .identifier import IdentifierLexeme
class KeywordSymbolLexeme(DynamicSymbolLexeme):
    """Lexeme matching reserved keyword symbols.

    Binds one step tighter than identifiers, so a registered keyword is
    never tokenised as a plain identifier.
    """

    lexeme_id = "keywords.keyword"

    @classmethod
    def precedence(cls):
        # One above identifiers: keywords win ties against identifier matches.
        return IdentifierLexeme.PRECEDENCE + 1
KeywordSymbolLexeme.register([
    # undecided, or not yet dealt with
    # NOTE(review): some entries ("heap", "stack") appear twice — confirm
    # whether register() tolerates duplicates.
    "_",
    "reentrant", "threadsafe", "recursive",
    "module", "class", "interface", "abstract", "protocol",
    "ducktype", "duck",
    "this", "args", "superclass",
    "structure", "type", "alias",
    "composite", "component", "composer",
    "private", "public", "protected",
    "overridable", "override",
    "is", "as",
    "singleton",
    "constructor", "disowned",
    "extends", "implements",
    "trait", "mixin", "sealed",
    "identifier",
    "new",
    "let", "constant",
    "import", "from", "package", "export",
    "scope",
    "static",
    "lazy",
    "enumeration",
    "shared",
    "assert", "validate",
    "logger", "metrics",
    # FIX: added the missing comma after "metrics" — implicit string
    # concatenation previously merged it with "test" into one bogus
    # keyword "metricstest".
    "test", "testing", "mock",
    "serialise", "deserialise",
    "modulo", "**", "pow",
    "collection", "array", "map", "set", "iterator", "list", "tuple", "queue", "hashtable", "bag", "heap", "stack",
    "mutable", "immutable",
    "transaction", "commit", "rollback",
    "dataset", "index", "foreignkey", "unique",
    "using", "lock",
    "heap", "stack",
    "implicit",
    "property",
    "async", "await", "run",
    "generic",
    "actor", "select",
    "channel", "send", "receive",
    "consumer", "producer",
    "message",
    "thread", "fiber",
    "getbit", "setbit", "getbyte", "setbyte",
    "bitwise_and",
    # FIX: added the missing comma after "bitwise_and" — it previously
    # merged with "ffs" into the bogus keyword "bitwise_andffs".
    "ffs", "ctz", "ntz", "popcount", "shift",
    "process", "service"
])
| padresmurfa/yapl | v0/transpiler/lexemes/keywords.py | keywords.py | py | 1,716 | python | en | code | 0 | github-code | 36 |
# Tokens recognised by the RPN evaluator below.
ADDITION_SYMBOL = '+'
SUBTRACTION_SYMBOL = '-'
MULTIPLICATION_SYMBOL = '*'
EXPONENTIATION_SYMBOL = '^'
# All operator symbols, used for membership tests when parsing.
OPERATORS = (ADDITION_SYMBOL, SUBTRACTION_SYMBOL, MULTIPLICATION_SYMBOL,
             EXPONENTIATION_SYMBOL)
class Polynomial:
    """Sparse single-variable polynomial.

    ``terms`` maps integer powers to (integer) coefficients; ``pronumeral``
    is the variable name used when printing.
    """

    def __init__(self, terms, pronumeral):
        # Terms must be a dictionary, with the keys being the integer power to
        # which the value (the coefficient) is raised.
        self.terms = terms
        self.pronumeral = pronumeral

    def _format_term(self, power, coefficient):
        """Render one non-zero term, e.g. '3x^2', '-x', '21x', '7'."""
        if power == 0:
            return str(coefficient)
        if power == 1:
            variable = self.pronumeral
        else:
            variable = self.pronumeral + '^' + str(power)
        if coefficient == 1:
            return variable
        if coefficient == -1:
            return '-' + variable
        return str(coefficient) + variable

    def __repr__(self):
        # Fixed: the old implementation post-processed the joined string with
        # blanket replaces ('^1' -> '', '1x' -> 'x'), which mangled powers of
        # ten and above ('3x^10' -> '3x0') and coefficients ending in 1
        # ('21x' -> '2x').  Each term is now formatted individually.
        string_terms = [self._format_term(power, self.terms[power])
                        for power in sorted(self.terms.keys(), reverse=True)
                        if self.terms[power]]
        if not string_terms:
            # Every coefficient was zero (or there were no terms).
            return '0'
        return ' + '.join(string_terms).replace('+ -', '- ')

    @property
    def degree(self):
        """Highest power present in ``terms`` (0 for an empty polynomial)."""
        degree = 0
        for power in self.terms.keys():
            if power > degree:
                degree = power
        return degree

    # All maths methods assume both self and other have the same pronumeral.
    def __add__(self, other):
        new_polynomial = Polynomial({}, self.pronumeral)
        for power in set(self.terms) | set(other.terms):
            new_polynomial.terms[power] = self.terms.get(power, 0) + \
                                          other.terms.get(power, 0)
        return new_polynomial

    def __sub__(self, other):
        # a - b == a + (-1) * b
        return self + Polynomial({0: -1}, self.pronumeral) * other

    def __mul__(self, other):
        # Distribute every pair of terms, accumulating coefficients by power.
        product = Polynomial({}, self.pronumeral)
        for own_power, own_coefficient in self.terms.items():
            for other_power, other_coefficient in other.terms.items():
                power = own_power + other_power
                product.terms[power] = product.terms.get(power, 0) + \
                                       own_coefficient * other_coefficient
        return product

    def __pow__(self, other):
        # Exponents must be constant polynomials (a single power-0 term).
        if list(other.terms.keys()) != [0]:
            raise ValueError(
                'can only raise Polynomials to Polynomials with only a '
                'constant term.'
            )
        new_polynomial = Polynomial({0: 1}, self.pronumeral)
        for _ in range(other.terms[0]):
            new_polynomial *= self
        return new_polynomial
# Read a whitespace-separated Reverse Polish Notation expression (for example
# "x 1 + 2 ^") and evaluate it into a single Polynomial using a stack.
reverse_polish_sequence = input('RPN: ').split()
memory = []
for item in reverse_polish_sequence:
    # Perhaps it is an integer (pushed as a constant polynomial)...
    try:
        int(item)
    except ValueError:
        pass
    else:
        # Assume x is the pronumeral for now...
        memory.append(Polynomial({0:int(item)}, 'x'))
        continue
    # Or perhaps it is an operator...
    if item in OPERATORS:
        # Pop the two most recent operands (the second popped is the
        # left-hand side) and push the result back onto the stack.
        second_term = memory.pop(-1)
        first_term = memory.pop(-1)
        if item == ADDITION_SYMBOL:
            memory.append(first_term + second_term)
        elif item == SUBTRACTION_SYMBOL:
            memory.append(first_term - second_term)
        elif item == MULTIPLICATION_SYMBOL:
            memory.append(first_term * second_term)
        elif item == EXPONENTIATION_SYMBOL:
            memory.append(first_term ** second_term)
        continue
    # Failing all that, it must be a pronumeral (pushed as a degree-1 term).
    memory.append(Polynomial({1:1}, item))
    for polynomial in memory:
        # Update all the pronumerals set previously...
        # (every polynomial on the stack is re-labelled with the most recent
        # pronumeral seen, so only one variable name survives in the output).
        polynomial.pronumeral = item
# For a well-formed expression the stack has collapsed to a single polynomial.
print(memory[0])
| thewrongjames/ncss-challenge-2017 | expand_this.py | expand_this.py | py | 4,262 | python | en | code | 0 | github-code | 36 |
25597505663 | # Basic packages
import pandas as pd
import numpy as np
import re
import collections
# import matplotlib.pyplot as plt
from pathlib import Path
# Packages for data preparation
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import LabelEncoder
# Packages for modeling
from keras import models
from keras import layers
from keras import regularizers
NB_WORDS = 10000  # Parameter indicating the number of words we'll put in the dictionary
VAL_SIZE = 1000  # Size of the validation set
NB_START_EPOCHS = 20  # Number of epochs we usually start to train with
BATCH_SIZE = 512  # Size of the batches used in the mini-batch gradient descent
MAX_LEN = 24  # Maximum number of words in a sequence
GLOVE_DIM = 50  # Number of dimensions of the GloVe word embeddings (overridden to 25 further down)
INPUT_PATH = '../input'  # Path where all input files are stored
# Input/output directories relative to the working directory.
root = Path('./')
input_path = root / 'input/'
ouput_path = root / 'output/'  # NOTE(review): misspelling of "output_path" kept as-is; appears unused here.
source_path = root / 'source/'
def deep_model(model, X_train, y_train, X_valid, y_valid):
    '''
    Function to train a multi-class model. The number of epochs and
    batch_size are set by the constants at the top of the
    notebook.

    Parameters:
        model : model with the chosen architecture
        X_train : training features
        y_train : training target
        X_valid : validation features
        Y_valid : validation target
    Output:
        model training history (keras History object)
    '''
    model.compile(optimizer='rmsprop'
                  , loss='categorical_crossentropy'
                  , metrics=['accuracy'])
    # BUGFIX: keep and return the History object. Previously the function
    # implicitly returned None despite documenting "model training history",
    # so callers such as `emb_history2 = deep_model(...)` (used by
    # eval_metric) received nothing.
    history = model.fit(X_train
                        , y_train
                        , epochs=NB_START_EPOCHS
                        , batch_size=BATCH_SIZE
                        , validation_data=(X_valid, y_valid)
                        , verbose=1)
    model.save("./output/model/model.h5")
    return history
def eval_metric(history, metric_name):
    '''
    Function to evaluate a trained model on a chosen metric.
    Training and validation metric are plotted in a
    line chart for each epoch.

    Parameters:
        history : model training history
        metric_name : loss or accuracy
    Output:
        line chart with epochs of x-axis and metric on
        y-axis
    '''
    # BUGFIX: the module-level `import matplotlib.pyplot as plt` is
    # commented out at the top of the file, so `plt` was an undefined name
    # here; import it locally when plotting is actually requested.
    import matplotlib.pyplot as plt
    metric = history.history[metric_name]
    val_metric = history.history['val_' + metric_name]
    e = range(1, NB_START_EPOCHS + 1)
    plt.plot(e, metric, 'bo', label='Train ' + metric_name)
    plt.plot(e, val_metric, 'b', label='Validation ' + metric_name)
    plt.legend()
    plt.show()
def test_model(model, X_train, y_train, X_test, y_test, epoch_stop):
    '''
    Retrain *model* on the full training data for the optimal number of
    epochs, then evaluate it on held-out test data.

    Parameters:
        model : trained model
        X_train : training features
        y_train : training target
        X_test : test features
        y_test : test target
        epoch_stop : optimal number of epochs
    Output:
        test loss and test accuracy (as returned by model.evaluate)
    '''
    fit_kwargs = {
        'epochs': epoch_stop,
        'batch_size': BATCH_SIZE,
        'verbose': 0,
    }
    model.fit(X_train, y_train, **fit_kwargs)
    return model.evaluate(X_test, y_test)
def remove_stopwords(input_text):
    '''
    Remove English stopwords from a piece of text.

    Parameters:
        input_text : text to clean
    Output:
        cleaned text (single-character tokens are dropped too)
    '''
    blocked = stopwords.words('english')
    # Some words which might indicate a certain sentiment are kept via a whitelist
    keep_anyway = ["n't", "not", "no"]
    kept = []
    for token in input_text.split():
        if len(token) <= 1:
            continue
        if token in blocked and token not in keep_anyway:
            continue
        kept.append(token)
    return " ".join(kept)
def remove_mentions(input_text):
    '''
    Remove mentions (an '@' followed by word characters) from a piece of
    text.

    Parameters:
        input_text : text to clean
    Output:
        cleaned text with every @mention deleted
    '''
    mention_pattern = re.compile(r'@\w+')
    return mention_pattern.sub('', input_text)
# ---- Data loading and cleaning -------------------------------------------
df = pd.read_csv(input_path / 'train.csv')
df = df.reindex(np.random.permutation(df.index))  # shuffle rows
df = df[['comment_text', 'toxic']]
# NOTE(review): `df.text = ...` assigns an *attribute* named "text", not a
# DataFrame column; the cleaned text is never used downstream because the
# split below reads df.comment_text -- TODO confirm intent.
df.text = df.comment_text.apply(remove_stopwords).apply(remove_mentions)
X_train, X_test, y_train, y_test = train_test_split(df.comment_text, df.toxic, test_size=0.1, random_state=37)
print('# Train data samples:', X_train.shape[0])
print('# Test data samples:', X_test.shape[0])
assert X_train.shape[0] == y_train.shape[0]
assert X_test.shape[0] == y_test.shape[0]
# ---- Tokenisation and padding --------------------------------------------
# Fit the tokenizer on the training texts only, then map both splits to
# integer sequences.
tk = Tokenizer(num_words=NB_WORDS,
               filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
               lower=True,
               split=" ")
tk.fit_on_texts(X_train)
X_train_seq = tk.texts_to_sequences(X_train)
X_test_seq = tk.texts_to_sequences(X_test)
seq_lengths = X_train.apply(lambda x: len(x.split(' ')))
seq_lengths.describe()
X_train_seq_trunc = pad_sequences(X_train_seq, maxlen=MAX_LEN)
X_test_seq_trunc = pad_sequences(X_test_seq, maxlen=MAX_LEN)
X_train_seq_trunc[10]  # Example of padded sequence
# ---- Label encoding (0/1 target -> one-hot pairs) ------------------------
le = LabelEncoder()
y_train_le = le.fit_transform(y_train)
y_test_le = le.transform(y_test)
y_train_oh = to_categorical(y_train_le)
y_test_oh = to_categorical(y_test_le)
X_train_emb, X_valid_emb, y_train_emb, y_valid_emb = train_test_split(X_train_seq_trunc, y_train_oh, test_size=0.1, random_state=37)
assert X_valid_emb.shape[0] == y_valid_emb.shape[0]
assert X_train_emb.shape[0] == y_train_emb.shape[0]
print('Shape of validation set:',X_valid_emb.shape)
# ---- GloVe embedding lookup ----------------------------------------------
glove_file = 'glove.twitter.27B.25d.txt'
glove_dir = 'glove/'
emb_dict = {}
glove = open(input_path / glove_dir / glove_file)
for line in glove:
    values = line.split()
    word = values[0]
    vector = np.asarray(values[1:], dtype='float32')
    emb_dict[word] = vector
glove.close()
# Spot-check that a few expected words made it into the dictionary.
airline_words = ['fuck', 'pussy', 'sad', 'hell']
for w in airline_words:
    if w in emb_dict.keys():
        print('Found the word {} in the dictionary'.format(w))
# Override the 50 set at the top: the file loaded above is 25-dimensional.
GLOVE_DIM = 25
emb_matrix = np.zeros((NB_WORDS, GLOVE_DIM))
for w, i in tk.word_index.items():
    # The word_index contains a token for all words of the training data so we need to limit that
    if i < NB_WORDS:
        vect = emb_dict.get(w)
        # Check if the word from the training data occurs in the GloVe word embeddings
        # Otherwise the vector is kept with only zeros
        if vect is not None:
            emb_matrix[i] = vect
    else:
        # NOTE(review): `break` (not `continue`) stops at the first index
        # >= NB_WORDS; this relies on word_index being ordered by frequency
        # rank -- TODO confirm for the keras version in use.
        break
# ---- LSTM model ----------------------------------------------------------
from keras.layers import LSTM
lstm_out = 20
emb_model2 = models.Sequential()
emb_model2.add(layers.Embedding(NB_WORDS, GLOVE_DIM, input_length=MAX_LEN))
emb_model2.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
emb_model2.add(layers.Dense(2, activation='softmax'))
emb_model2.summary()
emb_history2 = deep_model(emb_model2, X_train_emb, y_train_emb, X_valid_emb, y_valid_emb)
# embmodel.save("./lstm_model/model.h5")
# eval_metric(emb_history2, 'loss')
# eval_metric(emb_history2, 'acc')
emb_results2 = test_model(emb_model2, X_train_seq_trunc, y_train_oh, X_test_seq_trunc, y_test_oh, 3)
# BUGFIX: the newline escape was misspelled '/n', which printed a literal
# "/n" instead of a blank line.
print('\n')
print('Test accuracy of word embedding model 2: {0:.2f}%'.format(emb_results2[1]*100))
twt = ["vagina"]
#vectorizing the tweet by the pre-fitted tokenizer instance
twt = tk.texts_to_sequences(twt)
#padding the tweet to have exactly the same shape as `embedding_2` input
twt = pad_sequences(twt, maxlen=24, dtype='int32', value=0)
print(twt)
sentiment = emb_model2.predict(twt,batch_size=1,verbose = 2)[0]
if(np.argmax(sentiment) == 0):
print("positive")
elif (np.argmax(sentiment) == 1):
print("negative")
| ntesh21/profanity-detection | train.py | train.py | py | 7,843 | python | en | code | 0 | github-code | 36 |
70846346343 | import models.data
import models.email_notice
from flask import Flask, request, render_template, redirect, flash, url_for, session, abort
# Flask application; static assets are served from the project root.
# NOTE(review): the absolute root_path '/root/SPM' ties this app to one
# specific deployment location.
app = Flask(__name__, static_url_path='', root_path='/root/SPM')

@app.route('/')
def index():
    """Serve the static landing page."""
    return app.send_static_file('index.html')
@app.route('/user_view')
def user_view():
    """Show the logged-in customer's own orders; redirect to login otherwise."""
    if not session.get('logged_in'):
        return redirect(url_for("login"))
    try:
        data_connector = models.data.data_layer()
        entries = data_connector.find_all_order_by_username(session["name"])
        info = session["info"]
        return render_template('user_view.html', entries=entries, info=info)
    except Exception as e:
        # Any data-layer failure falls back to the login page.
        print(e)
        return redirect(url_for("login"))
@app.route('/manage_view')
def manage_view():
    """Manager dashboard: list every order; redirect to the manager login
    when not authenticated."""
    if not session.get('logged_in'):
        return redirect(url_for("login", title="manager"))
    try:
        data_connector = models.data.data_layer()
        entries = data_connector.find_all_order()
        info = session["info"]
        return render_template('manage_view.html', entries=entries, info=info)
    except Exception as e:
        # Any data-layer failure falls back to the login page.
        print(e)
        return redirect(url_for("login"))
@app.route('/add_order', methods=['POST'])
def add_order():
    """Create a new delivery order for the logged-in customer from the
    submitted form fields."""
    if not session.get('logged_in'):
        abort(401)
    # The owner comes from the session, everything else from the form.
    order_info = {
        "username": session["name"],
        "number_box": request.form['number_box'],
        "d_address": request.form['d_address'],
        "a_address": request.form['a_address'],
        "d_date": request.form['d_date'],
        "a_date": request.form['a_date'],
        "o_message": request.form['o_message']
    }
    data_connector = models.data.data_layer()
    if data_connector.add_new_order(order_info):
        flash('New entry was successfully posted!')
    else:
        flash('Unknown Error!')
    return redirect(url_for('user_view'))
@app.route('/update_order', methods=['GET', 'POST'])
def update_order():
    """GET: render the edit form for one order; POST: persist the edited
    fields and e-mail the order's owner about the change."""
    if not session.get('logged_in'):
        abort(401)
    if request.method == 'POST':
        order_info = {
            "order_number": request.form['order_number'],
            "status": request.form['status'],
            "d_address": request.form['d_address'],
            "a_address": request.form['a_address'],
            "d_date": request.form['d_date'],
            "a_date": request.form['a_date'],
            "p_date": request.form['p_date'],
            "h_number": request.form['h_number'],
            "o_message": request.form['o_message'],
            "os_message": request.form['os_message'],
        }
        # print(order_info)
        data_connector = models.data.data_layer()
        if data_connector.update_order_by_order_number(order_info):
            flash('This entry was successfully updated!')
            # Notify the customer who owns this order by e-mail.
            order_number = order_info["order_number"]
            print("order_number:", order_number)
            email_address = data_connector.get_email_by_order_number(order_number)
            print("email_address:", email_address)
            models.email_notice.send_email(email_address, order_info)
        else:
            flash('Unknown Error!')
        return redirect(url_for('manage_view'))
    if request.method == "GET":
        order_number = request.args.get('order_number')
        data_connector = models.data.data_layer()
        entire = data_connector.find_order(order_number)
        return render_template('order_modify.html', entire=entire)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user or manager and start a session.

    GET renders the login form (`title` selects the user/manager variant,
    defaulting to "user"); POST validates the credentials via the data
    layer and redirects to the matching dashboard.
    """
    error = None
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        title = request.form['title']
        data_connector = models.data.data_layer()
        check_result, userinfo = data_connector.login_check(username, password, title)
        if not check_result:
            # NOTE(review): "Invaild" typo in this user-facing message.
            error = "Invaild username or password!"
            return render_template('login.html', message=error, title=title)
        else:
            session['logged_in'] = True
            session["name"] = userinfo["username"]
            session['info'] = userinfo
            flash('You were logged in')
            if title == "manager":
                return redirect(url_for("manage_view"))
            elif title == "user":
                return redirect(url_for("user_view"))
            # NOTE(review): any other title falls through and returns None.
    if request.method == "GET":
        title = request.args.get('title')
        if not title:
            title = "user"
        return render_template('login.html', title=title)
@app.route('/logout')
def logout():
    """Clear the login flag and send the user back to the landing page."""
    session.pop('logged_in', None)
    flash('You were logged out')
    landing = url_for('index')
    return redirect(landing)
@app.route("/register", methods=['GET', 'POST'])
def register():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
home_address = request.form['home_address']
phone_number = request.form['phone_number']
email_address = request.form['email_address']
user_dict = {"username": username,
"password": password,
"home_address": home_address,
"phone_number": phone_number,
"email_address": email_address}
try:
data_connector = models.data.data_layer()
if data_connector.register_new_customer(user_dict):
message = "Sign up successful!"
return redirect(url_for("login", message=message))
else:
raise Exception("Database connect error!")
except Exception as e:
print("Exception(Datalayer): ", e)
return render_template('register.html')
else:
return render_template('register.html')
if __name__ == '__main__':
    # NOTE(review): a hard-coded secret key and debug=True bound to
    # 0.0.0.0 are unsafe outside local development.
    app.secret_key = 'super secret key'
    app.run(host='0.0.0.0', debug=True)
| Elfsong/SPM | demo.py | demo.py | py | 5,888 | python | en | code | 1 | github-code | 36 |
3751805748 | from __future__ import print_function, division
from torch.utils.data import Dataset, DataLoader
import scipy.io as scp
from keras.utils import to_categorical
import numpy as np
import torch
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from matplotlib import pyplot as plt
from scipy.stats.stats import pearsonr
from block import fusions
# Mapping from each of the 26 emotion categories to the raw annotator
# adjectives that are collapsed into that category; the extra 'None' key
# marks clips without any emotion annotation (27 keys total).
twenty_six_labels = {'Affection': ['loving', 'friendly'], 'Anger': ['anger', 'furious', 'resentful', 'outraged', 'vengeful'],
                     'Annoyance': ['annoy', 'frustrated', 'irritated', 'agitated', 'bitter', 'insensitive', 'exasperated', 'displeased'],
                     'Anticipation': ['optimistic', 'hopeful', 'imaginative', 'eager'],
                     'Aversion': ['disgusted', 'horrified', 'hateful'],
                     'Confidence': ['confident', 'proud', 'stubborn', 'defiant', 'independent', 'convincing'],
                     'Disapproval': ['disapproving', 'hostile', 'unfriendly', 'mean', 'disrespectful', 'mocking', 'condescending', 'cunning', 'manipulative', 'nasty', 'deceitful', 'conceited', 'sleazy', 'greedy', 'rebellious', 'petty'],
                     'Disconnection': ['indifferent', 'bored', 'distracted', 'distant', 'uninterested', 'self-centered', 'lonely', 'cynical', 'restrained', 'unimpressed', 'dismissive'],
                     'Disquietment': ['worried', 'nervous', 'tense', 'anxious', 'afraid', 'alarmed', 'suspicious', 'uncomfortable', 'hesitant', 'reluctant', 'insecure', 'stressed', 'unsatisfied', 'solemn', 'submissive'],
                     'Doubt/Conf': ['confused', 'skeptical', 'indecisive'],
                     'Embarrassment': ['embarrassed', 'ashamed', 'humiliated'],
                     'Engagement': ['curious', 'serious', 'intrigued', 'persistent', 'interested', 'attentive', 'fascinated'],
                     'Esteem': ['respectful', 'grateful'],
                     'Excitement': ['excited', 'enthusiastic', 'energetic', 'playful', 'impatient', 'panicky', 'impulsive', 'hasty'],
                     'Fatigue': ['tired', 'sleepy', 'drowsy'],
                     'Fear': ['scared', 'fearful', 'timid', 'terrified'],
                     'Happiness': ['cheerful', 'delighted', 'happy', 'amused', 'laughing', 'thrilled', 'smiling', 'pleased', 'overwhelmed', 'ecstatic', 'exuberant'],
                     'Pain': ['pain'],
                     'Peace': ['content', 'relieved', 'relaxed', 'calm', 'quiet', 'satisfied', 'reserved', 'carefree'],
                     'Pleasure': ['funny', 'attracted', 'aroused', 'hedonistic', 'pleasant', 'flattered', 'entertaining', 'mesmerized'],
                     'Sadness': ['sad', 'melancholy', 'upset', 'disappointed', 'discouraged', 'grumpy', 'crying', 'regretful', 'grief-stricken', 'depressed', 'heartbroken', 'remorseful', 'hopeless', 'pensive', 'miserable'],
                     'Sensitivity': ['apologetic', 'nostalgic'],
                     'Suffering': ['offended', 'hurt', 'insulted', 'ignorant', 'disturbed', 'abusive', 'offensive'],
                     'Surprise': ['surprise', 'surprised', 'shocked', 'amazed', 'startled', 'astonished', 'speechless', 'disbelieving', 'incredulous'],
                     'Sympathy': ['kind', 'compassionate', 'supportive', 'sympathetic', 'encouraging', 'thoughtful', 'understanding', 'generous', 'concerned', 'dependable', 'caring', 'forgiving', 'reassuring', 'gentle'],
                     'Yearning': ['jealous', 'determined', 'aggressive', 'desperate', 'focused', 'dedicated', 'diligent'],
                     'None': ['None']}
class MovieGraphDataset(Dataset):
    """Dataset of movies: each item bundles per-clip multimodal features
    (face, valence/arousal, four text embeddings) with multi-hot emotion
    labels over the categories in ``twenty_six_labels``."""

    def __init__(self, data):
        # data: {movie_id: {clip_id: {'face', 'va', 'embed_description',
        #   'embed_situation', 'embed_scene', 'embed_transcript',
        #   'emotions'}}} -- inferred from the lookups below; TODO confirm
        # against the loader that produces it.
        self.data = data
        self.movie_idx = list(self.data.keys()) # ['tt03045', 'tt0840830' ...] etc
        self.num_samples = len(list(self.data.keys())) # 51 movies ideally
        # Re-pack each movie as
        # [n_clips, faces, va, desc, sit, scene, transcript, emotions],
        # stacking the per-clip features into arrays.
        self.new_data = {}
        for movie in self.movie_idx:
            num_clips = list(self.data[movie].keys())
            self.new_data[movie] = []
            self.new_data[movie].append(len(num_clips))
            self.new_data[movie].append( np.array([self.data[movie][clip]['face'] for clip in num_clips]) )
            self.new_data[movie].append( np.array([self.data[movie][clip]['va'] for clip in num_clips]) )
            self.new_data[movie].append( np.array([self.data[movie][clip]['embed_description'] for clip in num_clips]) )
            self.new_data[movie].append( np.array([self.data[movie][clip]['embed_situation'] for clip in num_clips]) )
            self.new_data[movie].append( np.array([self.data[movie][clip]['embed_scene'] for clip in num_clips]) )
            self.new_data[movie].append( np.array([self.data[movie][clip]['embed_transcript'] for clip in num_clips]) )
            self.new_data[movie].append( np.array([self.data[movie][clip]['emotions'] for clip in num_clips]) )
            # Convert each clip's list of emotion names into a multi-hot
            # vector; clips without labels get the 'None' category.
            for f in range(len(num_clips)):
                emot_labels = self.new_data[movie][7][f]
                if len(emot_labels) == 0:
                    emot_labels.append('None')
                labels = list(twenty_six_labels.keys())
                integer_mapping = {x: i for i, x in enumerate(labels)}
                vec = [integer_mapping[word] for word in labels]
                encoded = to_categorical(vec)
                emot_encoding = []
                for emot in emot_labels:
                    emot_encoding.append(list(encoded[integer_mapping[emot]]))
                # Element-wise sum of the one-hot rows -> multi-hot label.
                emot_labels = [sum(x) for x in zip(*emot_encoding)]
                self.new_data[movie][7][f] = emot_labels
            self.new_data[movie][7] = np.array(list(self.new_data[movie][7]))

    def __len__(self):
        # One sample per movie.
        return self.num_samples

    def __getitem__(self, idx):
        # Map the integer index onto the movie id.
        idx = self.movie_idx[idx]
        F = self.new_data[idx][1]
        Va = self.new_data[idx][2]
        emb_desc = self.new_data[idx][3]
        emb_sit = self.new_data[idx][4]
        emb_sce = self.new_data[idx][5]
        emb_trans = self.new_data[idx][6]
        y = self.new_data[idx][7]
        # Raw feature concatenation (per clip) returned alongside tensors.
        combined = np.hstack([F, Va, emb_desc, emb_sit, emb_sce, emb_trans])
        F = torch.Tensor(F)
        Va = torch.Tensor(Va)
        emb_desc = torch.Tensor(emb_desc)
        emb_sit = torch.Tensor(emb_sit)
        emb_sce = torch.Tensor(emb_sce)
        emb_trans = torch.Tensor(emb_trans)
        # Instantiate fusion classes
        # NOTE(review): fresh (randomly initialised) Block fusion modules
        # are created on *every* __getitem__ call, and the fused outputs
        # below -- including final_fused -- are never returned. This looks
        # like dead work unless a caller hooks in here; TODO confirm.
        fusion1 = fusions.Block([F.shape[1], Va.shape[1]], emb_desc.shape[1])
        fusion2 = fusions.Block([emb_desc.shape[1], emb_desc.shape[1]], F.shape[1] + Va.shape[1] + emb_desc.shape[1])
        fusion3 = fusions.Block([emb_sit.shape[1], emb_sce.shape[1]], emb_trans.shape[1])
        fusion4 = fusions.Block([emb_trans.shape[1], emb_trans.shape[1]], emb_sit.shape[1] + emb_sce.shape[1] + emb_trans.shape[1])
        # compute fusions
        temp_output_fusion1 = fusion1([F, Va])
        first_three = fusion2([temp_output_fusion1, emb_desc])
        temp_output_fusion2 = fusion3([emb_sit, emb_sce])
        second_three = fusion4([temp_output_fusion2, emb_trans])
        fusion5 = fusions.Block([first_three.shape[1], second_three.shape[1]], first_three.shape[1]+second_three.shape[1])
        final_fused = fusion5([first_three, second_three])
        return combined, y, F, Va, emb_desc, emb_sit, emb_sce, emb_trans
def adjust_learning_rate(optimizer, epoch, lr):
    """Write *lr* into every parameter group of *optimizer*; at epoch 100
    (exactly) the rate is first scaled down by a factor of 10."""
    effective_lr = lr * 0.1 if epoch == 100 else lr
    for group in optimizer.param_groups:
        group['lr'] = effective_lr
def accuracy_multihots(output, target, topk=(1,)):
    """Percentage of samples whose top-1 predicted class has a positive
    entry in the multi-hot *target*.

    Returns a 1-element tensor. *topk* is accepted for API compatibility
    but only the top-1 prediction is scored.
    """
    with torch.no_grad():
        _, top_idx = output.topk(1, 1, True, True)
        # Look up the target value at each sample's predicted class.
        hits = torch.gather(target, 1, top_idx) > 0
        fraction = hits.float().sum() / target.shape[0]
        return (fraction * 100.0).reshape(1)
class AverageMeter(object):
    """Track a running value together with its sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
| affect2mm/emotion-timeseries | emotion-timeseries/MovieGraphs/utils_co_attn.py | utils_co_attn.py | py | 9,455 | python | en | code | 12 | github-code | 36 |
70606654505 | import multiprocessing
import os
import sys
import time
import warnings
from datetime import date
import akshare as ak
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
# 输出显示设置
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
# 在linux会识别不了包 所以要加临时搜索目录
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from util.DBUtils import sqlalchemyUtil
from util.CommonUtils import get_process_num, get_code_group, get_code_list
# The upstream API is fragile -- do not spawn too many processes.
def multiprocess_run(code_list, start_date, engine, process_num=2):
    """Split *code_list* into *process_num* groups, fetch each group's
    indicator rows in a worker process, then bulk-insert the results into
    ods_financial_analysis_indicator_di (duplicate keys are ignored)."""
    code_group = get_code_group(process_num, code_list)
    result_list = []
    with multiprocessing.Pool(processes=process_num) as pool:
        # Submit one asynchronous task per code group.
        for i in range(len(code_group)):
            codes = code_group[i]
            # apply_async() arguments must be passed as a tuple; note the
            # trailing comma after the last argument -- without it the task
            # submitted to the pool would not be executed.
            result_list.append(pool.apply_async(get_group_data, args=(codes, start_date, i, len(code_group), len(code_list),)))
        # Stop accepting new tasks...
        pool.close()
        # ...and wait for every worker process to finish.
        pool.join()
    # delete_sql = '''truncate table ods_financial_analysis_indicator_di;'''
    # engine.execute(delete_sql)
    for r in result_list:
        rl = r.get()
        if rl:
            # Write to MySQL; rows with a duplicate primary key are
            # skipped via INSERT IGNORE.
            engine.execute(
                """
                insert ignore into ods_financial_analysis_indicator_di (announcement_date, stock_code, stock_name, ps_business_cash_flow,
                return_on_equity, npadnrgal, net_profit_growth_rate)
                values (%s, %s, %s, %s, %s, %s, %s);
                """, rl
            )
        else:
            print('rl为空')
    print('ods_financial_analysis_indicator_di:执行完毕!!!')
def get_group_data(code_list, start_date, i, n, total):
    """Fetch the indicator rows for every (code, name) pair in this group
    and flatten them into a single list of row-value lists.

    *i*, *n* and *total* describe the group's position and sizes; they are
    only used by the (currently disabled) progress logging.
    """
    rows = []
    for entry in code_list:
        ak_code, ak_name = entry[0], entry[1]
        frame = get_data(ak_code, ak_name, start_date)
        if frame.empty:
            continue
        rows.extend(np.array(frame).tolist())
    return rows
def get_data(ak_code, ak_name, start_date):
    """Download the Sina financial-analysis indicators for one stock and
    return the rows announced on/after *start_date*.

    Returns an empty DataFrame when the download fails or yields no rows.
    BUGFIX: previously the *class* ``pd.DataFrame`` itself was returned,
    which only worked by accident because ``pd.DataFrame.empty`` on the
    class is a truthy property object; return a real empty instance.
    """
    # time.sleep(1)
    for i in range(1):  # single attempt; raise the range to retry on failure
        try:
            # Sina finance: financial analysis indicators.
            df = ak.stock_financial_analysis_indicator(symbol=ak_code)
            if df.empty:
                continue
            df = df[pd.to_datetime(df['日期']) >= pd.to_datetime(start_date)]
            # Derive the exchange prefix from the leading digit of the code.
            if ak_code.startswith('6'):
                df['stock_code'] = 'sh' + ak_code
            elif ak_code.startswith(('8', '4')):
                df['stock_code'] = 'bj' + ak_code
            else:
                df['stock_code'] = 'sz' + ak_code
            df['stock_name'] = ak_name
            df.rename(columns={'日期':'announcement_date','每股经营性现金流(元)':'ps_business_cash_flow','净资产收益率(%)':'return_on_equity','扣除非经常性损益后的净利润(元)':'npadnrgal','净利润增长率(%)':'net_profit_growth_rate'}, inplace=True)
            df = df[['announcement_date','stock_code','stock_name','ps_business_cash_flow','return_on_equity','npadnrgal','net_profit_growth_rate']]
            # MySQL cannot handle NaN values.
            df = df.replace({np.nan: None})
            return df
        except Exception as e:
            print(e)
    return pd.DataFrame()
# Usage example:
# nohup python ods_financial_analysis_indicator_di.py update 20221010 >> my.log 2>&1 &
# A full refresh ("all") is very slow -- normally update from the latest
# date only.
if __name__ == '__main__':
    code_list = get_code_list()
    # Both dates default to today; "all" rewinds start_date to 2021-01-01.
    start_date = date.today().strftime('%Y%m%d')
    end_date = start_date
    if len(sys.argv) == 1:
        print("请携带一个参数 all update 更新要输入开启日期 结束日期 不输入则默认当天")
    elif len(sys.argv) == 2:
        run_type = sys.argv[1]
        if run_type == 'all':
            start_date = '20210101'
        else:
            start_date = date.today().strftime('%Y%m%d')
    elif len(sys.argv) == 4:
        # NOTE(review): sys.argv[3] (the end date) is accepted but unused.
        run_type = sys.argv[1]
        start_date = sys.argv[2]
    engine = sqlalchemyUtil().engine
    start_time = time.time()
    multiprocess_run(code_list, start_date, engine)
    engine.dispose()
    end_time = time.time()
    print('程序运行时间:{}s,{}分钟'.format(end_time - start_time, (end_time - start_time) / 60))
13346852826 | from osgeo import gdalnumeric
from osgeo import osr
from osgeo import gdal
from osgeo.gdal_array import *
from osgeo.gdalconst import *
from PIL import Image
import pylab as P
import os
import numpy as np
from IPython.core.debugger import set_trace
def readData(filename, ndtype=np.float64):
    '''
    z=readData('/path/to/file')

    Load a raster into a numpy array of dtype *ndtype*. Plain files go
    through gdal_array.LoadFile; anything else (VRTs, subdatasets, ...)
    is opened via gdal.Open.
    '''
    if os.path.isfile(filename):
        return LoadFile(filename).astype(ndtype)
    # BUGFIX: the GDAL Dataset method is ReadAsArray (capital R); the
    # previous ``readAsArray`` raised AttributeError. Also apply the
    # requested dtype here for consistency with the branch above.
    return gdal.Open(filename, gdal.GA_ReadOnly).ReadAsArray().astype(ndtype)
def writeTiff(ary, coord, filename='kgiAlos.tif', rescale=None,
              format=gdal.GDT_Float64, lon=None, lat=None, nodata=None,
              grid=False, cog=False,
              srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs',
              options=[], gcps=None):
    '''writeTiff(ary, geoTransform, filename='kgiAlos.tif', rescale=None, format=gdal.GDT_Float64, lon=None, lat=None):
    ary: 2D (rows, cols) or 3D (rows, cols, bands) array.
    coord: geotransform [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution].
      If lon/lat are specified, set coord to None to estimate it by a
      least-squares fit.
    rescale: [min max]: If given rescale ary values between min and max.
    grid: resample scattered (lon, lat, ary) samples onto a regular grid.
    cog: write a tiled/compressed GeoTIFF with overview pyramids.
    gcps: either a list of gdal.GCP, or a 2-element [rows, cols] gcp-count
      (the latter requires lon/lat).
    NOTE: *options* is never mutated; it is replaced wholesale when cog=True.
    '''
    if coord is None and gcps is None:
        # Fit an affine geotransform to (sub-sampled) lon/lat grids.
        import scipy
        import scipy.linalg
        s = [sk // 10 for sk in ary.shape]
        ary10 = ary[::s[0], ::s[1]]
        lon10 = lon[::s[0], ::s[1]]
        lat10 = lat[::s[0], ::s[1]]
        A = np.ones([np.multiply(*ary10.shape), 3])
        line, pixel = np.meshgrid(np.r_[0:ary.shape[0]:s[0]],
                                  np.r_[0:ary.shape[1]:s[1]])
        A[:, 1] = pixel.ravel()
        A[:, 2] = line.ravel()
        xlon = np.dot(scipy.linalg.pinv(A), lon10.ravel())
        xlat = np.dot(scipy.linalg.pinv(A), lat10.ravel())
        coord = [xlon[0], xlon[2], xlon[1], xlat[0], xlat[2], xlat[1]]
        print(coord)
    if grid:
        # Resample the scattered samples onto a regular lon/lat grid.
        import scipy.interpolate
        LON, LAT = np.meshgrid(np.r_[lon.min():lon.max():abs(coord[1])],
                               np.r_[lat.max():lat.min():-abs(coord[5])])
        ary = scipy.interpolate.griddata(np.array([lon.ravel(), lat.ravel()]).T,
                                         ary.ravel(), (LON, LAT), method='cubic')
        coord = [LON[0, 0], abs(coord[1]), 0, LAT[0, 0], 0, -abs(coord[5])]
        print(coord)
    if rescale:
        import basic
        ary = basic.rescale(ary, rescale)
    if ary.ndim == 2:
        Ny, Nx = ary.shape
        Nb = 1
    elif ary.ndim == 3:
        # Nb: number of bands. (row, col, band) order -- a deviation from
        # osgeo.gdal's (band, row, col).
        Ny, Nx, Nb = ary.shape
    else:
        print("Input array has to be 2D or 3D.")
        return None
    driver = gdal.GetDriverByName("GTiff")
    if cog:
        options = ["TILED=YES", "COMPRESS=LZW", "INTERLEAVE=BAND", "BIGTIFF=YES"]
    ds = driver.Create(filename, Nx, Ny, Nb, gdal.GDT_Float64, options)
    srs = osr.SpatialReference()
    srs.ImportFromProj4(srs_proj4)
    ds.SetProjection(srs.ExportToWkt())
    # Geotransform: top left x, w-e res, rotation, top left y, rotation, n-s res.
    if gcps is None:
        ds.SetGeoTransform(coord)
    else:
        if type(gcps[0]) == gdal.GCP:
            ds.SetGCPs(gcps, srs.ExportToWkt())
        elif isinstance(gcps[0], int) and len(gcps) == 2 and lat is not None:
            # BUGFIX: the generated list was previously referenced under a
            # misspelled name (gcp_list vs gcps_list), raising NameError
            # before SetGCPs could run; also np.int was removed from numpy
            # (1.24+), so test against the builtin int instead.
            gcp_list = create_gcp_list(lon, lat, np.zeros(lat.shape),
                                       gcp_count=[gcps[0], gcps[1]])
            ds.SetGCPs(gcp_list, srs.ExportToWkt())
        else:
            print('unsupported type of GCPs. Skipping.')
    if nodata is not None:
        ds.GetRasterBand(1).SetNoDataValue(nodata)
    if Nb == 1:
        ds.GetRasterBand(1).WriteArray(ary)
    else:
        for b in range(Nb):
            ds.GetRasterBand(b + 1).WriteArray(ary[:, :, b])
    # optimize for COG
    if cog:
        ds.BuildOverviews("NEAREST", [2, 4, 8, 16, 32, 64, 128, 256])
    ds = None
    print("File written to: " + filename)
def create_gcp_list(x, y, z, p=None, l=None, gcp_count=[2, 2]):
    """create_gcp_list(x,y,z,p=None, l=None, gcp_count=[2,2])
    Build a list of gdal.GCP ground control points.

    If *p* (pixel/row) or *l* (line/col) is omitted, x/y/z are assumed to
    be in the same shape as the image and gcp_count[0] x gcp_count[1]
    evenly spaced points are sampled from them. Otherwise *p* and *l*
    give the pixel and line for each x/y/z sample explicitly.
    """
    points = []
    if l is None or p is None:
        rows = np.linspace(0, x.shape[0] - 1, gcp_count[0]).astype(int)
        cols = np.linspace(0, x.shape[1] - 1, gcp_count[1]).astype(int)
        for r in rows:
            for c in cols:
                points.append(gdal.GCP(x[r, c], y[r, c], z[r, c],
                                       float(r), float(c)))
    else:
        pv = p.ravel().astype(float)
        lv = l.ravel().astype(float)
        xv = x.ravel()
        yv = y.ravel()
        zv = z.ravel()
        for k in range(lv.size):
            points.append(gdal.GCP(xv[k], yv[k], zv[k], pv[k], lv[k]))
    return points
def writeAny(ary, coord, fileformat="GTiff", filename='kgiAlos.tif', rescale=None,
             format=gdal.GDT_Float64, lon=None, lat=None, nodata=None, grid=False,
             srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'):
    '''writeAny(ary, geoTransform, format="GTiff", filename='kgiAlos.tif', rescale=None, format=gdal.GDT_Float64, lon=None, lat=None):
    ary: 2D (rows, cols) or 3D (rows, cols, bands) array.
    coord: geotransform [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution].
      If lon/lat are specified, set coord to None to estimate it by a
      least-squares fit.
    fileformat: any GDAL driver name, e.g. "GTiff".
    rescale: [min max]: If given rescale ary values between min and max.
    NOTE: the *format* parameter is currently not honoured -- bands are
    always created as gdal.GDT_Float64, as before.
    '''
    if coord is None:
        # Fit an affine geotransform to (sub-sampled) lon/lat grids.
        import scipy
        import scipy.linalg
        s = [sk // 10 for sk in ary.shape]
        ary10 = ary[::s[0], ::s[1]]
        lon10 = lon[::s[0], ::s[1]]
        lat10 = lat[::s[0], ::s[1]]
        A = np.ones([np.multiply(*ary10.shape), 3])
        line, pixel = np.meshgrid(np.r_[0:ary.shape[0]:s[0]],
                                  np.r_[0:ary.shape[1]:s[1]])
        A[:, 1] = pixel.ravel()
        A[:, 2] = line.ravel()
        xlon = np.dot(scipy.linalg.pinv(A), lon10.ravel())
        xlat = np.dot(scipy.linalg.pinv(A), lat10.ravel())
        coord = [xlon[0], xlon[2], xlon[1], xlat[0], xlat[2], xlat[1]]
        print(coord)
    if grid:
        # Resample the scattered samples onto a regular lon/lat grid.
        import scipy.interpolate
        LON, LAT = np.meshgrid(np.r_[lon.min():lon.max():abs(coord[1])],
                               np.r_[lat.max():lat.min():-abs(coord[5])])
        ary = scipy.interpolate.griddata(np.array([lon.ravel(), lat.ravel()]).T,
                                         ary.ravel(), (LON, LAT), method='cubic')
        coord = [LON[0, 0], abs(coord[1]), 0, LAT[0, 0], 0, -abs(coord[5])]
        print(coord)
    if rescale:
        import basic
        ary = basic.rescale(ary, rescale)
    if ary.ndim == 2:
        Ny, Nx = ary.shape
        Nb = 1
    elif ary.ndim == 3:
        Ny, Nx, Nb = ary.shape
    else:
        print("Input array has to be 2D or 3D.")
        return None
    driver = gdal.GetDriverByName(fileformat)
    ds = driver.Create(filename, Nx, Ny, Nb, gdal.GDT_Float64)
    # Geotransform: top left x, w-e res, rotation, top left y, rotation, n-s res.
    ds.SetGeoTransform(coord)
    srs = osr.SpatialReference()
    srs.ImportFromProj4(srs_proj4)
    ds.SetProjection(srs.ExportToWkt())
    if nodata is not None:
        # BUGFIX: the requested nodata value was previously ignored and a
        # hard-coded 0 written instead; honour *nodata*, consistent with
        # writeTiff.
        ds.GetRasterBand(1).SetNoDataValue(nodata)
    if Nb == 1:
        ds.GetRasterBand(1).WriteArray(ary)
    else:
        for b in range(Nb):
            ds.GetRasterBand(b + 1).WriteArray(ary[:, :, b])
    ds = None
    print("File written to: " + filename)
def writeCSV(ary, filename='gis_file.csv', geotransform=None, rescale=None,
             format="%f", lon=None, lat=None, nodata=None, grid=False,
             srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'):
    '''writeCSV(ary, geoTransform=None, filename='gis_file.csv', rescale=None, format="%f", lon=None, lat=None):
    Write each raster cell as a "lon,lat,value" CSV row.
    ary: 2D array.
    geotransform: [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution].
      If lon/lat are specified, set geotransform to None to estimate it by
      a least-squares fit.
    rescale: [min max]: If given rescale ary values between min and max.
    NOTE: the *format*, *nodata* and *srs_proj4* parameters are currently
    unused; they are kept for signature compatibility with the other writers.
    '''
    if geotransform is None:
        # Fit an affine geotransform to (sub-sampled) lon/lat grids.
        import scipy
        import scipy.linalg
        s = [sk // 10 for sk in ary.shape]
        ary10 = ary[::s[0], ::s[1]]
        lon10 = lon[::s[0], ::s[1]]
        lat10 = lat[::s[0], ::s[1]]
        A = np.ones([np.multiply(*ary10.shape), 3])
        line, pixel = np.meshgrid(np.r_[0:ary.shape[0]:s[0]],
                                  np.r_[0:ary.shape[1]:s[1]])
        A[:, 1] = pixel.ravel()
        A[:, 2] = line.ravel()
        xlon = np.dot(scipy.linalg.pinv(A), lon10.ravel())
        xlat = np.dot(scipy.linalg.pinv(A), lat10.ravel())
        geotransform = [xlon[0], xlon[2], xlon[1], xlat[0], xlat[2], xlat[1]]
        print(geotransform)
    if grid:
        # Resample scattered samples onto a regular lon/lat grid.
        # BUGFIX: this branch previously referenced the undefined name
        # ``coord`` (copied from writeTiff) and raised NameError; use the
        # function's own geotransform instead.
        import scipy.interpolate
        LON, LAT = np.meshgrid(np.r_[lon.min():lon.max():abs(geotransform[1])],
                               np.r_[lat.max():lat.min():-abs(geotransform[5])])
        ary = scipy.interpolate.griddata(np.array([lon.ravel(), lat.ravel()]).T,
                                         ary.ravel(), (LON, LAT), method='cubic')
        geotransform = [LON[0, 0], abs(geotransform[1]), 0,
                        LAT[0, 0], 0, -abs(geotransform[5])]
        print(geotransform)
    else:
        # Derive per-pixel lon/lat from the geotransform (1-based indices).
        y = np.linspace(1, ary.shape[0], ary.shape[0])
        x = np.linspace(1, ary.shape[1], ary.shape[1])
        Y, X = np.meshgrid(y, x, indexing='ij')
        lon = geotransform[0] + geotransform[1]*X + Y*geotransform[2]
        lat = geotransform[3] + geotransform[4]*X + Y*geotransform[5]
    if rescale:
        import basic
        ary = basic.rescale(ary, rescale)
    Ny, Nx = ary.shape
    item_length = Ny * Nx
    import csv
    columns = [lon.ravel(), lat.ravel(), ary.ravel()]
    # BUGFIX: csv.writer in Python 3 needs a text-mode file; 'wb' raised
    # TypeError. newline='' keeps csv from doubling line endings.
    with open(filename, 'w', newline='') as csv_file:
        file_writer = csv.writer(csv_file)
        for i in range(item_length):
            file_writer.writerow([col[i] for col in columns])
    print("File written to: " + filename)
def readCoord(filename, srs_proj4=None, dtype=np.float64):
    '''
    LON,LAT=readCoord('/path/to/file', srs_proj4=None)

    Return per-pixel longitude/latitude grids (WGS84) for a raster by
    transforming its corner coordinates and linearly interpolating between
    them.  NOTE(review): this assumes a north-up, unrotated raster —
    confirm before using with rotated geotransforms.
    '''
    #http://stackoverflow.com/questions/2922532/obtain-latitude-and-longitude-from-a-geotiff-file
    # get the existing coordinate system
    xn,yn,xN,yN=corners(filename);
    ds = gdal.Open(filename)
    if srs_proj4 is None:
        # Take the source CRS from the file itself...
        old_cs=osr.SpatialReference()
        old_cs.ImportFromWkt(ds.GetProjectionRef())
    else:
        # ...or from the caller-provided PROJ4 string.
        old_cs=osr.SpatialReference()
        old_cs.ImportFromProj4(srs_proj4);
    # create the new coordinate system (target: WGS84 geographic)
    wgs84_wkt = """
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]]"""
    new_cs = osr.SpatialReference()
    new_cs .ImportFromWkt(wgs84_wkt)
    # create a transform object to convert between coordinate systems
    transform = osr.CoordinateTransformation(old_cs,new_cs)
    #get the point to transform, pixel (0,0) in this case
    #width = ds.RasterXSize
    #height = ds.RasterYSize
    #gt = ds.GetGeoTransform()
    #minx = gt[0]
    #miny = gt[3] + width*gt[4] + height*gt[5]
    #get the coordinates in lat long
    #latlong = transform.TransformPoint(minx,miny)
    # Transform only the two opposite corners; everything in between is
    # linearly interpolated below.
    lonn,latn,z=transform.TransformPoint(xn,yn)
    # print(latn, lon)
    #lonN,latn,z=transform.TransformPoint(xN,yn)
    lonN,latN,z=transform.TransformPoint(xN,yN)
    lat=np.linspace(latn,latN,ds.RasterYSize).astype(dtype);
    lon=np.linspace(lonn,lonN,ds.RasterXSize).astype(dtype);
    LON,LAT=np.meshgrid(lon,lat);
    # Flip LAT so row 0 corresponds to the top (max latitude) of the raster.
    return LON, np.flipud(LAT);
def corners(filename):
    '''
    (minx,miny,maxx,maxy)=corners('/path/to/file')

    Bounding box of a raster in its own projected coordinates, derived
    from the geotransform and the raster dimensions.
    '''
    #http://stackoverflow.com/questions/2922532/obtain-latitude-and-longitude-from-a-geotiff-file
    ds = gdal.Open(filename)
    w, h = ds.RasterXSize, ds.RasterYSize
    gt = ds.GetGeoTransform()
    return (gt[0],                               # minx: top-left x
            gt[3] + w * gt[4] + h * gt[5],       # miny: bottom edge
            gt[0] + w * gt[1] + h * gt[2],       # maxx: right edge
            gt[3])                               # maxy: top-left y
def bounding_box(filename):
    """
    ((lon1,lat1), (lon2,lat2), (lon3,lat3), (lon4,lat4))=bounding_box('/path/to/file')

    The four pixel corners of the raster projected to map coordinates,
    in order: top-left, top-right, bottom-right, bottom-left.
    """
    gT = getGeoTransform(filename)
    width, height = get_size(filename)
    pixel_corners = ((0, 0), (width, 0), (width, height), (0, height))
    return tuple(xy2coord(px, py, gT) for px, py in pixel_corners)
def xy2coord(x, y, gT):
    '''
    lon,lat=xy2coord(x,y,geoTransform)
    projects pixel index to position based on geotransform.
    '''
    # Standard GDAL affine transform: origin + scale + rotation terms.
    return (gT[0] + x * gT[1] + y * gT[2],
            gT[3] + x * gT[4] + y * gT[5])
def coord2xy(x, y, gT):
    '''
    x,y = coord2xy(lon, lat, geoTransform)
    calculates pixel index closest to the lon, lat.

    Returns (row, col).  Rotation terms of the geotransform are ignored.
    '''
    #ref: https://gis.stackexchange.com/questions/221292/retrieve-pixel-value-with-geographic-coordinate-as-input-with-gdal/221430
    x_origin, y_origin = gT[0], gT[3]
    pixel_w, pixel_h = gT[1], -gT[5]
    col = np.array((x - x_origin) / pixel_w).astype(int)
    row = np.array((y_origin - y) / pixel_h).astype(int)
    return row, col
def getGeoTransform(filename):
    '''
    [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution]=getGeoTransform('/path/to/file')

    Thin wrapper over GDAL; the dataset handle is released when the local
    variable goes out of scope.
    '''
    #http://stackoverflow.com/questions/2922532/obtain-latitude-and-longitude-from-a-geotiff-file
    ds = gdal.Open(filename)
    return ds.GetGeoTransform()
def get_size(filename):
    """(width, height) = get_size(filename)

    Raster dimensions in pixels.
    """
    ds = gdal.Open(filename)
    dims = (ds.RasterXSize, ds.RasterYSize)
    ds = None  # explicitly release the GDAL handle
    return dims
def get_proj4(filename):
    """Return the raster's spatial reference as a PROJ4 string."""
    ds = gdal.Open(filename)
    # BUGFIX: use the module-level `osr` (as elsewhere in this file) instead
    # of `gdal.osr`, and actually CALL GetProjectionRef() — the original
    # passed the bound method object to ImportFromWkt.
    sr = osr.SpatialReference()
    sr.ImportFromWkt(ds.GetProjectionRef())
    return sr.ExportToProj4()
def transformPoint(x,y,z,s_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', t_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'):
    '''
    transformPoint(x,y,z,s_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', t_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')

    Transform a point (or sequences of points) from s_srs to t_srs.
    SRS descriptors may be "EPSG:nnnn", WKT starting with "GEOCCS", or PROJ4.
    Known Bugs: gdal transform may fail if a proj4 string can not be found for the EPSG or WKT formats.
    '''
    from .. import base

    def _build_srs(descriptor):
        # Accept EPSG code, WKT (GEOCCS...) or PROJ4 string.
        srs = osr.SpatialReference()
        if "EPSG" == descriptor[0:4]:
            srs.ImportFromEPSG(int(descriptor.split(':')[1]))
        elif "GEOCCS" == descriptor[0:6]:
            srs.ImportFromWkt(descriptor)
        else:
            srs.ImportFromProj4(descriptor)
        return srs

    transform = osr.CoordinateTransformation(_build_srs(s_srs), _build_srs(t_srs))
    if base.numel(x) > 1:
        # BUGFIX: the original recursion dropped s_srs/t_srs, silently
        # transforming every element with the WGS84 defaults.
        return [transformPoint(x[k], y[k], z[k], s_srs=s_srs, t_srs=t_srs)
                for k in range(base.numel(x))]
    else:
        try:
            # Some GDAL versions accept a tuple argument...
            return transform.TransformPoint((x, y, z))
        except (TypeError, NotImplementedError):
            # ...others want three positional scalars.
            return transform.TransformPoint(x, y, z)
def rsat2_export(filename, export_filename=None, yclip=225):
    """ 'export_filename'=rsat2_export(filename, export_filename=None, yclip=225)

    Export a RADARSAT-2 product as a cloud-optimised GeoTIFF of amplitude
    in dB.  The bottom `yclip` rows are dropped.
    NOTE(review): the purpose of the 225-row default clip is not evident
    from this code — confirm against the sensor documentation.
    """
    ds=gdal.Open(filename, gdal.GA_ReadOnly)
    w=ds.RasterXSize
    h=ds.RasterYSize
    # Amplitude -> dB.
    data=10.*np.log10(abs(ds.ReadAsArray(ysize=ds.RasterYSize-yclip)))
    gT=ds.GetGeoTransform()
    if export_filename is None:
        # Build the output name from the product identifier and the
        # acquisition start time (colons/fraction stripped for filenames).
        timestr=''.join(ds.GetMetadata()['ACQUISITION_START_TIME'].split(".")[0].split(":"))
        export_filename='_'.join(filename.split(":")[0:2])+"_"+timestr+"_cog.tif"
    # log10 of zero amplitude yields -inf; store as NaN nodata.
    data[data==-np.inf]=np.nan
    data[data==np.inf]=np.nan #should not be necessary
    writeTiff(data, gT, filename=export_filename, cog=True, gcps=ds.GetGCPs(), nodata=np.nan)
    return export_filename
def clip_gT(gT, xmin, xmax, ymin, ymax, method='image'):
    '''calculate new geotransform for a clipped raster either using pixels or projected coordinates.
    clipped_gT=clip_gT(gT, xmin, xmax, ymin, ymax, method='image')
    method: 'image' | 'coord'

    Only the origin (elements 0 and 3) is updated; pixel sizes and
    rotation terms are kept from the input geotransform.
    NOTE(review): despite the parameter names, callers (auto_clip) pass
    rows as xmin/xmax and columns as ymin/ymax — confirm before reuse.
    '''
    if method == 'image':
        # (ymin, xmin) are pixel indices; project to map coordinates.
        y, x = xy2coord(ymin, xmin, gT)  # top left, reference, coordinate
    elif method == 'coord':
        # Snap the map coordinate to the nearest pixel first...
        yi, xi = coord2xy(ymin, xmin, gT)
        # ...then take that pixel's map coordinate.
        y, x = xy2coord(yi, xi, gT)
    else:
        # BUGFIX: an unknown method previously fell through and raised a
        # confusing NameError on y/x below.
        raise ValueError("method must be 'image' or 'coord', got %r" % (method,))
    gTc = list(gT)
    gTc[0] = y
    gTc[3] = x
    return tuple(gTc)
def auto_clip(arr, gT, no_data=np.nan):
    """automatically remove the excess no-data pixels in raster. Similar to auto_clip in GIMP.
    clipped_raster, clipped_gT = auto_clip(raster, geoTransform, no_data=np.nan)

    Rows/columns are kept when they contain more than 50 valid pixels.
    NOTE(review): the slice end is exclusive, so the last data row/column
    is dropped — confirm whether that is intended.
    """
    # Valid-data mask; NaN needs isnan, everything else plain comparison.
    if np.isnan(no_data):
        mask = ~np.isnan(arr)
    else:
        mask = arr != no_data
    # Consistency fix: use the module-wide `np` alias instead of `numpy`.
    data_cols = np.where(mask.sum(0) > 50)[0]
    data_rows = np.where(mask.sum(1) > 50)[0]
    gTc = clip_gT(gT, data_rows[0], data_rows[-1], data_cols[0], data_cols[-1])
    arrC = arr[data_rows[0]:data_rows[-1], data_cols[0]:data_cols[-1]]
    return arrC, gTc
def translate_gT(gT, x_offset, y_offset):
    '''gT_translated=translate_gT(gT, x_offset, y_offset)
    simply offsets the starting 0th and 3rd elements of geotransform accordingly.
    '''
    shifted = list(gT)
    shifted[0] += x_offset  # origin x
    shifted[3] += y_offset  # origin y
    return tuple(shifted)
def translate_tif(filename, x_offset, y_offset):
    """Write a copy of `filename` with its geotransform origin shifted.

    Returns the new file name ('<name>_translated.tif').
    """
    arr = readData(filename)
    gT = getGeoTransform(filename)
    # BUGFIX: call the local translate_gT — `gsp.connectors.gdal` was an
    # undefined name in this module.
    gTt = translate_gT(gT, x_offset, y_offset)
    # BUGFIX: `filename[-4]` was a single character; strip the extension
    # with `[:-4]` as the return statement already did.
    out_name = filename[:-4] + '_translated.tif'
    writeTiff(arr, gTt, filename=out_name)
    return out_name
def auto_clip_tif(f, no_data=np.nan):
    """Auto-clip the no-data border of a GeoTIFF and write '<name>_clipped.tif'.

    Thin file-level wrapper around auto_clip(); previously this duplicated
    the masking/clipping logic inline.
    """
    print('Reading {}'.format(f))
    arr = readData(f)
    gT = getGeoTransform(f)
    arrC, gTc = auto_clip(arr, gT, no_data=no_data)
    writeTiff(arrC, gTc, filename=f[:-4] + '_clipped.tif')
def distance_lat_lon(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two lat/lon points (haversine).

    Works elementwise on numpy arrays.  12742 km is the Earth's diameter;
    0.0174532... converts degrees to radians.
    """
    deg2rad = 0.017453292519943295
    hav = (0.5
           - np.cos((lat2 - lat1) * deg2rad) / 2
           + np.cos(lat1 * deg2rad) * np.cos(lat2 * deg2rad)
           * (1 - np.cos((lon2 - lon1) * deg2rad)) / 2)
    return 12742 * np.arcsin(np.sqrt(hav))
def closest_lat_lon(lat_vector, lon_vector, lat_point, lon_point):
    """
    Find the closest index in a vector.
    index = closest_lat_lon(lat_vector, lon_vector, lat_point, lon_point)
    """
    distances = distance_lat_lon(lat_vector, lon_vector, lat_point, lon_point)
    return np.argmin(distances)
def get_point_value(filename, x,y,srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', band=1):
    """
    z=get_point_value(filename, x,y,srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')

    Read the value of raster `band` at map coordinate (x, y), transforming
    the point into the raster's CRS when needed.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    gT = ds.GetGeoTransform()
    t_srs = get_proj4(filename)
    rb = ds.GetRasterBand(band)
    if t_srs != srs_proj4:
        # BUGFIX: `z` was used before assignment here; a point transform
        # needs a height — use 0.
        x, y, z = transformPoint(x, y, 0, s_srs=srs_proj4, t_srs=t_srs)
    # BUGFIX: the original referenced undefined names px/py; coord2xy
    # returns (row, col) and ReadAsArray wants (xoff=col, yoff=row).
    row, col = coord2xy(x, y, gT)
    return rb.ReadAsArray(int(col), int(row), 1, 1)[0]
| bosmanoglu/adore-doris | lib/python/gis.py | gis.py | py | 21,211 | python | en | code | 13 | github-code | 36 |
71521743464 | from konlpy.tag import Okt
# 오픈 소스 한국어 분석기
# 속도는 느리지만, 정규화에 매우 좋음
from collections import Counter
def NLP(text):
    """Extract nouns from Korean `text` with KoNLPy's Okt analyzer and
    return the two most frequent nouns joined by a space.

    Side effects: prints the filtered noun list, the top-100 frequency
    table and the two winning words.
    """
    # Okt morphological analyzer (slow, but normalizes well).
    okt = Okt()
    # BUGFIX: the original popped items from `nouns` while enumerating it,
    # which skips the element right after every removed one; filter out
    # single-character nouns with a comprehension instead.
    nouns = [word for word in okt.nouns(text) if len(word) >= 2]
    count = Counter(nouns)
    print(nouns)
    # Top-100 noun frequency table.
    noun_list = count.most_common(100)
    for v in noun_list:
        print(v)
    print("가장 높은 빈도 수의 단어 : ")
    print(noun_list[0])
    print("두 번째로 높은 빈도 수의 단어 : ")
    print(noun_list[1])
    print("두 단어를 합치기 : ")
    nouns_list = noun_list[0][0] + ' ' + noun_list[1][0]
    print(nouns_list)
    return nouns_list
#text=input()
#NLP(text)
| Junst/KoNLPy-tTV | KoNLPy/KoNLPy_Okt.py | KoNLPy_Okt.py | py | 1,218 | python | ko | code | 0 | github-code | 36 |
# Board size N and the list of move commands (L/R/U/D) from stdin.
N = int(input())
orders = list(map(str, input().split()))
# Sample input for manual testing:
# N = 5
# orders = ["R", "R", "R", "U", "D", "D"]
# 0123 -> R L D U
# Direction deltas, indexed as R=0, L=1, D=2, U=3.
# NOTE(review): dx is applied to the row and dy to the column below —
# the names are swapped relative to convention; verify against the output.
dx = [1,-1,0,0]
dy = [0,0,1,-1]
idx = 0
def isIn(r, c):
    # True when (r, c) lies inside the 1-based N x N board (inclusive).
    return 1 <= r <= N and 1 <= c <= N
class Point:
    """Mutable position on the board, stored as (row, column)."""

    def __init__(self, r, c):
        self.r = r
        self.c = c

    def move(self, idx):
        # Shift by the direction vectors indexed by idx
        # (module-level dx/dy: R=0, L=1, D=2, U=3).
        self.r, self.c = self.r + dx[idx], self.c + dy[idx]
# Start at the top-left corner (1, 1) and apply each command, ignoring
# moves that would leave the board.
travler = Point(1, 1)
for order in orders:
    # Map the command letter to a direction index ("R" is the default 0).
    idx = 0
    if(order == "L"):
        idx = 1
    elif(order == "D"):
        idx = 2
    elif(order == "U"):
        idx = 3
    if(isIn(travler.r + dx[idx], travler.c + dy[idx])):
        travler.move(idx)
# Final position, printed column-first.
print(str(travler.c) +" "+ str(travler.r))
| shinzan7/algostudy | src/이코테/Chapter4 - 구현/ex4-1.py | ex4-1.py | py | 693 | python | en | code | 0 | github-code | 36 |
73274037545 | from django.urls import re_path
from . import views
# URL routes for the marketing app, grouped by role: Marketing Team Lead,
# Data Collector, and Marketing Executive.  All patterns use re_path with
# anchored regexes and named routes for reverse().
urlpatterns = [
    # Marketing TL
    # My tasks
    # re_path(r'^$', views.index),
    re_path(r'^marketingTL_dashboard$', views.marketingTL_dash, name="marketingTL_dashboard"),
    re_path(r'^mytasks$', views.marketingTL_mytasks, name="marketingTL_mytasks"),
    re_path(r'^products$', views.marketingTL_products, name="marketingTL_products"),
    re_path(r'^product_details$', views.marketingTL_productdet, name="marketingTL_productdet"),
    re_path(r'^recruitments$', views.marketingTL_recruitments, name="marketingTL_recruitments"),
    re_path(r'^recruitment_details$', views.marketingTL_recdet, name="marketingTL_recdet"),
    # Shared tasks
    re_path(r'^sharedtasks$', views.marketingTL_sharedtasks, name="marketingTL_sharedtasks"),
    re_path(r'^shared_products$', views.marketingTL_sharedproducts, name="marketingTL_sharedproducts"),
    re_path(r'^shared_productdetails$', views.marketingTL_Sproductdet, name="marketingTL_Sproductdet"),
    re_path(r'^view_productdata', views.marketingTL_productdata, name="marketingTL_productdata"),
    re_path(r'^shared_recruitments$', views.marketingTL_sharedrecruitments, name="marketingTL_sharedrecruitments"),
    re_path(r'^shared_recruitmentdetails$', views.marketingTL_Srecdet, name="marketingTL_Srecdet"),
    re_path(r'^view_recruitmentdata', views.marketingTL_recdata, name="marketingTL_recdata"),
    # Reports
    re_path(r'^report_issue$', views.marketingTL_reportissue, name="marketingTL_reportissue"),
    re_path(r'^reported_issues$', views.marketingTL_reportedissues, name="marketingTL_reportedissues"),
    re_path(r'^view_reportedissue$', views.marketingTL_viewissue, name="marketingTL_viewissue"),
    # Attendance
    re_path(r'^give_attendance$', views.marketingTL_giveattendance, name="marketingTL_giveattendance"),
    re_path(r'^view_attendance$', views.marketingTL_viewattendance, name="marketingTL_viewattendance"),
    re_path(r'^show_attendance$', views.marketingTL_showattendance, name="marketingTL_showattendance"),
    # Data Collector
    re_path(r'^dc_dash$', views.dc_dash, name="dc_dash"),
    # Tasks
    re_path(r'^dc_mytasks$', views.dc_mytasks, name="dc_mytasks"),
    re_path(r'^dc_products$', views.dc_products, name="dc_products"),
    re_path(r'^dc_productdet$', views.dc_productdet, name="dc_productdet"),
    re_path(r'^collect_productdata$', views.collect_productdata, name="collect_productdata"),
    re_path(r'^dc_recruitments$', views.dc_recruitments, name="dc_recruitments"),
    re_path(r'^dc_recdet$', views.dc_recdet, name="dc_recdet"),
    re_path(r'^collect_recdata$', views.collect_recdata, name="collect_recdata"),
    # Reports
    re_path(r'^dc_reportissue$', views.dc_reportissue, name="dc_reportissue"),
    re_path(r'^dc_reportedissues$', views.dc_reportedissues, name="dc_reportedissues"),
    re_path(r'^dc_viewissue$', views.dc_viewissue, name="dc_viewissue"),
    #Attendance
    re_path(r'^dc_viewattendance$', views.dc_viewattendance, name="dc_viewattendance"),
    re_path(r'^dc_showattendance$', views.dc_showattendance, name="dc_showattendance"),
    # Marketing Executive
    re_path(r'^exec_dash$', views.exec_dash, name="exec_dash"),
    # Tasks
    re_path(r'^exec_mytasks$', views.exec_mytasks, name="exec_mytasks"),
    re_path(r'^exec_products$', views.exec_products, name="exec_products"),
    re_path(r'^exec_productdet$', views.exec_productdet, name="exec_productdet"),
    re_path(r'^exec_productdata$', views.exec_productdata, name="exec_productdata"),
    re_path(r'^exec_recruitments$', views.exec_recruitments, name="exec_recruitments"),
    re_path(r'^exec_recdet$', views.exec_recdet, name="exec_recdet"),
    re_path(r'^exec_recdata$', views.exec_recdata, name="exec_recdata"),
    # Reports
    re_path(r'^exec_reportissue$', views.exec_reportissue, name="exec_reportissue"),
    re_path(r'^exec_reportedissues$', views.exec_reportedissues, name="exec_reportedissues"),
    re_path(r'^exec_viewissue$', views.exec_viewissue, name="exec_viewissue"),
    #Attendance
    re_path(r'^exec_viewattendance$', views.exec_viewattendance, name="exec_viewattendance"),
    re_path(r'^exec_showattendance$', views.exec_showattendance, name="exec_showattendance"),
]
| Emil-20/infoxmain | marketingapp/urls.py | urls.py | py | 4,392 | python | en | code | 1 | github-code | 36 |
70955299304 | """add unique constraint to solve for scramble and event results
Revision ID: 5de7c9b4e68c
Revises: 66f166a908a4
Create Date: 2019-10-13 13:11:53.915868
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5de7c9b4e68c'        # this migration's id
down_revision = '66f166a908a4'   # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Add a uniqueness guarantee: at most one solve per (scramble,
    user-event-results) pair.  batch_alter_table is used for SQLite
    compatibility."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user_solves', schema=None) as batch_op:
        batch_op.create_unique_constraint('unique_scramble_user_results', ['scramble_id', 'user_event_results_id'])
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the unique constraint again."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user_solves', schema=None) as batch_op:
        batch_op.drop_constraint('unique_scramble_user_results', type_='unique')
    # ### end Alembic commands ###
| euphwes/cubers.io | migrations/versions/039_5de7c9b4e68c_add_unique_constraint_to_solve_for_.py | 039_5de7c9b4e68c_add_unique_constraint_to_solve_for_.py | py | 924 | python | en | code | 27 | github-code | 36 |
41911943994 | from django.shortcuts import render
from .forms import *
from django.http import HttpResponse
import requests
def index(request):
    """Render the movie-search page.

    GET: show the empty search form.  POST: query the IMDb auto-complete
    API with the submitted title and render its top three matches.  A
    missing field in the API response (KeyError/IndexError) flips the
    `no_errors` flag so the template can show an error message.
    """
    try:
        film_title = request.POST.get('message', '')
        if request.method == 'POST':
            url = "https://imdb8.p.rapidapi.com/auto-complete"
            querystring = {"q": str(film_title)}
            # SECURITY NOTE(review): the API key is hard-coded here; move
            # it into settings or an environment variable.
            headers = {
                "X-RapidAPI-Key": "62008096b2mshfb208128fa454d7p14c074jsne7881457ef9a",
                "X-RapidAPI-Host": "imdb8.p.rapidapi.com"
            }
            response = requests.request("GET", url, headers=headers, params=querystring)
            results = response.json()["d"]
            # These two flags drive the if-blocks in the template.
            context = {
                'is_post_request': True,
                'errors': False,
            }
            # The template expects suffixed keys: title/title1/title2,
            # image_url/image_url1/..., etc.  Previously this was nine
            # near-identical copy-pasted stanzas.
            for pos in range(3):
                suffix = '' if pos == 0 else str(pos)
                entry = results[pos]
                context['title' + suffix] = entry["l"].replace('\'', '')
                context['image_url' + suffix] = entry["i"]["imageUrl"]
                context['year' + suffix] = str(entry["y"])
                context['cast' + suffix] = entry["s"].replace('\'', '')
            return render(request, 'main/index.html', context)
        # Plain GET: first visit, no search performed yet.
        return render(request, 'main/index.html', {'is_post_request': False, 'no_errors': False})
    except (KeyError, IndexError):
        # The API response lacked an expected field for this query.
        return render(request, 'main/index.html', {'is_post_request': False, 'no_errors': True})
| adilluos/Movie-Searcher | WebProject/taskmanager/main/views.py | views.py | py | 3,026 | python | en | code | 0 | github-code | 36 |
71234511143 | #!/usr/bin/env python
#
# An example on how to read the YAML output from etisnoop
# Pipe etisnoop to this script
#
# License: public domain
import sys
import yaml
# Iterate over the multi-document YAML stream produced by etisnoop and
# print the FIGs carried in each ETI frame's FIC.
# NOTE(review): yaml.load_all without an explicit Loader is deprecated and
# unsafe on untrusted input — consider yaml.safe_load_all for piped data.
for frame in yaml.load_all(sys.stdin):
    print("FIGs in frame {}".format(frame['Frame']))
    for fib in frame['LIDATA']['FIC']:
        if fib['FIGs']:
            for fig in fib['FIGs']:
                print("  FIG " + fig['FIG'])
| Opendigitalradio/etisnoop | yamlexample.py | yamlexample.py | py | 401 | python | en | code | 8 | github-code | 36 |
23212831424 | #Desenvolva uma calculadora de IMC, o programa deve pedir o peso e a altura ao usuario. calcular o IMC e retronar para o usuario o IMC
# e a categoria em que se encontra
def calculadora_IMC(peso, altura):
    """Compute the BMI (IMC) from weight in kg and height in meters,
    print the weight category, and return the BMI value.

    NOTE(review): WHO cutoffs are 25 and 30; this code uses 24.9/29.9 —
    kept as-is apart from the final-branch fix below.
    """
    calculo_IMC = peso / (altura * altura)
    if calculo_IMC < 18.5:
        print("abaixo do peso")
    elif calculo_IMC < 24.9:
        print("peso normal")
    elif calculo_IMC < 29.9:
        print("sobrepeso")
    else:
        # BUGFIX: the original used `elif calculo_IMC < 30`, so any BMI of
        # 30 or more printed no category at all.
        print("Obesidade")
    return calculo_IMC
# Read weight (kg) and height (m) from the user and print the BMI.
peso = float(input("Digite o seu peso em kg: "))
altura = float(input("Digite a sua altura em metros: "))
print(calculadora_IMC(peso,altura))
| EduardoFB321/CalculadoraIMC | CalculadoraIMC.py | CalculadoraIMC.py | py | 696 | python | pt | code | 0 | github-code | 36 |
10507399118 | import argparse
import sys
import json
import pickle
import os
import time
# Hex colour per HTTP status code for the Highcharts series; unknown codes
# fall back to '#fff' in unpack_data().
status_colors_hex = {
    '200': '#6FB665',
    '204': '#4FA29F',
    '400': '#D8C726',
    '404': '#F06A2A',
    '406': '#78CAEF',
    '414': '#86F6D2',
    '500': '#043E8A',
    '502': '#A81E03',
}
def fetch_input(stats_file):
    """Parse `uniq -c`-style output ("<count> <code>" per line) into a list
    of {'count': ..., 'code': ...} dicts (values kept as strings).
    """
    data = []
    # BUGFIX: use open() in a context manager — the py2-only `file()`
    # builtin was used with a manual close() and no try/finally.
    with open(stats_file, 'r') as file_resource:
        for line in file_resource:
            keys = ['count', 'code']
            vals = line.strip().split()
            data.append(dict(zip(keys, vals)))
    return data
def unpack_data(rows):
    """
    Convert stored (date, [{'count','code'}, ...]) rows into the
    categories/series structure Highcharts expects.

    Input:
    [('2014-12-10', [{'count': '7', 'code': '200'}, {'count': '3', 'code': '204'}]),
     ('2014-12-11', [{'count': '9', 'code': '200'}, {'count': '1', 'code': '204'}])]

    Output example:
    {'categories': ['2014-12-10', ...],
     'series': [{"color": "#6FB665", "name": "http 200", "data": [7, 9]}, ...]}

    NOTE(review): a date missing some status code produces series of
    uneven length (no gap filling) — confirm this is acceptable upstream.
    """
    categories = []
    counts_per_code = {}
    for date, codes in sorted(rows):  # stored data can be appended in any order
        categories.append(date)
        for entry in codes:
            counts_per_code.setdefault(entry['code'], []).append(int(entry['count']))
    series = [
        {"color": status_colors_hex.get(code, '#fff'),
         "name": "http %s" % code,
         "data": counts}
        for code, counts in counts_per_code.items()
    ]
    # limit output for graph to last 23 points.
    # this geckoboard stupidity..
    return {'categories': categories, 'series': series}
def update_graph_data(config, new_record):
    """Append `new_record` to the on-disk pickle history and return the
    unpacked chart data.

    Record format (and on-disk schema):
    new_record = ('2014-12-10', [{'code': 501, 'count': 1}, ...])
    history    = {'index': [dates...], 'data': [records...]}

    A date already present in the index is only appended again when
    config['force_update'] is set.
    """
    exists = os.path.isfile(config['history_file'])
    # BUGFIX: pickle needs binary mode under Python 3, and the original
    # opened the file with 'w' (truncating a path it was about to read)
    # when it did not exist.
    if exists:
        with open(config['history_file'], 'rb') as dump:
            all_entries = pickle.load(dump)
    else:
        all_entries = {'index': [], 'data': []}
    the_date = new_record[0]
    if the_date not in all_entries['index'] or config['force_update']:
        if the_date in all_entries['index']:
            sys.stderr.write('warning: writing duplicate entry\n')
        all_entries['data'].append(new_record)
        all_entries['index'].append(the_date)
    else:
        sys.stderr.write('warning: did not append, data found in index\n')
    with open(config['history_file'], 'wb') as dump:
        pickle.dump(all_entries, dump)
    return unpack_data(all_entries['data'])
def chart_config(api_key, chart_data):
    """Build the shell-quoted Geckoboard Highcharts payload string.

    The returned value is a single-quoted JSON blob with inner quotes
    escaped, ready to be embedded in a shell command.
    """
    # https://developer.geckoboard.com/#highcharts-example
    label_style = {"color": "#b9bbbb"}
    highcharts_data = {
        "chart": {
            "style": label_style,
            "renderTo": "container",
            "backgroundColor": "transparent",
            "lineColor": "rgba(35,37,38,100)",
            "plotShadow": False,
        },
        "credits": {"enabled": False},
        "title": {
            "style": label_style,
            "text": "Daily HTTP Status Codes",
        },
        "xAxis": {"categories": chart_data['categories']},
        "yAxis": {"title": {"style": label_style, "text": "HTTP Requests"}},
        "legend": {
            "itemStyle": label_style,
            "layout": "vertical",
            "align": "right",
            "verticalAlign": "middle",
            "borderWidth": 0,
        },
        "series": chart_data['series'],
    }
    escaped = json.dumps(highcharts_data).replace('"', '\\"')
    # http://wiki.bash-hackers.org/syntax/quoting
    # - weak quoting with double-quotes: "stuff"
    # - strong quoting with single-quotes: 'stuff'
    # note: inside a single-qouted string NOTHING(!!!) is interpreted.
    return "'{\"api_key\": \"%s\", \"data\": {\"highchart\": \"%s\"}}'" % (api_key, escaped)
if __name__ == '__main__':
    # CLI: read a `uniq -c` stats file, append it to the pickle history for
    # the given date, and print the Geckoboard Highcharts payload to stdout.
    parser = argparse.ArgumentParser(prog='update_graph_data',
                description=("Appends given data to graph data in Javascript format."
                ". Which can be accepted by Highcharts (Geckoboard API)"),
                add_help=True)
    parser.add_argument('filepath', type=str, help='Path to the stats file. (output from uniq -c)')
    parser.add_argument('--history', dest='history_file', type=str, help='Path to the stats file. (output from uniq -c)', required=True)
    parser.add_argument('--force-update', action='store_true', help='Force to update the history file, if the date already exists on disk')
    parser.add_argument('--api-key', type=str, help='Date of this graph stats in YYYYmmdd', required=True)
    parser.add_argument('--date', type=str, help='Date of this graph stats in YYYYmmdd')
    args = parser.parse_args()
    # Downstream helpers take a plain dict of options.
    config = args.__dict__
    if config['force_update']:
        # Give the operator a short window to abort a forced (duplicating) update.
        sys.stderr.write('warning: using --force-update, this will append data and possibly duplicate \n')
        sys.stderr.write('warning: press ^C to cancel (program starts in 1 second..)\n')
        time.sleep(1)
    new_record = (config['date'], fetch_input(args.filepath))
    chart_data = update_graph_data(config, new_record)
    sys.stdout.write(chart_config(config['api_key'], chart_data))
#test:
#data = [('2014-12-10', [{'count': '7', 'code': '200'}, {'count': '3', 'code': '204'}]),
# ('2014-12-11', [{'count': '9', 'code': '200'}, {'count': '1', 'code': '204'}]),
# ('2014-12-13', [{'count': '3', 'code': '200'}, {'count': '2', 'code': '204'}])]
#unpacked = unpack_data(data)
| stefanooldeman/gecko_http_codes | update_graph_data.py | update_graph_data.py | py | 6,440 | python | en | code | 1 | github-code | 36 |
73895270183 | '''
Author: airscker
Date: 2022-09-21 18:43:31
LastEditors: airscker
LastEditTime: 2023-08-31 12:23:45
Description: NULL
Copyright (C) 2023 by Airscker(Yufeng), All Rights Reserved.
'''
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib
import os
# Project root: the directory containing this setup.py.
here = pathlib.Path(__file__).parent.resolve()

# Get the long description from the README file
long_description = (here / "./README.md").read_text(encoding="utf-8")

# Read the package version from DeepMuon/__version__.py without importing
# the package (avoids pulling in heavy dependencies at build time).
version = {}
with open(os.path.join("DeepMuon", "__version__.py")) as f:
    exec(f.read(), version)
# Package metadata and build configuration.  The sampleproject scaffold
# comments and commented-out options that previously padded this call have
# been removed; only the options actually in use remain.
setup(
    name="DeepMuon",
    version=version['__version__'],
    description="Interdisciplinary Deep Learning Platform",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://airscker.github.io/DeepMuon/",
    author="Airscker/Yufeng Wang",
    author_email="airscker@gmail.com",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3 :: Only",
    ],
    keywords="Deep Learning, Searching Dark Matter, Direct and Simple",
    packages=find_packages(),
    python_requires=">=3.6, <4",
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'click',
        'prettytable',
        'opencv-python',
        'tqdm',
        'numpy',
        'pandas',
        'openpyxl',
        'ptflops',
        'torchinfo',
        'captum',
        'monai',
        'pynvml',
        'psutil',
        'GPUtil',
        'matplotlib',
        'timm',
        'SimpleITK',
        'scikit-learn',
        'scikit-image',
        'tensorboard',
        'yapf',
        'parso',
        'rdkit',
        'seaborn',
    ],
    # Console entry point: `Dmuon_train` launches the training CLI.
    entry_points={
        "console_scripts": [
            "Dmuon_train=DeepMuon.train.run:main",
            # "Dmuon_infer=DeepMuon.test.inference:run",
            # 'Dmuon_ana=DeepMuon.test.analysis:run',
            # 'Dmuon_com=DeepMuon.test.compare:run',
        ],
    },
)
| Airscker/DeepMuon | setup.py | setup.py | py | 6,250 | python | en | code | 1 | github-code | 36 |
31456331042 | from openerp import models
from openerp.tools.safe_eval import safe_eval as eval
import cStringIO
import re
try:
from elaphe import barcode
except ImportError:
pass
class Report(models.Model):
    # Extend the core `report` model with barcode rendering via elaphe.
    _inherit = 'report'

    def generate_barcode(self, type, value, kw, width=0, height=0):
        """Render `value` as a barcode PNG and return the raw image bytes.

        type:   barcode symbology name understood by elaphe (e.g. 'ean13').
        value:  payload to encode (stringified before encoding).
        kw:     option dict; recognised keys: 'scale', 'barmargin' and
                'extraopts' ("key:value,key:value" pairs forwarded to elaphe).
        width/height: optional pixel size to resize the rendered image to.

        Raises ValueError when the payload cannot be rendered.
        """
        width = int(width)
        height = int(height)
        scale = float(kw.get('scale', 2.0))
        margin = float(kw.get('barmargin', 0))
        extra_opts = {}
        barcode_out = cStringIO.StringIO()
        if kw.get('extraopts', False):
            # Parse "key:value" pairs; non hex-colour values are evaluated.
            # NOTE: `eval` here is OpenERP's sandboxed safe_eval (see the
            # module import alias), not the raw builtin.
            for opt in kw['extraopts'].split(','):
                key = opt.split(':')[0]
                values = opt.split(':')[1]
                # 3- or 6-digit hex colours stay strings; anything else is
                # assumed to be a Python literal (int, tuple, ...).
                if re.search(r'^(?:[0-9a-fA-F]{3}){1,2}$', values) is None:
                    values = eval(values)
                extra_opts[key] = values
        try:
            barcode_img = barcode(type, str(value), extra_opts, scale=scale,
                                  margin=margin)
            if width and height:
                barcode_img = barcode_img.resize((width, height))
            barcode_img.save(barcode_out, "png", resolution=100.0)
        except (ValueError, AttributeError):
            raise ValueError('Cannot convert into barcode.')
        return barcode_out.getvalue()
38565788435 | __author__ = '''Brent Lambert, David Ray, Jon Thomas, Shane Graber'''
__version__ = '$ Revision 0.0 $'[11:-2]
from plone.app.layout.viewlets.content import DocumentActionsViewlet
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFCore.utils import getToolByName
class BookmarkletsActionsViewlet(DocumentActionsViewlet):
    """Document-actions viewlet that renders social-bookmarking links."""

    def getSites(self):
        """ returns bookmarking sites. """
        # BUGFIX: the original referenced an undefined name `context`
        # (NameError at render time); use self.context.  The unused
        # page_url/page_title/page_descr locals were removed.
        props = getToolByName(self.context, 'portal_properties').bookmarklets_properties
        # One URL template per enabled site id.
        return [getattr(props, site_id) for site_id in props.available_sites]

    def getSiteUrl(self, site):
        """replace some parts of the site url with title and other from the
        context"""
        url = self.context.absolute_url()
        encodedTitle = self.context.Title().replace(' ', '+')
        pageDescription = self.context.Description()
        site = site.replace('URL', url).replace('ENCODED_TITLE', encodedTitle)
        site = site.replace('DESCR', pageDescription)
        return site

    render = ViewPageTemplateFile("bookmarklets_document_actions.pt")
| makinacorpus/collective.plonebookmarklets | collective/plonebookmarklets/browser/viewlets.py | viewlets.py | py | 1,510 | python | en | code | 0 | github-code | 36 |
"""
An Armstrong number is an n-digit number that is equal to the sum of the n'th
powers of its digits. Determine if the input numbers are Armstrong numbers.
INPUT SAMPLE:
Your program should accept as its first argument a path to a filename. Each
line in this file has a positive integer. E.g.
6
153
351
OUTPUT SAMPLE:
Print out True/False if the number is an Armstrong number or not. E.g.
True
True
False
"""
from sys import argv
def armstrong_number(number):
    """Return the sum of each digit of *number* raised to the digit count.

    *number* is a string of decimal digits.  For an Armstrong number the
    result equals the integer value of the string, e.g. for "153":
    1**3 + 5**3 + 3**3 == 153.
    """
    num_len = len(number)
    return sum(int(digit) ** num_len for digit in number)
def main(input_file):
    """Print True/False for each line of *input_file*, telling whether the
    number on that line is an Armstrong number."""
    with open(input_file, 'r') as handle:
        for raw_line in handle:
            candidate = raw_line.rstrip()
            # An Armstrong number equals the sum of its digits raised to
            # the digit count; printing the bool yields "True"/"False".
            print(armstrong_number(candidate) == int(candidate))
if __name__ == '__main__':
    # BUG FIX: a stray dataset-metadata line fused after this guard was
    # removed.  Expects the input file path as the first CLI argument.
    main(argv[1])
import os
import traceback
from . import datasets
from . import tasks
from . import util
from logging import getLogger
logger = getLogger('mrs')
class WorkerSetupRequest(object):
    """Request the worker to run the setup function."""

    def __init__(self, opts, args, default_dir):
        # BUG FIX: the original also assigned ``self.id = 'worker_setup'``,
        # which shadowed the id() method below and made ``request.id()``
        # raise TypeError ('str' object is not callable) on the failure
        # path in Worker.run_once.
        self.opts = opts
        self.args = args
        self.default_dir = default_dir

    def id(self):
        """Return a stable identifier for this request type."""
        return self.__class__.__name__
class WorkerRemoveRequest(object):
    """Ask the worker to delete a directory."""

    def __init__(self, *args):
        # Exactly one positional argument is expected: the directory path.
        self.directory, = args

    def id(self):
        """Return a stable identifier for this request type."""
        return type(self).__name__
class WorkerTaskRequest(object):
    """Request the worker to run a task."""

    def __init__(self, *args):
        # Positions 2 and 3 of the 9-element argument tuple identify the
        # task; the whole tuple is kept so it can be forwarded verbatim
        # to Task.from_args.
        (_a0, _a1, self.dataset_id, self.task_index,
         _a4, _a5, _a6, _a7, _a8) = args
        self.args = args

    def id(self):
        """Return an identifier unique to this dataset/task pair."""
        return '%s_%s_%s' % (type(self).__name__, self.dataset_id,
                             self.task_index)
class WorkerQuitRequest(object):
    """Request the worker to quit.

    Carries no payload; Worker.run_once reacts to its type alone.
    """
class WorkerFailure(object):
    """Failure response from worker."""

    def __init__(self, dataset_id, task_index, exception, traceback,
            request_id):
        """Record which task failed, the exception, and its traceback."""
        self.dataset_id, self.task_index = dataset_id, task_index
        self.exception, self.traceback = exception, traceback
        self.request_id = request_id
class WorkerSetupSuccess(object):
    """Successful worker setup.

    Sent back over the pipe in response to a WorkerSetupRequest.
    """
class WorkerSuccess(object):
    """Successful response from worker."""

    def __init__(self, dataset_id, task_index, outdir, outurls, request_id):
        """Record which task finished and where its output lives."""
        (self.dataset_id, self.task_index, self.outdir,
         self.outurls, self.request_id) = (dataset_id, task_index, outdir,
                                           outurls, request_id)
class Worker(object):
    """Execute map tasks and reduce tasks.

    The worker waits for other threads to make assignments by calling
    start_map and start_reduce.

    This needs to run in a daemon thread rather than in the main thread so
    that it can be killed by other threads.
    """

    def __init__(self, program_class, request_pipe):
        self.program_class = program_class
        self.request_pipe = request_pipe
        self.default_dir = None
        self.program = None
        self.opts = None
        self.args = None

    def run(self):
        """Process requests until run_once asks to stop."""
        while self.run_once():
            pass

    def run_once(self):
        """Runs one iteration of the event loop.

        Returns True if it should keep running.
        """
        request = None
        response = None
        try:
            request = self.request_pipe.recv()
            if isinstance(request, WorkerSetupRequest):
                assert self.program is None
                self.opts = request.opts
                self.args = request.args
                logger.debug('Starting to run the user setup function.')
                util.log_ram_usage()
                self.program = self.program_class(self.opts, self.args)
                self.default_dir = request.default_dir
                response = WorkerSetupSuccess()
            elif isinstance(request, WorkerQuitRequest):
                return False
            elif isinstance(request, WorkerRemoveRequest):
                util.remove_recursive(request.directory)
            else:
                assert self.program is not None
                logger.info('Running task: %s, %s' %
                        (request.dataset_id, request.task_index))
                util.log_ram_usage()
                max_sort_size = getattr(self.opts, 'mrs__max_sort_size', None)
                t = tasks.Task.from_args(*request.args, program=self.program)
                t.run(self.program, self.default_dir,
                        max_sort_size=max_sort_size)
                response = WorkerSuccess(request.dataset_id,
                        request.task_index, t.outdir, t.outurls(),
                        request.id())
                logger.info('Completed task: %s, %s' %
                        (request.dataset_id, request.task_index))
                util.log_ram_usage()
        except KeyboardInterrupt:
            return
        except Exception as e:
            # BUG FIX: the original read request.dataset_id/request.task_index
            # unconditionally here, which raised AttributeError (killing the
            # worker without ever sending a WorkerFailure) whenever the
            # failing request was not a task request (e.g. a setup request) or
            # recv() itself failed while request was still None.  The id()
            # lookup is also guarded with callable() because some request
            # objects may carry a plain `id` attribute instead of a method.
            dataset_id = getattr(request, 'dataset_id', None)
            task_index = getattr(request, 'task_index', None)
            logger.info('Failed task: %s, %s' % (dataset_id, task_index))
            req_id = getattr(request, 'id', None)
            request_id = req_id() if callable(req_id) else None
            tb = traceback.format_exc()
            response = WorkerFailure(dataset_id, task_index, e, tb, request_id)
        if response:
            self.request_pipe.send(response)
        return True

    def profiled_run(self):
        """Like run(), but profiles each iteration to an mrs-worker .prof file."""
        #TODO: detect the node number for other systems (e.g., pbs)
        nodenum = os.getenv('PSSH_NODENUM')
        if nodenum:
            filename = 'mrs-worker-%s.prof' % nodenum
        else:
            filename = 'mrs-worker.prof'
        util.profile_loop(self.run_once, (), {}, filename)
class WorkerManager(object):
    """Mixin class that provides methods for dealing with Workers.

    Assumes that a worker_pipe attribute is defined and that read_worker_pipe
    is called when data is available. Also assumes that a current_task
    attribute is available.
    """
    def worker_setup(self, opts, args, default_dir):
        """Send a setup request and block until the worker replies.

        Returns True on WorkerSetupSuccess; logs and returns False on
        WorkerFailure; raises RuntimeError for any other response type.
        """
        request = WorkerSetupRequest(opts, args, default_dir)
        self.worker_pipe.send(request)
        # Blocking recv: setup is synchronous, unlike task submission.
        response = self.worker_pipe.recv()
        if isinstance(response, WorkerSetupSuccess):
            return True
        if isinstance(response, WorkerFailure):
            msg = 'Exception in Worker Setup: %s' % response.exception
            logger.critical(msg)
            msg = 'Traceback: %s' % response.traceback
            logger.error(msg)
            return False
        else:
            raise RuntimeError('Invalid message type.')
    def read_worker_pipe(self):
        """Reads a single response from the worker pipe.

        Clears current_task and dispatches to worker_success or
        worker_failure depending on the response type.
        """
        r = self.worker_pipe.recv()
        if not (isinstance(r, WorkerSuccess) or isinstance(r, WorkerFailure)):
            assert False, 'Unexpected response type'
        # The response must correspond to the task we believe is in flight.
        assert self.current_task == (r.dataset_id, r.task_index)
        self.current_task = None
        if isinstance(r, WorkerSuccess):
            self.worker_success(r)
        elif isinstance(r, WorkerFailure):
            msg = 'Exception in Worker: %s' % r.exception
            logger.critical(msg)
            msg = 'Traceback: %s' % r.traceback
            logger.error(msg)
            self.worker_failure(r)
    def submit_request(self, request):
        """Submit the given request to the worker.

        If one_at_a_time is specified, then no other one_at_time requests can
        be accepted until the current task finishes. Returns a boolean
        indicating whether the request was accepted.

        Called from the RPC thread.
        """
        # Only task requests are serialized; other request types are
        # forwarded unconditionally.
        if isinstance(request, WorkerTaskRequest):
            if self.current_task is not None:
                return False
            self.current_task = (request.dataset_id, request.task_index)
        self.worker_pipe.send(request)
        return True
    def worker_success(self, response):
        """Called when a worker sends a WorkerSuccess for the given task."""
        raise NotImplementedError
    def worker_failure(self, response):
        """Called when a worker sends a WorkerFailure for the given task."""
        raise NotImplementedError
# vim: et sw=4 sts=4
# Import files
from ScrapingProducts.AmazonRateLimiterException import AmazonRateLimiterException
from Utilities.Utils import get_page_source
from Utilities.MetadataUtils import *
# Import libraries
from bs4 import BeautifulSoup
import logging as logger
import time
# Exact text of Amazon's generic error page; its presence in a fetched page
# is treated as a rate-limiting signal by crawl_item.
AMAZON_ERROR = "Sorry! Something went wrong on our end. Please go back and try again or go to Amazon's home page."
def crawl_item(curr_url, retry=0):
    """Scrape one Amazon product page and return its metadata as a dict.

    curr_url -- product page URL
    retry -- current retry attempt (used internally after rate limiting)

    Returns None if the page cannot be scraped after all retries.
    """
    try:
        page = get_page_source(curr_url)
        if AMAZON_ERROR in page:
            raise AmazonRateLimiterException
        soup1 = BeautifulSoup(page, "html.parser")
        soup2 = BeautifulSoup(soup1.prettify(), "html.parser")
        BSR = get_best_sellers_rank(page)
        top_category = get_top_category(soup2)
        bottom_category = get_bottom_category(soup2)
        # Prefer the top-level category; fall back to the bottom one.
        category = top_category if top_category != "NA" else bottom_category
        return {
            'name': get_name(soup2),
            'topCategory': top_category,
            'bottomCategory': bottom_category,
            'price': get_price(soup2),
            'ASIN': get_asin(curr_url),
            'reviews': get_reviews(soup2),
            'rating': get_rating(soup2),
            'search': get_search(category, curr_url),
            'url': curr_url,
            'BSR': BSR
        }
    except AmazonRateLimiterException:
        print("Amazon is probably blocking us. Will sleep for 1800 seconds and retry")
        time.sleep(1800)
        if retry < 3:
            # BUG FIX: the original called crawl_item(...) without returning
            # its result, so even a successful retry yielded None.
            return crawl_item(curr_url, retry + 1)
        return None
    except Exception as e:
        logger.error("Error occurred: " + str(e))
        logger.error("URL:" + str(curr_url))
        return None
from .card import APDUError
from .iso import IsoMixin
# Application identifier passed to iso_select_df by piv_select below.
PIV_AID = b"\xA0\x00\x00\x03\x08\x00\x00\x10\x00"
# NOTE(review): unused in this module; piv_get_data hard-codes these bytes.
GET_DATA = b"\x00\xCB\x3F\xFF"
# Tags of the PIV data objects requested via piv_get_data.
PIV_CHUID = b"\x5F\xC1\x02" # Card Holder Unique Identifier
CERTIFICATE_9A = b"\x5F\xC1\x05" # X.509 Certificate for PIV Authentication
CERTIFICATE_9C = b"\x5F\xC1\x0A" # X.509 Certificate for Digital Signature
CERTIFICATE_9D = b"\x5F\xC1\x0B" # X.509 Certificate for Key Management
CERTIFICATE_9E = b"\x5F\xC1\x01" # X.509 Certificate for Card Authentication
def unwrap_tlv(data):
    """Strip one BER-TLV layer from *data* and return the value bytes.

    Handles definite short-form and long-form lengths; raises ValueError
    for the indefinite (0x80) and reserved (0xFF) length octets.
    """
    _tag = data[0]  # tag octet; not used by callers
    length_octet = data[1]
    if length_octet < 128:
        # Short form: the octet itself is the value length.
        return data[2:2 + length_octet]
    if length_octet in (128, 255):
        raise ValueError
    # Long form: low 7 bits give the number of subsequent length octets.
    num_len_bytes = length_octet & 127
    value_len = int.from_bytes(data[2:2 + num_len_bytes], "big")
    start = 2 + num_len_bytes
    return data[start:start + value_len]
class PivMixin(IsoMixin):
    """Mixin implementing PIV card operations on top of IsoMixin."""

    def piv_select(self):
        """Select the PIV application by its AID."""
        self.iso_select_df(PIV_AID)

    def piv_get_data(self, data_field):
        """Issue a GET DATA APDU and return the response bytes."""
        response, sw1, sw2 = self.apdu(0x00, 0xCB, 0x3F, 0xFF, data_field)
        return response

    def piv_get_certificate(self, slot=0x9E):
        """Retrieve a certificate from the card and return in DER format."""
        slot_to_object = {
            0x9A: CERTIFICATE_9A,
            0x9C: CERTIFICATE_9C,
            0x9D: CERTIFICATE_9D,
            0x9E: CERTIFICATE_9E,
        }
        data_object_identifier = slot_to_object.get(slot)
        if data_object_identifier is None:
            raise ValueError("Unknown slot 0x{:02X}".format(slot))
        try:
            return unwrap_tlv(unwrap_tlv(self.piv_get_data(b"\x5C\x03" + data_object_identifier)))
        except APDUError as e:
            # 6A 80: incorrect parameters in the command data field
            # 6A 82: file or application not found
            if e.sw1 == 0x6A and e.sw2 in (0x80, 0x82):
                return None
            raise e

    def piv_general_authenticate(self, algo, slot, data):
        """Issue a GENERAL AUTHENTICATE APDU and return the response bytes."""
        response, sw1, sw2 = self.apdu(0x00, 0x87, algo, slot, data)
        return response

    def piv_sign(self, nonce, algo=0x14, slot=0x9E):
        """Sign a message on the card and return the signature in DER format."""
        # Supported algorithms and their maximum nonce lengths:
        # 0x11 ECC P-256 (32 bytes), 0x14 ECC P-384 (48 bytes).
        # 0x07 (RSA 2048) is intentionally not implemented.
        nonce_limits = {0x11: 32, 0x14: 48}
        if algo not in nonce_limits:
            raise NotImplementedError
        if algo == 0x11 and len(nonce) > 32:
            raise ValueError("nonce must be <= 32 bytes for P-256")
        if algo == 0x14 and len(nonce) > 48:
            raise ValueError("nonce must be <= 48 bytes for P-384")
        # Build the dynamic authentication template:
        # 7C L { 82 00 (response placeholder), 81 L nonce (challenge) }
        challenge = [0x82, 0x00, 0x81, len(nonce)] + list(nonce)
        dynamic_auth_template = [0x7C, len(challenge)] + challenge
        response = self.piv_general_authenticate(
            algo, slot, bytes(dynamic_auth_template)
        )
        return unwrap_tlv(unwrap_tlv(response))
from lxml import html
import requests
# Define parsing function
def parse(score):
    """Extract the team's own score from a result string like 'W 101.2-95.8'.

    Skips the leading result letter and space, then reads up to the dash.
    """
    dash_at = score.index('-')
    return float(score[2:dash_at])
def scrape(league_id):
    """Scrape weekly fantasy scores for every team (1-12) in an ESPN league.

    league_id -- ESPN fantasy league identifier (string)
    Returns a list of {'name': ..., 'scores': [...]} dicts.
    """
    league_scores = []
    for team_id in range(1, 13):
        # Fetch this team's schedule page
        page = requests.get('http://games.espn.go.com/ffl/schedule?leagueId=' + league_id + '&teamId=' + str(team_id))
        tree = html.fromstring(page.text)
        # Get team name from the page heading
        team_name = tree.xpath('//h1/text()')
        team_name = [name for name in team_name if name != '\n'][0].replace(' Schedule', '')
        # Get weekly scores, skipping the excluded team
        if team_name != 'Mass Text Appeal III':
            weekly_scores = tree.xpath('//nobr//a[@href]/text()')
            weekly_scores = [score for score in weekly_scores if score != 'Box' and (score[0] == 'W' or score[0] == 'L')]
            weekly_scores = list(map(parse, weekly_scores))
            # BUG FIX: this append was previously outside the `if`, so the
            # excluded team was appended with the previous team's scores
            # (or raised NameError if it was processed first).
            league_scores.append({
                'name': team_name,
                'scores': weekly_scores
            })
    return league_scores
import pandas as pd
import numpy as np
from tkinter.filedialog import askopenfilenames
def npzToFormat(NPZfiles=''):
    """Convert FCS .npz result files to .xls workbooks.

    NPZfiles -- iterable of .npz paths; if falsy, a file dialog prompts
    for them.  Each workbook gets Correlation, Count Rates and PCH sheets.
    """
    # Prompt for file names if none were provided
    if not NPZfiles:
        NPZfiles = askopenfilenames(title="Select NPZ Files",
                                    filetypes=(("NPZ Files", "*.npz"),
                                               ("all files", "*.*")))
    for npz_path in NPZfiles:
        data = np.load(npz_path)
        # Correlation-function data
        bins = data['bins']
        CF = data['CF']
        # Per-channel count rates
        count_rates = data['countRates']
        countRateCh1, countRateCh2 = count_rates[0], count_rates[1]
        # Photon counting histograms: each entry is (values, bin_edges)
        PCH = data['PCH']
        PCH1, PCH2 = PCH[0], PCH[1]
        # Account for single-channel collection: zero the other channel
        # and take the time axis from the populated one
        if not np.isnan(PCH2[0][0]):
            PCH2[0] = np.zeros(len(PCH1[0]))
            times = PCH1[1][:-1]
        elif not np.isnan(PCH1[0][0]):
            PCH1[0] = np.zeros(len(PCH2[0]))
            times = PCH2[1][:-1]
        # Assemble one DataFrame per output sheet
        df1 = pd.DataFrame({'Bins [us]': bins, 'CF': CF})
        df2 = pd.DataFrame({'Ch1 Count Rate (kHz)': [countRateCh1],
                            'Ch2 Count Rate (kHz)': [countRateCh2]})
        df3 = pd.DataFrame({'Times': times,
                            'PCH Ch1': PCH1[0],
                            'PCH Ch2': PCH2[0]})
        # Write the workbook next to the input file
        out_name = npz_path[:-4] + '.xls'
        writer = pd.ExcelWriter(out_name)
        df1.to_excel(writer, sheet_name='Correlation')
        df2.to_excel(writer, sheet_name='Count Rates')
        df3.to_excel(writer, sheet_name='PCH')
        writer.save()
if __name__ == '__main__':
    # BUG FIX: a stray dataset-metadata line fused after this guard was
    # removed.  Quick manual test with a sample acquisition file.
    testFile = [r'C:\Users\mfarrar.MESSIAH\Documents\FCS_Measurements\20181218\NewThorNSLaser_TetraSpeck_1to100_5min_0.95mW_Trial_1Ch1ACF.npz']
    npzToFormat(testFile)
#!/usr/bin/env python3
"""Gleitzsch core."""
import argparse
import sys
import os
import random
import string
import subprocess
from subprocess import DEVNULL
# from subprocess import PIPE
from array import array
import numpy as np
from skimage import io
from skimage import img_as_float
from skimage.util import img_as_ubyte
from skimage import transform as tf
from skimage import exposure
from skimage import util
from skimage import color
from skimage import filters
try:
from pydub import AudioSegment
except ImportError:
sys.stderr.write("Warning! Could not import pydub.\n")
sys.stderr.write("This library is not mandatory, however\n")
sys.stderr.write("filters on sound data would be not available\n")
AudioSegment = None
__author__ = "Bogdan Kirilenko, 2020"
__version__ = 4.0
# text constants
# keys used in the filter/attrs dictionaries handled by the Gleitzsch class
RB_SHIFT = "rb_shift"
GLITTER = "glitter"
GAMMA_CORRECTION = "gamma_correction"
ADD_TEXT = "add_text"
VERT_STREAKS = "vert_streaks"
ADD_NOISE = "add_noise"
SOUND_QUALITY = "sound_quality"
BITRATE = "bitrate"
INTENSIFY = "intensify"
GLITCH_SOUND = "glitch_sound"
ADD_RAINBOW = "add_rainbow"
SHIFT_SIZE = "shift_size"
STRETCHING = "stretching"
RANDOM = "random"
TEMP = "temp"
# minimal accepted value for the size (long side) argument
MIN_IM_SIZE = 64
class Gleitzsch:
    """Gleitzsch core class.

    Holds the working image as a float array in self.im_arr, applies
    visual filters to it, and implements the central mp3
    compress/decompress glitch using the external `lame` binary.
    """
    def __init__(self, image_in, size=0, verbose=False):
        """Init gleitzsch class.

        image_in -- path to an image file or a numpy array
        size -- long-side size in pixels to resize to (0 keeps size as is)
        verbose -- if True, progress messages go to stderr via self.v
        """
        # get image array
        self.verbose = verbose
        self.im_arr, _ = self.__read_im(image_in, size)
        self.lame_bin = "lame"
        self.__check_lame() # check that lame is available
        self.supported_filters = [GLITTER, RB_SHIFT,
                                  VERT_STREAKS, ADD_TEXT,
                                  ADD_RAINBOW, STRETCHING]
        # create temp directory
        self.tmp_dir = os.path.join(os.path.dirname(__file__), TEMP)
        os.mkdir(self.tmp_dir) if not os.path.isdir(self.tmp_dir) else None
        self.temp_files = [] # collect temp files here (to delete later)
        self.gamma = 0.4
        self.text_position = RANDOM
        self.v("Gleitzsch instance initiated successfully")
    def __read_im(self, image_in, size):
        """Read image, return an array and shape."""
        if isinstance(image_in, str) and os.path.isfile(image_in):
            self.v(f"Reading file {image_in}")
            matrix = img_as_float(io.imread(image_in))
        elif isinstance(image_in, np.ndarray):
            self.v("Reading np array")
            matrix = img_as_float(image_in)
        else:
            matrix = None
            self.__die(f"Cannot read:\n{image_in}")
        # image might be either 3D or 2D; if 2D -> make it 3D
        if len(matrix.shape) == 3:
            pass # it's a 3D array already
        elif len(matrix.shape) == 2:
            # monochrome image; all procedures are 3D array-oriented
            layer = np.reshape(matrix, (matrix.shape[0], matrix.shape[1], 1))
            matrix = np.concatenate((layer, layer, layer), axis=2)
        else: # something is wrong
            self.__die("Image is corrupted")
        # resize if this required
        if size == 0:
            # keep size as is (not recommended)
            im = matrix.copy()
            w, h, _ = im.shape
        elif size < MIN_IM_SIZE: # what if size is negative?
            im, w, h = None, 0, 0
            # NOTE(review): missing f-prefix -- "{size}" is printed literally
            self.__die("Image size (long side) must be > 64, got {size}")
        else:
            # resize the image
            scale_k = max(matrix.shape[0], matrix.shape[1]) / size
            h = int(matrix.shape[0] / scale_k)
            w = int(matrix.shape[1] / scale_k)
            im = tf.resize(image=matrix, output_shape=(h, w))
        self.v(f"Successfully read image; shape: {w}x{h}")
        return im, (w, h)
    def __check_lame(self):
        """Check that lame is installed."""
        check_cmd = f"{self.lame_bin} --version"
        rc = subprocess.call(check_cmd, shell=True, stdout=DEVNULL)
        if rc == 0:
            self.v("Lame installation detected")
        else:
            self.__die("Lame installation not found, abort")
    def apply_filters(self, filters_all):
        """Apply filters to image one-by-one.

        filters_all -> a dict with filter_id: parameter.
        Unknown or falsy-valued filters are skipped; the rest run in the
        order defined by self.supported_filters.
        """
        self.v(f"Calling apply filters function")
        self.text_position = RANDOM
        if not filters_all: # no filters: nothing to do
            return
        # keep available filters only + that have value
        filters_ = {k: v for k, v in filters_all.items()
                    if k in self.supported_filters and v}
        # better to keep them ordered
        filters_order = sorted(filters_.keys(), key=lambda x: self.supported_filters.index(x))
        for filt_id in filters_order:
            value = filters_[filt_id]
            self.v(f"Applying filter: {filt_id}, value={value}")
            if filt_id == RB_SHIFT:
                self.__apply_rb_shift(value)
            elif filt_id == GLITTER:
                self.__apply_glitter(value)
            elif filt_id == VERT_STREAKS:
                self.__add_vert_streaks()
            elif filt_id == ADD_RAINBOW:
                self.__add_rainbow()
            elif filt_id == STRETCHING:
                self.__apply_stretching()
    def __apply_stretching(self):
        """Apply stretching filter.

        Splits the image into vertical strips, rescales each by a random
        factor, then resizes the concatenation back to the original shape.
        """
        h = self.im_arr.shape[0]
        w = self.im_arr.shape[1]
        # split in 10 parts -> redefine this later
        strips_num = 10
        max_kt = 5
        strip_w = w // strips_num
        strips = []
        scales = [x for x in max_kt * np.random.sample(strips_num)]
        for j in range(strips_num):
            strip = self.im_arr[:, j * strip_w: (j + 1) * strip_w, :]
            new_shape = (h, int(strip_w * scales[j]))
            strip_res = tf.resize(strip, new_shape)
            strips.append(strip_res)
        concatenation = np.concatenate(strips, axis=1)
        self.im_arr = tf.resize(concatenation, (h, w))
    def __make_bw_(self, thr):
        """Make BW image version.

        Pixels whose channel sum exceeds *thr* become white (1), the rest
        black; returns the mask replicated over 3 channels.
        """
        self.v("Producing BW version")
        col_sum = np.sum(self.im_arr, axis=2) # sum over col channel
        bw_im = np.zeros((col_sum.shape[0], col_sum.shape[1]))
        # fill zero arr with 1 where color sum is > threshold: white
        bw_im[col_sum > thr] = 1
        # at prev step we created 2D arr, need 3D
        bw_im = np.reshape(bw_im, (bw_im.shape[0], bw_im.shape[1], 1))
        # io.imsave("test.jpg", bw_im)
        return np.concatenate((bw_im, bw_im, bw_im), axis=2)
    @staticmethod
    def __rm_bright_zones(img):
        """Remove bright zones.

        Subtracts the amount by which the channel sum exceeds 2 and clips
        the result to [0, 1].
        """
        col_sum = np.sum(img, axis=2)
        over_thr = np.reshape((col_sum - 2), (img.shape[0], img.shape[1], 1))
        over_thr = np.concatenate((over_thr, over_thr, over_thr), axis=2)
        new_im = np.where(over_thr > 0, img - over_thr, img)
        new_im[new_im < 0.0] = 0.0
        new_im[new_im > 1.0] = 1.0
        return new_im
    def __rainbow_layer_(self, bw_im):
        """Create a rainbow layer.

        RGB-shifts the BW mask, blurs it, and boosts saturation/value in
        HSV space before trimming over-bright zones.
        """
        self.v("Making rainbow layer.")
        rainbow_arr = self.__apply_rb_shift(80, non_self_pic=bw_im)
        rainbow_arr = filters.gaussian(rainbow_arr,
                                       sigma=30,
                                       multichannel=True,
                                       mode='reflect',
                                       cval=0.6)
        img_hsv = color.rgb2hsv(rainbow_arr)
        img_hsv[..., 1] *= 3 # S
        img_hsv[..., 2] *= 1.4 # V
        img_hsv[img_hsv >= 1.0] = 1.0
        rainbow_arr = color.hsv2rgb(img_hsv)
        rainbow_arr = self.__rm_bright_zones(rainbow_arr)
        # io.imsave("test.jpg", rainbow_arr)
        return rainbow_arr
    def __add_rainbow(self):
        """Add rainbow to the image."""
        self.v("Adding a rainbow")
        # detect bright parts
        bw_version = self.__make_bw_(thr=2.1)
        rainbow_pic = self.__rainbow_layer_(bw_version)
        rainbow_pic /= 2
        self.im_arr = rainbow_pic + self.im_arr
        self.im_arr[self.im_arr > 1.0] = 1.0
    def __add_vert_streaks(self):
        """Add vertical streaks.

        Cuts the image into randomly-placed vertical segments and rolls
        the columns of every other segment by smoothly varying offsets.
        """
        w, h, d = self.im_arr.shape
        processed = []
        streaks_borders_num = random.choice(range(8, 16, 2))
        streaks_borders = [0] + list(sorted(np.random.choice(range(h),
                                                             streaks_borders_num,
                                                             replace=False))) + [h]
        for num, border in enumerate(streaks_borders[1:]):
            prev_border = streaks_borders[num]
            pic_piece = self.im_arr[:, prev_border: border, :]
            if num % 2 != 0: # don't touch this part
                processed.append(pic_piece)
                continue
            piece_h, piece_w, _ = pic_piece.shape
            piece_rearranged = []
            shifts_raw = sorted([x if x > 0 else -x for x in
                                 map(int, np.random.normal(5, 10, piece_w))])
            shifts_add = np.random.choice(range(-5, 2), piece_w)
            shifts_mod = [shifts_raw[x] + shifts_add[x] for x in range(piece_w)]
            shifts_left = [shifts_mod[x] for x in range(0, piece_w, 2)]
            shifts_right = sorted([shifts_mod[x] for x in range(1, piece_w, 2)],
                                  reverse=True)
            shifts = shifts_left + shifts_right
            for col_num, col_ind in enumerate(range(piece_w)):
                col = pic_piece[:, col_ind: col_ind + 1, :]
                col = np.roll(col, axis=0, shift=shifts[col_num])
                piece_rearranged.append(col)
            piece_shifted = np.concatenate(piece_rearranged, axis=1)
            processed.append(piece_shifted)
        # merge shifted elements back
        self.im_arr = np.concatenate(processed, axis=1)
        self.im_arr = tf.resize(self.im_arr, (w, h))
    def __apply_glitter(self, value):
        """Apply glitter.

        value -- number of white sparks to scatter over the image.
        """
        dots = [] # fill this list with dot coordinates
        _dot_size = 3
        w, h, _ = self.im_arr.shape
        for _ in range(value):
            # just randomly select some coordinates
            dx = random.choice(range(_dot_size, w - _dot_size))
            dy = random.choice(range(_dot_size, h - _dot_size))
            dots.append((dx, dy))
        for dot in dots:
            self.im_arr[dot[0] - 1: dot[0], dot[1] - 3: dot[1] + 3, :] = 1
    def shift_hue(self, value):
        """Shift image hue in HSV."""
        img_hsv = color.rgb2hsv(self.im_arr)
        img_hsv[..., 0] += value
        img_hsv[..., 0] -= 1
        self.im_arr = color.hsv2rgb(img_hsv)
    def __apply_rb_shift(self, value, non_self_pic=None):
        """Draw chromatic aberrations.

        value -- shift intensity in pixels; the green channel is shrunk by
        it and the blue channel by twice it, then all three are recombined.
        non_self_pic -- if given, the effect is applied to that array and
        returned instead of mutating self.im_arr.
        """
        if non_self_pic is None:
            _init_shape = self.im_arr.shape
            proc_pic = self.im_arr
        else: # apply this filter to something else:
            self.v("Applying RGB shift to non-self.im_arr picture!")
            _init_shape = non_self_pic.shape
            proc_pic = non_self_pic
        # extract different channels
        red = proc_pic[:, :, 0]
        green = proc_pic[:, :, 1]
        blue = proc_pic[:, :, 2]
        # resize different channels to create the effect
        # define new sizes
        red_x, red_y = _init_shape[0], _init_shape[1]
        self.v(f"Red channel size: {red_x}x{red_y}")
        green_x, green_y = _init_shape[0] - value, _init_shape[1] - value
        self.v(f"Green channel size: {green_x}x{green_y}")
        blue_x, blue_y = _init_shape[0] - 2 * value, _init_shape[1] - 2 * value
        self.v(f"Blue channel size: {blue_x}x{blue_y}")
        # check that sizes are OK
        channel_borders = (red_x, red_y, green_x, green_y, blue_x, blue_y)
        if any(x < 1 for x in channel_borders):
            self.__die(f"{RB_SHIFT} got too bit value {value}; cannot apply")
        # apply resize procedure
        red = tf.resize(red, output_shape=(red_x, red_y))
        green = tf.resize(green, output_shape=(green_x, green_y))
        blue = tf.resize(blue, output_shape=(blue_x, blue_y))
        w, h = blue.shape # temporary shape (minimal channel size)
        self.v(f"Updated image size: {w}x{h}")
        ktd2 = int(value / 2)
        red_n = np.reshape(red[value: -value, value: -value],
                           (w, h, 1))
        green_n = np.reshape(green[ktd2: -1 * ktd2, ktd2: -1 * ktd2],
                             (w, h, 1))
        blue_n = np.reshape(blue[:, :], (w, h, 1))
        # save changes to self.im_arr
        if non_self_pic is None:
            self.im_arr = np.concatenate((red_n, green_n, blue_n), axis=2)
            # reshape it back
            self.im_arr = tf.resize(self.im_arr, (_init_shape[0], _init_shape[1]))
            self.v(f"Successfully applied {RB_SHIFT} filter")
            return None
        else:
            # return image (not self.im_arr)
            upd_img = np.concatenate((red_n, green_n, blue_n), axis=2)
            upd_img = tf.resize(upd_img, (_init_shape[0], _init_shape[1]))
            self.v("Applied RGB shift to non-self.im_arr image")
            return upd_img
    def __parse_mp3_attrs(self, attrs):
        """Parse mp3-related options.

        Merges user-supplied *attrs* over the defaults and sanity-checks
        the sound quality; returns the resulting dict.
        """
        self.v("Defining mp3-compression parameters")
        attrs_dict = {ADD_NOISE: False,
                      SOUND_QUALITY: 8,
                      BITRATE: 16,
                      INTENSIFY: False,
                      GLITCH_SOUND: False,
                      SHIFT_SIZE: int(self.im_arr.shape[1] / 2.35)
                      }
        # correct shift size if bitrate is pretty high
        # NOTE(review): this check runs BEFORE user overrides are merged in,
        # so with the default bitrate of 16 this branch is never taken even
        # if the caller requests a bitrate >= 64.
        if attrs_dict[BITRATE] >= 64:
            attrs_dict[SHIFT_SIZE] = 0
        avail_keys = set(attrs_dict.keys())
        # re-define default params
        for k, v in attrs.items():
            if k not in avail_keys:
                continue
            if v is None:
                continue
            self.v(f"Set param {k} to {v}")
            attrs_dict[k] = v
        # sanity checks
        if attrs_dict[SOUND_QUALITY] < 1 or attrs_dict[SOUND_QUALITY] > 10:
            self.__die(f"Sound quality must be in [1..10]")
        return attrs_dict
    def __add_noise(self):
        """Add noise to image (intensifies the effect)."""
        self.im_arr = util.random_noise(self.im_arr, mode="speckle")
    def mp3_compression(self, attrs):
        """Compress and decompress the image using mp3 algorithm.

        attrs -> a dictionary with additional parameters (see
        __parse_mp3_attrs for the recognised keys).  Each colour channel is
        serialized to raw bytes, round-tripped through lame, and read back.
        """
        # split image in channels
        orig_image_ = self.im_arr.copy()
        self.v("Applying mp3 compression")
        mp3_attrs = self.__parse_mp3_attrs(attrs)
        self.__add_noise() if mp3_attrs[ADD_NOISE] else None
        # apply gamma correction upfront
        self.im_arr = exposure.adjust_gamma(image=self.im_arr, gain=self.gamma)
        w, h, _ = self.im_arr.shape
        # after the mp3 compression the picture shifts, need to compensate that
        # however, if bitrate >= 64 it doesn't actually happen
        red = self.im_arr[:, :, 0]
        green = self.im_arr[:, :, 1]
        blue = self.im_arr[:, :, 2]
        channels = (red, green, blue)
        glitched_channels = []
        # process them separately
        for num, channel in enumerate(channels, 1):
            # need 1D array now
            orig_size = w * h
            channel_flat = np.reshape(channel, newshape=(orig_size, ))
            int_form_nd = np.around(channel_flat * 255, decimals=0)
            int_form_nd[int_form_nd > 255] = 255
            int_form_nd[int_form_nd < 0] = 0
            # convert to bytes
            int_form = list(map(int, int_form_nd))
            bytes_str = bytes(int_form)
            # define temp file paths
            raw_chan_ = os.path.join(self.tmp_dir, f"{self.__id_gen()}.blob")
            mp3_compr_ = os.path.join(self.tmp_dir, f"{self.__id_gen()}.mp3")
            mp3_decompr_ = os.path.join(self.tmp_dir, f"{self.__id_gen()}.mp3")
            # save paths (to remove the files later)
            self.temp_files.extend([raw_chan_, mp3_compr_, mp3_decompr_])
            # save bytes so a pseudo-wav file
            self.v(f"Bytes size before compression: {orig_size}")
            with open(raw_chan_, "wb") as f:
                f.write(bytes_str)
            # define compress-decompress commands
            mp3_compr_cmd = f'{self.lame_bin} -r --unsigned -s 16 -q {mp3_attrs[SOUND_QUALITY]} ' \
                            f'--resample 16 --bitwidth 8 -b {mp3_attrs[BITRATE]} ' \
                            f'-m m {raw_chan_} "{mp3_compr_}"'
            mp3_decompr_cmd = f'{self.lame_bin} --decode -x -t "{mp3_compr_}" {mp3_decompr_}'
            # call compress-decompress commands
            self.__call_proc(mp3_compr_cmd)
            # if required: change mp3 stream itself
            self.__glitch_sound(mp3_compr_, num, mp3_attrs) if mp3_attrs[GLITCH_SOUND] else None
            self.__call_proc(mp3_decompr_cmd)
            # read decompressed file | get raw sequence
            with open(mp3_decompr_, "rb") as f:
                mp3_bytes = f.read()
            upd_size = len(mp3_bytes)
            self.v(f"Bytes size after compression: {upd_size}")
            # usually array size after compression is bigger
            proportion = upd_size // orig_size
            # split in chunks of proportion size, take the 1st element from each
            bytes_num = len(bytes_str) * proportion
            decompressed = mp3_bytes[:bytes_num]
            glitched_channel = np.array([pair[0] / 255 for pair
                                         in self.parts(decompressed, proportion)])
            glitched_channel = np.reshape(glitched_channel, newshape=(w, h, 1))
            glitched_channels.append(glitched_channel)
        self.v("Concatenation of the mp3d image + rolling + adjust contrast")
        self.im_arr = np.concatenate(glitched_channels, axis=2)
        self.im_arr = np.roll(a=self.im_arr, axis=1, shift=mp3_attrs[SHIFT_SIZE])
        perc_left, perc_right = np.percentile(self.im_arr, (5, 95))
        self.im_arr = exposure.rescale_intensity(self.im_arr, in_range=(perc_left, perc_right))
        self.__remove_temp_files() # don't need them anymore
        self.__intensify(orig_image_) if mp3_attrs[INTENSIFY] else None
    def __glitch_sound(self, mp3_path, ch_num, opts):
        """Change mp3 file directly.

        NOTE(review): ch_num, opts, and last_ind are currently unused; the
        sample array is converted back unchanged, so this is effectively a
        placeholder (see "some processing here" below).
        """
        self.v(f"Changing sound stream in {mp3_path} directly")
        if AudioSegment is None:
            self.__die("__glitch_sound requires Audiosegment (not imported)")
        x, y, _ = self.im_arr.shape
        # read sound file, get array of bytes (not a list!)
        sound = AudioSegment.from_mp3(mp3_path)
        last_ind = x * y # sound array is a bit longer than image size
        entire_sound_array = np.array(sound.get_array_of_samples())
        # some processing here
        # final step: convert np array back to array
        new_array = array("h", entire_sound_array)
        new_sound = sound._spawn(new_array)
        new_sound.export(mp3_path, format='mp3')
    def __intensify(self, orig_image):
        """Intensify mp3 glitch using differences with original image."""
        self.v("Increasing mp3 glitch intensity")
        diff = self.im_arr - orig_image
        diff[diff < 0] = 0
        diff_hsv = color.rgb2hsv(diff)
        diff_hsv[..., 1] *= 5
        diff_hsv[..., 2] *= 2.5
        diff_hsv[diff_hsv >= 1.0] = 1.0
        diff = color.hsv2rgb(diff_hsv)
        self.im_arr += diff
        self.im_arr[self.im_arr > 1.0] = 1.0
    def __call_proc(self, command):
        """Call command using subprocess; die on a non-zero exit code."""
        self.v(f"Calling command: {command}")
        rc = subprocess.call(command, shell=True, stderr=DEVNULL)
        if rc != 0:
            self.__die(f"Error! Command {command} died!")
    def __remove_temp_files(self):
        """Remove temp files listed in the self.temp_files."""
        self.v(f"Removing temp files: {self.temp_files}")
        for tmp_file in self.temp_files:
            os.remove(tmp_file) if os.path.isfile(tmp_file) else None
    def save(self, path_):
        """Save the resulting image."""
        self.v(f"Saving image to: {path_}")
        io.imsave(fname=path_, arr=img_as_ubyte(self.im_arr))
    def v(self, msg):
        """Show verbose message."""
        sys.stderr.write(f"{msg}\n") if self.verbose else None
    @staticmethod
    def __die(message, rc=1):
        """Write message and quit."""
        sys.stderr.write("Error!\n")
        sys.stderr.write(f"{message}\n")
        sys.exit(rc)
    @staticmethod
    def __id_gen(size=12, chars=string.ascii_uppercase + string.digits):
        """Return random string for temp files."""
        return "".join(random.choice(chars) for _ in range(size))
    @staticmethod
    def parts(lst, n):
        """Split an iterable into a list of lists of len n."""
        return [lst[x: x + n] for x in iter(range(0, len(lst), n))]
def parse_args():
    """Parse and return command-line arguments.

    Prints help and exits with status 0 when fewer than two
    positional arguments are supplied.
    """
    app = argparse.ArgumentParser()
    app.add_argument("input", help="Input image")
    app.add_argument("output", help="Output image")
    app.add_argument("--size", default=1000, type=int, help="Image size (long side)")
    app.add_argument("--verbose", "--v1", action="store_true", dest="verbose",
                     help="Verbosity mode on.")
    # filters
    app.add_argument(f"--{RB_SHIFT}", "-r", default=0, type=int,
                     help="RGB aberrations, the bigger value -> the higher intensity")
    app.add_argument(f"--{GLITTER}", "-g", default=0, type=int,
                     help="Add glitter, The bigger value -> the bigger sparks")
    app.add_argument(f"--{VERT_STREAKS}", "-v", action="store_true", dest=VERT_STREAKS,
                     help="Add vertical streaks")
    app.add_argument(f"--{ADD_TEXT}", "-t", default=None,
                     help="Add text (position is random)")
    app.add_argument("--text_position", "--tp", type=str,
                     help="Pre-define text coordinates (left corner) "
                          "two comma-separated values like 100,50")
    app.add_argument(f"--{ADD_RAINBOW}", "-a", dest=ADD_RAINBOW, action="store_true",
                     help="Add a rainbow!")
    # mp3-compression params
    # fix: plain literal here — the stray f-string prefix on
    # "--compression_cycles" had no placeholder and did nothing
    app.add_argument("--compression_cycles", "--cc", default=1, type=int,
                     help="Number of mp3 compression-decompression cycles, default 1")
    app.add_argument("--save_each_cycle", "--sec", default=None,
                     help="Save an image of each compression cycle, specify "
                          "a directory if this is a case")
    app.add_argument(f"--{STRETCHING}", "--st", action="store_true", dest=STRETCHING,
                     help="Apply stretching filter")
    app.add_argument(f"--{ADD_NOISE}", "-n", action="store_true", dest=ADD_NOISE,
                     help="Add random noise to increase glitch effect")
    app.add_argument(f"--{SOUND_QUALITY}", "-q", type=int, default=8,
                     help="Gleitzsch sound quality")
    app.add_argument(f"--{BITRATE}", "-b", type=int, default=16,
                     help="MP3 bitrate")
    app.add_argument(f"--{INTENSIFY}", "-i", action="store_true", dest=INTENSIFY,
                     help="Get diff between mp3 glitched/not glitched image and "
                          "intensify glitched channel")
    app.add_argument(f"--{GLITCH_SOUND}", "-s", action="store_true", dest=GLITCH_SOUND,
                     help="Modify intermediate mp3 files")
    app.add_argument(f"--{SHIFT_SIZE}", "--sz", default=None, type=int,
                     help="Mp3 compression produces a horizontally shifted image "
                          "This parameter controls shift size, overriding "
                          "automatically assigned values")
    if len(sys.argv) < 3:
        app.print_help()
        sys.exit(0)
    args_ = app.parse_args()
    return args_
if __name__ == "__main__":
    args = parse_args()
    gleitzsch = Gleitzsch(args.input, args.size, args.verbose)
    gleitzsch.apply_filters(vars(args))  # as a dict: filter id -> value
    # create the per-cycle output directory only when several cycles are saved
    if args.compression_cycles > 1 and args.save_each_cycle:
        os.mkdir(args.save_each_cycle) if not os.path.isdir(args.save_each_cycle) else None
    # run the requested number of mp3 compression/decompression cycles
    for i in range(args.compression_cycles):
        if args.compression_cycles > 1:
            sys.stderr.write(f"Compression cycle num {i + 1}/{args.compression_cycles}\n")
        gleitzsch.mp3_compression(vars(args))
        # optionally snapshot each intermediate cycle as 0000.jpg, 0001.jpg, ...
        if args.save_each_cycle:
            filename = f"{str(i).zfill(4)}.jpg"
            path = os.path.join(args.save_each_cycle, filename)
            gleitzsch.save(path)
    gleitzsch.save(args.output)
| kirilenkobm/gleitzsch_v4 | gleitzsch.py | gleitzsch.py | py | 24,870 | python | en | code | 0 | github-code | 36 |
30112355766 | import os
import wikipedia
from nltk.tag.stanford import StanfordPOSTagger
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
import matplotlib.pyplot as plt
# The Stanford POS tagger runs on the JVM, so Java must be locatable.
# NOTE(review): hard-coded Windows path — adjust per machine.
os.environ["JAVAHOME"] = "C:\\Program Files (x86)\\Common Files\\Oracle\\Java\\javapath" # didn't have java in path on my local machine :)
# Paths to the pre-trained tagger model and its jar, relative to this file.
model_path = "../stanford-postagger-full-2018-10-16/models/english-bidirectional-distsim.tagger"
jar_path = "../stanford-postagger-full-2018-10-16/stanford-postagger.jar"
# Module-level tagger instance shared by all exercises below.
tagger = StanfordPOSTagger(model_path, jar_path)
def ex1():
    """Fetch the Wikipedia "Shrek" page, print samples and POS tags.

    Prints the title, the first 200 word tokens and the POS tags of the
    first 20 sentences, then returns the wikipedia page object.
    """
    page = wikipedia.page("Shrek")
    content = page.content
    words_in_text = word_tokenize(content)
    print("Title: " + page.title)
    print("First 200 words: ", words_in_text[:200])
    sentences = sent_tokenize(content)
    # get first 20 sentences (or maximum if not having 20)
    first_20_sentences = sentences[:min(20, len(sentences))]
    # word tokenize each sentence and apply the Stanford tagger
    tagger_results = [tagger.tag(word_tokenize(el)) for el in first_20_sentences]
    print(tagger_results)
    return page
def list_of_words_for_tag(text, tag):
    """Return every word in *text* whose POS tag equals *tag*.

    Tags the whole text sentence by sentence with the module-level
    Stanford tagger, then flattens and filters the results.
    """
    # break it into sentences
    sentences = sent_tokenize(text)
    # for every sentence, get words (apply word_tokenize) and apply POS Tagger to get tags
    tagger_results = [tagger.tag(word_tokenize(el)) for el in sentences]
    # flatten and keep only words carrying the requested tag
    return [el for sublist in tagger_results for (el, word_tag) in sublist if word_tag == tag]
def ex2(text, tags):
    """Return all words of *text* tagged with any tag in *tags*, grouped by tag.

    NOTE(review): this re-tags the entire text once per tag — O(len(tags))
    tagger runs.  Tagging once and filtering against a set would be faster,
    but would interleave the tag groups, changing the output order.
    """
    return [el for tag in tags for el in list_of_words_for_tag(text, tag)]
def ex3(text):
    """Print the nouns, verbs and content-word percentage of *text*.

    "Content words" here means nouns plus verbs (Penn Treebank tags).
    """
    nouns_tags = ["NN", "NNS", "NNP", "NNPS"]
    verbs_tags = ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ"]
    number_of_words_in_text = get_number_of_words_in_text(text)
    nouns = ex2(text, nouns_tags)
    verbs = ex2(text, verbs_tags)
    print("Nouns: ", nouns)
    print("Verbs: ", verbs)
    print("Percentage of content words: ", (len(nouns) + len(verbs)) / number_of_words_in_text * 100, "%")
def get_number_of_words_in_text(text):
    """Return the number of word tokens in *text* (nltk tokenization)."""
    return len(word_tokenize(text))
def ex4(text, n=5):
    """Compare plain vs POS-aware lemmatization on the first *n* sentences.

    Prints a table row for each (word, tag) pair where lemmatizing with the
    WordNet POS gives a different result than lemmatizing without it; each
    distinct pair is reported only once.
    """
    print("Original word | POS | Simple lemmatization | Lemmatization with POS")
    lemma = WordNetLemmatizer()
    sentences = sent_tokenize(text)
    sentences = sentences[:min(n, len(sentences))]
    tagger_results = [tagger.tag(word_tokenize(el)) for el in sentences]
    tagger_results = [el for sublist in tagger_results for el in sublist]
    already_counted = []
    for word, tag in tagger_results:
        word_net_pos = get_wordnet_pos(tag)
        # skip tags with no WordNet POS equivalent (not lemmatizable)
        if word_net_pos == '':
            continue
        lemmatization = lemma.lemmatize(word)
        lemmatization_with_pos = lemma.lemmatize(word, word_net_pos)
        if lemmatization != lemmatization_with_pos and (word, tag) not in already_counted:
            print_table_row(word, tag, lemmatization, lemmatization_with_pos)
            already_counted.append((word, tag))
def print_table_row(original_word, pos, lemmatization, lemmatization_with_pos):
    """Print one pipe-separated row for the ex4 comparison table."""
    print(" | ".join((original_word, pos, lemmatization, lemmatization_with_pos)))
def get_wordnet_pos(tag):
    """Map a Penn Treebank tag prefix to the matching WordNet POS.

    Returns '' when the tag has no WordNet equivalent.
    """
    for prefix, pos in (('J', wordnet.ADJ),
                        ('V', wordnet.VERB),
                        ('N', wordnet.NOUN),
                        ('R', wordnet.ADV)):
        if tag.startswith(prefix):
            return pos
    return ''
def ex5(text, maximum=5):
    """Plot a bar chart of the *maximum* most frequent POS tags in *text*."""
    sentences = sent_tokenize(text)
    tagger_results = [tagger.tag(word_tokenize(el)) for el in sentences]
    tagger_results = [el for sublist in tagger_results for el in sublist]
    # count occurrences per POS tag
    pos_numbers = dict()
    for _, tag in tagger_results:
        if tag not in pos_numbers:
            pos_numbers[tag] = 1
        else:
            pos_numbers[tag] += 1
    # pos_numbers = pos_numbers[:min(maximum, len(pos_numbers))]
    # sort tags by frequency (descending) and keep the top *maximum*
    pos_numbers = [(key, value) for key, value in pos_numbers.items()]
    pos_numbers.sort(key=lambda el: el[1], reverse=True)
    pos_numbers = pos_numbers[:min(maximum, len(pos_numbers))]
    keys = [key for key, _ in pos_numbers]
    values = [value for _, value in pos_numbers]
    plt.bar(keys, values)
    plt.show()
# I will use a shorter text for testing.  Because of how the exercise is
# specified, a lot of computation is unfortunately wasted, so a shorter text
# keeps the runtime reasonable.
# To run on the Wikipedia article instead, pass page.content below.
TEST_TEXT = "This is my test text. With this test text I will test everything. This is great, amazing text." \
            " I will make this text great again! Why are you running?"
if __name__ == "__main__":
    # Run each exercise in order, announcing which one is executing.
    print("Ex1")
    page = ex1()
    print("Ex3")
    # ex3(page.content)
    ex3(TEST_TEXT)
    print("Ex4")
    ex4(TEST_TEXT)
    # bug fix: this header previously said "Ex4" but announces the ex5 run
    print("Ex5")
    ex5(TEST_TEXT)
| daneel95/Master_Homework | Restanta/NLP/Lab3/homework1.py | homework1.py | py | 4,985 | python | en | code | 0 | github-code | 36 |
21620647161 | from __future__ import absolute_import
import sys
import threading
import weakref
from concurrent.futures import _base
try: # Python3
import queue
except Exception: # Python2
import Queue as queue # type: ignore[no-redef]
class _WorkItem(object):
  """One submitted call: a future plus the function and its arguments."""

  def __init__(self, future, fn, args, kwargs):
    self._future = future
    self._fn = fn
    self._fn_args = args
    self._fn_kwargs = kwargs

  def run(self):
    """Execute the call and publish its outcome on the future."""
    if not self._future.set_running_or_notify_cancel():
      # The future was cancelled before we got a chance to run it.
      return
    try:
      self._future.set_result(self._fn(*self._fn_args, **self._fn_kwargs))
    except BaseException as exc:
      # Even though Python 2 futures library has #set_exection(),
      # the way it generates the traceback doesn't align with
      # the way in which Python 3 does it so we provide alternative
      # implementations that match our test expectations.
      if sys.version_info.major >= 3:
        self._future.set_exception(exc)
      else:
        e, tb = sys.exc_info()[1:]
        self._future.set_exception_info(e, tb)
class _Worker(threading.Thread):
  """Worker thread that runs one work item, then idles awaiting reuse.

  After finishing an item the worker parks itself on the idle queue and
  waits up to permitted_thread_age_in_seconds to be handed another item
  (via accepted_work) before retiring.
  """

  def __init__(
      self, idle_worker_queue, permitted_thread_age_in_seconds, work_item):
    super(_Worker, self).__init__()
    self._idle_worker_queue = idle_worker_queue
    self._permitted_thread_age_in_seconds = permitted_thread_age_in_seconds
    self._work_item = work_item
    # set by accepted_work()/shutdown() to interrupt the idle wait
    self._wake_event = threading.Event()
    # guards _work_item/_shutdown transitions against the submitting thread
    self._lock = threading.Lock()
    self._shutdown = False

  def run(self):
    while True:
      self._work_item.run()
      self._work_item = None

      # If we are explicitly awake then don't add ourselves back to the
      # idle queue. This occurs in case 3 described below.
      if not self._wake_event.is_set():
        self._idle_worker_queue.put(self)

      self._wake_event.wait(self._permitted_thread_age_in_seconds)

      with self._lock:
        # When we are awoken, we may be in one of three states:
        #  1) _work_item is set and _shutdown is False.
        #     This represents the case when we have accepted work.
        #  2) _work_item is unset and _shutdown is True.
        #     This represents the case where either we timed out before
        #     accepting work or explicitly were shutdown without accepting
        #     any work.
        #  3) _work_item is set and _shutdown is True.
        #     This represents a race where we accepted work and also
        #     were shutdown before the worker thread started processing
        #     that work. In this case we guarantee to process the work
        #     but we don't clear the event ensuring that the next loop
        #     around through to the wait() won't block and we will exit
        #     since _work_item will be unset.

        # We only exit when _work_item is unset to prevent dropping of
        # submitted work.
        if self._work_item is None:
          self._shutdown = True
          return
        if not self._shutdown:
          self._wake_event.clear()

  def accepted_work(self, work_item):
    """Returns True if the work was accepted.

    This method must only be called while the worker is idle.
    """
    with self._lock:
      if self._shutdown:
        return False

      self._work_item = work_item
      self._wake_event.set()
      return True

  def shutdown(self):
    """Marks this thread as shutdown possibly waking it up if it is idle."""
    with self._lock:
      if self._shutdown:
        return

      self._shutdown = True
      self._wake_event.set()
class UnboundedThreadPoolExecutor(_base.Executor):
  """Executor that grows without bound, recycling idle worker threads.

  Idle workers are reused when possible; when none accepts the task a new
  daemon thread is spawned.  Workers retire on their own after
  permitted_thread_age_in_seconds of inactivity.
  """

  def __init__(self, permitted_thread_age_in_seconds=30):
    self._permitted_thread_age_in_seconds = permitted_thread_age_in_seconds
    self._idle_worker_queue = queue.Queue()
    # WeakSet so retired worker threads can be garbage collected
    self._workers = weakref.WeakSet()
    self._shutdown = False
    self._lock = threading.Lock() # Guards access to _workers and _shutdown

  def submit(self, fn, *args, **kwargs):
    """Attempts to submit the work item.

    A runtime error is raised if the pool has been shutdown.
    """
    future = _base.Future()
    work_item = _WorkItem(future, fn, args, kwargs)
    try:
      # Keep trying to get an idle worker from the queue until we find one
      # that accepts the work.
      while not self._idle_worker_queue.get(
          block=False).accepted_work(work_item):
        pass
      return future
    except queue.Empty:
      # No idle worker available: spawn a fresh daemon worker for this item.
      with self._lock:
        if self._shutdown:
          raise RuntimeError(
              'Cannot schedule new tasks after thread pool '
              'has been shutdown.')

        worker = _Worker(
            self._idle_worker_queue,
            self._permitted_thread_age_in_seconds,
            work_item)
        worker.daemon = True
        worker.start()
        self._workers.add(worker)
        return future

  def shutdown(self, wait=True):
    with self._lock:
      if self._shutdown:
        return

      self._shutdown = True
      for worker in self._workers:
        worker.shutdown()

    if wait:
      for worker in self._workers:
        worker.join()
| a0x8o/kafka | sdks/python/apache_beam/utils/thread_pool_executor.py | thread_pool_executor.py | py | 5,126 | python | en | code | 59 | github-code | 36 |
31414321307 | import datetime
import json
from .base_test import BaseTestCase, LoggedActivity
class EditLoggedActivityTestCase(BaseTestCase):
    """Edit logged-activity endpoint test cases (owner edits and reviews)."""
    def setUp(self):
        """Inherit parent tests setUp."""
        super().setUp()
        # add tests logged activity and corresponding activity
        self.alibaba_ai_challenge.save()
        self.log_alibaba_challenge.save()
        self.js_meet_up.save()
        self.payload = dict(
            description="Participated in that event",
            activityId=self.js_meet_up.uuid
        )
    def test_edit_logged_activity_is_successful(self):
        """Test that editing a logged activity does not fail."""
        response = self.client.put(
            f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
            data=json.dumps(self.payload), headers=self.header
        )
        self.assertEqual(response.status_code, 200)
        message = 'Activity edited successfully'
        self.assertEqual(
            json.loads(response.get_data(as_text=True))['message'], message
        )
        # the edit must also be persisted, not just acknowledged
        edited_activity = LoggedActivity.query.get(
            self.log_alibaba_challenge.uuid
        )
        self.assertEqual(edited_activity.activity_id, self.js_meet_up.uuid)
    def test_edit_logged_activity_by_non_owner_is_unsuccessful(self):
        """
        Test that editing a logged activity that
        doesn't belong to you fails.
        """
        # authenticate as a different user than the activity's owner
        self.header["Authorization"] = self.generate_token(
            self.test_user2_payload
        )
        response = self.client.put(
            f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
            data=json.dumps(self.payload), headers=self.header
        )
        self.assertEqual(response.status_code, 404)
    def test_edit_logged_activity_that_is_no_longer_pending(self):
        """
        Test that editing a logged activity that has been approved or rejected
        fails.
        """
        self.log_alibaba_challenge.status = 'approved'
        # NOTE(review): this saves the *activity*, not the logged activity
        # whose status was just changed — presumably save() commits the whole
        # session so the change still persists; confirm.
        self.alibaba_ai_challenge.save()
        response = self.client.put(
            f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
            data=json.dumps(self.payload), headers=self.header
        )
        self.assertEqual(response.status_code, 401)
    def test_edit_logged_activity_parser_works(self):
        """
        Test that during editing a logged activity, the marshmallow result
        parser works the same way it does while logging an activity.
        """
        # activity older than the 30-day editing window
        self.js_meet_up.activity_date = datetime.date.today() - \
            datetime.timedelta(days=31)
        self.js_meet_up.save()
        response = self.client.put(
            f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
            data=json.dumps(self.payload), headers=self.header
        )
        self.assertEqual(response.status_code, 422)
        message = 'You\'re late. That activity happened more than 30 days ago'
        self.assertEqual(
            json.loads(response.get_data(as_text=True))['message'], message
        )
        # an unknown activity id must also be rejected
        self.payload['activityId'] = 'invalid_activity_id'
        response = self.client.put(
            f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
            data=json.dumps(self.payload), headers=self.header
        )
        self.assertEqual(response.status_code, 422)
        message = 'Invalid activity id'
        self.assertEqual(
            json.loads(response.get_data(as_text=True))['message'], message
        )
    def test_edit_logged_activity_validation_works(self):
        """
        Test that during editing a logged activity, validation via marshmallow
        works the same way it does while logging an activity.
        """
        self.payload['activityTypeId'] = 'blah blah'
        response = self.client.put(
            f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
            data=json.dumps(self.payload), headers=self.header
        )
        self.assertEqual(response.status_code, 400)
    def test_secretary_edit_logged_activity_works(self):
        """Test secretary can change status to pending."""
        payload = {'status': 'pending'}
        uuid = self.log_alibaba_challenge.uuid
        response = self.client.put(
            f'/api/v1/logged-activities/review/{uuid}',
            data=json.dumps(payload),
            headers=self.society_secretary
        )
        response_payload = json.loads(response.data)
        self.assertEqual(response_payload.get('data').get('status'),
                         payload.get('status'))
        self.assertEqual(response.status_code, 200)
    def test_secretary_edit_reject_activity_works(self):
        """Test secretary can change status to rejected."""
        payload = {'status': 'rejected'}
        uuid = self.log_alibaba_challenge.uuid
        response = self.client.put(
            f'/api/v1/logged-activities/review/{uuid}',
            data=json.dumps(payload),
            headers=self.society_secretary
        )
        response_payload = json.loads(response.data)
        self.assertEqual(response_payload.get('data').get('status'),
                         payload.get('status'))
        self.assertEqual(response.status_code, 200)
    def test_secretary_edit_invalid_input(self):
        """Test invalid input is rejected."""
        payload = {'status': 'invalid'}
        uuid = self.log_alibaba_challenge.uuid
        response = self.client.put(
            f'/api/v1/logged-activities/review/{uuid}',
            data=json.dumps(payload),
            headers=self.society_secretary
        )
        self.assertEqual(response.status_code, 400)
    def test_secretary_edit_non_existent_logged_activity(self):
        """Test edit non-existent activity returns 404"""
        payload = {'status': 'invalid'}
        response = self.client.put(
            '/api/v1/logged-activities/review/-KlHerwfafcvavefa',
            data=json.dumps(payload),
            headers=self.society_secretary
        )
        response_payload = json.loads(response.data)
        self.assertEqual(response_payload.get('message'),
                         'Logged activity not found')
        self.assertEqual(response.status_code, 404)
    def test_secretary_edit_logged_activity_empty_payload(self):
        """Test edit activity with empty payload returns 400"""
        payload = {}
        response = self.client.put(
            '/api/v1/logged-activities/review/-KlHerwfafcvavefa',
            data=json.dumps(payload),
            headers=self.society_secretary
        )
        response_payload = json.loads(response.data)
        self.assertEqual(response_payload.get('message'),
                         'status is required.')
        self.assertEqual(response.status_code, 400)
| andela/andela-societies-backend | src/tests/test_edit_logged_activity.py | test_edit_logged_activity.py | py | 6,852 | python | en | code | 1 | github-code | 36 |
22477753948 |
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import model_selection as sk_ms
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, accuracy_score, f1_score
from sklearn.neural_network import MLPClassifier
RANDOM_SEED = 20  # fixed seed so the train/test split is reproducible
FRAC_TRAIN = 0.8  # fraction of the data used for training
class Classification(object):
    """Run several sklearn classifiers over a feature/label set and score them."""
    def __init__(self, features, labels):
        # features: 2-D sample matrix; labels: per-sample classes (sklearn shapes)
        self.features = features
        self.labels = labels
    def classification(self):
        """Fit each classifier on a stratified split; return accuracy per model."""
        c1 = DecisionTreeClassifier(random_state=0)
        c2 = KNeighborsClassifier(n_neighbors=5) ## try other parameters: 3 -> 41.6666666, 5 -> 45.
        c3 = GaussianNB()
        c4 = SVC(kernel='linear', probability=True)
        #c5 = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
        classifiers = [c1,c2,c3,c4]
        results = []
        # single stratified hold-out split, shared by all classifiers
        X_train, X_test, y_train, y_test = train_test_split(self.features, self.labels, stratify=self.labels, test_size=(1.0 - FRAC_TRAIN), random_state=RANDOM_SEED)
        for classifier in classifiers:
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(X_test)
            score = accuracy_score(y_test, y_pred)
            print(y_test, y_pred)
            print("Score {}".format(score))
            #scores = sk_ms.cross_val_score(i, self.features, self.labels, cv=self.kfold, scoring='accuracy', n_jobs=-1, verbose=0)
            #score = round(scores.mean() * 100, 2)
            #sd = round(scores.std()*100, 2)
            results.append(score)
        return results
    def get_scores(self):
        """Return the accuracy scores as a numpy array."""
        return np.array(self.classification())
| mailaucq/book_classification | classifierv2.py | classifierv2.py | py | 1,771 | python | en | code | 0 | github-code | 36 |
3353061598 | import time
import redis
from django.core.management import BaseCommand
from django.conf import settings
class Command(BaseCommand):
    """Django management command that blocks until Redis answers a ping."""
    def handle(self, *args, **options):
        self.stdout.write('Waiting for Redis...')
        redis_instance = redis.StrictRedis(host=settings.REDIS_HOST,
                                           port=settings.REDIS_PORT, db=0)
        # poll once per second until the server responds
        while True:
            try:
                redis_instance.ping()
                break
            except Exception:
                # typo fix: message previously read "waititng"
                self.stdout.write('Redis unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Redis available!'))
| MykKos/discord_automated_sender | discord_posts/management/commands/check_on_redis.py | check_on_redis.py | py | 671 | python | en | code | 0 | github-code | 36 |
44037875301 | from flask import Blueprint, request, jsonify
from flask_cors import CORS
from storeback.models import db
from storeback.models.admins import Admin
# Blueprint grouping all /api/admin endpoints; CORS enabled for browser clients.
admin_api = Blueprint('admin_api', __name__)
CORS(admin_api)
@admin_api.route('/api/admin', methods=['GET'])
def get_all_admins():
params = request.args
admins = Admin.query.filter_by(**params).all()
return jsonify([admin.to_json() for admin in admins])
@admin_api.route('/api/admin/<int:id>', methods=['GET'])
def get_one_admin(id):
    """Return the admin with the given id as JSON; 404 when absent."""
    admin = Admin.query.filter_by(id=id).first_or_404()
    return jsonify(admin.to_json())
@admin_api.route('/api/admin', methods=['POST'])
def create_one_admin():
    """Create a new admin from a JSON body and return it as JSON.

    Expects firstname, lastname, email and password fields; the password
    is stored hashed, never in plain text.  Returns 400 when the body is
    missing or incomplete.
    """
    if not request.json:
        return 'Please provide a valid json body with your request', 400
    # Validate up front so a missing field yields a 400 instead of an
    # unhandled KeyError (which Flask would surface as a 500).
    required = ('firstname', 'lastname', 'email', 'password')
    missing = [field for field in required if field not in request.json]
    if missing:
        return 'Missing required field(s): ' + ', '.join(missing), 400
    admin = Admin()
    admin.firstname = request.json['firstname']
    admin.lastname = request.json['lastname']
    admin.email = request.json['email']
    admin.password = Admin.generate_hash(request.json['password'])
    db.session.add(admin)
    db.session.commit()
    return jsonify(admin.to_json())
@admin_api.route('/api/admin/<int:id>', methods=['PATCH'])
def patch_one_admin(id):
    """Update fields of an admin from the JSON body; return the new state.

    NOTE(review): the body is applied verbatim — a patched "password"
    would be stored unhashed (unlike create), and unknown keys raise
    inside SQLAlchemy.  Confirm whether field filtering/hashing is needed.
    """
    if not request.json:
        return 'Please provide a valid json body with your request', 400
    Admin.query.filter_by(id=id).update(request.json)
    db.session.commit()
    patched_admin = Admin.query.filter_by(id=id).first_or_404()
    return jsonify(patched_admin.to_json())
@admin_api.route('/api/admin/<int:id>', methods=['DELETE'])
def delete_one_admin(id):
    """Delete the admin with the given id; 404 when absent, 204 on success."""
    admin_to_delete = Admin.query.filter_by(id=id).first_or_404()
    db.session.delete(admin_to_delete)
    db.session.commit()
    # fix: stray extraction junk was fused onto this return line
    return '', 204
23216761350 | #!/usr/bin/env python
# coding: utf-8
"""
module: utilities for bounding box processing, including:
xyxy <-> xywh,
IoU,
crop,
"""
import numpy as np
def xyxy_to_xywh_int(xyxy, dtype=int):
    """
    convert [xmin, ymin, xmax, ymax] -> [x-center, y-center, w, h]
    xy in screen coord => x/y as matrix col/row idx
    (floored to integer grid coordinates)
    """
    xmin, ymin, xmax, ymax = xyxy[0], xyxy[1], xyxy[2], xyxy[3]
    center_x = (xmin + xmax) / 2
    center_y = (ymin + ymax) / 2
    return np.floor([center_x, center_y, xmax - xmin, ymax - ymin]).astype(dtype)
def xyxy_to_xywh_float(xyxy, dtype=float):
    """
    convert [xmin, ymin, xmax, ymax] -> [x-center, y-center, w, h]
    Float variant: keeps fractional precision (no flooring).
    """
    xc, yc = (xyxy[0] + xyxy[2]) / 2, (xyxy[1] + xyxy[3]) / 2  # xmin, ymin, xmax, ymax
    h = xyxy[3] - xyxy[1]
    w = xyxy[2] - xyxy[0]
    # bug fix: the original applied np.floor before casting to float,
    # discarding the sub-pixel precision this variant exists to provide
    # (mirrors xywh_to_xyxy_float, which does not round either)
    return np.array([xc, yc, w, h], dtype=dtype)
xyxy_to_xywh = xyxy_to_xywh_int # alias
def xywh_to_xyxy_int(xywh, dtype=int):
    """
    convert [x-center, y-center, w, h] -> [xmin, ymin, xmax, ymax]
    xywh in screen coord => x-w/y-h as matrix idx-range in col/row axis
    """
    half_w = xywh[2] / 2
    half_h = xywh[3] / 2
    corners = [xywh[0] - half_w, xywh[1] - half_h,
               xywh[0] + half_w, xywh[1] + half_h]
    # ceil compensates for the flooring jitter introduced by xyxy_to_xywh_int
    return np.ceil(corners).astype(dtype)
def xywh_to_xyxy_float(xywh, dtype=float):
    """Float variant of xywh -> xyxy conversion: no rounding applied."""
    half_w, half_h = xywh[2] / 2, xywh[3] / 2
    return np.array([xywh[0] - half_w, xywh[1] - half_h,
                     xywh[0] + half_w, xywh[1] + half_h], dtype=dtype)
xywh_to_xyxy = xywh_to_xyxy_int # alias
def calc_overlap_interval(int1, int2):
    """
    calculate the overlapped length of 2 intervals ([0] for min val, [1] for max val)
    (np.minimum/np.maximum broadcast, so arrays of intervals work too)
    """
    overlap_hi = np.minimum(int1[1], int2[1])
    overlap_lo = np.maximum(int1[0], int2[0])
    return np.maximum(0, overlap_hi - overlap_lo)
def calc_IoU(box1, box2):
    """
    calculate the intersection over union for 2 xyxy bbox
    (may broadcast to 2 box arr)
    NOTE(review): two zero-area boxes give union == 0 -> division by zero.
    """
    int_x = calc_overlap_interval((box1[0], box1[2]), (box2[0], box2[2]))
    int_y = calc_overlap_interval((box1[1], box1[3]), (box2[1], box2[3]))
    intersection = int_x * int_y
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - intersection
    return intersection / union
class Bounding_Box(object):
    """
    bounding box class wrapper
    Stores the box as x/y extents plus width and height.
    """
    def __init__(self, box_def, def_type='xywh'):
        # box_def: [xc, yc, w, h] for 'xywh' or [xmin, ymin, xmax, ymax] for 'xyxy'
        if def_type == 'xywh':
            # NOTE(review): for odd w/h, int(w / 2) truncation makes the stored
            # extent span 2*int(w/2), one short of self.w — confirm intended.
            self.x_var = [box_def[0] - int(box_def[2] / 2), box_def[0] + int(box_def[2] / 2)]
            self.y_var = [box_def[1] - int(box_def[3] / 2), box_def[1] + int(box_def[3] / 2)]
            self.w = box_def[2]
            self.h = box_def[3]
        elif def_type == 'xyxy':
            self.x_var = [box_def[0], box_def[2]]
            self.y_var = [box_def[1], box_def[3]]
            self.w = box_def[2] - box_def[0]
            self.h = box_def[3] - box_def[1]
        else:
            raise TypeError
    def intersection(self, bbox):
        """Return the overlap area with another Bounding_Box."""
        sec_x = calc_overlap_interval(self.x_var, bbox.x_var)
        sec_y = calc_overlap_interval(self.y_var, bbox.y_var)
        return sec_x * sec_y
    def IoU(self, bbox):
        """Return intersection-over-union with another Bounding_Box."""
        i = self.intersection(bbox)
        u = self.w * self.h + bbox.w * bbox.h - i
        return i / u
class Det_Bounding_Box(object):
    """
    bounding class with more granularity (e.g. box over points)
    Accepts either a 3-element list or a dict definition (see __init__).
    """
    def __init__(self, box_def):
        # dispatch on the definition format
        if type(box_def) is list:
            self.__init_from_list(box_def)
        elif type(box_def) is dict:
            self.__init_from_dict(box_def)
        else:
            raise TypeError('not supported box_def type \"%s\" with value: \"%s\"' % (str(type(box_def)), str(box_def)))
    def __init_from_list(self, def_list):
        """Build the box from [[4 xy coords], [elem(s)], prob]."""
        assert len(def_list) == 3 # [[4 xy coords], [elem(s) selected], [prob in onehot]]
        self.xy = def_list[0]
        self.x = [xy[0] for xy in self.xy]
        self.y = [xy[1] for xy in self.xy]
        # axis-aligned extents over the corner points
        self.x_var = (min(self.x), max(self.x))
        self.y_var = (min(self.y), max(self.y))
        # may be larger than the actual size specified by points - due to resolution and other factors
        self.width = self.x_var[1] - self.x_var[0]
        self.height = self.y_var[1] - self.y_var[0]
        self.size = self.width * self.height
        # elem perspectives
        self.elem = set(def_list[1])
        self.elem_size = len(self.elem)
        self.prob = def_list[-1]
        # default setting
        self.valid = True
        self.blockage = 0
    def __init_from_dict(self, box_dict):
        """Build from a dict with xy/width/height/elem/prob (+ extra attrs)."""
        # expand corner points from top-left + width/height
        def_list = [[(box_dict['xy'][0], box_dict['xy'][1]),
                     (box_dict['xy'][0] + box_dict['width'], box_dict['xy'][1]),
                     (box_dict['xy'][0], box_dict['xy'][1] + box_dict['height']),
                     (box_dict['xy'][0] + box_dict['width'], box_dict['xy'][1] + box_dict['height'])],
                    box_dict['elem'],
                    box_dict['prob']]
        self.__init_from_list(def_list)
        # any extra dict entries become plain attributes
        for k, v in box_dict.items():
            if k not in ['xy', 'elem', 'prob', 'width', 'height']:
                setattr(self, k, v)
    def to_polar(self, origin=(0, 0)):
        """
        convert to polar coord given the origin
        (stores per-corner dist/angle plus their ranges as attributes)
        """
        # represent each corner as a complex number relative to the origin
        complex_xy = [xy[0] - origin[0] + 1j * (xy[1] - origin[1]) for xy in self.xy]
        self.dist = [np.abs(c_xy) for c_xy in complex_xy]
        self.angle = [np.angle(c_xy) for c_xy in complex_xy]
        self.dist_var = (min(self.dist), max(self.dist))
        self.angle_var = (min(self.angle), max(self.angle))
    @staticmethod
    def overlap_interval(int_1, int_2):
        """
        calculate the overlaped interval of 2 intervals ([0] for min, [1] for max)
        """
        return max(0, min(int_1[1], int_2[1]) - max(int_1[0], int_2[0]))
    @staticmethod
    def xy_intersection(box1, box2):
        """
        calculate the intersection of 2 bbox using the cartesian coord
        """
        sec_x = Det_Bounding_Box.overlap_interval(box1.x_var, box2.x_var)
        sec_y = Det_Bounding_Box.overlap_interval(box1.y_var, box2.y_var)
        return sec_x * sec_y
@staticmethod
def elem_intersection(box1, box2):
"""
calculate the intersection of elements contained by bboxes
"""
return len(set.intersection(box1.elem, box2.elem)) | LiyaoTang/Research-Lib | Utilities/Bounding_Box.py | Bounding_Box.py | py | 6,277 | python | en | code | 1 | github-code | 36 |
34836391359 | # _*_ coding: utf-8 _*_
import os
import sys
import warnings
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
warnings.filterwarnings("ignore")
import pandas as pd
from COMM import DB_Util
from COMM import Figure_Util
from COMM import TechnicalAnalysis_Util
# Connect to the Wrap operations team DB.
# NOTE(review): credentials are hard-coded here — move to config/env.
# NOTE(review): the driver method really is spelled "connet" — confirm API.
db = DB_Util.DB()
db.connet(host="127.0.0.1", port=3306, database="investing.com", user="root", password="ryumaria")
if 1:
    # Load per-index price data (futures) from the DB.
    price_datas = db.select_query("select a.cd, b.date, b.open, b.close"
                                  " from index_master a, index_price b"
                                  " where a.cd = b.idx_cd")
    price_datas.columns = ['cd', 'date', 'open', 'close']
    # parse the date column so it can be resampled on business days
    price_datas["dateT"] = price_datas['date'].apply(lambda x: pd.to_datetime(str(x), format="%Y-%m-%d"))
    resample_list = price_datas.resample('B', on='dateT', convention='end')
    sampled_price_datas = price_datas.loc[price_datas['dateT'].isin(list(resample_list.indices))]
    # wide tables: one column per index code, forward-filled gaps
    pivoted_price_datas_close = sampled_price_datas.pivot(index='date', columns='cd', values='close').fillna(method='ffill')
    pivoted_price_datas_open = sampled_price_datas.pivot(index='date', columns='cd', values='open').fillna(method='ffill')
    for cd in pivoted_price_datas_close:
        # last 500 close prices of this index
        data = pivoted_price_datas_close[cd][-500:]
        # a pandas Series is used as the analysis input
        analysis = TechnicalAnalysis_Util.BollingerBand(data)
        analysis_datas = analysis.getDatas()
        analysis = TechnicalAnalysis_Util.RSI(data)
        analysis_datas['rsi'] = analysis.getDatas()
        analysis = TechnicalAnalysis_Util.MACD(data)
        analysis_datas['macd'] = analysis.getDatas()
        panel = Figure_Util.Figure()
        panel.draw(analysis_datas, title=cd, subplots=['rsi', 'macd'], figsize=(20,10))
db.disconnect()
| dxcv/InvestmentTestbed | CODE/LOGIC/Test_TechnicalAnalysis.py | Test_TechnicalAnalysis.py | py | 1,913 | python | en | code | 0 | github-code | 36 |
18458121758 | from __future__ import unicode_literals
import datetime
import cairo
import pycha.line
import StringIO
import time
import six
from uds.models import getSqlDatetime
import counters
# Chart types
CHART_TYPE_LINE, CHART_TYPE_AREA, CHART_TYPE_BAR = range(3) # @UndefinedVariable
__typeTitles = None
def make(obj, counterType, **kwargs):
    """Render a PNG line chart of a counter's history; return the PNG bytes.

    Keyword args: width/height (pixels), since/to (datetimes) or interval
    (days back from now), use_max (aggregate with max).
    """
    width, height = (kwargs.get('width', 800), kwargs.get('height', 600))
    since = kwargs.get('since', None)
    to = kwargs.get('to', None)
    if since is None and to is None:
        # fall back to "last N days" when no explicit range is given
        interval = kwargs.get('interval', None)
        if interval is not None:
            to = getSqlDatetime()
            since = to - datetime.timedelta(days=interval)
    limit = width  # at most one data point per horizontal pixel
    dataset1 = tuple((int(time.mktime(x[0].timetuple())), x[1]) for x in counters.getCounters(obj, counterType, since=since, to=to, limit=limit, use_max=kwargs.get('use_max', False)))
    if len(dataset1) == 0:
        # no data: draw a flat zero line over the last hour
        dataset1 = ((getSqlDatetime(True) - 3600, 0), (getSqlDatetime(True), 0))
    firstLast = (dataset1[0][0], getSqlDatetime(True))
    # choose the x-axis label format from the covered timespan
    xLabelFormat = '%y-%m-%d'
    diffInterval = firstLast[1] - firstLast[0]
    if diffInterval <= 60 * 60 * 24:  # Less than one day
        xLabelFormat = '%H:%M'
    elif diffInterval <= 60 * 60 * 24 * 7:
        xLabelFormat = '%A'
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    dataset = ((counters.getCounterTitle(counterType).encode('iso-8859-1', errors='ignore'), dataset1),)
    # bug fix: 'legend' appeared twice in this literal; the first entry was
    # silently overwritten by the later one, so only one is kept
    options = {
        'axis': {
            'x': {
                'ticks': [dict(v=i, label=datetime.datetime.fromtimestamp(i).strftime(xLabelFormat)) for i in firstLast],
                'range': (firstLast[0], firstLast[1])
            },
            'y': {
                'tickCount': 4,
            }
        },
        'background': {
            'chartColor': '#ffeeff',
            'baseColor': '#ffffff',
            'lineColor': '#444444'
        },
        'colorScheme': {
            'name': 'gradient',
            'args': {
                'initialColor': 'red',
            },
        },
        'legend': {
            'hide': True,
        },
        'padding': {
            'left': 0,
            'bottom': 0,
        },
        'title': 'Sample Chart'
    }
    chart = pycha.line.LineChart(surface, options)
    chart.addDataset(dataset)
    chart.render()
    output = StringIO.StringIO()
    surface.write_to_png(output)
    return output.getvalue()
| karthik-arjunan/testuds | server/src/uds/core/util/stats/charts.py | charts.py | py | 2,497 | python | en | code | 1 | github-code | 36 |
369197996 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import math
'''
最优化每个节点的坐标位置,使得相交的线段最少,保证画出的网络图比较稀疏可看性强
'''
people = ['Charlie','Augustus','Veruca','Violet','Mike','Joe','Willy','Miranda']
links=[('Augustus', 'Willy'),
('Mike', 'Joe'),
('Miranda', 'Mike'),
('Violet', 'Augustus'),
('Miranda', 'Willy'),
('Charlie', 'Mike'),
('Veruca', 'Joe'),
('Miranda', 'Augustus'),
('Willy', 'Augustus'),
('Joe', 'Charlie'),
('Veruca', 'Augustus'),
('Miranda', 'Joe')]
def crosscount(v):
    """Count how many pairs of links cross each other for a given layout.

    ``v`` is a flat coordinate vector: person ``people[i]`` sits at
    ``(v[2*i], v[2*i+1])``.  Returns the number of crossing link pairs,
    used as the cost function for the layout optimizer.
    """
    # Map each person to the (x, y) position encoded in the flat vector.
    positions = {person: (v[idx * 2], v[idx * 2 + 1])
                 for idx, person in enumerate(people)}
    crossings = 0
    # Examine every unordered pair of links exactly once.
    for a in range(len(links)):
        pa1 = positions[links[a][0]]
        pa2 = positions[links[a][1]]
        for b in range(a + 1, len(links)):
            pb1 = positions[links[b][0]]
            pb2 = positions[links[b][1]]
            # Denominator of the parametric segment-intersection formula;
            # zero means the two segments are parallel.
            den = (pb2[1] - pb1[1]) * (pa2[0] - pa1[0]) - (pb2[0] - pb1[0]) * (pa2[1] - pa1[1])
            if den == 0:
                continue
            # ua/ub: fractional position of the intersection point along
            # segment a and segment b respectively.
            ua = ((pb2[0] - pb1[0]) * (pa1[1] - pb1[1]) - (pb2[1] - pb1[1]) * (pa1[0] - pb1[0])) / den
            ub = ((pa2[0] - pa1[0]) * (pa1[1] - pb1[1]) - (pa2[1] - pa1[1]) * (pa1[0] - pb1[0])) / den
            # Both fractions strictly inside (0, 1) => the segments cross.
            if 0 < ua < 1 and 0 < ub < 1:
                crossings += 1
    return crossings
| LixinZhang/bookreviews | Programming_Collective_Intelligence/chapter5/socialnetwork.py | socialnetwork.py | py | 1,526 | python | en | code | 10 | github-code | 36 |
def heap_sink(heap, heap_size, parent_index):
    """Sift the element at ``parent_index`` down a max-heap, in place.

    Restores the max-heap property for the subtree rooted at
    ``parent_index``, considering only the first ``heap_size`` slots.
    """
    sinking = heap[parent_index]   # value being moved down
    child = 2 * parent_index + 1   # left child of the current slot
    while child < heap_size:
        # Switch to the right child when it is strictly larger.
        right = child + 1
        if right < heap_size and heap[right] > heap[child]:
            child = right
        # The sinking value already dominates both children: slot found.
        if sinking >= heap[child]:
            break
        # Promote the larger child and keep descending.
        heap[parent_index] = heap[child]
        parent_index = child
        child = 2 * parent_index + 1
    heap[parent_index] = sinking
def heap_sort(mylist):
    """Sort ``mylist`` ascending, in place, using a max-heap."""
    n = len(mylist)
    # 1. Heapify: sink every internal node, starting from the last parent.
    #    BUG FIX: the original wrote ``range(n - 2 // 2, -1, -1)``; by
    #    operator precedence that is ``n - 1``, so the loop started at the
    #    last *leaf* and performed a pointless sink per leaf.  The last
    #    parent lives at index (n - 2) // 2.
    for i in range((n - 2) // 2, -1, -1):
        heap_sink(mylist, n, i)
    # 2. Repeatedly move the max (root) to the end of the list, then
    #    re-sink the new root over the shrinking heap prefix.
    for i in range(n - 1, 0, -1):
        mylist[0], mylist[i] = mylist[i], mylist[0]
        heap_sink(mylist, i, 0)
if __name__ == "__main__":
    # Smoke test: sort a small sample list and print the result.
    mylist = [1, 3, 4, 5, 2, 6, 9, 7]
    heap_sort(mylist)
    print(mylist)
20453951121 |
class Options():
    """Attribute bag for experiment options; attributes can be loaded from
    dicts or argparse namespaces and copied shallowly."""

    def __init__(self):
        # Known options; more may be attached dynamically by the loaders.
        self.iters = None
        self.trials = None

    def copy(self):
        """Return a new Options with every public data attribute copied (shallow)."""
        opt = Options()
        attributes = [attr for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")]
        for attr in attributes:
            value = getattr(self, attr)
            setattr(opt, attr, value)
        return opt

    def load_config(self):
        # Placeholder: config-file loading not implemented yet.
        return

    def load_dict(self, d):
        """Set one attribute per key of ``d``.

        BUG FIX: the original read ``args[key]`` -- an undefined name in
        this scope -- which raised NameError; it must read from ``d``.
        """
        for key in d.keys():
            value = d[key]
            setattr(self, key, value)

    def load_args(self, args):
        """Copy every attribute of an argparse.Namespace onto this object."""
        args = vars(args)
        for key in args:
            value = args[key]
            setattr(self, key, value)
if __name__ == '__main__':
    # Smoke test: build an Options and take an independent copy.
    opt = Options()
    opt2 = opt.copy()
| jon--lee/dfr | options.py | options.py | py | 783 | python | en | code | 0 | github-code | 36 |
class Solution:
    """LeetCode 2293 'Min Max Game'."""

    def minMaxGame(self, nums: List[int]) -> int:
        """Repeatedly halve ``nums``, alternating min/max pairing, down to one value."""
        while len(nums) > 1:
            half = len(nums) // 2
            reduced = []
            for idx in range(half):
                pair = (nums[2 * idx], nums[2 * idx + 1])
                # Even output slots take the pair minimum, odd slots the maximum.
                reduced.append(max(pair) if idx % 2 else min(pair))
            nums = reduced
        return nums[0]
75173901865 |
"""Read a numpy file and output an image."""
import os
import sys

import numpy as np
from PIL import Image
def main(filename):
    """Convert a saved numpy depth array (``.npy``) to an 8-bit grayscale PNG."""
    depth_array = np.load(filename)
    print(depth_array.shape)
    # Values outside 0..255 cannot be represented in 8-bit grayscale.
    if np.max(depth_array) > 255:
        print("Values over 255! There is going to be truncations")
        depth_array = np.clip(depth_array, 0, 255)
    byte_array = depth_array.astype(np.uint8)
    img = Image.fromarray(byte_array, 'L')
    # BUG FIX: ``filename.rstrip('npy')`` strips any trailing run of the
    # characters 'n', 'p', 'y' -- not the extension as a unit -- and only
    # worked by accident because '.' stopped the strip.  Replace the
    # extension explicitly instead.
    outfilename = os.path.splitext(filename)[0] + '.png'
    img.save(outfilename)
    # img.show()
if __name__ == '__main__':
    # Require exactly one argument: the .npy file to convert.
    if len(sys.argv) < 2:
        print("usage: python {} <filename>".format(sys.argv[0]))
        exit()
    main(sys.argv[1])
37369375965 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
"""
iterate csv boxes in /box_4096 and convert them to images
"""
# boxfiles_dir = 'data/box_4096'
# des_dir='data/allsolar_png1500_boximage'
boxfiles_dir = 'data/box_full_4096'
des_dir='data/allsolar_full_png512_boximage'
if not os.path.exists(des_dir):
os.makedirs(des_dir)
"""
read an image to get the shape
"""
# shape_img=cv2.imread("data/allsolar_png512/20120601_0000_full_0.png").shape
shape_img=cv2.imread("data/allsolar_full_png512/20120101_0000_full_0.png").shape
shape=(4096,4096)
allFileNames = os.listdir(boxfiles_dir)
allFileNames=[ filename for filename in allFileNames if filename.endswith( '.csv' ) ]
for boxfile in allFileNames:
boxdf=pd.read_csv(os.path.join(boxfiles_dir,boxfile),header=None)
rows=boxdf.iloc[:,-4:].to_numpy()
image = np.zeros((shape))
for xmin, ymin, xmax, ymax in rows:
try:
xmin, ymin, xmax, ymax=round(xmin),4096-round(ymin),round(xmax),4096-round(ymax)
except:
print("error "+ boxfile)
continue #go to next for loop
num_channels = 1 if len(image.shape) == 2 else image.shape[2]
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color=(256,) * num_channels, thickness=-10)
image=cv2.resize(image, shape_img[0:2])
cv2.imwrite(os.path.join(des_dir,boxfile.split("box")[0]+"mask.png"), image)
| dyu62/solar_share | data/box2img.py | box2img.py | py | 1,426 | python | en | code | 0 | github-code | 36 |
45097764726 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 18:29:51 2020
@author: rosaz
"""
import argparse
import sys
import errno
import os
import json
import numpy as np
from matplotlib import pyplot as plt
from numpy import array
import torch
import jsonschema
from torch.nn import functional as F
def writeJsonNorma(path, media, dev, time):
    """Store normalization stats under the "normalize" key of a JSON file.

    Creates ``path`` with ``{"normalize": {"mean", "dev_std", "time"}}`` if
    it does not exist; otherwise rewrites the "normalize" entry in place.

    BUG FIX: the original update path wrote the keys
    "media"/"dev"/"computeTime", so a second run produced a different
    schema than the first.  Both paths now write the same
    "mean"/"dev_std"/"time" keys (the ones readNorm() expects).
    The large block of commented-out per-key update code was removed.
    """
    entry = {"mean": media, "dev_std": dev, "time": time}
    if not os.path.exists(path):
        with open(path, "w") as outfile:
            json.dump({"normalize": entry}, outfile, indent=2)
    else:
        with open(path, "r") as outfile:
            data = json.load(outfile)
        # Replace (or create) the "normalize" entry and rewrite the file.
        data["normalize"] = entry
        with open(path, "w") as outfile:
            json.dump(data, outfile, indent=2)
def controlFile2():
    """Verify that the three dataset CSV splits exist; exit on any failure."""
    try:
        with open("Dataset/train.csv") as f:
            print("ok")
        with open("Dataset/valid.csv") as f:
            print("ok")
        with open("Dataset/test.csv") as f:
            print("ok")
        # File exists
    except IOError as e:
        # NOTE(review): this prints and exits for *any* IOError, so the
        # errno check below it is unreachable dead code.
        print("fatal error", file=sys.stderr)
        exit()
        # Raise the exception if it is not ENOENT (No such file or directory)
        if e.errno != errno.ENOENT:
            print("fatal error", file=sys.stderr)
            exit(0)
    print("ciao")
def createFolder(path):
    """Create the directory ``path`` (mode 0o777) if missing, printing the outcome.

    Uses EAFP (try the mkdir, handle the failure) instead of the original
    exists()/mkdir() pair, which left a race window between the check and
    the creation.
    """
    access_rights = 0o777
    try:
        os.mkdir(path, access_rights)
    except FileExistsError:
        print("exist the directory %s" % path)
    except OSError:
        print("Creation of the directory %s failed" % path)
def controlFile(path):
    """If ``path``\\normalize.json exists, ask whether to recompute the stats.

    BUG FIX: on an unrecognized answer the original re-prompted by calling
    ``controlFile()`` with no argument, which raised TypeError (the
    function requires ``path``); it now passes ``path`` through.
    """
    try:
        with open(path+"\\normalize.json") as f:
            response = input("Do you want to re-calculate the mean and standard deviation? y | n : ")
            if response == "y":
                print("recalculate")
            elif response == "n":
                print("no")
            else:
                # Unrecognized answer: ask again.
                controlFile(path)
    except IOError as e:
        print("Normalize")
        # Only exit on errors other than "No such file or directory".
        if e.errno != errno.ENOENT:
            print("fatal error", file=sys.stderr)
            exit(0)
def readNorm(path):
    """Load the stored mean/dev_std vectors from ``path``\\normalize.json as tuples."""
    with open(path + '\\normalize.json') as json_file:
        stats = json.load(json_file)
    # Convert the stored lists back into immutable tuples for callers.
    return tuple(stats["mean"]), tuple(stats["dev_std"])
"""""fuzione che aggiunge nuove keys se non esistono, mentre aggiorna valori se le chiavi esistono """
def controlNormalize(path):
    """Check the dataset JSON in ``path`` for stored normalization stats.

    Creates the directory if needed, then inspects
    ``path``\\dataSetJson.json for a "normalize" entry with "mean" and
    "dev_std"; if present, asks whether to recompute them.

    BUG FIX: on an unrecognized answer the original re-prompted by calling
    ``controlNormalize()`` without the required ``path`` argument
    (TypeError); it now passes ``path`` through.
    """
    # Make sure the directory exists before touching files inside it.
    createFolder(path)
    if not os.path.exists(path + '\\dataSetJson.json'):
        print("1) Checking: mean, dev_std")
    else:
        # The file exists: check whether normalize/mean/dev_std are present.
        try:
            with open(path + "\\dataSetJson.json", "r") as json_file:
                data = json.load(json_file)
                print(path + "\\dataSetJson.json")
                if not (data.get('normalize') is None):
                    norm = data['normalize']
                    print(norm)
                    if not (norm.get('mean') and norm.get('dev_std')) is None:
                        response = input("Do you want to re-calculate the mean and standard deviation? y | n : ")
                        if response == "y":
                            print("recalculate")
                        elif response == "n":
                            print("bypass this step!!")
                            media = tuple(norm['mean'])
                            print(media)
                            dev = tuple(norm['dev_std'])
                            print(dev)
                        else:
                            # Unrecognized answer: ask again.
                            controlNormalize(path)
                    else:
                        print("non esiste mean e dev_std, ricalcola")
                else:
                    print("non esiste normalize")
        except:
            # Malformed JSON: the stats must be recomputed.
            print("Il parsing è errato")
def writeJsonAccuracy(path, fileName, entry, accuracy, entryTime, time):
    """Write/update ``entry``->``accuracy`` and ``entryTime``->``time`` in
    the JSON file ``path``\\``fileName``, creating directory and file as needed."""
    #a_cc = {entry: accuracy}
    #timeTrain = {entryTime: time}
    # If the file does not exist, create it and write the object.
    createFolder(path)
    if not os.path.exists(path+"\\"+fileName):
        print("File non esiste")
        # NOTE(review): the parameter ``entry`` is rebound to a dict here,
        # shadowing the original key name.
        entry = {entry: accuracy, entryTime: time}
        with open(path+"\\"+fileName,"w") as outfile:
            json.dump(entry,outfile)
    # Otherwise the file exists:
    # try to parse it.
    else:
        print("Qui")
        try:
            # Read in the JSON document (parsing succeeded if no exception).
            with open(path+"\\"+fileName,"r") as outfile:
                print("qui3")
                datum = json.load(outfile)
            # Update the key's value if it exists.
            # NOTE(review): the file is rewritten up to twice below; the
            # if/else pairs have identical bodies and could be collapsed.
            if not (datum.get(entry) is None):
                print("value is present for given JSON key")
                print(datum.get(entry))
                datum[entry]=accuracy
                with open(path+"\\"+fileName, "w") as outfile:
                    json.dump(datum, outfile)
            else:
                print("Chiave non esiste")
                #entry = {entry: accuracy, entryTime: time}
                datum[entry]=accuracy
                with open(path+"\\"+fileName, "w") as json_outfile:
                    json.dump(datum, json_outfile)
            if not (datum.get(entryTime) is None):
                print("value is present for given JSON key")
                print(datum.get(entryTime))
                datum[entryTime]=time
                with open(path+"\\"+fileName, "w") as json_outfile:
                    json.dump(datum, json_outfile)
            else:
                print("Chiave non esiste")
                datum[entryTime]=time
                with open(path+"\\"+fileName, "w") as json_outfile:
                    json.dump(datum,json_outfile)
        except:
            # Parsing failed: overwrite the file from scratch.
            print("Qui2")
            entry = {entry: accuracy, entryTime: time}
            with open(path+"\\"+fileName, "w") as outfile:
                json.dump(entry,outfile)
def plot(path="Model-1"):
plt.figure()
plt.subplot(121)
plt.ylabel('loss train')
plt.xlabel('num samples')
plt.grid()
plt.plot( [1, 2, 3, 4], [1, 4, 9, 16])
plt.subplot(122)
plt.plot([1, 2, 3, 3,2,4], [1,5,6, 4, 9, 16])
plt.ylabel('loss validation')
plt.xlabel('num samples')
plt.grid()
plt.savefig(path+'\\filename.png', dpi = 600)
plt.show()
def writeJsonAppend(path, num, accuracy):
    """Append an {'acc', 'time'} record to ``path``\\nuovo.json, creating the
    file (as a one-element list) when it does not exist.

    ``num`` is currently unused but kept for interface compatibility.
    """
    record = {'acc': accuracy, 'time': "wdd"}
    target = path + "\\nuovo.json"
    if os.path.isfile(target):
        # Load the existing list and extend it.
        with open(target) as feedsjson:
            records = json.load(feedsjson)
        records.append(record)
    else:
        records = [record]
    with open(target, mode='w') as f:
        f.write(json.dumps(records, indent=2))
def writeJsonUpdate(path, num, accuracy):
    """Create ``path``\\nuovo.json, or update the 'acc' field of its records.

    BUG FIXES: the original (1) wrote through an undefined handle ``f``
    inside the read context (NameError), (2) indexed a non-existent
    "accuracy" key (KeyError), and (3) stored the constant 2 instead of
    the ``accuracy`` argument.  It now updates the 'acc' field of every
    stored record with the given value.  ``num`` is unused but kept for
    interface compatibility.
    """
    entry = {'acc': accuracy, 'time': "wdd"}
    target = path + "\\nuovo.json"
    if not os.path.isfile(target):
        # First call: create the file with a single-record list.
        with open(target, mode='w') as f:
            f.write(json.dumps([entry], indent=2))
    else:
        with open(target) as feedsjson:
            feeds = json.load(feedsjson)
        for record in feeds:
            if "acc" in record:
                record["acc"] = accuracy
        with open(target, mode='w') as f:
            f.write(json.dumps(feeds, indent=2))
def arrayLogic():
    """Demo: binarize an array -- 1 where the value equals 3, else 0 (prints only)."""
    x = np.array([4, 3, 3, 3, 3, 2, 1])
    print(x)
    print(type(x))
    print(len(x))
    # Build the indicator list with a comprehension.
    y = [1 if el == 3 else 0 for el in x]
    print(y)
def distanza():
    """Demo of torch pairwise distance with margin-based similar/dissimilar
    labelling, as in a contrastive loss (prints only, returns nothing).

    A==B (distance 0 -> "similar"); C = 4*A is far from A -> "dissimilar".
    """
    A = torch.Tensor([
    [[1,2,3], [4,5,6], [7,8,9]],
    [[11,12,13], [14,15,16], [17,18,19]],
    [[21,22,23], [24,25,26], [27,28,29]],
    ])
    print(A)
    print(A.size())
    margin = 2
    margin2 = 1
    B = torch.Tensor([
    [[1,2,3], [4,5,6], [7,8,9]],
    [[11,12,13], [14,15,16], [17,18,19]],
    [[21,22,23], [24,25,26], [27,28,29]],
    ])
    C = A*4
    d = F.pairwise_distance(A, B)
    print("di",d)
    print("Margin-di",margin-d)
    tensor = torch.clamp( margin-d, min = 0) # takes the max with 0 -- if it is zero the pair is dissimilar
    print("max m-d",tensor)
    tensorSimil= torch.Tensor([0])
    tensorDissimil= torch.Tensor([1])
    result= torch.where(tensor==0.,tensorDissimil, tensorSimil)
    print("max result Label", result)
    print(result[0][0])
    if(result[0][0]==1.):
        label= 1
        print("Dissimili",label)
    else:
        label = 0
        print("Simil",label)
    # Same check again for the distant pair (A, C).
    di = F.pairwise_distance(A, C)
    print("di",di)
    print("Margin-di",margin-di)
    tensor = torch.clamp( margin-di, min = 0) # takes the max with 0 -- if it is zero the pair is dissimilar
    print("max m-d",tensor)
    tensorSimil= torch.Tensor([0])
    tensorDissimil= torch.Tensor([1])
    result= torch.where(tensor==0.,tensorDissimil, tensorSimil)
    print("max result Label", result)
    print(result[0][0])
    if(result[0][0]==1.):
        label= 1
        print("Dissimili",label)
    else:
        label = 0
        print("Simil",label)
    #matrix = tensor.numpy()
    #print("Matrix",matrix.ravel(), type(matrix))
    #list(matrix)
    #print(np.all([n<=margin for n in tensor]))
    """
    if(tensor <= margin):
        print("Simili A e B")
    else:
        print("Dissimili A e B")
    """
def readFileDataset(path, entry):
    """Return the value of ``entry`` from the dataset JSON at ``path``.

    Exits the process if the file is missing; returns None when the key
    is absent.
    """
    if not os.path.exists(path):
        print("Dataset is not present, try --create dataset", file=sys.stderr)
        exit(0)
    with open(path, "r") as outfile:
        data = json.load(outfile)
    if data.get(entry) is not None:
        return data[entry]
def lengthDataset(path, entry, key):
    """Sum the ``key`` field over all objects listed under ``entry`` in the
    dataset JSON at ``path``.

    Exits if the file is missing; objects without ``key`` contribute
    nothing; returns None when ``entry`` itself is absent.
    """
    if not os.path.exists(path):
        print("Dataset is not present, try --create dataset", file=sys.stderr)
        exit(0)
    with open(path, "r") as outfile:
        data = json.load(outfile)
    if data.get(entry) is None:
        return None
    return sum(obj[key] for obj in data[entry] if obj.get(key) is not None)
def print_to_stderr(*a):
    """Print all positional arguments to stderr instead of stdout."""
    print(*a, file=sys.stderr)
## TODO: add function (per-model JSON result helpers below)
def addJsonModel(directory,version, acc ,f1score, precision, recall, time):
    """Record the test accuracy for ``version`` inside provaJson.json.

    NOTE(review): ``directory`` and every metric except ``acc`` are unused,
    the path is hard-coded, and ``obj.get("accuracyTets")`` below looks like
    a typo for "accuracyTest" -- as written both branches of that ``if`` do
    the same thing.  Confirm intent before relying on this helper.
    """
    path = "provaJson.json"
    if not os.path.exists(path):
        print("File %s not is exists" % path)
        sys.stderr.write("File %s not is exists" % path)
        exit(0)
    else:
        # Read the file.
        with open(path, "r") as outfile:
            data = json.load(outfile)
            if not (data.get(version) is None): # if the version already exists, update its fields
                print(data.get(version))
                versione = data[version]
                if not (versione.get("accuracy") is None): # if the accuracy key exists, update its fields
                    obj = versione["accuracy"]
                    if not (obj.get("accuracyTets") is None):
                        obj["accuracyTest"]=acc
                        with open(path, "w") as outfile:
                            json.dump(data,outfile,indent=2)
                    else:
                        obj["accuracyTest"]=acc
                        with open(path, "w") as outfile:
                            json.dump(data,outfile,indent=2)
                else:
                    print("non esiste")
                    value={"accuracyTest":acc}
                    versione["accuracy"]=value
                    with open(path, "w") as outfile:
                        json.dump(data,outfile,indent=2)
#accuracy, accuracyTest ,joj
def addValueJsonModel(path, num, key, entry, value):
    """Set ``data[num][key][entry] = value`` in the JSON file at ``path``,
    creating the intermediate objects as needed; exits if the file is missing.

    BUG FIXES: the original (1) shadowed the ``path`` parameter with the
    hard-coded "provaJson.json", making the parameter dead, and (2) stored
    the new sub-object under ``versione[entry]`` instead of
    ``versione[key]`` when ``key`` was absent, producing e.g.
    {"accuracyTrain": {"accuracyTrain": ...}}.
    """
    if not os.path.exists(path):
        print("File %s not is exists" % path)
        sys.stderr.write("File %s not is exists" % path)
        exit(0)
    with open(path, "r") as outfile:
        data = json.load(outfile)
    if data.get(num) is None:
        # Unknown model number: create the whole nested structure.
        data[num] = {key: {entry: value}}
    else:
        versione = data[num]
        if versione.get(key) is None:
            versione[key] = {entry: value}
        else:
            versione[key][entry] = value
    with open(path, "w") as outfile:
        json.dump(data, outfile, indent=2)
def writeJson(model, num, media, dev, time):
    """Create directory ``model`` (if needed) and write normalize.json inside it.

    Exits the process when the directory cannot be created.
    """
    path = model
    access_rights = 0o777
    try:
        if not os.path.exists(path):
            os.mkdir(path, access_rights)
            print("Successfully created the directory %s" % path)
        else:
            print("Directory exist")
    except OSError:
        print("Creation of the directory %s failed" % path)
        exit(0)
    payload = {"model": num, "mean": media, "dev_std": dev, "time": time}
    with open(path + "\\normalize.json", "w") as outfile:
        json.dump(payload, outfile)
# --- Ad-hoc manual test drivers --------------------------------------------
# Earlier experiments (now removed) exercised writeJson / controlFile /
# readNorm / plot, writeJsonAccuracy, writeJsonNorma, readFileDataset and
# lengthDataset, distanza / arrayLogic, an argparse-based CLI, and
# addJsonModel / addValueJsonModel.  Only the module-level assignments and
# the final controlNormalize call below are still active.

entry = "acc"
entryTime = "nuvissima"
time = "3haf"
accuracy = 125
path = "Model-1"

media = [2, 4, 3, 4]
dev = [3, 4, 5, 4]
time = "23sec"

acc = "uffa"
f1score = "score111"
precision = "perfect"
recall = "recallll"
time = "142sec"

key = "accuracy"
entry = "accuracyTrain"
value = "ValoreAcc"

key = "time"
entry = "timeTrain"
value = "ValoreTime"

# Running this module checks/creates the "Dataset" directory and inspects
# its dataSetJson.json for stored normalization stats.
controlNormalize("Dataset")
6171283144 | import matplotlib.pyplot as plt
import numpy as np
from numpy import *
from mpl_toolkits import mplot3d
import random
# Presets
# Shared module-level 3-D axes that all drawing helpers below render into.
ax = plt.axes(projection='3d')
def randomcolor():
    """Return a random hex colour string such as '#A3F19B'.

    Note: '0' is absent from the digit pool, so a channel byte never
    contains a zero digit.
    """
    colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
    # Same randint-based draws as before, so seeded RNG streams match.
    return "#" + "".join(colorArr[random.randint(0, 14)] for _ in range(6))
class Group(object):
    """A collection of 3-D column vectors drawn on the shared module axes ``ax``."""
    def __init__(self):
        # 3x1 numpy column vectors registered so far.
        self.vectors = []
    def register(self, *v: ndarray):
        """Append each given column vector to the group."""
        for i in v:
            self.vectors.append(i)
    def display(self):
        """Draw every registered vector as an arrow plus a ball marker,
        all in one freshly chosen random colour."""
        temp = []
        color_p = randomcolor()
        for i in self.vectors:
            # Flatten the column vector into [x, y, z] components.
            for j in range(len(i)):
                temp.append(i[j])
            ax.quiver(0, 0, 0, temp[0], temp[1], temp[2], arrow_length_ratio=0.1, color=color_p)
            ax.scatter3D(temp[0], temp[1], temp[2], color=color_p, s=2500)
            temp = []
    def rotate(self, theta: float):
        """Rotate every vector by ``theta`` radians about the z axis, in place."""
        # Standard z-axis rotation matrix.
        rotation_matrix = array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
        for i in range(len(self.vectors)):
            self.vectors[i] = dot(rotation_matrix, self.vectors[i])
def initial():
    """Draw the three coordinate axes, fix the limits/aspect, and show the figure."""
    ticks = np.arange(-6, 6, 0.1)
    zeros = np.zeros(120)
    # One black line per coordinate axis.
    ax.plot(ticks, zeros, zeros, color='#000000')
    ax.plot(zeros, ticks, zeros, color='#000000')
    ax.plot(zeros, zeros, ticks, color='#000000')
    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_zlim(-2, 2)
    plt.gca().set_box_aspect((1, 1, 1))
    plt.show()
def main():
    """Demo: draw four tetrahedron vertices, rotate them by pi/6, draw again."""
    vertices = [
        array([[1], [1], [1]]),
        array([[-1], [-1], [1]]),
        array([[1], [-1], [-1]]),
        array([[-1], [1], [-1]]),
    ]
    G1 = Group()
    G1.register(*vertices)
    G1.display()
    G1.rotate(pi / 6)
    G1.display()
    initial()
if __name__ == '__main__':
    # Render the demo scene when run as a script.
    main()
| RS-gty/GTY_Chemistry | Group.py | Group.py | py | 1,802 | python | en | code | 0 | github-code | 36 |
14431724379 | #from ast import If
#from pprint import pp
#from typing import final
from doctest import master
from multiprocessing.reduction import duplicate
import re
import string
from struct import pack
from unittest import result
from tokens import tokens
from tkinter import *
from tkinter import ttk
#from Interfaz import datos
# Module-level accumulators shared by the analyzer methods below.
# NOTE(review): funcAuxiliar() appends to these without ever clearing them,
# so results leak across calls -- confirm before reuse.
resultReservadas = []
resultCaracteresEspeciales = []
resultDelimitadores = []
resultIndefinidas = []
resultErrores = []
resultDigitos = []
listResultados = []
class analizador:
    """Lexical analyzer: classifies a token list into reserved words,
    special characters, delimiters, identifiers, digits and errors."""
    # NOTE(review): inside the methods the bare name ``tokens`` resolves to
    # the *module-level* import, not to this instance attribute.
    tokens = tokens()
    def inicio_analizador(self, palabras):
        """Classify ``palabras`` (mutated in place!) and return the global
        ``listResultados`` extended with one list per token category."""
        resultReservadas = []
        resultCaracteresEspeciales = []
        resultDelimitadores = []
        resultDigitos = []
        resultIndefinidas = []
        resultErrores = []
        print("--- Lexico ---")
        # NOTE(review): removing from ``palabras`` while iterating over it
        # can skip elements, so duplicate tokens may be missed.
        for i in tokens.reservadas:
            for j in palabras:
                if (i == j):
                    resultReservadas.append(i)
                    palabras.remove(i)
        for l in tokens.caracteres_especiales:
            for k in palabras:
                if (l == k):
                    resultCaracteresEspeciales.append(k)
                    palabras.remove(l)
        for t in tokens.delimitadores:
            for f in palabras:
                if (t == f):
                    resultDelimitadores.append(t)
                    palabras.remove(t)
        # Whatever remains is an identifier, a number, or an error.
        for g in range (len(palabras)):
            #dato = re.search("^[A-Za-z]+$*", palabras[g])
            dato = re.search("^[a-zA-Z][a-zA-Z]+$", palabras[g])
            if dato:
                resultIndefinidas.append(palabras[g])
            else:
                dato1 = re.search("^[0-9]+$", palabras[g])
                if dato1:
                    resultDigitos.append(palabras[g])
                else:
                    resultErrores.append(palabras[g])
        print("Token Reservadas: ",resultReservadas)
        print("Token Caracteres Especiales: ",resultCaracteresEspeciales)
        print("Token Delimitadores: ",resultDelimitadores)
        print("Token Indefinidas: ",resultIndefinidas)
        print("Token Digitos: ",resultDigitos)
        print("Errores: ",resultErrores)
        # NOTE(review): this appends to the *module-level* listResultados,
        # so repeated calls keep accumulating six lists per call.
        listResultados.append(resultReservadas)
        listResultados.append(resultCaracteresEspeciales)
        listResultados.append(resultDelimitadores)
        listResultados.append(resultIndefinidas)
        listResultados.append(resultDigitos)
        listResultados.append(resultErrores)
        return listResultados
    def funcAuxiliar(self, palabras):
        """Check whether ``palabras`` contains exactly the token counts of the
        expected demo program; if so strip them from the list (prints
        CUMPLE / NO CUMPLE)."""
        # Collect the terminals into the module-level result lists.
        for i in tokens.reservadas:
            for j in palabras:
                if (i == j):
                    resultReservadas.append(i)
        for l in tokens.caracteres_especiales:
            for k in palabras:
                if (l == k):
                    resultCaracteresEspeciales.append(k)
        for t in tokens.delimitadores:
            for f in palabras:
                if (t == f):
                    resultDelimitadores.append(t)
        # Count how many of each expected token were found.
        c = 0
        s = 0
        p = 0
        d = 0
        cs = 0
        i = 0
        pa = 0
        pc = 0
        dp = 0
        up = 0
        for cantidadReservadas in resultReservadas:
            if cantidadReservadas == "class":
                print("encontro un class")
                c += 1
            if cantidadReservadas == "self":
                print("encontro un self")
                s += 1
            if cantidadReservadas == "print":
                print("encontro un print")
                p += 1
            if cantidadReservadas == "def":
                print("encontro un def")
                d += 1
        for cantidadCaracteres in resultCaracteresEspeciales:
            if cantidadCaracteres == "'":
                print("encontro un ' ")
                cs += 1
        for cantidadDelimitadores in resultDelimitadores:
            if cantidadDelimitadores == "=":
                print("encontro un = ")
                i += 1
            if cantidadDelimitadores == "(":
                print("encontro un ( ")
                pa += 1
            if cantidadDelimitadores == ")":
                print("encontro un ) ")
                pc += 1
            if cantidadDelimitadores == ":":
                print("encontro un : ")
                dp += 1
            if cantidadDelimitadores == ".":
                print("encontro un . ")
                up += 1
        # Exact token counts expected for the demo program shape.
        if c == 1 and s == 1 and p == 1 and d == 1 and cs == 2 and i == 2 and pa == 5 and pc == 5 and dp == 1 and up == 1:
            print("CUMPLE")
            palabras.remove("class")
            palabras.remove("self")
            palabras.remove("def")
            palabras.remove("print")
            palabras.remove("'")
            palabras.remove("'")
            palabras.remove("=")
            palabras.remove("=")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(":")
            palabras.remove(".")
            print(palabras)
        else:
            print("NO CUMPLE")
        #print("Existentes REPETIDOS:", existentes)
        #print("Reservadas: ",resultReservadas)
        #print("Caracteres especiales: ",resultCaracteresEspeciales)
        #print("Delimitadores: ",resultDelimitadores)
        #print(palabras)
| AngelHernandez20/191180-191280 | analizadorlexico.py | analizadorlexico.py | py | 5,673 | python | es | code | 0 | github-code | 36 |
40393533142 | #!/usr/bin/env python
import argparse
import random
import numpy as np
import pandas as pd
from pathlib import Path
import torch
from torch import optim
from torch import nn
from torch import cuda
import torchvision
# Project-local modules: augmentation, model factory, training loop, plotting.
from uniform_augment import ImageTransform
from model import load_model
from train import train_model
from utils import visualize_logs
# Fix all RNG seeds for reproducible runs.
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
parser = argparse.ArgumentParser()
parser.add_argument('base_dir', type=str)
parser.add_argument('model', type=str)
parser.add_argument('--num_epochs', default=100, type=int)
parser.add_argument('--early_stopping', action='store_true')
args = parser.parse_args()
# Loading and normalizing CIFAR10
# NOTE(review): 224 and the mean/std below are the ImageNet preprocessing
# constants, presumably because a pretrained backbone is used -- confirm.
size = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
transform_train = ImageTransform(size, mean, std, train=True)
transform_test = ImageTransform(size, mean, std, train=False)
dataset_train = torchvision.datasets.CIFAR10(root=args.base_dir, train=True, download=True, transform=transform_train)
dataset_test = torchvision.datasets.CIFAR10(root=args.base_dir, train=False, download=True, transform=transform_test)
# Setting parameters
LEARNING_RATE = 1e-3
BATCH_SIZE = 32
device = 'cuda' if cuda.is_available() else 'cpu'
print(f'device: {device}')
# Loading a pretrained model (10 output classes for CIFAR10)
net = load_model(args.model, 10)
# Defining a loss function
criterion = nn.CrossEntropyLoss()
# Defining an optimizer
optimizer = optim.SGD(net.parameters(), lr=LEARNING_RATE, momentum=0.9)
# Training the network
# cudnn autotuner: picks the fastest convolution algorithms for fixed shapes.
torch.backends.cudnn.benchmark = True
print(f'model: {args.model}')
log = train_model(args.model,
                  dataset_train,
                  dataset_test,
                  BATCH_SIZE,
                  net,
                  criterion,
                  optimizer,
                  args.num_epochs,
                  args.base_dir,
                  device=device,
                  early_stopping=args.early_stopping)
# Visualizing logs
visualize_logs(log, Path(args.base_dir, f'train_log_{args.model}.png'))
| yamaru12345/UniformAugment | main.py | main.py | py | 2,045 | python | en | code | 0 | github-code | 36 |
70441922343 | import sys
# Faster line reads; shadows the builtin input() for this whole module.
input = sys.stdin.readline
class Solution:
    """Baekjoon 2579 (stair climbing): read the input, compute and print
    the maximum reachable score."""

    def __init__(self) -> None:
        # Read the number of steps, then one score per step (1-indexed).
        numSteps = int(input())
        points = [0] * (numSteps + 1)
        for step in range(1, numSteps + 1):
            points[step] = int(input())
        self.maxScore(numSteps, points)

    def maxScore(self, numSteps: int, points: list) -> None:
        """Print the best total score; ``points`` is 1-indexed per step.

        Rule encoded in the recurrence: you may not step on three
        consecutive stairs, and you must stand on the last stair.
        """
        memo = {}

        def best(n: int) -> int:
            # best(n): maximum accumulated score upon standing on step n.
            if n in memo:
                return memo[n]
            if n == 1:
                result = points[1]
            elif n == 2:
                result = points[1] + points[2]
            elif n == 3:
                result = max(points[1], points[2]) + points[3]
            else:
                # Arrive either from step n-2, or from n-3 via n-1.
                result = max(best(n - 3) + points[n - 1], best(n - 2)) + points[n]
            memo[n] = result
            return result

        print(best(numSteps))
# Instantiating runs the whole program: read stdin, solve, print the answer.
Solution()
71514879785 | import sys
import time
import numpy as np
import torch
import torch.nn as nn
class RewardTracker:
    """Accumulates episode rewards/steps, logs progress to a TensorBoard
    writer, and reports when the moving-average reward passes a threshold.

    Episodes are grouped: every ``group_rewards`` episodes are averaged
    before being recorded as one data point.
    """
    def __init__(self, writer, stop_reward, group_rewards=1):
        self.writer = writer
        self.stop_reward = stop_reward
        self.reward_buf = []
        self.steps_buf = []
        self.group_rewards = group_rewards
    def __enter__(self):
        # Timestamps used to compute the frames/second between calls.
        self.ts = time.time()
        self.ts_frame = 0
        self.total_rewards = []
        self.total_steps = []
        return self
    def __exit__(self, *args):
        self.writer.close()
    def reward(self, reward_steps, frame, epsilon=None):
        """Record one episode's (reward, steps); returns True once the
        100-group mean reward exceeds ``stop_reward``, False otherwise
        (including while a group is still filling up)."""
        reward, steps = reward_steps
        self.reward_buf.append(reward)
        self.steps_buf.append(steps)
        # Wait until a full group has been collected.
        if len(self.reward_buf) < self.group_rewards:
            return False
        reward = np.mean(self.reward_buf)
        steps = np.mean(self.steps_buf)
        self.reward_buf.clear()
        self.steps_buf.clear()
        self.total_rewards.append(reward)
        self.total_steps.append(steps)
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        self.ts_frame = frame
        self.ts = time.time()
        # Moving averages over the last 100 recorded groups.
        mean_reward = np.mean(self.total_rewards[-100:])
        mean_steps = np.mean(self.total_steps[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, mean steps %.2f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards)*self.group_rewards, mean_reward, mean_steps, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        self.writer.add_scalar("steps_100", mean_steps, frame)
        self.writer.add_scalar("steps", steps, frame)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False
def calc_values_of_states(states, net, device="cpu"):
    """Mean best-action value that ``net`` assigns over a fixed set of states.

    ``states`` is split into 64 chunks; for each chunk the network predicts
    per-action values, the best action value per state is taken, and the
    chunk means are averaged.  Since the network's weights change during
    training, repeated calls on the same states track learning progress --
    the numbers are the model's *estimates*, not realized returns.
    """
    chunk_means = []
    for chunk in np.array_split(states, 64):
        batch_v = torch.tensor(chunk).to(device)
        # Per-state value of the greedy action under the current network.
        best_values_v = net(batch_v).max(1)[0]
        chunk_means.append(best_values_v.mean().item())
    return np.mean(chunk_means)
def unpack_batch(batch):
    """Split a batch of experience tuples into five numpy arrays.

    Each experience exposes .state, .action, .reward and .last_state
    (None when the episode ended).  For terminal transitions the current
    state is stored in ``last_states`` as a placeholder; callers mask
    those entries out via the returned ``dones`` flags.

    FIX: the original used ``np.array(..., copy=False)``, which under
    NumPy 2.0 raises ValueError whenever a copy is actually required
    (e.g. converting a list).  ``np.asarray`` keeps the old
    copy-only-if-needed semantics.
    """
    states, actions, rewards, dones, last_states = [], [], [], [], []
    for exp in batch:
        state = np.asarray(exp.state)
        states.append(state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        dones.append(exp.last_state is None)
        if exp.last_state is None:
            last_states.append(state)  # placeholder; masked out via dones
        else:
            last_states.append(np.asarray(exp.last_state))
    return np.asarray(states), np.array(actions), np.array(rewards, dtype=np.float32), \
           np.array(dones, dtype=np.uint8), np.asarray(last_states)
def calc_loss(batch, net, tgt_net, gamma, device="cpu"):
    """MSE loss for Double DQN: the online net picks the next action, the
    target net values it."""
    states, actions, rewards, dones, next_states = unpack_batch(batch)
    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.tensor(dones, dtype=torch.bool).to(device)
    # Q(s, a) for the actions actually taken.
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Double DQN: action chosen by the online net, evaluated by the target net.
    next_state_actions = net(next_states_v).max(1)[1]
    next_state_values = tgt_net(next_states_v).gather(1, next_state_actions.unsqueeze(-1)).squeeze(-1)
    # Terminal transitions contribute no future value.
    next_state_values[done_mask] = 0.0
    # detach(): no gradients flow into the bootstrap target.
    expected_state_action_values = next_state_values.detach() * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
| a046829713/DQNStockSysteam | lib/common.py | common.py | py | 5,518 | python | en | code | 0 | github-code | 36 |
74473073385 | import urllib.request, urllib.parse
import bs4 as BeautifulSoup
# Interactive broken-link checker: fetch a page, extract its anchors, and
# try to open every linked URL.
base = input("Enter the URL: ")
try:
    page = urllib.request.urlopen(base)
except:
    print("Cannot open %s" % base)
    quit()

# Parse the page and collect (link text, href) pairs.
soup = BeautifulSoup.BeautifulSoup(page)
links = [(link.string, link['href']) for link in soup.find_all("a") if link.has_attr("href")]

# In-page links are relative, so resolve each one against the base URL
# (e.g. base=http://example.com, url=/about -> http://example.com/about)
# before attempting to open it.
broken = False
for name, url in links:
    dest = urllib.parse.urljoin(base, url)
    print(dest)
    try:
        page = urllib.request.urlopen(dest)
        page.close()
    except:
        print("Link \"%s\" to \"%s\" is probably broken." % (name, dest))
        broken = True

# Good news if nothing failed.
if not broken:
    print("Page %s does not seem to have broken links. " % base)
5941584041 | import turtle as t
class Rectangle(t.RawTurtle):
    """A turtle that draws itself as a ``width`` x ``height`` rectangle."""

    def __init__(self, screen=None, width=0, height=0):
        # BUG FIX: the original default was ``screen=t.Screen()``, which is
        # evaluated once at class-definition time — merely importing the
        # module opened a window, and the same default object was reused.
        # Creating the screen lazily preserves the old default behaviour
        # without the import-time side effect.
        if screen is None:
            screen = t.Screen()
        super().__init__(screen)
        self.screen = screen
        self.width = width
        self.height = height

    def draw(self):
        """Draw the rectangle from the current position and heading."""
        for _ in range(2):
            self.fd(self.height)
            self.right(90)
            self.fd(self.width)
            self.right(90)

    def move_forward(self, unit):
        """Erase, slide forward ``unit`` pixels without drawing, redraw."""
        self.clear()
        self.penup()
        self.fd(unit)
        self.pendown()
        self.draw()

    def move_backward(self, unit):
        """Erase, slide backward ``unit`` pixels without drawing, redraw."""
        self.clear()
        self.penup()
        self.bk(unit)
        self.pendown()
        self.draw()

    def turn_right(self, angle=90):
        """Rotate clockwise by ``angle`` degrees and redraw."""
        self.right(angle)
        self.clear()
        self.draw()

    def turn_left(self, angle=90):
        """Rotate counter-clockwise by ``angle`` degrees and redraw."""
        self.left(angle)
        self.clear()
        self.draw()
| musaaj/bot | rectangle.py | rectangle.py | py | 888 | python | en | code | 0 | github-code | 36 |
9929931369 | from models.base import (
db,
TableOperateMixin,
GenColumn)
class DemoModel(db.Model, TableOperateMixin):
    """ORM mapping for the ``tb_demo`` table; reference DDL below.

    CREATE TABLE `tb_demo` (
    `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
    `name` varchar(100) NOT NULL DEFAULT '' COMMENT '名称',
    `tag` varchar(100) NOT NULL DEFAULT '' COMMENT '标签',
    `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
    `last_update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
    PRIMARY KEY (`id`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='demo';
    """
    __tablename__ = "tb_demo"
    # Columns: id / create_time / last_update_time presumably come from the
    # base model or mixin — not visible here, confirm in models.base.
    name = GenColumn(db.String(100), name="name", default="", comment="名称")
    tag = GenColumn(db.String(100), name="tag", default="", comment="标签")
    def __init__(self, **kwargs):
        # Accepts extra kwargs silently; only name/tag are consumed.
        self.name = kwargs.get("name")
        self.tag = kwargs.get("tag")
        super().__init__()
| spxinjie6/sql-crud | models/tb_demo.py | tb_demo.py | py | 943 | python | en | code | 1 | github-code | 36 |
74267279783 | import requests
import time
TIMEOUT: int = 1
ATTEMPTS: int = 3
def _get_base_headers() -> dict:
"""Get base header for request."""
return {"Content-Type": "application/json"}
def _get(url: str, headers: dict, params: dict) -> requests.Response:
    """Send a GET request, retrying up to ATTEMPTS times on timeout.

    Each timed-out attempt sleeps 0.5 s before retrying; if every
    attempt times out, requests.exceptions.Timeout is raised.
    """
    for _ in range(ATTEMPTS):
        try:
            response: requests.Response = requests.get(url, headers=headers, params=params, timeout=TIMEOUT)
            return response
        except requests.exceptions.Timeout:
            time.sleep(0.5)
    # All attempts exhausted.
    raise requests.exceptions.Timeout
def get(url: str, **kwargs) -> dict:
    """Send a GET request and return the decoded JSON response.

    Keyword args:
        headers: extra headers merged over the base headers.
        params: query-string parameters.

    Raises:
        requests.exceptions.Timeout: if every retry attempt times out.
        requests.exceptions.HTTPError: on a non-2xx status code.
    """
    headers: dict = _get_base_headers()
    headers.update(kwargs.get("headers", {}))
    params: dict = kwargs.get("params", {})
    # BUG FIX: route through _get so the module's retry policy (ATTEMPTS
    # with a short back-off) is actually applied; previously the helper
    # was defined but unused and a single timeout failed immediately.
    response = _get(url, headers, params)
    response.raise_for_status()
    return response.json()
| gordienko-dmitry/job_analyzer | api/server.py | server.py | py | 961 | python | en | code | 1 | github-code | 36 |
20026451101 | from hashlib import sha1
import time
import json
from pulsar import get_actor, Future
from pulsar.apps.wsgi import WSGIServer, WsgiResponse, WsgiHandler
import aio_etcd as etcd
from jirachi.io.abstract import JirachiMonitor, JirachiMonitorNotFound
from pulsar.apps.wsgi import Router
__all__ = ['RemoteMonitorWSGI']
blueprint = Router('/')
@blueprint.router('/remote/<string:monitor>/event/<string:event>/', methods=['post'])
async def remote_wsgi(request):
    """Fire a named event on a named monitor from a remote POST.

    Routes the event to the resolved monitor when it exists, otherwise
    falls back to the current actor's monitor; raises
    JirachiMonitorNotFound when neither route applies.  Returns a 200
    JSON body containing a time-derived token.
    """
    hash_code = sha1(str(time.time()).encode()).hexdigest()
    monitor_name = request.urlargs['monitor']
    event_name = request.urlargs['event']
    data = request.body_data
    actor = get_actor()
    monitor = actor.get_actor(monitor_name)
    if monitor:
        monitor.fire_event(event_name, data)
    elif not (actor.is_arbiter() or actor.is_monitor() and actor.monitor == monitor):
        # NOTE(review): relies on `and` binding tighter than `or`;
        # presumably intentional — confirm against pulsar semantics.
        actor.monitor.fire_event(event_name, msg=data)
    else:
        # BUG FIX: `monitor` is always falsy on this branch, so the old
        # message interpolated None; report the requested name instead.
        raise JirachiMonitorNotFound('Cant found Monitor %s' % monitor_name)
    return WsgiResponse(200, json.dumps({
        'successed': True,
        'token': hash_code
    }))
class RemoteMonitorWSGI(WSGIServer, JirachiMonitor):
    """WSGI server application that exposes monitor events over HTTP."""
    name = 'remote_monitor'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Serve the module-level blueprint router as the WSGI callable.
        self.cfg.callable = WsgiHandler((blueprint, ))
        if not hasattr(self.cfg, 'blacklist'):
            self.cfg.blacklist = []
    @staticmethod
    async def singlecast(msg, actor_name):
        # Resolve the target actor by name, falling back through monitors.
        actor = get_actor()
        if not actor.name == actor_name:
            actor = actor.get_actor(actor_name)
        # NOTE(review): if `actor` is None here, the attribute access below
        # raises AttributeError before the fallback can apply — confirm
        # whether the condition was meant to guard the original actor.
        if not actor and actor.monitor.name == actor.name:
            actor = actor.monitor
        actor.fire_event('singlecast', msg)
        actor.future = Future()
        return actor.future
    @staticmethod
    def event_test(msg):
        # Simple diagnostic handler bound to the 'test' event.
        print('test event %s' % msg)
    async def monitor_start(self, monitor, exec=None):
        # NOTE(review): parameter name `exec` shadows the builtin.
        monitor.bind_event('test', self.event_test)
        # Connect to etcd with optional configuration from self.cfg.
        if not hasattr(self.cfg, 'etcdconf'):
            monitor.etcd = etcd.Client()
        else:
            monitor.etcd = etcd.Client(**self.cfg.etcdconf)
        await super().monitor_start(monitor)
    async def worker_start(self, worker, *args, **kwargs):
        worker.bind_event('test', self.event_test)
    async def search_remote(self):
        # Placeholder — not yet implemented.
        pass
    async def sync_remote(self):
        # Placeholder — not yet implemented.
        pass
| RyanKung/jirachi | jirachi/io/remote/monitor.py | monitor.py | py | 2,379 | python | en | code | 3 | github-code | 36 |
70123922664 | #! /usr/bin/env python
from sortrobot.mech import Robot
from sortrobot.webcam import Camera
from sortrobot.neural import Classifier, OrientationClassifier
from sortrobot.utils import random_filename
import numpy as np
from PIL import Image
import sys, random, os
from optparse import OptionParser
# Command-line options: output directory and which neural classifier to use.
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir", default='/home/pi/scans',
    help="Directory to write sorted scans.")
parser.add_option("-c", "--classifier", dest="classifier", default='orient',
    help="Classifier from sortrobot.neural to use.")
opts, args = parser.parse_args(sys.argv[1:])
directory = opts.outdir
assert os.path.exists(directory)
# Instantiate the selected classifier (KeyError on an unknown -c value).
classifier = {
    'orient': OrientationClassifier,
    'color': Classifier,
}[opts.classifier]()
#DEFAULT_ORDER = 'black,blue,green mana back red,white,other'
DEFAULT_ORDER = 'top_front top_back bot_back bot_front'
# Any remaining positional args form the initial sort order.
order = ' '.join(args)
sr = Robot()
cam = Camera()
MAXITER = 500
while True:
    # An order is four whitespace-separated slot specs; prompt until valid.
    if len(order.split()) != 4:
        order = input('Enter a valid order [DEFAULT %s]: ' % DEFAULT_ORDER)
        order = order.strip()
        if len(order) == 0:
            order = DEFAULT_ORDER
        if input('Confirm order "%s" [Y/n]? ' % order).strip().lower() == 'n':
            order = ''
            continue
    print('Using order:', order)
    DEFAULT_ORDER = order # save for next time
    # Map each comma-separated label to its slot index (0-3).
    POSITIONS = {}
    for pos,arg in enumerate(order.split()):
        for label in arg.split(','):
            POSITIONS[label] = pos
def go(pos):
if type(pos) is str:
try:
pos = POSITIONS[label]
except(KeyError):
print(' label %s has no position! Choosing 0.' % label)
pos = 0
sr.go(pos)
    # Scan, classify and file cards until an 'empty' frame or MAXITER.
    for i in range(MAXITER):
        filebase = random_filename()
        filename = os.path.join(directory, filebase)
        print('%d scanning -> %s' % (i, filename))
        cam.rgb_to_file(filename)
        im = Image.open(filename)
        label = classifier.classify(im)
        print('	classfied as %s' % (label))
        # Move the scan into a per-label subdirectory.
        new_directory = os.path.join(directory, label)
        if not os.path.exists(new_directory):
            os.mkdir(new_directory)
        print('	moving to %s' % (new_directory))
        os.rename(filename, os.path.join(new_directory, filebase))
        if label == 'empty':
            break
        go(label)
        sr.feed_card()
    order = '' # triggers prompt for input at top of loop
| AaronParsons/sortrobot | scripts/sr_neural_sort.py | sr_neural_sort.py | py | 2,532 | python | en | code | 0 | github-code | 36 |
26335020274 | x = 5
# `while` takes a truth test (True, False).
while True:
    print("我还在里面,现在是:", x )
    x = x + 1
    # While x < 10, print two lines and re-check the condition — a "loop".
    # How do we stop? Grow x until the condition no longer holds.
    # This version would be infinite, because the condition is always True.
    # How to fix it?
    break
print('我逃出循环了!')
33912887364 | from Faturas.Pyside2 import GUITela_de_Login as tl, Tela_Principal as tp
import sys
def system_load():
    """Run the login dialog; on success, open the main window."""
    if tl.execution():
        tp.execution()
system_load()  # run the login -> main-window flow
sys.exit(0)  # explicit clean exit once all windows are closed
| LC-burigo/Camerge_Faturas | Faturas/Pyside2/Gerente.py | Gerente.py | py | 199 | python | en | code | 1 | github-code | 36 |
72056734183 | from warnings import filters
from billing.billing.api.sales_invoice.create_sales_invoice import re_eveluate_sales_orders
# from billing.billing.utils.payment_notifications import get_party_phone
import frappe
from datetime import date
from frappe.utils.background_jobs import enqueue
from frappe.utils.data import nowdate
def calculate_age(birthDate):
    """Return the whole number of mean Gregorian years since *birthDate*."""
    DAYS_PER_YEAR = 365.2425
    elapsed_days = (date.today() - birthDate).days
    return int(elapsed_days / DAYS_PER_YEAR)
def lab_test_after_insert_hook(doc,state):
    """after_insert hook: stamp patient age/sex on a new Lab Test.

    Also enqueues background jobs to group same-category tests and to
    back-fill missing ages on older Lab Tests.
    """
    patient = doc.get('patient')
    dob = frappe.db.get_value('Patient',{ 'name':patient }, 'dob')
    gender = frappe.db.get_value('Patient',{ 'name':patient }, 'gender')
    sex = frappe.db.get_value('Patient',{ 'name':patient }, 'sex')
    doc.patient_age = age_calc(dob,doc.name)
    # Prefer 'gender', fall back to the legacy 'sex' field.
    doc.patient_sex = gender or sex
    doc.save(ignore_permissions=True)
    enqueue(method=append_same_category_tests,name=doc.get('name'), queue='short', timeout=600)
    enqueue(method=tetst_age_fix, queue='short', timeout=600)
    # test_sharing_sample_with = doc.get('share_sample_with')
    # frappe.msgprint("sharing sample " + test_sharing_sample_with)
    # if test_sharing_sample_with:
    # frappe.db.set_value('Lab Test', test_sharing_sample_with,{'share_sample_with': doc.name})
    # tests_sharing_sample = frappe.db.get_all('Lab Test Sample Share',filters={'parent':doc.name},fields=['name','lab_test'])
    # if len(tests_sharing_sample)>0:
    # for test in tests_sharing_sample:
    # lab_test = frappe.get_doc('Lab Test',test['lab_test'])
    # test_item = lab_test.append('lab_test_sample_share')
    # test_item.lab_test = test['lab_test']
    # lab_test.save(ignore_permissions=True)
    # frappe.msgprint('updated related')
def age_calc(dob, lab_name=''):
    """Return a coarse age string for a date of birth.

    The most significant non-zero unit wins: ``"<n>Y"`` (years), else
    ``"<n>M"`` (months), else ``"<n>D"`` (days), else ``"<n>H"`` (hours).

    BUG FIX: a date of birth equal to today previously fell through every
    branch and returned ``None``; it now returns ``"0H"``.

    Args:
        dob: ``datetime.date`` of birth.
        lab_name: kept for backward compatibility with callers (was used
            only by since-removed error logging).
    """
    currentDate = date.today()
    daysLeft = currentDate - dob
    # Cascade on a mean Gregorian year: peel off whole years, then the
    # fractional remainder as months, days and hours.
    years = (daysLeft.total_seconds()) / (365.242 * 24 * 3600)
    yearsInt = int(years)
    months = (years - yearsInt) * 12
    monthsInt = int(months)
    days = (months - monthsInt) * (365.242 / 12)
    daysInt = int(days)
    hours = (days - daysInt) * 24
    hoursInt = int(hours)
    if yearsInt > 0:
        return "{0}Y".format(yearsInt)
    if monthsInt > 0:
        return "{0}M".format(monthsInt)
    if daysInt > 0:
        return "{0}D".format(daysInt)
    if hoursInt > 0:
        return "{0}H".format(hoursInt)
    # All components truncate to zero (dob is today): report zero hours
    # instead of implicitly returning None.
    return "0H"
# bench execute lims.doc_hooks.lab_test.age_test
def age_test():
    """Manual debugging helper: print computed ages for one patient's tests."""
    pats = frappe.get_all('Lab Test',fields=['name','patient'],filters={'patient':'1122083'})
    for p in pats:
        print(p['name'])
        dob = frappe.db.get_value('Patient',{ 'name': p['patient'] }, 'dob')
        # print(type(dob))
        print(str(dob))
        age = age_calc(dob,p['name'])
        print('age ',age)
        # frappe.db.set_value('Lab Test',p['name'],{'patient_age':age})
# bench execute lims.doc_hooks.lab_test.append_same_category_tests
@frappe.whitelist()
def append_same_category_tests(name):
    """Link all of a patient's open Lab Tests so they can share one sample.

    Returns a list of {name, Test, workflow_state} dicts (name is a link).
    NOTE(review): the SQL below is built with str.format from a db value —
    confirm lab_doc.patient cannot carry quote characters.
    """
    from mtrh_dev.mtrh_dev.utilities import get_link_to_form_new_tab
    # from clinical.hook.lab_test import get_sample_shares
    # name='3BQ'
    lab_doc = frappe.get_doc('Lab Test',name)
    sql="""select tlt.name,tlt.template,tlt.workflow_state,ltc.test_group,ltc.lab_test_template,tlt.patient from `tabLab Test` tlt RIGHT join `tabLab Test Codes` ltc on tlt.template=ltc.lab_test_template where tlt.patient='{0}' and tlt.docstatus=0""".format(lab_doc.patient)
    # tlt.workflow_state='To Receive' and  and tlt.template='{1}' ,doc.template
    # print(sql)
    # link_arr=[]
    name_arr=[]
    res = frappe.db.sql(sql,as_dict=1)
    names = [x.name for x in res]
    for n in names:
        # link_arr.append(get_link_to_form_new_tab(doctype="Lab Test", name=n, label=n))
        name_arr.append({'name':get_link_to_form_new_tab(doctype="Lab Test", name=n, label=n) ,'Test':frappe.db.get_value('Lab Test',n,'template'),'workflow_state':frappe.db.get_value('Lab Test',n,'workflow_state')})
        update_sample_share(n,names)
    # frappe.msgprint(title='Labs Sharing Same Sample',msg=str(name_arr))
    get_sample_shares(name)
    return name_arr
def update_sample_share(name,names):
    """Append each other test in *names* to *name*'s sample-share child table.

    Skips the test itself and any pairing that already exists.
    """
    lab_doc = frappe.get_doc('Lab Test',name)
    for n in names:
        if n!= lab_doc.get('name'):
            if not frappe.db.exists('Lab Test Sample Share',{'parent':name,'lab_test':n}):
                sample_share = lab_doc.append('lab_test_sample_share')
                sample_share.lab_test = n
                lab_doc.save(ignore_permissions=True)
def get_sample_shares(lab_name):
    """Collect every Lab Test sharing a sample with *lab_name* and print them.

    Resolution: look the test up both as a child row and as a parent, then
    re-query via the common parent so the full share group is covered.
    """
    tests_sharing_sample_child = frappe.db.get_all('Lab Test Sample Share',filters={'lab_test':['IN',lab_name]},fields=['name','lab_test','parent'])
    tests_sharing_sample_parent = frappe.db.get_all('Lab Test Sample Share',filters={'parent':lab_name},fields=['name','lab_test','parent'])
    tests_sharing_sample = tests_sharing_sample_parent or tests_sharing_sample_child
    test_names = []
    if len(tests_sharing_sample)>0:
        parent_test = tests_sharing_sample[0]['parent']
        tests_sharing_sample = frappe.db.get_all('Lab Test Sample Share',filters={'parent':parent_test},fields=['name','lab_test'])
        for test in tests_sharing_sample:
            test_names.append(test['lab_test'])
        test_names.append(lab_name)
    else:
        test_names.append(lab_name)
    # De-duplicate while preserving order.
    shares = list(dict.fromkeys(test_names))
    process_lab_array= frappe.db.get_all('Lab Test',filters={'name':['IN',shares]},fields=['processing_lab'])
    employee_array = frappe.db.get_all('Lab Test',filters={'name':['IN',shares]},fields=['employee'])
    process_lab=''
    employee=''
    # print(str(process_lab))
    # print(str(employee))
    # Take the last non-empty processing lab / employee found in the group.
    for l in process_lab_array:
        if l.processing_lab:
            process_lab = l.processing_lab
    for e in employee_array:
        if e.employee:
            employee = e.employee
    for n in shares:
        print(n)
        # bulk_workflow_update(docname=n,process_lab=process_lab,employee=employee)
def bulk_workflow_update(docname,process_lab='',employee=''):
    """Advance a Lab Test one step along its workflow, based on its state.

    Each recognised workflow_state triggers the matching workflow action;
    the sampling/receive states also stamp the processing lab and employee.
    """
    from frappe.model.workflow import apply_workflow
    doc=frappe.get_doc('Lab Test',docname )#'IP'ß
    # print(get_sample_shares(doc.name))
    # actions=['Forward For Payment','Approve Payment','Send To Lab','Receive Lab Test','Forward For Verification']
    # state_action_dict=[
    # { 'state':'Awaiting Checkin','action':actions[0]},
    # { 'state':'Awaiting Payment','action':actions[1]},
    # { 'state':'Awaiting Sampling','action':actions[2]},
    # { 'state':'To receive','action':actions[3]},
    # { 'state':'Processing','action':actions[4]},
    # { 'state':'Awaiting Verification','action':actions[5]}
    # ]
    workflow_state = doc.get('workflow_state')
    if workflow_state=='Awaiting Checkin':
        apply_workflow(doc=doc, action="Forward For Payment")
    if workflow_state=='Awaiting Payment':
        # apply_workflow(doc=doc, action="Approve Payment")
        # Re-check the sales orders before approving the payment step.
        re_eveluate_sales_orders(patient_name=doc.patient,lab_name=doc.name)
        apply_workflow(doc=doc, action="Approve Payment")
    if workflow_state=='Awaiting Sampling':
        doc.processing_lab = process_lab
        doc.employee = employee
        doc.save(ignore_permissions=True)
        apply_workflow(doc=doc, action="Send To Lab")
    if workflow_state=='To receive':
        doc.processing_lab = process_lab
        doc.employee = employee
        doc.save(ignore_permissions=True)
        apply_workflow(doc=doc, action="Receive Lab Test")
    if workflow_state=='Processing':
        apply_workflow(doc=doc, action="Forward For Verification")
    if workflow_state=='Awaiting Verification':
        apply_workflow(doc=doc, action="Post Lab Test")
# bench execute lims.doc_hooks.lab_test.lab_clean
def lab_clean():
    """One-off cleanup: delete duplicate Normal Test Result rows of parent B73.

    Keeps the first row returned and deletes every subsequent one.
    NOTE(review): parent name is hard-coded; intended as a manual script.
    """
    sql = "select name,idx from `tabNormal Test Result` where parent='B73'"
    items=frappe.db.sql(sql,as_dict=1)
    count = 0
    for i in items:
        count +=1
        if count>1:
            sq= "delete from `tabNormal Test Result` where name='{0}'".format(i.name)
            frappe.db.sql(sq,as_dict=1)
    print(count)
# bench execute lims.doc_hooks.lab_test.comment_count
def comment_count(name='B73'):
    """One-off cleanup: trim Sales Invoice comment spam.

    For each Sales Invoice with more than 15 comments, keep the first
    comment and delete the rest, committing per invoice.
    NOTE(review): the ``name`` parameter is unused.
    """
    # return 1
    sqlc="select count(name) as cnt,reference_name from tabComment where reference_doctype='Sales Invoice' and reference_name is not null group by reference_name HAVING COUNT(name) > 15 order by reference_name"
    parents=frappe.db.sql(sqlc,as_dict=1)
    for p in parents:
        print('ref ',p.reference_name, ' ',p.cnt)
        sql = "select name,reference_doctype,reference_name from tabComment where reference_name='{0}'".format(p.reference_name)
        # print(sql)
        items=frappe.db.sql(sql,as_dict=1)
        count = 0
        for i in items:
            count +=1
            if count>1:
                print('item count ',count)
                sq= "delete from tabComment where name='{0}'".format(i.name)
                frappe.db.sql(sq,as_dict=1)
                # frappe.delete_doc('Comment',i.name)
        frappe.db.commit()
        # print(count)
# bench execute lims.doc_hooks.lab_test.tetst_age_fix
def tetst_age_fix():
    """Back-fill patient_age on every Lab Test where it is still NULL."""
    sql = "select name,patient,docstatus from `tabLab Test` where patient_age is null;"
    labs = frappe.db.sql(sql,as_dict=1)
    for lab in labs:
        patient = lab.get('patient')
        # print(' patient ', patient)
        dob = frappe.db.get_value('Patient',{ 'name':patient }, 'dob')
        patient_age = age_calc(dob,lab.get('name'))
        # print(patient_age)
        up_sq = "update `tabLab Test` set patient_age ='{0}' where name='{1}';".format(patient_age,lab.get('name'))
        print(up_sq)
        frappe.db.sql(up_sq,as_dict=1)
def log_error(lab_name,dob):
    """Persist a Lims Error Log entry recording a suspicious dob value."""
    log = frappe.new_doc('Lims Error Log')
    log.ordernumber = lab_name
    log.log_number = ''
    log.unprocessed_result = str(dob)
    log.save(ignore_permissions=True)
# bench execute lims.doc_hooks.lab_test.patient_record_exist
def patient_record_exist():
    """Print counts of submitted Lab Tests vs. their medical-record entries."""
    # labs = frappe.get_all("Lab Test",filters={'docstatus':1},fields=['name','patient'])
    labs = frappe.db.count('Lab Test', {'docstatus': 1})
    recs = frappe.db.count('Patient Medical Record', {'reference_doctype': 'Lab Test'})
    print('labs ',labs, ' recs ',recs)
26224523041 | import xml.etree.ElementTree as ET
import time
import requests
class BaseAPIConnector(object):
    """Base HTTP connector: subclasses override construct_url()."""
    def __init__(self, user_agent='', verbose=False):
        self.user_agent = user_agent
        self.verbose = verbose
    def construct_url(self):
        # Abstract-by-convention: subclasses must return the request URL.
        return None
    def html_request(self):
        """GET construct_url(), retrying up to 10 times on any exception.

        Returns the Response, or None if every attempt failed —
        NOTE(review): callers below dereference the result without a
        None check, so total failure raises AttributeError downstream.
        """
        if self.user_agent == '':
            raise UserWarning('Please specify a user agent.')
        url = self.construct_url()
        if self.verbose:
            print(url)
        request = None
        exception_count = 0
        while exception_count < 10:
            try:
                request = requests.get(url, headers={'User-Agent': self.user_agent})
            except Exception as e:
                print("Exception '%s' while querying url: '%s', trying again..." % (e, url))
                time.sleep(10)
                exception_count += 1
            else:
                break
        return request
    def get_xml_from_url(self):
        # Best-effort parse: any failure (bad XML, failed request) -> None.
        try:
            return ET.fromstring(self.html_request().text)
        except:
            return None
    def get_json_from_url(self):
        # No error handling here, unlike the XML variant (intentional?).
        return self.html_request().json()
16726065794 | from __future__ import print_function
import sys
# Aggregate 5 numeric columns from tab-separated stdin, keyed by
# (field0, field1, first 6 chars of field2); only rows whose first field
# is 320600 or 360100 are kept (presumably region codes — confirm).
data = {}
for ln in sys.stdin:
    flds = ln.rstrip('\n').split('\t')
    if flds[0] not in ['320600', '360100']:
        continue
    key = (flds[0], flds[1], flds[2][:6])
    if key not in data:
        data[key] = [0.0] * 5
    for i, val in enumerate(flds[3:]):
        data[key][i] += float(val)
# Emit one tab-separated line per aggregated key.
for key in data:
    cid, rid, m = key
    print(cid, rid, m, *data[key], sep='\t')
14114323371 | import setuptools
# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="mktable2matrix",
    version="0.1.2",
    author="Guilherme Lucas",
    author_email="guilherme.slucas@gmail.com",
    description="Converts markdown table to Matrix",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Guilhermeslucas/mktable2matrix",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
24593021836 | """Implement a text output of the time entered from the console (the user should input data in the format hh:mm).
Show the responses to the user in Russian according to the rules listed below:
min == 0: такое-то значение часа ровно (15:00 - три часа ровно)
min < 30: столько-то минут следующего часа (19:12 - двенадцать минут восьмого)
min == 30: половина такого-то (15:30 - половина четвёртого)
min > 30 and min < 45 столько-то минут следующего часа (12:38 - тридцать восемь минут первого)
min >= 45 без min такого-то (08:54 - без шести минут девять)"""
from db import myList
# Read "hh:mm", strip the colon, and validate digit-by-digit.
user_time = input("Enter your data in the format (hh:mm):\n")
user_time = user_time.replace(':', '')
#input check 4 time values
if len(user_time) != 4:
    print("your input has incorrect format")
    exit()
#I assign each digit of the time to the variable h*
h1, h2, m1, m2 = user_time[0], user_time[1], user_time[2], user_time[3]
#checking that the user entered input integers
if not h1.isdigit() or not h2.isdigit() or not m1.isdigit() or not m2.isdigit():
    print("your input has incorrect format")
    exit()
h1, h2, m1, m2 = int(h1), int(h2), int(m1), int(m2) #change the data type for each variable h*
hh = int(user_time[0:2]) #assign the hour digits to a variable hh
mm = int(user_time[2:4]) #assign the minutes digits to a variable mm
#check if input is in 24h range
if hh > 23 or m1 > 5 or m2 > 9:
    print("your input has incorrect format")
    exit() #
#I simplify my task, I translate the 24h format into 12h
if int(user_time[0:2]) > 12:
    hh -= 12
# Pick the grammatically correct Russian word form for the hour count
# (1 / 2-4 / 5-12 take different endings in Russian).
if hh == 1:
    hhText = myList["hh"][0]
elif hh <= 4 and hh > 1:
    hhText = myList["hh"][1]
elif hh <= 12 and hh > 4:
    hhText = myList["hh"][2]
elif hh == 0:
    hhText = myList["hh"][2]
#first condition
#min == 0: такое-то значение часа ровно (15:00 - три часа ровно)
if mm == 0:
    print(f"""{myList[hh][0]} {hhText} {myList[0][0]}""")
    exit()
#second condition
#min < 30: столько-то минут следующего часа (19:12 - двенадцать минут восьмого)
# Pick the Russian word form for "minute(s)" based on the last digit.
if m2 == m2:
    if m2 == 1:
        mmText = myList["mm"][0]
    elif m2 <= 4 and m2 > 1:
        mmText = myList["mm"][1]
    elif m2 <= 9 and m2 >= 5:
        mmText = myList["mm"][2]
    elif m2 == 0:
        mmText = myList["mm"][2]
if mm < 30:
    # Genitive form of the NEXT hour ("of the eighth" for 19:xx).
    if hh + 1 == 5 or hh + 1 == 6 or hh + 1 == 9 or hh + 1 == 10:
        next_hh = myList[hh + 1][0][:-1]+myList["suff"][3]
    elif hh + 1 == 11 or hh + 1 == 12:
        next_hh = myList[hh + 1][0][:-1]+myList["suff"][3]
    elif hh + 1 == 13:
        next_hh = myList[1][1]
    elif hh + 1 == 7 or hh + 1 == 8 or hh + 1 == 1 or hh + 1 == 2 or hh + 1 == 3 or hh + 1 == 4:
        next_hh = myList[hh + 1][1]
    if mm == 12:
        print(f"""{myList[m2][3]+myList["suff"][0]} {myList["mm"][2]} {next_hh}""")
        exit()
    if mm > 10 and mm < 20:
        print(f"""{myList[m2][0][:-1]+myList["suff"][0]} {myList["mm"][2]} {next_hh}""")
        exit()
    if mm == 10:
        print(f"""{myList[10][0]} {myList["mm"][2]} {next_hh}""")
        exit()
    if mm == 20:
        print(f"""{myList[m1][0][:-1]+myList["suff"][1]} {myList["mm"][2]} {next_hh}""")
        exit()
    if mm > 20:
        if m2 == 1 or m2 == 2:
            print(f"""{myList[m1][0]+myList["suff"][1]} {myList[m2][3]} {mmText} {next_hh}""")
            exit()
        print(f"""{myList[m1][0]+myList["suff"][1]} {myList[m2][0]} {mmText} {next_hh}""")
        exit()
    if 0 < mm:
        if mm == 1 or m2 == 2:
            print(f"""{myList[m2][3]} {mmText} {next_hh}""")
            exit()
        print(f"""{myList[m2][0]} {mmText} {next_hh}""")
        exit()
#third condition
# min == 30: половина такого-то (15:30 - половина четвёртого)
if mm == 30:
    print(f"""{myList[0][1]} {myList[hh + 1][1]}""")
    exit()
#fourth condition
# min > 30 and min < 45 столько-то минут следующего часа (12:38 - тридцать восемь минут первого)
if mm > 30 and mm < 45:
    if mm == 40:
        print(f"""{myList[m1 * 10][0]} {mmText} {myList[hh + 1][1]}""")
        exit()
    if mm > 40 and mm < 45:
        if m2 == 1 or m2 == 2:
            print(f"""{myList[40][0]} {myList[m2][3]} {mmText} {myList[hh + 1][1]}""")
            exit()
        print(f"""{myList[40][0]} {myList[m2][0]} {mmText} {myList[hh + 1][1]}""")
        exit()
    if mm > 30 and mm < 40:
        if m2 == 1 or m2 == 2:
            print(f"""{myList[m1][0] + myList["suff"][1]} {myList[m2][3]} {mmText} {myList[hh + 1][1]}""")
            exit()
        print(f"""{myList[m1][0]+myList["suff"][1]} {myList[m2][0]} {mmText} {myList[hh + 1][1]}""")
        exit()
#fifth condition
#min >= 45 без min такого-то (08:54 - без шести минут девять)
if mm >= 45:
    if hh == 12:
        myList[hh + 1][0] = myList["hh"][0]
    if 60-mm >= 5 and 60-mm <= 13:
        print(f"""{myList[0][2]} {myList[60-mm][0][:-1] + myList["suff"][2]} {myList["mm"][2]} {myList[hh + 1][0]}""")
        exit()
    if mm == 59:
        print(f"""{myList[0][2]} {myList[60 - mm][2]} {myList["mm"][1]} {myList[hh + 1][0]}""")
        exit()
    if 60 - mm == 14 or 60 - mm == 15:
        x = 60 - mm - 10
        print(f"""{myList[0][2]} {myList[x][0][:-1]+ myList["suff"][0][:-1]+ myList["suff"][2]} {myList["mm"][2]} {myList[hh + 1][0]}""")
        exit()
    if mm >= 45:
        print(f"""{myList[0][2]} {myList[60-mm][2]} {myList["mm"][2]} {myList[hh + 1][0]}""")
        exit()
71091836904 | from unittest import TestCase
from gobeventproducer.naming import camel_case
class TestNaming(TestCase):
def test_camel_case(self):
cases = [
("test_case", "testCase"),
("test_case_2", "testCase2"),
("test", "test"),
]
for _in, _out in cases:
self.assertEqual(_out, camel_case(_in))
| Amsterdam/GOB-EventProducer | src/tests/test_naming.py | test_naming.py | py | 364 | python | en | code | 0 | github-code | 36 |
35260765665 | from binaryninja import *
import xxhash
################################################################################################################
# MLIL Instruction #
################################################################################################################
class Neo4jInstruction:
    """Wrap one MLIL instruction for export as a Neo4j node + relationship."""
    def __init__(self, instr: mediumlevelil.MediumLevelILInstruction, context, parent_type: str):
        self.instr = instr
        self.parent_type = parent_type
        self.operands = str(instr.operands)
        self.context = context
        # The first instruction of a basic block chains off the block node;
        # every later one chains off the previous instruction.
        if self.parent_type == 'BasicBlock':
            self.relationship_label = 'InstructionChain'
        else:
            self.relationship_label = 'NextInstruction'
        self.context.set_hash(self.instr_hash())
    def instr_hash(self):
        """Return a stable xxh64 digest of the instruction's operands+opcode."""
        instruction_hash = xxhash.xxh64()
        instruction_hash.update(str(self.instr.operands) + str(self.instr.operation))
        return instruction_hash.hexdigest()
    def serialize(self):
        """Return the node/relationship dicts in the CSV export schema."""
        csv_template = {
            'mandatory_node_dict': {
                'HASH': self.context.SelfHASH,
                'LABEL': 'Instruction',
            },
            'mandatory_relationship_dict': {
                'START_ID': self.context.ParentHASH,
                'END_ID': self.context.SelfHASH,
                'TYPE': self.relationship_label,
                'StartNodeLabel': self.parent_type,
                'EndNodeLabel': 'Instruction',
                'AssemblyOffset': self.instr.address,
            },
            'mandatory_context_dict': self.context.get_context(),
            'node_attributes': {
            },
            'relationship_attributes': {
                'InstructionIndex': self.instr.instr_index,
                'PossibleValues': self.instr.possible_values.type.value,
                'VarsRead': [var.name for var in self.instr.vars_read],
                'VarsWritten': [var.name for var in self.instr.vars_written],
            },
        }
        return csv_template
| CySHell/Binja4J | Core/extraction_helpers/Instruction.py | Instruction.py | py | 2,116 | python | en | code | 15 | github-code | 36 |
30473232781 | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from tkinter import messagebox
import time
import os
from App.Login import username, password, security_question
class Core:
    """Drive a selenium session that ticks bet checkboxes from a bet file.

    The bet file holds one line per slip; each line is a comma-separated
    list of digit strings, one per row of the page's checkbox grid.
    """
    def __init__(self, file_path, driver):
        self.driver = driver
        self.file_path = file_path
        self.file_content = self.readFileContent()
        self.file_name = os.path.split(file_path)[1]
        self.IDs_list = []
        if self.file_content:
            try:
                self.collectCheckboxsIDs()
                self.selectCheckbox()
            except NoSuchElementException:
                self.driver.close()
                messagebox.showinfo(
                    "Bet Selector", "Loading took too much time or Not yet defined.")
        else:
            messagebox.showinfo(
                "Bet Selector", "Your Bet File '{}' is Empty.".format(self.file_name))
    def readFileContent(self):
        """Return the bet file's non-empty lines."""
        with open(self.file_path, 'r') as f:
            file_content = f.read().split('\n')
        file_content = list(filter(None, file_content))
        return file_content
    def collectCheckboxsIDs(self):
        """Collect checkbox element ids into a 1-based [row][column] grid.

        Ids come 9 per row; a dummy row 0 and dummy column 0 are inserted
        so row/column indices below can start at 1.
        """
        checkboxs = self.driver.find_elements_by_class_name("checkbox")
        IDs_list = []
        for el in checkboxs:
            IDs_list.append(el.get_attribute('id'))
        IDs_list = [IDs_list[x:x+9] for x in range(0, len(IDs_list), 9)]
        IDs_list.insert(0, [])
        for lis in IDs_list:
            lis.insert(0, "")
        self.IDs_list = IDs_list
    def selectCheckbox(self):
        """Click every selected checkbox per file line, then add to the slip."""
        for line in self.file_content:
            line = line.split(',')
            row = 1
            for choice in line:
                i = 0
                while i < len(choice):
                    # Each digit of `choice` is a 1-based column index.
                    self.driver.find_element_by_id(
                        self.IDs_list[row][int(choice[i])]).click()
                    i += 1
                row += 1
            self.driver.find_element_by_xpath(
                '//*[@title="Add to Slip"]').click()
| abdlalisalmi/UpWork-Select-Checkbox-in-a-Web-Page-with-Python-Selenium | App/Core.py | Core.py | py | 2,090 | python | en | code | 1 | github-code | 36 |
30990966321 | # need to implement CSRF
from rest_framework.authentication import CSRFCheck
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework import exceptions
from channels.db import database_sync_to_async
from server.settings import SIMPLE_JWT
from django.core.exceptions import ObjectDoesNotExist
from rest_framework_simplejwt.tokens import UntypedToken, TokenError
from urllib.parse import parse_qs
from server.settings import SIMPLE_JWT, SECRET_KEY
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
import jwt
def enforce_csrf(request):
    """Run Django's CSRF middleware checks; deny the request on failure."""
    check = CSRFCheck()
    check.process_request(request)
    reason = check.process_view(request, None, (), {})
    if reason:
        raise exceptions.PermissionDenied('CSRF failed: %s' % reason)
class CustomAuthentication(JWTAuthentication):
    """JWT authentication that falls back to a cookie-held token + CSRF."""
    def authenticate(self, request):
        header = self.get_header(request)
        if header is None:
            # No Authorization header: read the token from the auth cookie.
            raw_token = request.COOKIES.get(SIMPLE_JWT['AUTH_COOKIE']) or None
        else:
            raw_token = self.get_raw_token(header)
        if raw_token is None:
            return None
        validated_token = self.get_validated_token(raw_token)
        # Cookie-based auth must still pass the CSRF check.
        enforce_csrf(request)
        return self.get_user(validated_token), validated_token
@database_sync_to_async
def get_user(user_id):
    """Load the user by primary key (async-safe); AnonymousUser if missing."""
    try:
        return get_user_model().objects.get(pk=user_id)
    except ObjectDoesNotExist:
        return AnonymousUser()
class TokenAuthMiddleWare:
    """Channels middleware: resolve ?token=<jwt> into scope['user']."""
    def __init__(self, app):
        self.app = app
    async def __call__(self, scope, receive, send):
        # needs utf8 to decode from byte format b''
        # Sentinel: -1 never matches a real pk, so lookup yields AnonymousUser.
        user_id = -1
        try:
            raw_token = parse_qs(scope["query_string"].decode("utf8"))["token"][0]
            UntypedToken(raw_token)
            # NOTE(review): the third positional arg of jwt.decode is not
            # `algorithms` in current PyJWT — confirm the installed version.
            decode_token = jwt.decode(raw_token, SECRET_KEY, SIMPLE_JWT["ALGORITHM"])
            user_id = decode_token['user_id']
        except:
            # NOTE(review): bare except hides unexpected errors; any failure
            # simply downgrades the connection to anonymous.
            print("Token is invalid")
        finally:
            user = await get_user(user_id)
            return await self.app(dict(scope, user=user), receive, send)
| Kredam/MyRoom | back-end/server/api/authentication.py | authentication.py | py | 2,167 | python | en | code | 2 | github-code | 36 |
15969653865 | #!/usr/bin/env python
## Test an algorithm in real life
import sys
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
print("This script requires Python version >=3.6")
sys.exit(1)
import algorithms
import datetime
import exchange
import pandas_market_calendars as mcal
import portfolioLive
## Main function
def main():
    """Run the KIPP algorithm live, but only on an NYSE trading day."""
    today = datetime.date.today()
    nyse = mcal.get_calendar('NYSE')
    if len(nyse.valid_days(start_date=today, end_date=today)) == 0:
        print("Markets are closed today")
        return
    # One symbol per line in the pruned-symbols file.
    with open("symbolsPruned", "r", newline="\n") as file:
        symbols = file.read().strip().split("\n")
    exchange.init(
        symbols,
        period=1,
        historyDays=algorithms.KIPPslowDays)
    p = portfolioLive.Portfolio()
    algorithms.KIPP(symbols, p)
if __name__ == "__main__":
    main()
| WattsUp/PyStonks | stonks/live.py | live.py | py | 810 | python | en | code | 2 | github-code | 36 |
14071366665 | # -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
import sys
total=0
# pyuic-generated compatibility shims: on newer PyQt4 APIs (v2 string API)
# QString/UnicodeUTF8 are gone, so fall back to plain-Python equivalents.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_ResistorLab(QtGui.QWidget):
    """Resistor colour-code calculator with a 5-band (top) and a 4-band
    (bottom) section.

    Each combo box selects one band colour; the permanently disabled push
    buttons double as read-only displays for the computed value (ohms) and
    tolerance (%).  The repetitive generated pyuic code was rewritten in a
    data-driven style with identical widget names, geometry and strings.
    """

    # (palette role name, RGB) shared by the Active and Inactive groups.
    _PALETTE_ROLES = [
        ("WindowText", (0, 0, 0)),
        ("Button", (170, 170, 127)),
        ("Light", (255, 255, 191)),
        ("Midlight", (212, 212, 159)),
        ("Dark", (85, 85, 63)),
        ("Mid", (113, 113, 84)),
        ("Text", (0, 0, 0)),
        ("BrightText", (255, 255, 255)),
        ("ButtonText", (0, 0, 0)),
        ("Base", (255, 255, 255)),
        ("Window", (170, 170, 127)),
        ("Shadow", (0, 0, 0)),
        ("AlternateBase", (212, 212, 191)),
        ("ToolTipBase", (255, 255, 220)),
        ("ToolTipText", (0, 0, 0)),
    ]
    # Roles whose colour differs in the Disabled group.
    _DISABLED_OVERRIDES = {
        "WindowText": (85, 85, 63),
        "Text": (85, 85, 63),
        "ButtonText": (85, 85, 63),
        "Base": (170, 170, 127),
        "AlternateBase": (170, 170, 127),
    }
    # Colour names per combo index for the significant-digit bands.
    _DIGIT_COLORS = ("PRETO", "MARROM", "VERMELHO", "LARANJA", "AMARELO",
                     "VERDE", "AZUL", "VIOLETA", "CINZA", "BRANCO")
    # Colour names per combo index for the multiplier bands and FifthColor5.
    _MULTIPLIER_COLORS = ("PRETO", "MARROM", "VERMELHO", "LARANJA", "AMARELO",
                          "VERDE", "AZUL", "VIOLETA", "DOURADO", "PRATEADO")
    # Colour names per combo index for the 4-band tolerance band.
    _TOLERANCE_COLORS = ("MARROM", "VERMELHO", "VERDE", "AZUL", "VIOLETA",
                         "CINZA", "DOURADO", "PRATEADO")
    # Tolerance (%) for tolerance-combo indices 0-7.
    _TOLERANCE_VALUES = (1, 2, 0.5, 0.25, 0.1, 0.05, 5, 10)

    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.setupUi(self)

    def _build_palette(self):
        """Return the olive/beige palette applied to the whole window."""
        palette = QtGui.QPalette()
        for group in (QtGui.QPalette.Active, QtGui.QPalette.Inactive,
                      QtGui.QPalette.Disabled):
            for role_name, rgb in self._PALETTE_ROLES:
                if group == QtGui.QPalette.Disabled:
                    rgb = self._DISABLED_OVERRIDES.get(role_name, rgb)
                brush = QtGui.QBrush(QtGui.QColor(*rgb))
                brush.setStyle(QtCore.Qt.SolidPattern)
                palette.setBrush(group, getattr(QtGui.QPalette, role_name), brush)
        return palette

    def _make_combo(self, parent, name, geometry, item_count):
        """Create a colour-band combo box pre-filled with empty items
        (the item texts are filled in later by retranslateUi)."""
        combo = QtGui.QComboBox(parent)
        combo.setGeometry(QtCore.QRect(*geometry))
        combo.setObjectName(_fromUtf8(name))
        for _ in range(item_count):
            combo.addItem(_fromUtf8(""))
        return combo

    def _make_result_button(self, parent, name, geometry):
        """Create one of the disabled push buttons used as result displays."""
        button = QtGui.QPushButton(parent)
        button.setDisabled(True)
        button.setGeometry(QtCore.QRect(*geometry))
        button.setDefault(False)
        button.setObjectName(_fromUtf8(name))
        return button

    def setupUi(self, ResistorLab):
        """Create, place and wire every widget of the calculator window."""
        ResistorLab.setObjectName(_fromUtf8("ResistorLab"))
        ResistorLab.resize(384, 424)
        ResistorLab.setPalette(self._build_palette())
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("MS UI Gothic"))
        ResistorLab.setFont(font)

        # --- 5-band section (top half) ------------------------------------
        self.FirstColor5 = self._make_combo(ResistorLab, "FirstColor5", (60, 100, 69, 22), 10)
        self.SecondColor5 = self._make_combo(ResistorLab, "SecondColor5", (160, 100, 69, 22), 10)
        self.ThirdColor5 = self._make_combo(ResistorLab, "ThirdColor5", (260, 100, 69, 22), 10)
        self.FourthColor5 = self._make_combo(ResistorLab, "FourthColor5", (100, 130, 69, 22), 10)
        self.FifthColor5 = self._make_combo(ResistorLab, "FifthColor5", (220, 130, 69, 22), 10)
        # --- 4-band section (bottom half) ---------------------------------
        self.FirstColor4 = self._make_combo(ResistorLab, "FirstColor4", (120, 240, 69, 22), 10)
        self.SecondColor4 = self._make_combo(ResistorLab, "SecondColor4", (200, 240, 69, 22), 10)
        self.ThirdColor4 = self._make_combo(ResistorLab, "ThirdColor4", (110, 280, 69, 22), 10)
        self.FourthColor4 = self._make_combo(ResistorLab, "FourthColor4", (220, 280, 69, 22), 8)

        # Section headings and window title label.
        self.Cores5 = QtGui.QLabel(ResistorLab)
        self.Cores5.setGeometry(QtCore.QRect(140, 30, 241, 61))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Eras Demi ITC"))
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.Cores5.setFont(font)
        self.Cores5.setObjectName(_fromUtf8("Cores5"))
        self.Cores4 = QtGui.QLabel(ResistorLab)
        self.Cores4.setGeometry(QtCore.QRect(150, 190, 181, 61))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Levenim MT"))
        font.setPointSize(15)
        font.setBold(True)
        font.setWeight(75)
        self.Cores4.setFont(font)
        self.Cores4.setTextFormat(QtCore.Qt.PlainText)
        self.Cores4.setObjectName(_fromUtf8("Cores4"))
        self.Titulo = QtGui.QLabel(ResistorLab)
        self.Titulo.setGeometry(QtCore.QRect(100, 0, 201, 51))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("KaiTi"))
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.Titulo.setFont(font)
        self.Titulo.setObjectName(_fromUtf8("Titulo"))

        # Result displays (disabled buttons acting as labels).
        self.Result5cores = self._make_result_button(ResistorLab, "Result5cores", (110, 160, 75, 23))
        self.Result5cores_2 = self._make_result_button(ResistorLab, "Result5cores_2", (200, 160, 75, 23))
        self.Result4cores = self._make_result_button(ResistorLab, "Result4cores", (100, 340, 75, 23))
        self.Result4cores.setAutoDefault(True)
        self.Result5cores_3 = self._make_result_button(ResistorLab, "Result5cores_3", (220, 340, 75, 23))

        # Connect signals only after every widget exists.  The generated
        # code connected before creating all combos, so the first
        # currentIndexChanged emissions (fired by addItem on an empty combo)
        # ran slots that referenced widgets not built yet and raised
        # AttributeError while setupUi was still running.
        for combo in (self.FirstColor4, self.SecondColor4, self.ThirdColor4):
            combo.currentIndexChanged.connect(self.Total4)
        self.FourthColor4.currentIndexChanged.connect(self.Tolerancia4)
        for combo in (self.FirstColor5, self.SecondColor5,
                      self.ThirdColor5, self.FourthColor5):
            combo.currentIndexChanged.connect(self.Total5)
        self.FifthColor5.currentIndexChanged.connect(self.Tolerancia5)

        self.retranslateUi(ResistorLab)
        QtCore.QMetaObject.connectSlotsByName(ResistorLab)

    def retranslateUi(self, ResistorLab):
        """Fill in every user-visible string (combo items, labels, buttons)."""
        ResistorLab.setWindowTitle(_translate("ResistorLab", "WizardPage", None))
        for combo in (self.FirstColor4, self.SecondColor4,
                      self.FirstColor5, self.SecondColor5, self.ThirdColor5):
            self._set_item_texts(combo, self._DIGIT_COLORS)
        for combo in (self.ThirdColor4, self.FourthColor5, self.FifthColor5):
            self._set_item_texts(combo, self._MULTIPLIER_COLORS)
        self._set_item_texts(self.FourthColor4, self._TOLERANCE_COLORS)
        self.Cores4.setText(_translate("ResistorLab", "4 CORES", None))
        self.Cores5.setText(_translate("ResistorLab", "5 CORES", None))
        self.Titulo.setText(_translate("ResistorLab", "Resistor Lab", None))
        self.Result5cores.setText(_translate("ResistorLab", "Valor ", None))
        self.Result4cores.setText(_translate("ResistorLab", "Valor", None))
        self.Result5cores_2.setText(_translate("ResistorLab", "Tolerância", None))
        self.Result5cores_3.setText(_translate("ResistorLab", "Tolerância", None))

    @staticmethod
    def _set_item_texts(combo, names):
        """Assign translated item texts, index by index."""
        for index, name in enumerate(names):
            combo.setItemText(index, _translate("ResistorLab", name, None))

    @staticmethod
    def _multiplier(index):
        """Multiplier for a multiplier-band combo index: 0-7 are powers of
        ten, 8/9 are gold (0.1) / silver (0.01)."""
        if index == 8:
            return 0.1
        if index == 9:
            return 0.01
        return 10 ** index

    def Total4(self):
        """Recompute and display the 4-band resistor value in ohms."""
        # Combo index equals digit value, so the two bands form a 2-digit number.
        digits = self.FirstColor4.currentIndex() * 10 + self.SecondColor4.currentIndex()
        total = digits * self._multiplier(self.ThirdColor4.currentIndex())
        self.Result4cores.setStyleSheet('color: black')
        self.Result4cores.setText('{0}'.format(total))

    def Total5(self):
        """Recompute and display the 5-band resistor value in ohms."""
        # Three significant digits: hundreds, tens, units.  The original
        # weighted the third band by 10 as well, double-counting the tens
        # place and dropping the units digit.
        digits = (self.FirstColor5.currentIndex() * 100
                  + self.SecondColor5.currentIndex() * 10
                  + self.ThirdColor5.currentIndex())
        total = digits * self._multiplier(self.FourthColor5.currentIndex())
        self.Result5cores.setStyleSheet('color: black')
        self.Result5cores.setText('{0}'.format(total))

    def Tolerancia4(self):
        """Display the tolerance (%) selected in the 4-band section."""
        toleran = self._TOLERANCE_VALUES[self.FourthColor4.currentIndex()]
        self.Result5cores_3.setStyleSheet('color: black')
        self.Result5cores_3.setText('{0}'.format(toleran))

    def Tolerancia5(self):
        """Display the tolerance (%) selected in the 5-band section."""
        index = self.FifthColor5.currentIndex()
        if index >= 8:
            # DOURADO/PRATEADO previously fell through every branch and
            # raised UnboundLocalError; map them to the standard 5% / 10%.
            toleran = 5 if index == 8 else 10
        else:
            # NOTE(review): indices 0-7 keep the original mapping, which is
            # shifted relative to FifthColor5's colour list (PRETO -> 1%,
            # MARROM -> 2%, ...); confirm the intended colour/tolerance pairing.
            toleran = self._TOLERANCE_VALUES[index]
        self.Result5cores_2.setStyleSheet('color: black')
        self.Result5cores_2.setText('{0}'.format(toleran))
        print("Rodou com sucesso")  # debug trace kept from the original
# Launch the calculator when this file is executed directly.
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    win = Ui_ResistorLab()
    win.show()
    # exec_() blocks until the window closes; its status becomes the exit code.
    sys.exit(app.exec_())
| LucasMatBorges/ProjetoFinal_DesignSoftware | DesignCircuitLab.py | DesignCircuitLab.py | py | 32,163 | python | en | code | 0 | github-code | 36 |
6070176870 | # encoding=utf-8
'''
Author: Haitaifantuan
Create Date: 2020-09-08 23:47:11
Author Email: 47970915@qq.com
Description: Should you have any question, do not hesitate to contact me via E-mail.
'''
import numpy as np
import random
import time
class First_Visit_Monte_Carlo_Policy_Evaluation(object):
    """First-visit Monte-Carlo evaluation of the uniformly random policy on
    a 4x4 grid world.

    Terminal cells are (0, 0) and (3, 3); every non-terminal step costs -1.
    ``fire_calculation`` samples ``self.episode`` episodes and averages the
    first-visit returns into ``self.averaged_state_action_value_dict``,
    keyed by the string "<row><col><action index>".
    """

    def __init__(self):
        self.total_rows = 4
        self.total_columns = 4
        self.total_action_num = 4  # 0=up, 1=right, 2=down, 3=left
        self.reward_each_step = -1
        self.action_dict = {0: '上', 1: '右', 2: '下', 3: '左'}
        self.reversed_action_dict = {'上': 0, '右': 1, '下': 2, '左': 3}
        # Each action owns a 25-wide slice of [0, 100); a uniform random
        # integer therefore selects every action with probability 0.25.
        self.four_action_probability = {'上': range(0, 25), '右': range(25, 50), '下': range(50, 75), '左': range(75, 100)}
        # (row delta, column delta) applied by each action.
        self.idx_change_dict = {'上': (-1, 0), '右': (0, 1), '下': (1, 0), '左': (0, -1)}
        self.episode = 100000  # number of episodes to sample
        # Zero grid printed only as a visual reminder of the maze layout.
        maze = np.zeros((self.total_rows, self.total_columns))
        print(maze)

    def get_current_reward_and_next_state(self, current_state, action):
        """Return (reward, next_state) for taking ``action`` in ``current_state``."""
        row_idx, column_idx = current_state
        next_row_idx = row_idx + self.idx_change_dict[action][0]
        next_column_idx = column_idx + self.idx_change_dict[action][1]
        # Stepping onto a terminal cell yields reward 0 and ends there.
        if next_row_idx == 0 and next_column_idx == 0:
            return 0, (0, 0)
        if next_row_idx == 3 and next_column_idx == 3:
            return 0, (3, 3)
        # Moves off the grid bounce back to the current cell.
        if next_row_idx < 0 or next_row_idx > self.total_rows - 1 or next_column_idx < 0 or next_column_idx > self.total_columns - 1:
            return self.reward_each_step, (row_idx, column_idx)
        return self.reward_each_step, (next_row_idx, next_column_idx)

    def generate_initial_state(self, total_rows, total_columns):
        """Uniformly pick a non-terminal start cell."""
        row_idx = random.randint(0, total_rows - 1)
        column_idx = random.randint(0, total_columns - 1)
        while (row_idx == 0 and column_idx == 0) or (row_idx == 3 and column_idx == 3):
            row_idx = random.randint(0, total_rows - 1)
            column_idx = random.randint(0, total_columns - 1)
        return (row_idx, column_idx)

    def generate_one_episode_data(self, init_state):
        """Roll out one episode from ``init_state`` under the random policy.

        Returns a list of (state, action_index, reward) tuples followed by a
        final (terminal_state, None, None) marker.
        """
        one_episode_data = []
        current_state = init_state
        while not ((current_state[0] == 0 and current_state[1] == 0) or (current_state[0] == 3 and current_state[1] == 3)):
            # Draw an action via the 25%-per-action partition of [0, 100).
            rand_int = random.randint(0, 99)
            for action_name, bucket in self.four_action_probability.items():
                if rand_int in bucket:
                    action = action_name
                    break
            reward, next_state = self.get_current_reward_and_next_state(current_state, action)
            one_episode_data.append((current_state, self.reversed_action_dict[action], reward))
            current_state = next_state
        # Append the terminal state so callers can see where the episode ended.
        one_episode_data.append((current_state, None, None))
        return one_episode_data

    def fire_calculation(self):
        """Estimate Q(s, a) by first-visit Monte-Carlo return averaging."""
        begin_time = time.time()
        # Accumulated return and visit count per state-action key, all episodes.
        final_state_action_reward_dict = {}
        final_state_action_count_dict = {}
        for episode in range(self.episode):
            # Sample one episode; keep it in a local instead of the original
            # ever-growing dict of all episodes (unbounded memory, never reread).
            init_state = self.generate_initial_state(self.total_rows, self.total_columns)
            episode_data = self.generate_one_episode_data(init_state)
            # First-visit return per state-action pair within this episode.
            first_visited = set()
            state_action_reward_dict = {}
            for idx, eachTuple in enumerate(episode_data):
                # The trailing terminal entry carries no action or reward.
                if idx == len(episode_data) - 1:
                    break
                state_action_combination = str(eachTuple[0][0]) + str(eachTuple[0][1]) + str(eachTuple[1])
                # Every pair already seen accumulates this step's reward,
                # building the return from each pair's first visit onward.
                for key in state_action_reward_dict:
                    state_action_reward_dict[key] += eachTuple[2]
                if state_action_combination not in first_visited:
                    first_visited.add(state_action_combination)
                    state_action_reward_dict[state_action_combination] = eachTuple[2]
            # Fold this episode's first-visit returns into the global totals.
            for state_action, reward in state_action_reward_dict.items():
                if state_action not in final_state_action_reward_dict:
                    final_state_action_reward_dict[state_action] = reward
                    final_state_action_count_dict[state_action] = 1
                else:
                    final_state_action_reward_dict[state_action] += reward
                    final_state_action_count_dict[state_action] += 1
            if episode % 100 == 0:
                print("第{}个episode已完成=====已花费{}分钟".format(episode, (time.time() - begin_time) / 60))
        # Average the accumulated returns.  Pairs that were never sampled are
        # simply absent; more episodes (or exploring starts, see Sutton &
        # Barto 2nd ed. section 5.4) would be needed to cover them.
        self.averaged_state_action_value_dict = {}
        for state_action, reward in final_state_action_reward_dict.items():
            self.averaged_state_action_value_dict[state_action] = reward / final_state_action_count_dict[state_action]

    def show_policy(self):
        """Print the Q-values regrouped as {"rowcol": {action_name: value}}."""
        policy_dict = {}
        for state_action, value in self.averaged_state_action_value_dict.items():
            state_key = state_action[0:2]
            action_name = self.action_dict[int(state_action[2])]
            policy_dict.setdefault(state_key, {})[action_name] = value
        print(policy_dict)
# Guarded so importing this module no longer kicks off the full
# 100000-episode sampling run as an import side effect.
if __name__ == "__main__":
    obj = First_Visit_Monte_Carlo_Policy_Evaluation()
    obj.fire_calculation()
    obj.show_policy()
35479998073 | import pandas as pd
# Join per-character replay rows with replay-level and hero-level metadata.
replay_data = pd.read_csv('ReplayCharacters 2015-12-30 - 2016-01-29.csv')
hero_info = pd.read_csv('hero_info.csv')
replay_info = pd.read_csv('Replays 2015-12-30 - 2016-01-29.csv')
map_info = pd.read_csv('map_info.csv')  # NOTE(review): loaded but never used below
# Left-join so every character row is kept even without matching metadata.
all_games = replay_data.merge(replay_info, how='left', on='ReplayID')
all_games = all_games.merge(hero_info, how='left', on='HeroID')
print('Merging done')
# Work on a 100k-row sample of the merged frame.
a = all_games.head(100000)
# NOTE(review): .groupby([]) raises ValueError ("No group keys passed") at
# runtime — the grouping key looks unfinished; confirm the intended key and
# aggregation before relying on this line.
a[['ReplayID', 'PrimaryName']].loc[a['PrimaryName'] == 'Leoric'].groupby([])
31746754037 | from django.core.exceptions import ValidationError
from rest_framework import serializers
from reviews.models import Category, Comment, Genre, Review, Title, User
class ConfirmationTokenSerializer(serializers.Serializer):
    """Validate the confirmation payload that completes user registration."""

    # Both the username and the emailed confirmation code are mandatory.
    username = serializers.CharField(required=True)
    confirmation_code = serializers.CharField(required=True)
class RegistrationSerializer(serializers.Serializer):
    """Validate the sign-up payload (email + username).

    Raises:
        serializers.ValidationError: if the reserved username ``"me"`` is
        submitted.
    """

    email = serializers.EmailField(required=True)
    username = serializers.CharField(required=True)

    def validate(self, data):
        # Raise DRF's ValidationError (not django.core's) so the client gets
        # a proper 400 response; a DjangoValidationError raised from a
        # serializer's validate() is not translated by DRF and surfaces as a
        # server error.
        if data["username"] == "me":
            raise serializers.ValidationError("Пользователь не может иметь имя 'me'")
        return data
class UserSerializer(serializers.ModelSerializer):
    """Expose the user profile fields, including the moderation role."""

    class Meta:
        model = User
        # Fields visible/editable through the users API.
        fields = (
            "username",
            "first_name",
            "last_name",
            "email",
            "bio",
            "role",
        )
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for categories."""

    class Meta:
        model = Category
        fields = ("name", "slug")
        # Categories are addressed by slug rather than pk in URLs.
        lookup_field = 'slug'
class GenreSerializer(serializers.ModelSerializer):
    """Serializer for genres."""

    class Meta:
        model = Genre
        fields = ("name", "slug")
        # NOTE(review): unlike CategorySerializer there is no lookup_field
        # here -- confirm whether genre URLs are meant to be slug-addressed.
class WriteTitleSerializer(serializers.ModelSerializer):
    """Serializer for write requests (POST/PATCH) on titles."""

    # On write requests the category and genres are referenced by slug.
    category = serializers.SlugRelatedField(
        slug_field="slug",
        queryset=Category.objects.all(),
    )
    genre = serializers.SlugRelatedField(
        many=True,
        slug_field="slug",
        queryset=Genre.objects.all(),
    )
    rating = serializers.SerializerMethodField()

    class Meta:
        model = Title
        fields = "__all__"

    def get_rating(self, obj):
        """Return 0 after creation."""
        # A freshly created title has no reviews yet, hence a fixed zero.
        return 0
class ReadTitleSerializer(serializers.ModelSerializer):
    """Serializer for read requests (GET) on titles."""

    # Nested read-only representations for output.
    genre = GenreSerializer(many=True)
    category = CategorySerializer()
    rating = serializers.SerializerMethodField()

    class Meta:
        model = Title
        fields = "__all__"
        read_only_fields = ("name", "year", "description", "genre", "category")

    def get_rating(self, obj):
        """Return object rating calculated in viewset."""
        # `rating` is expected to be annotated onto the queryset by the
        # viewset -- TODO confirm against the viewset's annotation.
        return obj.rating
class ReviewSerializer(serializers.ModelSerializer):
    """Serializer for reviews."""

    # NOTE(review): max is 10 but min is 0 -- confirm the intended scale
    # (some rating APIs require 1-10).
    score = serializers.IntegerField(max_value=10, min_value=0)
    author = serializers.SlugRelatedField(
        slug_field="username",
        read_only=True,
        default=serializers.CurrentUserDefault(),  # newly added
    )

    class Meta:
        model = Review
        fields = ("id", "text", "author", "score", "pub_date")

    def validate(self, attrs):
        """Check that each author can have only one review
        for particular title.
        """
        # Updates (PATCH/PUT) legitimately target an existing review, so the
        # duplicate check only applies to creation.
        if not self.context["request"].method == "POST":
            return attrs
        if Review.objects.filter(
            title_id=self.context["view"].kwargs.get("title_id"),
            author=self.context["request"].user,
        ).exists():
            raise serializers.ValidationError(
                (
                    "Автор может оставлять ревью на каждое произведение "
                    "только один раз"
                )
            )
        return attrs
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for comments."""

    # The author is taken from the request user, never set by the client.
    author = serializers.SlugRelatedField(
        slug_field="username",
        read_only=True,
    )

    class Meta:
        model = Comment
        fields = ("id", "text", "author", "pub_date")
| GenVas/yamdb_final | api/serializers.py | serializers.py | py | 3,954 | python | en | code | 1 | github-code | 36 |
18664116649 | from pathlib import Path
import joblib
# Toggle between the Kaggle runtime layout and a local checkout.
IS_KAGGLE = True
if IS_KAGGLE:
    DATA_DIR = Path("/kaggle/working/chap5-data")
    OUTPUT_DIR = Path("/kaggle/working/")
else:
    DATA_DIR = Path("../data")  # Path(os.getenv("QQP_DATA_DIR", "/data"))
    OUTPUT_DIR = Path("../outputs")
# Dataset file locations.
INPUT_DIR = DATA_DIR / "input"
TRAIN_CSV_PATH = INPUT_DIR / "train.csv"
TEST_CSV_PATH = INPUT_DIR / "test.csv"
EMBEDDING_DIR = DATA_DIR / "embeddings"
GLOVE_PATH = EMBEDDING_DIR / "glove.840B.300d.bin"
# Disk-backed cache for feature computations.
FEATURE_MEMORY = joblib.Memory(DATA_DIR / "cache")
SPLIT_RANDOM_SEED = 1
EPS = 1e-10
NUM_PROCESSES = 4 # int(os.getenv("NUM_PROCESSES", 1))
# Row counts of the Quora Question Pairs train/test CSVs; dry-run uses a
# small subset.
NUM_TRAIN_SAMPLES = 404290
NUM_TEST_SAMPLES = 2345796
NUM_DRYRUN_SAMPLES = 1000
| room-208/Kaggle-Gokui-Book | chap5/common/constants.py | constants.py | py | 708 | python | en | code | 0 | github-code | 36 |
29053052757 | from db import session, UniqueVictims, Victims
from sqlalchemy.sql import func
def calc_totals():
    """
    Calculate the total time frame the IP appears in and the udp/tcp/icmp packet
    count as well as the packets/s rate
    :return: None -- results are written onto the UniqueVictims rows and committed.
    """
    all_victims = session.query(UniqueVictims).all()
    for victim in all_victims:
        ip = victim.ip
        # Number of Victims rows (observed time windows) for this IP.
        victim.time_frame_count = session.query(Victims).filter_by(ip=ip).count()
        # Per-protocol totals restricted to this IP -- presumably emitted as
        # SUM(...) FILTER (WHERE ip = :ip); requires a backend supporting
        # aggregate FILTER (e.g. PostgreSQL) -- TODO confirm.
        victim.tcp_count = session.query(func.sum(Victims.tcp_count).filter(Victims.ip == ip)).scalar()
        victim.udp_count = session.query(func.sum(Victims.udp_count).filter(Victims.ip == ip)).scalar()
        victim.icmp_count = session.query(func.sum(Victims.icmp_count).filter(Victims.ip == ip)).scalar()
        # NOTE(review): assumes each Victims row covers a 60-second window and
        # that the sums are never None (every UniqueVictims ip also appears in
        # Victims) -- confirm, otherwise this line can raise TypeError.
        victim.rate = (victim.udp_count + victim.tcp_count + victim.icmp_count)/(victim.time_frame_count * 60)
    session.commit()
# Allow running the aggregation as a standalone script.
if __name__ == '__main__':
    calc_totals()
| Kbman99/DDoS-Detection | calculate.py | calculate.py | py | 934 | python | en | code | 1 | github-code | 36 |
34002187294 | import os
from app import app
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
from pneumonia_prediction import predict
import tensorflow as tf
# Only image uploads with these extensions are accepted.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
# Hide GPUs so TensorFlow runs on CPU only.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
physical_devices = tf.config.list_physical_devices('CPU')
# Load the segmentation model once at startup, shared by all requests.
mariaunet = tf.keras.models.load_model('mariaunet')
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
@app.route('/')
def upload_form():
    """
    Render the upload landing page (GET /).
    """
    return render_template('upload.html')
@app.route('/', methods=['POST'])
def upload_image():
    """
    Accept an X-ray upload, run the pneumonia model on it, and display it
    with the tensorflow model prediction.
    """
    # Reject requests without a multipart file field.
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    elif file and allowed_file(file.filename):
        filename = secure_filename(file.filename)  # NOTE(review): computed but unused
        # NOTE(review): every upload is saved under the fixed name 'xray.jpg',
        # so concurrent users overwrite each other -- presumably a
        # single-user demo; confirm before deploying multi-user.
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], 'xray.jpg')
        if os.path.exists(filepath):
            os.remove(filepath)
        file.save(filepath)
        flash('Image successfully uploaded and displayed below')
        # Runs the model and (presumably) writes the annotated image back to
        # the same path -- TODO confirm predict()'s side effects.
        predict(filepath, mariaunet)
        return render_template('upload.html', filename='xray.jpg')
    else:
        flash('Allowed image types are -> png, jpg, jpeg')
        return redirect(request.url)
@app.route('/display/<filename>')
def display_image(filename):
    """
    Redirect to the uploaded image in the static folder.
    """
    # NOTE(review): 301 is a *permanent* redirect and may be cached by
    # browsers even after the image changes -- confirm 302 is not preferable.
    return redirect(url_for('static', filename='uploads/' + filename), code=301)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
| Nathanael-Mariaule/Pneumonia_Detection | flask_app/main.py | main.py | py | 1,710 | python | en | code | 0 | github-code | 36 |
40423351728 | from django.urls import path, include
from . import views
from rest_framework import routers
# Single DRF router exposing every API resource under the app root.
route = routers.DefaultRouter()
route.register("user", views.UserViewSet, basename='user')
route.register("tuyenxe", views.TuyenXeViewset, basename='tuyenxe')
route.register("chuyenxe", views.ChuyenXeViewset, basename='chuyenxe')
route.register("datve", views.DatVeViewset, basename='datve')
route.register(prefix='comments', viewset=views.CommentViewSet, basename='comment')
# route.register("thongke", views.ThongKeViewSet, basename='thongke')
# All routes generated by the router are mounted at the URL root.
urlpatterns = [
    path('', include(route.urls)),
]
| TamHoang1512/backend-django | QuanLyXeKhach/quanly/urls.py | urls.py | py | 595 | python | en | code | 0 | github-code | 36 |
27744894909 | import numpy as np
import pickle
import string
import sys
import math
class RNN:
    def __init__(self, input_dim, output_dim, sentence_length, initializer="normal", optimizer="gd", hidden_dim=64, learning_rate=0.001, momentum=0.9, beta=0.9):
        """Build a vanilla RNN classifier.

        Args:
            input_dim: size of each per-timestep input vector.
            output_dim: number of output classes.
            sentence_length: fixed number of timesteps per sample.
            initializer: "normal" or "xavier" weight initialization.
            optimizer: "gd", "momentum" or "rmsprop".
            hidden_dim: size of the recurrent hidden state.
            learning_rate: step size for all parameter updates.
            momentum: beta1, the averaging factor for the momentum optimizer.
            beta: beta2, the averaging factor for RMSProp.

        Raises:
            ValueError: if ``optimizer`` or ``initializer`` is unrecognized.
        """
        # Checking if the optimizer is a valid optimizer
        valid_optimizers = ["gd", "momentum", "rmsprop"]
        try:
            assert(optimizer in valid_optimizers)
        except:
            print("Available optimizers are : {}".format(valid_optimizers))
            raise ValueError("Cannot recognize optimizer : {}".format(optimizer))
        # Checking if the initializer is a valid initializer
        valid_initializers = ["normal", "xavier"]
        try:
            assert(initializer in valid_initializers)
        except:
            print("Available initializers are : {}".format(valid_initializers))
            raise ValueError("Cannot recognize initializer : {}".format(initializer))
        # Count of optimizer steps taken so far; used to bias-correct the
        # momentum averages in backPropagate.
        self.minibatches = 1
        self.beta1 = momentum
        self.beta2 = beta
        self.optimizer = optimizer
        self.initializer = initializer
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.learning_rate = learning_rate
        self.sentence_length = sentence_length
        # Calculating xavier initializer constants
        if(self.initializer == "xavier"):
            whx = math.sqrt(6) / math.sqrt(self.hidden_dim + self.input_dim)
            whh = math.sqrt(6) / math.sqrt(self.hidden_dim + self.hidden_dim)
            wyh = math.sqrt(6) / math.sqrt(self.output_dim + self.hidden_dim)
            # Creating the initial weights
            self.Whx = np.random.uniform(-whx, whx, (hidden_dim, input_dim))
            self.Whh = np.random.uniform(-whh, whh, (hidden_dim, hidden_dim))
            self.Wyh = np.random.uniform(-wyh, wyh, (output_dim, hidden_dim))
            # Creating the initial biases
            self.bh = np.random.normal(0, 1, (hidden_dim, 1))
            self.by = np.random.normal(0, 1, (output_dim, 1))
        elif(self.initializer == "normal"):
            # Creating the initial weights
            self.Whx = np.random.normal(0, 1, (hidden_dim, input_dim))
            self.Whh = np.random.normal(0, 1, (hidden_dim, hidden_dim))
            self.Wyh = np.random.normal(0, 1, (output_dim, hidden_dim))
            # Creating the initial biases
            self.bh = np.random.normal(0, 1, (hidden_dim, 1))
            self.by = np.random.normal(0, 1, (output_dim, 1))
        # Creating variables to store exponential averages for momentum
        if(self.optimizer == "momentum"):
            self.dWhx = np.zeros((hidden_dim, input_dim))
            self.dWhh = np.zeros((hidden_dim, hidden_dim))
            self.dWyh = np.zeros((output_dim, hidden_dim))
            self.dbh = np.zeros((hidden_dim, 1))
            self.dby = np.zeros((output_dim, 1))
        # Creating variables to save exponential averages for RMS prop
        elif(self.optimizer == "rmsprop"):
            self.sWhx = np.zeros((hidden_dim, input_dim))
            self.sWhh = np.zeros((hidden_dim, hidden_dim))
            self.sWyh = np.zeros((output_dim, hidden_dim))
            self.sbh = np.zeros((hidden_dim, 1))
            self.sby = np.zeros((output_dim, 1))
# Performs feed forward on the RNN
def forward(self, data):
# Getting dimensions from the input
try:
assert(data.shape[0] == self.sentence_length)
assert(data.shape[1] == self.input_dim)
except:
raise ValueError("Expected data with dims of : {} but got data with dims : {}").format(
(self.sentence_length, self.input_dim, None), data.shape)
batch_size = data.shape[2]
# Fixing the initial state as a zero vector
h = np.zeros((self.hidden_dim, batch_size))
# Storing all states
states = [h]
for words in data:
assert(np.array(words).shape == (self.input_dim, batch_size))
h = np.tanh(np.matmul(self.Whh, h) +
np.matmul(self.Whx, words) + self.bh)
states.append(h)
# Shape of states : (sentence_length + 1, self.hidden_dim, batch_size)
states = np.array(states)
# Calculating the output using state
# Shape (self.output_dim, batch_size)
output = np.matmul(self.Wyh, states[-1]) + self.by
output = np.array(self.softmax(output)).reshape(
(self.output_dim, batch_size))
return output, states
# Calculates softmax of the outputs
def softmax(self, data):
# Clipping the data
np.clip(data, -200, 200, out=data)
# Calculating softmax
data = np.exp(data)
data /= np.sum(data, axis=0)
return data
    # Performs back propagation
    def backPropagate(self, train_X, train_Y, preds, states, batch_size):
        """Backpropagate cross-entropy gradients through time and update
        the weights with the configured optimizer.

        Args:
            train_X: inputs of shape (sentence_length, input_dim, batch).
            train_Y: integer class labels, one per sample in the batch.
            preds: softmax outputs from forward(), shape (output_dim, batch).
            states: hidden states from forward().
            batch_size: unused here; kept for the caller's signature.

        NOTE(review): ``preds`` is modified in place through the transposed
        view below (softmax - one-hot); train() computes its loss before
        calling this, so that is currently safe.
        """
        # Shape (self.output_dim, batch_size)
        dL_dY = preds.T
        for index, pred in enumerate(dL_dY):
            pred[train_Y[index]] -= 1
        dL_dY = dL_dY.T
        # Timesteps
        T = len(states) - 1
        # Calculating gradients for Wyh and by
        # Shape (self.output_dim, self.hidden_dim)
        dL_dWyh = np.matmul(dL_dY, states[-1].T)
        dL_dby = np.sum(dL_dY, axis=1).reshape(self.output_dim, 1)
        # Calculating gradients for Whx, Whh, bh
        dL_dWhh = np.zeros(shape=self.Whh.shape)
        dL_dWhx = np.zeros(shape=self.Whx.shape)
        dL_dbh = np.zeros(shape=self.bh.shape)
        # Calculating dL / dh
        # Shape(self.hidden_dim, batch_size)
        dL_dh = np.matmul(self.Wyh.T, dL_dY)
        for t in reversed(range(T)):
            # (1 - h^2) is the tanh derivative at each state.
            dL_dWhh += np.matmul(dL_dh *
                                 (1 - (states[t + 1] ** 2)), states[t].T)
            dL_dWhx += np.matmul(dL_dh *
                                 (1 - (states[t + 1] ** 2)), train_X[t].T)
            dL_dbh += np.sum(dL_dh * (1 - (states[t + 1] ** 2)),
                             axis=1).reshape(self.hidden_dim, 1)
            # Updating dL_dh
            # NOTE(review): textbook BPTT propagates through Whh.T here, not
            # Whh; the shapes agree because Whh is square -- confirm the
            # intended derivation.
            dL_dh = np.matmul(self.Whh, dL_dh * (1 - states[t + 1] ** 2))
        if(self.optimizer == "gd"):
            # Clipping the gradients for exploding gradients
            for updates in [dL_dWhh, dL_dWhx, dL_dWyh, dL_dbh, dL_dby]:
                np.clip(updates, -1, 1, out=updates)
            # Updating the weights and biases
            self.Whh -= self.learning_rate * dL_dWhh
            self.Whx -= self.learning_rate * dL_dWhx
            self.Wyh -= self.learning_rate * dL_dWyh
            self.bh -= self.learning_rate * dL_dbh
            self.by -= self.learning_rate * dL_dby
        # Applying momentum and normalizing
        elif(self.optimizer == "momentum"):
            # Calculating exponential averages
            # NOTE(review): the bias-corrected value is stored back into the
            # running average, so the correction compounds across steps --
            # this differs from the standard momentum formulation; confirm.
            normalization_factor = 1 - (self.beta1 ** min(self.minibatches, 100))
            self.dWhh = (self.beta1 * self.dWhh + (1 - self.beta1) * (dL_dWhh)) / normalization_factor
            self.dWhx = (self.beta1 * self.dWhx + (1 - self.beta1) * (dL_dWhx)) / normalization_factor
            self.dWyh = (self.beta1 * self.dWyh + (1 - self.beta1) * (dL_dWyh)) / normalization_factor
            self.dbh = (self.beta1 * self.dbh + (1 - self.beta1) * (dL_dbh)) / normalization_factor
            self.dby = (self.beta1 * self.dby + (1 - self.beta1) * (dL_dby)) / normalization_factor
            # Clipping the gradients for exploding gradients
            for updates in [self.dWhh, self.dWhx, self.dWyh, self.dbh, self.dby]:
                np.clip(updates, -1, 1, out=updates)
            # Updating the weights and biases
            self.Whh -= self.learning_rate * self.dWhh
            self.Whx -= self.learning_rate * self.dWhx
            self.Wyh -= self.learning_rate * self.dWyh
            self.bh -= self.learning_rate * self.dbh
            self.by -= self.learning_rate * self.dby
        # Applying RMS Prop
        elif(self.optimizer == "rmsprop"):
            self.sWhh = (self.beta2 * self.sWhh + (1 - self.beta2) * (dL_dWhh ** 2))
            self.sWhx = (self.beta2 * self.sWhx + (1 - self.beta2) * (dL_dWhx ** 2))
            self.sWyh = (self.beta2 * self.sWyh + (1 - self.beta2) * (dL_dWyh ** 2))
            self.sbh = (self.beta2 * self.sbh + (1 - self.beta2) * (dL_dbh ** 2))
            self.sby = (self.beta2 * self.sby + (1 - self.beta2) * (dL_dby ** 2))
            # Clipping the gradients for exploding gradients
            for updates in [self.sWhh, self.sWhx, self.sWyh, self.sbh, self.sby]:
                np.clip(updates, -1, 1, out=updates)
            # Updating the weights and biases
            self.Whh -= self.learning_rate * (dL_dWhh / np.sqrt(self.sWhh))
            self.Whx -= self.learning_rate * (dL_dWhx / np.sqrt(self.sWhx))
            self.Wyh -= self.learning_rate * (dL_dWyh / np.sqrt(self.sWyh))
            self.bh -= self.learning_rate * (dL_dbh / np.sqrt(self.sbh))
            self.by -= self.learning_rate * (dL_dby / np.sqrt(self.sby))
    def train(self, train_X, train_Y, test_X, test_Y, epochs, verbose=False, batch_size=32):
        """Train the network with minibatch gradient descent.

        Args:
            train_X: array-like of shape (n_train, sentence_length, input_dim).
            train_Y: integer class labels for the training samples.
            test_X: array-like of shape (n_test, sentence_length, input_dim).
            test_Y: integer class labels for the testing samples.
            epochs: number of passes over the training data.
            verbose: print train/test metrics every ~1% of epochs.
            batch_size: minibatch size (capped at half the training size).

        Returns:
            (losses, correct_ans): per-epoch training loss and per-epoch
            training accuracy in percent.
        """
        # Checking params
        train_X = np.array(train_X)
        train_Y = [int(target) for target in train_Y]
        test_X = np.array(test_X)
        test_Y = [int(target) for target in test_Y]
        # Checking that train_X and train_Y have equal data points
        try:
            assert(train_X.shape[0] == len(train_Y))
        except:
            print("Number of training samples in train_X and train_Y are not equal. Number of samples in train_X is {} and {} in train_Y".format(
                train_X.shape[0], len(train_Y)))
        # Checking that test_X and test_Y have equal data points
        try:
            assert(test_X.shape[0] == len(test_Y))
        except:
            print("Number of training samples in train_X and train_Y are not equal. Number of samples in train_X is {} and {} in train_Y".format(
                test_X.shape[0], len(test_Y)))
        # Calculating training and testing data size
        training_size = train_X.shape[0]
        testing_size = test_X.shape[0]
        # Conforming batch size to a maximum of training_size / 2
        # NOTE(review): true division can make batch_size a float here (when
        # training_size / 2 is the smaller value), and floats later fail as
        # slice indices on Python 3 -- confirm / consider integer division.
        batch_size = min(batch_size, training_size / 2)
        # Checking that the dimensions of the training data are correct
        # NOTE(review): these shape checks only print (they do not raise), and
        # the format strings have two placeholders for three arguments, so the
        # message shown is misleading.
        try:
            assert(train_X.shape[1] == self.sentence_length)
            assert(train_X.shape[2] == self.input_dim)
        except:
            print("Expected training data shape to be {} but got {}".format(
                training_size, self.sentence_length, self.input_dim))
        # Checking that the dimensions of the testing data are correct
        try:
            assert(test_X.shape[1] == self.sentence_length)
            assert(test_X.shape[2] == self.input_dim)
        except:
            print("Expected testing data shape to be {} but got {}".format(
                testing_size, self.sentence_length, self.input_dim))
        # Transposing the testing data
        test_X = np.transpose(test_X, (1, 2, 0))
        # Array to store metrics
        losses = []
        correct_ans = []
        log_frequency = max(int(float(epochs) / 100), 1)
        # Converting training and testing data into numpy arrays
        print("Size of training data : {}".format(
            sys.getsizeof(train_X) + sys.getsizeof(train_Y)))
        print("Size of testing data : {}".format(
            sys.getsizeof(test_X) + sys.getsizeof(test_Y)))
        # Splitting the training data into batches of size batchsize
        batches = int(math.ceil(float(training_size) / batch_size))
        batch_training_X = []
        batch_training_Y = []
        # Splitting training data into batches
        for batch in range(batches):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), training_size)
            temp_batch_size = end_index - start_index
            batch_train_X = train_X[start_index: end_index]
            # Creating an np array of size (batch_size, max_batch_length, self.hidden_dim)
            batch_train_X = np.array(batch_train_X).reshape(
                (temp_batch_size, self.sentence_length, self.input_dim))
            batch_train_Y = np.array(train_Y[start_index: end_index])
            batch_training_X.append(batch_train_X)
            batch_training_Y.append(batch_train_Y)
        # Checking if the correct number of batches were inserted
        assert(len(batch_training_X) == batches)
        # Deleting variables to clear RAM
        del train_X, train_Y
        # Training the net
        for epoch in range(epochs):
            loss = 0
            num_correct = 0
            # Iterating through each training batch
            for batch in range(batches):
                # Picking out one batch of training samples
                train_X = batch_training_X[batch]
                train_Y = batch_training_Y[batch]
                # Train_X from (batch_size, sentence_length, self.input_dim) to (sentence_length, self.input_dim, batch_size)
                train_X = np.transpose(train_X, (1, 2, 0))
                # Feed Forwarding
                preds, states = self.forward(train_X)
                # Cross-entropy of the true class per sample (computed before
                # backPropagate, which mutates preds in place).
                loss -= np.sum(np.log([pred[train_Y[index]]
                                       for index, pred in enumerate(preds.T)]))
                num_correct += np.sum(np.argmax(preds, axis=0) == train_Y)
                # Back propagating the error
                self.backPropagate(train_X, train_Y, preds, states, batch_size)
                # Updating the mini batch number
                self.minibatches += 1
            # Appending loss to training data
            losses.append(loss)
            correct_ans.append((float(num_correct) / training_size) * 100)
            # Printing loss and number of correctly classified values
            if(verbose and epoch % log_frequency == 0):
                print("\n\n\n Epoch : {} \n".format(epoch))
                print("TRAINING_DATA")
                print("Loss : {}".format(loss / training_size))
                print("Correctly classified : {} percent of data".format(
                    100 * float(num_correct) / training_size))
                # Resetting loss and correct answers
                loss = 0
                num_correct = 0
                # Feed Forwarding
                preds, states = self.forward(test_X)
                # Calculating the loss and correct classifications
                loss -= np.sum(np.log([pred[test_Y[index]]
                                       for index, pred in enumerate(preds.T)]))
                num_correct += np.sum(np.argmax(preds, axis=0) == test_Y)
                print(" ")
                print("TESTING_DATA")
                print("Loss : {}".format(loss / testing_size))
                print("Correctly classified : {} percent of data".format(
                    100 * float(num_correct) / testing_size))
        return losses, correct_ans
def summary(self):
total_params = (self.hidden_dim * self.hidden_dim) + (self.input_dim * self.hidden_dim) + (
self.output_dim * self.hidden_dim) + (self.hidden_dim) + (self.output_dim)
print(" ")
print(" ====================================================")
print(" Total trainable parameters : {}".format(total_params))
print(" Learning Rate : {}".format(self.learning_rate))
print(" Optimizer : {}".format(self.optimizer))
print(" Beta1 : {}".format(self.beta1))
print(" Beta2 : {}".format(self.beta2))
print(" Input dimension : {}".format(self.input_dim))
print(" Output dimension : {}".format(self.output_dim))
print(" Hidden dimension : {}".format(self.hidden_dim))
print(" ====================================================")
print(" ")
def save_weights(self, save_path):
weights = {
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'hidden_dim': self.hidden_dim,
'sentence_length': self.sentence_length,
'minibatches' : self.minibatches,
'Whh': self.Whh,
'Whx': self.Whx,
'Wyh': self.Wyh,
'bh': self.bh,
'by': self.by
}
weights_file = open(save_path, "wb")
pickle.dump(weights, weights_file)
print("Saved weights to path : {} successfully".format(save_path))
def load_weights(self, save_path):
weights_file = open(save_path, "rb")
weights = pickle.load(weights_file)
# Checking whether model created has input and output dims the same as the loaded file
if(self.input_dim != weights['input_dim'] or self.hidden_dim != weights['hidden_dim'] or self.output_dim != weights['output_dim']):
print(
"Warning : The dimensions of your current model and the loaded model do not match")
print("Your model dimensions : ")
print("Input Dimension : {}".format(self.input_dim))
print("Hidden Dimension : {}".format(self.hidden_dim))
print("Output Dimension : {} ".format(self.output_dim))
print(" Loaded model's dimensions")
print("Input Dimension : {}".format(weights['input_dim']))
print("Hidden Dimension : {}".format(weights['hidden_dim']))
print("Output Dimension : {} ".format(weights['output_dim']))
text = raw_input(" Enter [Y] to continue to load model : \t")
if(text != "Y" and text != "y"):
print(" ")
print("Aborting model loading")
sys.exit()
# Checking if the sentence length of loaded model matches the current model
if(weights['sentence_length'] != self.sentence_length):
print(
"Warning : The sentence length of your current model and the loaded model do not match")
text = raw_input(" Enter [Y] to continue to load model : \t")
if(text != "Y" and text != "y"):
print(" ")
print("Aborting model loading")
sys.exit()
# Resassigning weights to correct locations
self.Whh = weights['Whh']
self.Whx = weights['Whx']
self.Wyh = weights['Wyh']
self.bh = weights['bh']
self.by = weights['by']
self.minibatches = weights['minibatches']
self.input_dim = weights['input_dim']
self.output_dim = weights['output_dim']
self.hidden_dim = weights['hidden_dim']
self.sentence_length = weights['sentence_length']
print("Loaded weights from path : {} successfully".format(save_path))
def predict(self, words):
words = np.array(words)
data_size = words.shape[0]
try:
assert(words.shape[1] == self.sentence_length)
assert(words.shape[2] == self.input_dim)
except:
raise ValueError("Expected dimesion of input as {} but received dimensions {}".format(
(data_size, self.sentence_length, self.input_dim), words.shape))
inputs = words.reshape(data_size, self.sentence_length, self.input_dim)
inputs = np.transpose(inputs, (1, 2, 0))
preds, _ = self.forward(inputs)
return preds
| hrishikeshshekhar/Vanilla-RNN | rnn.py | rnn.py | py | 19,714 | python | en | code | 0 | github-code | 36 |
15149970678 | import Image
import PSDraw
#-*- coding:utf-8 -*-
def text2png(text):
    """Render *text* (UTF-8 bytes) onto a white PNG and save it under letter/.

    Python 2 / legacy PIL code: uses str.decode / unicode.encode and the
    top-level ``Image`` module namespace.
    """
    adtexts = [ ]  # extra "ad" lines appended in red; empty, so the loop below is a no-op
    textcolor = "#000000"
    adcolor = "#FF0000"
    import Image, ImageDraw, ImageFont, uuid
    ad = []
    for adtext in adtexts:
        ad += [(adtext.encode('gbk'), adcolor)]
    # Wrap the text into lines of at most 40 GBK bytes; '\n' forces a break.
    wraptext = [""]
    l = 0
    for i in text.decode('utf-8'):
        fi = i.encode('gbk')
        delta = len(fi)
        if i == '\n':
            wraptext += [""]
            l = 0
        elif l + delta > 40:
            wraptext += [fi]
            l = delta
        else:
            wraptext[-1] += fi
            l += delta
    wrap = [(text, textcolor) for text in wraptext]
    wrap += ad
    # NOTE(review): the canvas is only 15 px wide while lines can hold up to
    # 40 bytes of 16 pt text -- most text will be cropped; confirm intended.
    i = Image.new("RGB", (15, len(wrap) * 17 + 5), "#FFFFFF")
    d = ImageDraw.Draw(i)
    f = ImageFont.truetype("1.ttf", 16)
    # One 17-px-high row per wrapped line.
    for num, (text, color) in enumerate(wrap):
        d.text((2, 17 * num + 1), text.decode('gbk'), font = f, fill = color)
    # NOTE(review): `text` here is the Python-2-leaked loop variable from the
    # comprehension/for above (the last wrapped line), not the original
    # function argument -- the filename may not match the input; confirm.
    filename = text + ".png"
    with open( "letter/"+ filename, "wb") as s:
        i.save(s, "PNG")
    return filename
for i in range(ord("a"), ord("z")):
text2png(chr(i))
for i in range(ord("A"), ord("Z")+1):
text2png(chr(i))
| Entel/yeqin.me | model/picToAscii/letter.py | letter.py | py | 1,171 | python | en | code | 0 | github-code | 36 |
10923117030 | """This is my attempt to solve the random numbers challenge in python game"""
import random
# Display welcome message to player
print("Welcome Code Breaker! Lets see if you can break my 3 digit number!")
print("Code has been generated, please guess a 3 digit number ")
# NOTE(review): list(input(...)) yields single-character *strings*, while the
# secret code below holds *ints*, so guess != games_list is always True and
# the digit comparisons below never match -- the guess needs int conversion.
guess = list(input("What is your guess?: "))
print(guess)
# Create a range of numbers and shuffle to create a list of 3 random numbers
digits = list(range(10))
random.shuffle(digits)
# Create an empty list and assign numbers from the shuffled list to a new list
games_list = []
for x in digits[:3]:
    games_list.append(x)
playersMatch = [] # May not need this list
print(games_list)
while guess != games_list:
    for i in guess:
        if i == games_list[0] or i == games_list[1] or i == games_list[2]:
            # NOTE(review): i is a one-character string here, so indexing
            # games_list[i] raises TypeError if this branch is ever reached.
            print(games_list[i])
            print("Close")
        elif i != games_list[0] and i != games_list[1] and i != games_list[2]:
            print(i)
            print(games_list)
            print("Nope ")
            # NOTE(review): the new input is discarded (never assigned back
            # to guess), so this path loops forever on the same guess.
            list(input("Please try and enter a new guess "))
            continue
        else:
            print(games_list)
            print("Nope!")
            guess = list(input("Please try again to enter a correct digit: "))
# ----------------------------------------------- Below this line are possible functions and
# other code that may possibly be correct to use and solve the problem
# return a[-(len(b)):] == b or a == b[-(len(a)):]:
# for x in guess:
# if guess[0] == games_list[0] or guess[1] == games_list[1] or guess[2] == games_list[2]:
# print("close")
def code_cracker(my_list):
    """Placeholder checker: report when the third element of *my_list* is 1."""
    third = my_list[2]
    if third == 1:
        print("Don't do anything for now")
| BornRiot/Python_DjangoDev | python_LevelOne/P10_SimpleGame_MySolution.py | P10_SimpleGame_MySolution.py | py | 1,674 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.