text
stringlengths
0
27.1M
meta
dict
[STATEMENT] lemma wadjust_loop_start_Oc_via_Bk_move[simp]: "wadjust_loop_right_move2 m rs (c, Bk # list) \<Longrightarrow> wadjust_loop_start m rs (c, Oc # list)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. wadjust_loop_right_move2 m rs (c, Bk # list) \<Longrightarrow> wadjust_loop_start m rs (c, Oc # list) [PROOF STEP] apply(auto simp: wadjust_loop_right_move2.simps wadjust_loop_start.simps replicate_app_Cons_same) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>ml mr ln rn. \<lbrakk>c = Oc # Oc \<up> ml @ Bk # Oc # Oc \<up> m; list = Bk # Bk \<up> ln @ Oc \<up> mr @ Bk \<up> rn; ml + mr = Suc rs; 0 < mr\<rbrakk> \<Longrightarrow> \<exists>lna rna mla. Oc # Oc \<up> ml = Oc \<up> mla \<and> (\<exists>mra. Bk \<up> ln @ Oc \<up> mr @ Bk \<up> rn = Bk \<up> lna @ Oc \<up> mra @ Bk \<up> rna \<and> mla + mra = Suc (Suc rs) \<and> 0 < mra) [PROOF STEP] by (metis add_Suc replicate_Suc)
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Universal_Turing_Machine_UTM", "hexsha": null, "include": null, "lang": null, "length": 2, "llama_tokens": 430, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
# -*- coding: utf-8 -*- """ Created on Sun Apr 23 2017 Last update on Mon Apr 24 2017 @author: Michiel Stock Parsing the Ghent park network """ import json import geopandas as gpd import shapely import numpy as np if __name__=='__main__': # read roads in Ghent streets = gpd.read_file('Data/ex_SXXm38nTMVKwsPsrBjWKF5Q6ch5zi_osm_line.geojson') n_streets = len(streets) length_streets = streets.length # get all parks polygons = gpd.read_file('Data/ex_SXXm38nTMVKwsPsrBjWKF5Q6ch5zi_osm_polygon.geojson') parks = polygons.loc[polygons.leisure=='park'] parks = parks.loc[parks.geometry.area > 1e-5] # only parks larger than 100 m^2 # make graph edges = set([]) vertices_park = set([]) vertices = set([]) for i in range(n_streets): x, y = streets.iloc[i].geometry.coords.xy x0, xe = x[0], x[-1] y0, ye = y[0], y[-1] v0, v1 = (x0, y0), (xe, ye) length = float(length_streets.iloc[i]) edges.add((length, v0, v1)) vertices.add(v0) vertices.add(v1) if np.any(parks.contains(shapely.geometry.asPoint(v0))): vertices_park.add(v0) if np.any(parks.contains(shapely.geometry.asPoint(v1))): vertices_park.add(v1) # put in dictionaries coordinates = {str(i) : e for i, e in enumerate(vertices)} coor_to_ind = {v : k for k, v in coordinates.items()} ghent_graph = {} for w, c0, c1 in edges: v0 = coor_to_ind[c0] v1 = coor_to_ind[c1] if v0 not in ghent_graph: ghent_graph[v0] = set([(w, v1)]) else: ghent_graph[v0].add((w, v1)) if v1 not in ghent_graph: ghent_graph[v1] = set([(w, v0)]) else: ghent_graph[v1].add((w, v0)) # dump in json file ghent_data = { 'adjacency_list' : {k : list(v) for k, v in ghent_graph.items()}, 'coordinates' : coordinates, 'park vertices' : [coor_to_ind[c] for c in vertices_park] } json.dump(obj=ghent_data, fp=open('Data/graph_parks_ghent.json', 'w'))
{ "alphanum_fraction": 0.604784689, "author": null, "avg_line_length": 29.0277777778, "converted": null, "ext": "py", "file": null, "hexsha": "65001edf51736ee6e536d6a732a07bf186e242e7", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 18, "max_forks_repo_forks_event_max_datetime": "2022-02-05T20:12:03.000Z", "max_forks_repo_forks_event_min_datetime": "2018-01-21T15:23:51.000Z", "max_forks_repo_head_hexsha": "20f6b37566d23cdde0ac6b765ffcc5ed72a11172", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MichielStock/SelectedTopicsOptimization", "max_forks_repo_path": "Chapters/Old/05.ShortestPaths/make_Ghent_graph.py", "max_issues_count": 2, "max_issues_repo_head_hexsha": "20f6b37566d23cdde0ac6b765ffcc5ed72a11172", "max_issues_repo_issues_event_max_datetime": "2018-05-30T16:16:53.000Z", "max_issues_repo_issues_event_min_datetime": "2018-03-22T09:54:01.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MichielStock/SelectedTopicsOptimization", "max_issues_repo_path": "Chapters/Old/05.ShortestPaths/make_Ghent_graph.py", "max_line_length": 89, "max_stars_count": 22, "max_stars_repo_head_hexsha": "20f6b37566d23cdde0ac6b765ffcc5ed72a11172", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MichielStock/SelectedTopicsOptimization", "max_stars_repo_path": "Chapters/Old/05.ShortestPaths/make_Ghent_graph.py", "max_stars_repo_stars_event_max_datetime": "2022-03-02T18:51:40.000Z", "max_stars_repo_stars_event_min_datetime": "2017-03-21T14:01:10.000Z", "num_tokens": 650, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2090 }
from PIL import Image import cv2 import matplotlib.pyplot as plt import pandas as pd from src.utils import rle_utils as rle import numpy as np # Code modified from https://www.kaggle.com/dschettler8845/sartorius-segmentation-mask-dataset#create_dataset def get_img_and_mask(img_path, annotation, width, height, mask_only=False): """ Capture the relevant image array as well as the image mask """ img_mask = np.zeros((height, width), dtype=np.int16) for i, annot in enumerate(annotation): img_mask = np.where(rle.decode(annot, (height, width))!=0, i, img_mask) if mask_only: return img_mask img = np.array(Image.open(img_path), dtype=np.int16) return img, img_mask # Code from https://www.kaggle.com/dschettler8845/sartorius-segmentation-mask-dataset#create_dataset def plot_img_and_mask(img, mask): """ Function to take an image and the corresponding mask and plot Args: img (np.arr): 1 channel np arr representing the image of cellular structures mask (np.arr): 1 channel np arr representing the instance masks (incrementing by one) Returns: None; Plots the two arrays and overlays them to create a merged image """ plt.figure(figsize=(20,10)) plt.subplot(1,3,1) _img = np.tile(np.expand_dims(img, axis=-1), 3) plt.imshow(_img) plt.axis(False) plt.title("Cell Image", fontweight="bold") plt.subplot(1,3,2) _mask = np.zeros_like(_img) _mask[..., 0] = mask plt.imshow(mask, cmap="inferno") plt.axis(False) plt.title("Instance Segmentation Mask", fontweight="bold") merged = cv2.addWeighted(_img, 0.75, np.clip(_mask, 0, 1)*255, 0.25, 0.0,) plt.subplot(1,3,3) plt.imshow(merged) plt.axis(False) plt.title("Cell Image w/ Instance Segmentation Mask Overlay", fontweight="bold") plt.tight_layout() plt.show() # Code from https://www.kaggle.com/dschettler8845/sartorius-segmentation-mask-dataset#create_dataset def save_mask(row, path = ''): msk = get_img_and_mask(**row[["img_path", "annotation", "width", "height"]].to_dict(), mask_only=True) np.savez(path+"{}".format(row.id), msk)
{ "alphanum_fraction": 0.682626539, "author": null, "avg_line_length": 35.3709677419, "converted": null, "ext": "py", "file": null, "hexsha": "ebda1c655645b01a0b90c8dcdf7226e2be178583", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "af43132c6d5446310d0c1b5a68f8bae528e0ca8b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Pablo-snz/Cell-Instance-Segmentation", "max_forks_repo_path": "src/utils/mask_utils.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "af43132c6d5446310d0c1b5a68f8bae528e0ca8b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Pablo-snz/Cell-Instance-Segmentation", "max_issues_repo_path": "src/utils/mask_utils.py", "max_line_length": 109, "max_stars_count": 1, "max_stars_repo_head_hexsha": "af43132c6d5446310d0c1b5a68f8bae528e0ca8b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Pablo-snz/Cell-Instance-Segmentation", "max_stars_repo_path": "src/utils/mask_utils.py", "max_stars_repo_stars_event_max_datetime": "2021-11-04T08:29:26.000Z", "max_stars_repo_stars_event_min_datetime": "2021-11-04T08:29:26.000Z", "num_tokens": 594, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2193 }
import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_probability as tfp from tensorflow.keras import Model tfd = tfp.distributions tfb = tfp.bijectors def trainable_lu_factorization(event_size, batch_shape=(), seed=None, dtype=tf.float32, name=None): with tf.name_scope('trainable_lu_factorization'): event_size = tf.convert_to_tensor(event_size, dtype=tf.int32, name='event_size') batch_shape = tf.convert_to_tensor(batch_shape, dtype=event_size.dtype, name='batch_shape') random_matrix = tf.Variable(tf.random.uniform( shape=tf.concat([batch_shape, [event_size, event_size]], axis=0), dtype=dtype, seed=seed, ), name='conv1x1_weights') def lu_p(m): return tf.linalg.lu(tf.linalg.qr(m).q) # lower_upper = tfp.util.DeferredTensor(lambda m: lu_p(m)[0], # random_matrix) # permutation = tfp.util.DeferredTensor(lambda m: lu_p(m)[1], # random_matrix, # # trainable=False, # dtype=tf.int32, # shape=random_matrix.shape[:-1]) lower_upper = tf.Variable(lu_p(random_matrix)[0], name='lower_upper') # ref https://github.com/tensorflow/probability/issues/545 permutation = tf.Variable(lu_p(random_matrix)[1], trainable=False, name='permutation') return lower_upper, permutation def build_model(channels=3): # conv1x1 setup t_lower_upper, t_permutation = trainable_lu_factorization(channels) conv1x1 = tfb.MatvecLU(t_lower_upper, t_permutation, name='MatvecLU') print('conv1x1 variable\n', conv1x1.variables) inv_conv1x1 = tfb.Invert(conv1x1) # forward setup fwd = tfp.layers.DistributionLambda( lambda x: conv1x1(tfd.Deterministic(x))) fwd.vars = conv1x1.trainable_variables # inverse setup inv = tfp.layers.DistributionLambda( lambda x: inv_conv1x1(tfd.Deterministic(x))) inv.vars = inv_conv1x1.trainable_variables x: tf.Tensor = tf.keras.Input(shape=[28, 28, channels]) fwd_x: tfp.distributions.TransformedDistribution = fwd(x) rev_fwd_x: tfp.distributions.TransformedDistribution = inv(fwd_x) example_model = tf.keras.Model(inputs=x, outputs=rev_fwd_x, 
name='conv1x1') return example_model def test_conv1x1(): example_model = build_model() example_model.trainable = True example_model.summary() real_x = tf.random.uniform(shape=[2, 28, 28, 3], dtype=tf.float32) if example_model.weights == []: print('No Trainable Variable exists') else: print('Some Trainable Variables exist') with tf.GradientTape() as tape: tape.watch(real_x) out_x = example_model(real_x) out_x = out_x loss = out_x - real_x print(tf.math.reduce_sum(real_x - out_x)) # => nealy 0 # ex. tf.Tensor(1.3522818e-05, shape=(), dtype=float32) try: print(tape.gradient(loss, real_x).shape) except Exception as e: print('Cannot Calculate Gradient') print(e)
{ "alphanum_fraction": 0.5693069307, "author": null, "avg_line_length": 37.4845360825, "converted": null, "ext": "py", "file": null, "hexsha": "626358498f51a3f717b84e758e0301e1451ad169", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-01-12T19:10:23.000Z", "max_forks_repo_forks_event_min_datetime": "2020-09-06T09:01:05.000Z", "max_forks_repo_head_hexsha": "e56ed672f2dcccef6be38da5db3f5d2cb95d8545", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MokkeMeguru/implement-glow", "max_forks_repo_path": "realworld/layers/conv1x1.py", "max_issues_count": 7, "max_issues_repo_head_hexsha": "e56ed672f2dcccef6be38da5db3f5d2cb95d8545", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:16:56.000Z", "max_issues_repo_issues_event_min_datetime": "2019-12-13T10:13:13.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MokkeMeguru/implement-glow", "max_issues_repo_path": "realworld/layers/conv1x1.py", "max_line_length": 79, "max_stars_count": 31, "max_stars_repo_head_hexsha": "e56ed672f2dcccef6be38da5db3f5d2cb95d8545", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MokkeMeguru/implement-glow", "max_stars_repo_path": "realworld/layers/conv1x1.py", "max_stars_repo_stars_event_max_datetime": "2021-08-03T20:06:37.000Z", "max_stars_repo_stars_event_min_datetime": "2019-09-16T13:32:02.000Z", "num_tokens": 796, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3636 }
#coding: utf-8 import pygame from pygame.locals import * import random import sys #import os #import codecs from PIL import Image import numpy as np import cv2 # import module that I made import faceCamera import detectFace CS = 6 # cell size SCR_RECT = Rect(0, 0, 6*int(800/6), 6*int(800/6)) # screen size depends on the size of detected face THRESHOLD = 100 # the threshold to pixelate the pic NUM_ROW = SCR_RECT.height / CS # row of field NUM_COL = SCR_RECT.width / CS # column of field DEAD, ALIVE, STAY = 0, 1, 2 # constant for live or dead RAND_LIFE = 0.1 # You can choose the file from agtFace.jpg or face.jpg img = np.array(Image.open('face.jpg').convert('L')) # Adaptive Mean Thresholding img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,21,2) cv2.imwrite("faceAdapt.jpg",img) img = cv2.medianBlur(img, 5) cv2.imwrite("faceBlur1.jpg",img) for i in range(10): img = cv2.medianBlur(img, 3) cv2.imwrite("faceBlur"+str(i+3)+".jpg",img) class LifeGame: def __init__(self): pygame.init() screen = pygame.display.set_mode(SCR_RECT.size) pygame.display.set_caption(u"Conway's Game of Life") self.font = pygame.font.SysFont(None, 16) # Field that has size of NUM_ROW * NUM_ROL self.field = [[DEAD for x in range(NUM_COL)] for y in range(NUM_ROW)] self.color = [[DEAD for x in range(NUM_COL)] for y in range(NUM_ROW)] self.generation = 0 # the number of generation self.time = 100 self.run = False # run or not self.cursor = [NUM_COL, NUM_ROW] # the position of carsor # initiate the life game self.clear() # main loop clock = pygame.time.Clock() if detectFace.isFace : self.draw_face() else : print "No face is detected..." 
return 0 while True: clock.tick(30) self.update() self.draw(screen) pygame.display.update() for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit() elif event.type == KEYDOWN: if event.key == K_ESCAPE: pygame.quit() sys.exit() # move cursor by key elif event.key == K_LEFT: self.cursor[0] -= 1 if self.cursor[0] < 0: self.cursor[0] = 0 elif event.key == K_RIGHT: self.cursor[0] += 1 if self.cursor[0] > NUM_COL-1: self.cursor[0] = NUM_COL-1 elif event.key == K_UP: self.cursor[1] -= 1 if self.cursor[1] < 0: self.cursor[1] = 0 elif event.key == K_DOWN: self.cursor[1] += 1 if self.cursor[1] > NUM_ROW-1: self.cursor[1] = NUM_ROW-1 # turn a cell when pushing space key elif event.key == K_SPACE: x, y = self.cursor if self.field[y][x] == DEAD: self.field[y][x] = ALIVE self.color[y][x] = ALIVE elif self.field[y][x] == ALIVE: self.field[y][x] = DEAD # start simulation when pushing 's' key elif event.key == K_s: self.run = not self.run # progress just one generation by pushing 'n' elif event.key == K_n: self.step() # clear by pushing 'c' elif event.key == K_c: self.clear() self.run = False # add a alive cell randomly by pushing 'r' elif event.key == K_r: self.rand() elif event.type == MOUSEBUTTONDOWN and event.button == 1: # turn a cell by pushing left click px, py = event.pos x, y = px/CS, py/CS self.cursor = [x, y] if self.field[y][x] == DEAD: self.field[y][x] = ALIVE self.color[y][x] = ALIVE elif self.field[y][x] == ALIVE: self.field[y][x] = DEAD elif event.type == MOUSEMOTION and event.buttons == (1,0,0): px, py = event.pos x, y = px/CS, py/CS if self.field[y][x] == DEAD: self.field[y][x] = ALIVE self.color[y][x] = ALIVE elif self.field[y][x] == ALIVE: self.field[y][x] = DEAD def clear(self): """Initiate a game""" self.generation = 0 for y in range(NUM_ROW): for x in range(NUM_COL): self.field[y][x] = DEAD def rand(self): """Add a alive cell randomly""" for y in range(NUM_ROW): for x in range(NUM_COL): if random.random() < RAND_LIFE: self.field[y][x] 
= ALIVE self.color[y][x] = ALIVE def update(self): pygame.time.wait(200) """Update the field""" if self.run: self.step() # Progress one step def step(self): """Progress one generation""" # next field next_field = [[False for x in range(NUM_COL)] for y in range(NUM_ROW)] # Set the field by following the rule of life game sum_alive_cells = 0 for y in range(NUM_ROW): for x in range(NUM_COL): num_alive_cells = self.around(x, y) if num_alive_cells == 2: # keep a cell if 2 cells around the cell are alive next_field[y][x] = self.field[y][x] self.color[y][x] = STAY sum_alive_cells += 1 elif num_alive_cells == 3: # born a cell if 3 cells around the cell are alive next_field[y][x] = ALIVE self.color[y][x] = ALIVE sum_alive_cells += 1 else: # other cells are dead next_field[y][x] = DEAD self.color[y][x] = DEAD self.field = next_field self.generation += 1 def draw(self, screen): """Draw the field""" # Paint cells for y in range(NUM_ROW): for x in range(NUM_COL): if self.field[y][x] == ALIVE: if(self.color[y][x] == ALIVE): pygame.draw.rect(screen, (255,255,0), Rect(x*CS,y*CS,CS,CS)) elif(self.color[y][x] == STAY): pygame.draw.rect(screen, (255,0,255),Rect(x*CS,y*CS,CS,CS)) elif self.field[y][x] == DEAD: pygame.draw.rect(screen, (0,255,255), Rect(x*CS,y*CS,CS,CS)) pygame.draw.line(screen, (255,255,255), (x*CS,0),(x*CS,SCR_RECT.height)) pygame.draw.line(screen, (255,255,255), (0,y*CS),(SCR_RECT.width,y*CS)) # Draw cursor pygame.draw.rect(screen, (0,0,255), Rect(self.cursor[0]*CS,self.cursor[1]*CS,CS,CS), 1) # Draw the information of the game screen.blit(self.font.render("generation:%d" % self.generation, True, (0,0,0)), (0,0)) screen.blit(self.font.render("space : birth/kill",True,(0,0,0,)),(0,12)) screen.blit(self.font.render("s : start/stop",True,(0,0,0)),(0,24)) screen.blit(self.font.render("n : next",True,(0,0,0)),(0,36)) screen.blit(self.font.render("r : random",True,(0,0,0)),(0,48)) def around(self, x, y): """Return the number of the alive cells around (x,y)""" if x == 0 or 
x == NUM_COL-1 or y == 0 or y == NUM_ROW-1: return 0 sum = 0 sum += self.field[y-1][x-1] # cell at the upper left sum += self.field[y-1][x] # cell at upper side sum += self.field[y-1][x+1] # cell at the upper right sum += self.field[y][x-1] # cell at the left sum += self.field[y][x+1] # cell at the right sum += self.field[y+1][x-1] # cell at the lower left sum += self.field[y+1][x] # cell at the lower side sum += self.field[y+1][x+1] # cell at the lower right return sum def draw_face(self): x = detectFace.detectedFace[0] - 50 y = detectFace.detectedFace[1] - 80 width = detectFace.detectedFace[2] + 100 height = detectFace.detectedFace[3] + 150 THRESHOLD = np.average(img[x:x+width][y:y+height]) for i in xrange(0,NUM_ROW): for j in xrange(0,NUM_COL): searchX = x + i*width/NUM_ROW searchY = y + j*height/NUM_COL if img[searchY][searchX] < THRESHOLD: self.field[j][i] = ALIVE self.color[j][i] = ALIVE if __name__ == "__main__": LifeGame()
{ "alphanum_fraction": 0.4932053494, "author": null, "avg_line_length": 39.9655172414, "converted": null, "ext": "py", "file": null, "hexsha": "07f499a08994986445ae6a78c851c8bd15746b66", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1a36b47720c165f79aa49511165ae2d689d7066c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Bakuo/FaceToLifeGame", "max_forks_repo_path": "faceToLifeGame.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "1a36b47720c165f79aa49511165ae2d689d7066c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Bakuo/FaceToLifeGame", "max_issues_repo_path": "faceToLifeGame.py", "max_line_length": 100, "max_stars_count": null, "max_stars_repo_head_hexsha": "1a36b47720c165f79aa49511165ae2d689d7066c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Bakuo/FaceToLifeGame", "max_stars_repo_path": "faceToLifeGame.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2313, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 9272 }
import numpy as np import tensorflow as tf from ..core.image_warp import image_warp class ImageWarpTest(tf.test.TestCase): def _warp_test(self, first, second, flow, debug=False): num_batch, height, width, channels = second.shape second_ = tf.placeholder(tf.float32, shape=second.shape, name='im') flow_ = tf.placeholder(tf.float32, shape=flow.shape, name='flow') inv_warped_second = image_warp(second_, flow_) sess = tf.Session() pred = sess.run(inv_warped_second, feed_dict={second_: second, flow_: flow}) if debug: print('-- result channels') for c in range(channels): print(np.reshape(pred[:, :, :, c], [height, width])) self.assertAllClose(first, pred) def test_move(self): first = [ [0, 0, 0, 0], [0, 1, 0.5, 0], [0, 0.3, 0.4, 0], [0, 0, 0, 0]] second = [ [0, 1, 0, 0], [0, 0, 0, 0.5], [0.3, 0, 0, 0], [0, 0, 0.4, 0]] zero = [0, 0] flow = [ [zero, [-1, 0], zero, zero], [zero, [0, -1], [1, 0], [0, -1]], [[0, -1], [-1, 0], [0, 1], zero], [zero, zero, [0, -1], zero]] self._warp_test( np.reshape(first, [1, 4, 4, 1]), np.reshape(second, [1, 4, 4, 1]), np.reshape(flow, [1, 4, 4, 2])) def test_batches(self): # Make sure that batches do not interfere with each other first_1 = [ [0, 0, 0, 0], [0, 1, 0.5, 0], [0, 0.3, 0.4, 0], [0, 0, 0, 0]] second_1 = [ [0, 1, 0, 0], [0, 0, 0, 0.5], [0.3, 0, 0, 0], [0, 0, 0.4, 0]] zero = [0, 0] flow_1 = [ [zero, [-1, 0], zero, zero], [zero, [0, -1], [1, 0], [0, -1]], [[0, -1], [-1, 0], [0, 1], zero], [zero, zero, [0, -1], zero]] first_2 = np.zeros([4, 4]) second_2 = np.zeros([4, 4]) flow_2 = np.ones([4, 4, 2]) first = np.concatenate([first_1, first_2, first_1]) second = np.concatenate([second_1, second_2, second_1]) flow = np.concatenate([flow_1, flow_2, flow_1]) self._warp_test( np.reshape(first, [3, 4, 4, 1]), np.reshape(second, [3, 4, 4, 1]), np.reshape(flow, [3, 4, 4, 2])) def test_interpolate(self): first = [ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 2.1]] second = [ [0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]] zero = [0, 
0] flow = [ [zero, zero, zero, zero], [zero, [-2, -2], [-2, -2], zero], [zero, [-2, -2], [-2, -2], zero], [zero, zero, zero, [-1.7, -1.6]]] self._warp_test( np.reshape(first, [1, 4, 4, 1]), np.reshape(second, [1, 4, 4, 1]), np.reshape(flow, [1, 4, 4, 2]))
{ "alphanum_fraction": 0.4169611307, "author": null, "avg_line_length": 31.7653061224, "converted": null, "ext": "py", "file": null, "hexsha": "e85ba55aa866df8d2756a7b9e6f8243088da3458", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 67, "max_forks_repo_forks_event_max_datetime": "2021-11-03T20:08:34.000Z", "max_forks_repo_forks_event_min_datetime": "2017-12-04T01:49:08.000Z", "max_forks_repo_head_hexsha": "efaba92f51d6ae46dbe6445180b476e2b6b7b451", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "3bhady/UnFlow", "max_forks_repo_path": "src/e2eflow/test/test_image_warp.py", "max_issues_count": 73, "max_issues_repo_head_hexsha": "efaba92f51d6ae46dbe6445180b476e2b6b7b451", "max_issues_repo_issues_event_max_datetime": "2021-12-04T09:21:51.000Z", "max_issues_repo_issues_event_min_datetime": "2017-12-14T09:12:42.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "3bhady/UnFlow", "max_issues_repo_path": "src/e2eflow/test/test_image_warp.py", "max_line_length": 75, "max_stars_count": 281, "max_stars_repo_head_hexsha": "efaba92f51d6ae46dbe6445180b476e2b6b7b451", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "3bhady/UnFlow", "max_stars_repo_path": "src/e2eflow/test/test_image_warp.py", "max_stars_repo_stars_event_max_datetime": "2022-03-24T06:42:09.000Z", "max_stars_repo_stars_event_min_datetime": "2017-12-02T13:52:16.000Z", "num_tokens": 1098, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3113 }
import numpy as np import pandas as pd import theano import theano.tensor as tt def get_variational_scores(result, config, model, inference, true_pop_size): approx_params = list(inference.approx.shared_params.values()) distance = abs(model.pop_size - true_pop_size)/true_pop_size input_vars = tt.dvectors(len(approx_params)) distance_sample = inference.approx.sample_node(distance, size=config['n_eval_samples'], more_replacements={ shared: var for shared, var in zip(approx_params, input_vars) }) distance_mean = tt.mean(distance_sample) distance_function = theano.function(input_vars, distance_mean) distances = [distance_function(*[result[var][i] for var in inference.approx.shared_params.keys()]) for i in range(len(result['i']))] return pd.DataFrame({ 'date_time': result['date_time'], 'error': np.stack(distances) }) def get_beast_scores(result_filename, config, true_pop_size): trace_df = pd.read_table(result_filename, parse_dates=['datetime'], comment='#') def get_score_for_iteration(i): to_use = trace_df[int(i * config['burn_in']):(i+1)] return np.mean(abs(to_use.popSize - true_pop_size))/true_pop_size scores = [get_score_for_iteration(i) for i in range(trace_df.shape[0])] return pd.DataFrame({ 'date_time': trace_df['datetime'], 'error': scores }) def get_mcmc_scores(trace, config, true_pop_size): draws = config['chain_length'] log_every = config['log_every'] pop_size_samples = trace.get_values('pop_size')[-draws:] def get_score_for_iteration(i): to_use = pop_size_samples[int(i * config['burn_in']):(i+1)] return np.mean(abs(to_use - true_pop_size))/true_pop_size scores = [get_score_for_iteration(i) for i in range(0, draws, log_every)] return pd.DataFrame({ 'date_time': trace.times[-draws::log_every], 'error': scores })
{ "alphanum_fraction": 0.7032224532, "author": null, "avg_line_length": 41.8260869565, "converted": null, "ext": "py", "file": null, "hexsha": "9158b33782dc36bb929b1f44d14ca211ddc0bc84", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b4095995a8c789267cee4268d8e6ba107d1b8428", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "christiaanjs/phylo-hacking", "max_forks_repo_path": "pymc/eval/process_results.py", "max_issues_count": 2, "max_issues_repo_head_hexsha": "b4095995a8c789267cee4268d8e6ba107d1b8428", "max_issues_repo_issues_event_max_datetime": "2019-03-27T01:30:15.000Z", "max_issues_repo_issues_event_min_datetime": "2019-03-18T03:23:37.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "christiaanjs/phylo-hacking", "max_issues_repo_path": "pymc/eval/process_results.py", "max_line_length": 176, "max_stars_count": null, "max_stars_repo_head_hexsha": "b4095995a8c789267cee4268d8e6ba107d1b8428", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "christiaanjs/phylo-hacking", "max_stars_repo_path": "pymc/eval/process_results.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 459, "path": null, "reason": "import numpy,import theano", "repo": null, "save_path": null, "sha": null, "size": 1924 }
function loadData(path::String) datasets = DataFrame[] # Init empty vector if ispath(path) if isfile(path) push!(datasets, CSV.read(path)) return datasets else files = readdir(path) # grab all files in the given folder for f in files push!(datasets, CSV.read(path * "/" * f)) end return datasets end else error("Enter a valid file or folder") end end function arData(datasets::Vector{DataFrame}) return Array.(datasets) end
{ "alphanum_fraction": 0.5646017699, "author": null, "avg_line_length": 25.6818181818, "converted": null, "ext": "jl", "file": null, "hexsha": "4649451f966b7b3bb8314cd196d2c5e078509f59", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-07-10T02:00:17.000Z", "max_forks_repo_forks_event_min_datetime": "2018-05-11T09:51:52.000Z", "max_forks_repo_head_hexsha": "dcb679e4564a62388fe23acf370820c62fd20718", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "pzuliani/SPICE", "max_forks_repo_path": "src/SystemCore/DataDist.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "dcb679e4564a62388fe23acf370820c62fd20718", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "pzuliani/SPICE", "max_issues_repo_path": "src/SystemCore/DataDist.jl", "max_line_length": 70, "max_stars_count": 1, "max_stars_repo_head_hexsha": "dcb679e4564a62388fe23acf370820c62fd20718", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "pzuliani/SPICE", "max_stars_repo_path": "src/SystemCore/DataDist.jl", "max_stars_repo_stars_event_max_datetime": "2018-05-11T09:51:46.000Z", "max_stars_repo_stars_event_min_datetime": "2018-05-11T09:51:46.000Z", "num_tokens": 124, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 565 }
@reexport module Maze

export MazeEnv

using ReinforcementLearningBase

import Base: *

# The four deterministic moves, indexed by the integer action id 1..4.
const Actions = [
    CartesianIndex(0, -1), # left
    CartesianIndex(0, 1), # right
    CartesianIndex(-1, 0), # up
    CartesianIndex(1, 0), # down
]

# Grid-world maze environment.
# Fields:
#   walls    — set of blocked cells
#   position — current agent cell
#   start    — cell the agent returns to on reset!
#   goal     — terminal cell
#   NX, NY   — grid extents (rows, columns)
# The two spaces are derived in the inner constructor from the grid size
# (NX * NY states) and the number of actions.
mutable struct MazeEnv <: AbstractEnv
    walls::Set{CartesianIndex{2}}
    position::CartesianIndex{2}
    start::CartesianIndex{2}
    goal::CartesianIndex{2}
    NX::Int
    NY::Int
    observation_space::DiscreteSpace
    action_space::DiscreteSpace
    MazeEnv(w, p, s, g, NX, NY) = new(
        w, p, s, g, NX, NY,
        DiscreteSpace(NX * NY),
        DiscreteSpace(length(Actions)))
end

RLBase.get_observation_space(env::MazeEnv) = env.observation_space
RLBase.get_action_space(env::MazeEnv) = env.action_space

# Default 6×9 maze with a hand-placed wall layout; agent starts at (3, 1)
# and the goal is at (1, 9).
function MazeEnv()
    # NOTE: the rows inside Set([...]) are vertically concatenated (vcat).
    walls = Set([
        [CartesianIndex(i, 3) for i = 2:4]
        CartesianIndex(5, 6)
        [CartesianIndex(j, 8) for j = 1:3]
    ])
    start = CartesianIndex(3, 1)
    goal = CartesianIndex(1, 9)
    MazeEnv(walls, start, start, goal, 6, 9)
end

# All n×n fine-grid cells covered by coarse cell `p` when the grid is
# refined by an integer factor n.
function extend(p::CartesianIndex{2}, n::Int)
    x, y = Tuple(p)
    [CartesianIndex(n * (x - 1) + i, n * (y - 1) + j) for i = 1:n for j = 1:n]
end

# Top-left corner of the n×n block that coarse cell `p` maps to.
function remap(p::CartesianIndex{2}, n::Int)
    x, y = Tuple(p)
    CartesianIndex((x - 1) * n + 1, (y - 1) * n + 1)
end

# Scale the environment by an integer factor n: walls are blown up to n×n
# blocks; start/position/goal are moved to their block corners.
function *(env::MazeEnv, n::Int)
    walls = Set{CartesianIndex{2}}(ww for w in env.walls for ww in extend(w, n))
    start, position, goal = remap(env.start, n), remap(env.position, n), remap(env.goal, n)
    NX, NY = env.NX * n, env.NY * n
    MazeEnv(walls, position, start, goal, NX, NY)
end

# Apply action `a` (index into Actions). Stepping onto the goal pins the
# position there; stepping into a wall is a no-op; any other step is
# clamped to stay inside the [1, NX] × [1, NY] grid.
function (env::MazeEnv)(a::Int)
    p = env.position + Actions[a]
    if p == env.goal
        env.position = env.goal
    elseif !(p ∈ env.walls)
        env.position = CartesianIndex(min(max(p[1], 1), env.NX), min(max(p[2], 1), env.NY))
    end
    nothing
end

# Observation named tuple: reward 1.0 exactly at the goal (else 0.0),
# terminal at the goal, and state = column-major linear index of the cell.
RLBase.observe(env::MazeEnv) = (
    reward = Float64(env.position == env.goal),
    terminal = env.position == env.goal,
    state = (env.position[2] - 1) * env.NX + env.position[1],
)

# Put the agent back on the start cell.
function RLBase.reset!(env::MazeEnv)
    env.position = env.start
    nothing
end

end
{ "alphanum_fraction": 0.6194895592, "author": null, "avg_line_length": 26.2804878049, "converted": null, "ext": "jl", "file": null, "hexsha": "4c8cbc89782f7705c48434944d9612361f4bbe65", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1be791487f5f00dbedebb82ff895c56b30d736db", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rishabhvarshney14/ReinforcementLearningAnIntroduction.jl", "max_forks_repo_path": "src/environments/Maze.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "1be791487f5f00dbedebb82ff895c56b30d736db", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rishabhvarshney14/ReinforcementLearningAnIntroduction.jl", "max_issues_repo_path": "src/environments/Maze.jl", "max_line_length": 91, "max_stars_count": 1, "max_stars_repo_head_hexsha": "1be791487f5f00dbedebb82ff895c56b30d736db", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rishabhvarshney14/ReinforcementLearningAnIntroduction.jl", "max_stars_repo_path": "src/environments/Maze.jl", "max_stars_repo_stars_event_max_datetime": "2021-08-10T05:32:35.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-10T05:32:35.000Z", "num_tokens": 711, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2155 }
""" Generate sample x, y, data of hierarchical structure for bebi103 documentation. """ import numpy as np import pandas as pd def _generate_sample_data(): np.random.seed(3252) J_1 = 3 n = np.array([20, 25, 18]) theta = np.array([3, 7]) tau = np.array([1, 4]) sigma = np.array([2, 3]) rho = 0.6 sigma_cov = np.array( [ [sigma[0] ** 2, rho * sigma[0] * sigma[1]], [rho * sigma[0] * sigma[1], sigma[1] ** 2], ] ) x = [] y = [] trial = [] for i, n_val in enumerate(n): theta_1 = np.random.multivariate_normal(theta, np.diag(tau ** 2)) x_vals, y_vals = np.random.multivariate_normal( theta_1, sigma_cov, size=n_val ).transpose() x += list(x_vals) y += list(y_vals) trial += [i + 1] * n_val return pd.DataFrame(dict(x=x, y=y, trial=trial)) if __name__ == "__main__": df = _generate_sample_data() df.to_csv('sample_data.csv', index=False)
{ "alphanum_fraction": 0.5487077535, "author": null, "avg_line_length": 22.3555555556, "converted": null, "ext": "py", "file": null, "hexsha": "7e953b4eece94ef933bfb60ed5da0ef2aa91df5b", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 14, "max_forks_repo_forks_event_max_datetime": "2021-03-08T06:22:14.000Z", "max_forks_repo_forks_event_min_datetime": "2017-10-23T02:41:13.000Z", "max_forks_repo_head_hexsha": "dbfca4c9168297383f82395c1c88a9e51863fe67", "max_forks_repo_licenses": [ "MIT", "BSD-3-Clause" ], "max_forks_repo_name": "justinbois/bebi103", "max_forks_repo_path": "doc/user_guide/sample_data.py", "max_issues_count": 22, "max_issues_repo_head_hexsha": "dbfca4c9168297383f82395c1c88a9e51863fe67", "max_issues_repo_issues_event_max_datetime": "2022-03-09T21:01:22.000Z", "max_issues_repo_issues_event_min_datetime": "2017-10-23T03:22:52.000Z", "max_issues_repo_licenses": [ "MIT", "BSD-3-Clause" ], "max_issues_repo_name": "justinbois/bebi103", "max_issues_repo_path": "doc/user_guide/sample_data.py", "max_line_length": 73, "max_stars_count": 10, "max_stars_repo_head_hexsha": "dbfca4c9168297383f82395c1c88a9e51863fe67", "max_stars_repo_licenses": [ "MIT", "BSD-3-Clause" ], "max_stars_repo_name": "justinbois/bebi103", "max_stars_repo_path": "doc/user_guide/sample_data.py", "max_stars_repo_stars_event_max_datetime": "2021-09-13T09:09:00.000Z", "max_stars_repo_stars_event_min_datetime": "2018-10-01T16:27:25.000Z", "num_tokens": 302, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1006 }
"""Plot script for question 1 (noise regularization): loads pickled
goodness-of-fit results via ml_logger and renders comparison figures for
x-noise, y-noise and combined x/y-noise regularization.

NOTE(review): this is a top-level script — importing it runs everything,
including reading results.pkl from a hard-coded Dropbox path and writing
image files under EXP_PREFIX.
"""
from ml_logger import logger
from cde.model_fitting.GoodnessOfFitResults import GoodnessOfFitResults
from cde.evaluation.simulation_eval import base_experiment
import cde.model_fitting.ConfigRunner as ConfigRunner
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

EXP_PREFIX = 'question1_noise_reg_x_v1'
RESULTS_FILE = 'results.pkl'

# Point the logger at the (machine-specific) results directory for this
# experiment prefix.
logger.configure(
  '/home/jonasrothfuss/Dropbox/Eigene_Dateien/Uni/WS17_18/Density_Estimation/Nonparametric_Density_Estimation/data/cluster/',
  EXP_PREFIX)

# Load the pickled per-configuration results and flatten them into one
# DataFrame keyed by the experiment parameters of interest.
results_from_pkl_file = dict(logger.load_pkl_log(RESULTS_FILE))
gof_result = GoodnessOfFitResults(single_results_dict=results_from_pkl_file)
results_df = gof_result.generate_results_dataframe(base_experiment.KEYS_OF_INTEREST)

#gof_result = ConfigRunner.load_dumped_estimators(gof_result)

# Global matplotlib font sizes used by all figures below.
SMALL_SIZE = 11
MEDIUM_SIZE = 12
LARGE_SIZE = 16
TITLE_SIZE = 20

LINEWIDTH = 6

plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
plt.rc('axes', titlesize=LARGE_SIZE)  # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE)  # legend fontsize

""" X-Noise Regularization"""

# One 2x2 figure per estimator: Hellinger distance vs. training-set size,
# one curve per x-noise level, one subplot per simulator.
for estimator, n_centers in [("MixtureDensityNetwork", 10), ("KernelMixtureNetwork", 20)]:
  title = "%s (%i kernels) - X-Noise Regularization"%(estimator, n_centers)
  plot_dict = dict([(simulator,
    {
      "x_noise_std=0.4": {"estimator": estimator, "x_noise_std": 0.4, "y_noise_std": None, "n_centers": n_centers, "simulator": simulator},
      "x_noise_std=0.2": {"estimator": estimator, "x_noise_std": 0.2, "y_noise_std": None, "n_centers": n_centers, "simulator": simulator},
      "x_noise_std=0.1": {"estimator": estimator, "x_noise_std": 0.1, "y_noise_std": None, "n_centers": n_centers, "simulator": simulator},
      "x_noise_std=0.0": {"estimator": estimator, "x_noise_std": None, "y_noise_std": None, "n_centers": n_centers, "simulator": simulator}
    }) for simulator in ["EconDensity", "ArmaJump", "GaussianMixture", "SkewNormal"]])
  fig = gof_result.plot_metric(plot_dict, metric="hellinger_distance", figsize=(14,10), layout=(2,2))
  plt.suptitle(title, fontsize=TITLE_SIZE)
  plt.tight_layout(h_pad=2, rect=[0, 0, 1, 0.95])
  plt.savefig(EXP_PREFIX + "/" + "%s_%i_x_noise.png"%(estimator, n_centers))
  plt.clf()

""" Y-Noise Regularization"""

# Same layout as above, but sweeping the y-noise level instead.
for estimator, n_centers in [("MixtureDensityNetwork", 10), ("KernelMixtureNetwork", 20)]:
  title = "%s (%i kernels) - Y-Noise Regularization"%(estimator, n_centers)
  plot_dict = dict([(simulator,
    {
      "y_noise_std=0.2": {"estimator": estimator, "y_noise_std": 0.2, "x_noise_std": None, "n_centers": n_centers, "simulator": simulator},
      "y_noise_std=0.1": {"estimator": estimator, "y_noise_std": 0.1, "x_noise_std": None, "n_centers": n_centers, "simulator": simulator},
      "y_noise_std=0.02": {"estimator": estimator, "y_noise_std": 0.02, "x_noise_std": None, "n_centers": n_centers,"simulator": simulator},
      "y_noise_std=0.0": {"estimator": estimator, "y_noise_std": None, "x_noise_std": None, "n_centers": n_centers, "simulator": simulator}
    }, ) for simulator in ["EconDensity", "ArmaJump", "GaussianMixture", "SkewNormal"]])
  fig = gof_result.plot_metric(plot_dict, metric="hellinger_distance", figsize=(14,10), layout=(2,2))
  plt.suptitle(title, fontsize=TITLE_SIZE)
  plt.tight_layout(h_pad=2, rect=[0, 0, 1, 0.95])
  plt.savefig(EXP_PREFIX + "/" +"%s_%i_y_noise.png"%(estimator, n_centers))
  plt.clf()

""" XY-Noise Regularization"""

# Single overview figure: MDN/KMN with and without combined x/y noise,
# one subplot per simulator; only the last subplot keeps its legend.
color = iter(['red', 'orange', 'blue', 'green'])
title = "Effect of XY-Noise Regularization (x_noise_std=0.2, y_noise_std=0.1)"
plot_dict = dict([(simulator,
  {
    "MDN noise": {"estimator": "MixtureDensityNetwork", "y_noise_std": 0.1, "x_noise_std": 0.2, "n_centers": 10, "simulator": simulator},
    "MDN no noise": {"estimator": "MixtureDensityNetwork", "y_noise_std": None, "x_noise_std": None, "n_centers": 10, "simulator": simulator},
    "KMN noise": {"estimator": "KernelMixtureNetwork", "y_noise_std": 0.1, "x_noise_std": 0.2, "n_centers": 20, "simulator": simulator},
    "KMN no noise": {"estimator": "KernelMixtureNetwork", "y_noise_std": None, "x_noise_std": None, "n_centers": 20, "simulator": simulator}
  }, ) for simulator in ["EconDensity", "ArmaJump", "SkewNormal"]])
fig = gof_result.plot_metric(plot_dict, metric="hellinger_distance", figsize=(14, 4.5), layout=(1, 3), color=color)
#plt.suptitle(title, fontsize=TITLE_SIZE)
for i, ax in enumerate(fig.axes):
  if i != 2:
    ax.get_legend().remove()
  ax.set_ylabel('Hellinger distance')
  ax.set_xlabel('number of training samples')
  ax.set_xticks([200, 500, 1000, 2000, 5000])
  ax.set_xticklabels([200, 500, 1000, 2000, 5000])
plt.tight_layout(h_pad=2, rect=[0, 0, 1, 1])
plt.savefig(EXP_PREFIX + "/" + "xy_noise_overview.png")
plt.savefig(EXP_PREFIX + "/" + "xy_noise_overview.pdf")
plt.clf()

""" X-Y Noise Reg heatplots"""
""" X-Y Noise Reg heatplots"""

# Heatmaps of mean Hellinger distance over an x-noise × y-noise grid at a
# fixed number of observations, one 2x2 figure per estimator.
n_samples = 1600
metric = 'hellinger_distance'
x_noise_vals = list(reversed([None, 0.1, 0.2, 0.4]))
y_noise_vals = [None, 0.01, 0.02, 0.05, 0.1, 0.2]

for estimator, n_centers in [("MixtureDensityNetwork", 10), ("KernelMixtureNetwork", 20)]:
  fig, axarr = plt.subplots(2, 2, figsize=(12, 7.5))
  axarr = axarr.flatten()
  for k, simulator in enumerate(["EconDensity", "ArmaJump", "GaussianMixture", "SkewNormal"]):
    result_grid = np.empty((len(x_noise_vals), len(y_noise_vals)))
    for i, x_noise_std in enumerate(x_noise_vals):
      for j, y_noise_std in enumerate(y_noise_vals):
        # Select the result rows matching this exact configuration and
        # average the metric over them.
        graph_dict = {"estimator": estimator, "x_noise_std": x_noise_std, "y_noise_std": y_noise_std, "n_centers": n_centers, "simulator": simulator, 'n_observations': n_samples}
        sub_df = results_df.loc[(results_df[list(graph_dict)] == pd.Series(graph_dict)).all(axis=1)]
        result_grid[i,j] = sub_df[metric].mean()
    im = axarr[k].imshow(result_grid)
    # annotate pixels
    for i, x_noise_std in enumerate(x_noise_vals):
      for j, y_noise_std in enumerate(y_noise_vals):
        axarr[k].text(j, i, "%.3f"%result_grid[i, j], ha="center", va="center", color="w")
    axarr[k].set_ylabel("x-noise std")
    axarr[k].set_xlabel("y-noise std")
    axarr[k].set_yticks(np.arange(len(x_noise_vals)))
    axarr[k].set_xticks(np.arange(len(y_noise_vals)))
    axarr[k].set_yticklabels([str(val) for val in x_noise_vals])
    axarr[k].set_xticklabels([str(val) for val in y_noise_vals])
    cbar = axarr[k].figure.colorbar(im, ax=axarr[k], shrink=0.8)
    cbar.ax.set_ylabel("Hellinger distance", rotation=-90, va="bottom")
    axarr[k].set_title(simulator)
  plt.tight_layout(w_pad=3.5, rect=[0, 0, 1, 1])
  #plt.suptitle("Hellinger Distance X-Y-Noise:\n%s (%i centers) - %i observations"%(estimator, n_centers, n_samples), fontsize=TITLE_SIZE)
  plt.savefig(EXP_PREFIX + "/" +"%s_%i_%iobs_xy_noise_heatmap.png" % (estimator, n_centers, n_samples))
  plt.savefig(EXP_PREFIX + "/" + "%s_%i_%iobs_xy_noise_heatmap.pdf" % (estimator, n_centers, n_samples))
{ "alphanum_fraction": 0.694630414, "author": null, "avg_line_length": 51.9078014184, "converted": null, "ext": "py", "file": null, "hexsha": "807162c0ef56b9f90162e85d50f0322e6d219fbb", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5e64fca31e0f8aafac43cfca7a37f70edb4d90fe", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tobikuhlmann/Conditional_Density_Estimation", "max_forks_repo_path": "cde/evaluation/simulation_eval/plotting/question1_plots.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "5e64fca31e0f8aafac43cfca7a37f70edb4d90fe", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tobikuhlmann/Conditional_Density_Estimation", "max_issues_repo_path": "cde/evaluation/simulation_eval/plotting/question1_plots.py", "max_line_length": 144, "max_stars_count": null, "max_stars_repo_head_hexsha": "5e64fca31e0f8aafac43cfca7a37f70edb4d90fe", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tobikuhlmann/Conditional_Density_Estimation", "max_stars_repo_path": "cde/evaluation/simulation_eval/plotting/question1_plots.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2193, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 7319 }
#!/usr/bin/env python

# =============================================================================
# MODULE DOCSTRING
# =============================================================================

"""
Test layers in modules.autoregressive.
"""

# =============================================================================
# GLOBAL IMPORTS
# =============================================================================

from unittest import mock

import numpy as np
import pytest
import torch

from ..modules.autoregressive import MADE


# =============================================================================
# FIXTURES
# =============================================================================

# Each test case is a tuple:
# (dimension_in, dimensions_hidden, out_per_dimension, dimension_conditioning,
#  expected_dimensions_hidden, expected_out_dimensions)
@pytest.fixture(
    params=[
        (3, 2, 1, 0, [2, 2], 3),
        (3, [5], 1, 0, [5], 3),
        (3, 4, 1, 1, [2]*4, 2),
        (5, 7, 2, 0, [8]*7, 10),
        (5, 7, 2, 2, [8]*7, 6),
        (5, [4, 7, 9], 2, 2, [4, 7, 9], 6)
    ]
)
def dimensions(request):
    return request.param


# Each test case is a tuple:
# (blocks, degrees_in,
#  dimension_in, dimensions_hidden, out_per_dimension, dimension_conditioning,
#  expected_dimensions_hidden, expected_out_dimensions)
@pytest.fixture(
    params=[
        (2, 'input', 3, 2, 1, 0, [2, 2], 3),
        (2, 'reversed', 3, 2, 1, 0, [1, 1], 3),
        ([1, 2], 'input', 3, 2, 1, 0, [1, 1], 3),
        ([1, 2], 'reversed', 3, 2, 1, 0, [2, 2], 3),
        (3, 'input', 7, 3, 2, 2, [10]*3, 10),
        (3, 'reversed', 7, 3, 2, 2, [8]*3, 10),
        ([1, 2, 2], 'input', 7, 3, 2, 2, [10]*3, 10),
        ([1, 2, 2], 'reversed', 7, 3, 2, 2, [12]*3, 10),
        ([2, 3], 'input', 7, 3, 2, 2, [8]*3, 10),
        ([2, 3], 'reversed', 7, 3, 2, 2, [10]*3, 10),
        (2, 'input', 7, [6, 9, 11], 2, 2, [6, 9, 11], 10),
        (2, 'reversed', 7, [6, 9, 11], 2, 2, [6, 9, 11], 10),
        ([1, 2, 3], np.array([0, 1, 2]), 8, 3, 2, 2, [10]*3, 12),
        ([1, 2, 3], np.array([0, 2, 1]), 8, 3, 2, 2, [12]*3, 12),
        ([1, 2, 3], np.array([2, 0, 1]), 8, 3, 2, 2, [14]*3, 12),
    ]
)
def blocked_dimensions(request):
    return request.param


# =============================================================================
# HELPER FUNCTIONS
# =============================================================================

def generate_random_degrees_in(dimension_in, dimension_conditioning):
    """Return a reproducible random permutation of the input degrees."""
    # Make sure the test is reproducible with a random state.
    random_state = np.random.RandomState(dimension_in)
    return random_state.permutation(list(range(dimension_in-dimension_conditioning)))


# =============================================================================
# TESTS
# =============================================================================

@pytest.mark.parametrize('in_out_dimension,n_layers,dimension_conditioning', [
    (5, 3, 0),
    (7, 2, 2),
    (7, 4, 1),
    (10, 3, 3),
])
def test_MADE_create_mask(in_out_dimension, n_layers, dimension_conditioning):
    """Test the method MADE.create_mask().

    Simulate an n_layers network with sequential degree assignment and
    check that all the masks have the appropriate shape and are lower
    triangular.
    """
    first_layer_dim = in_out_dimension
    inner_layer_dim = in_out_dimension - 1
    output_layer_dim = in_out_dimension - dimension_conditioning

    # Assign degrees sequentially for the simulated network.
    degrees = []
    for layer_idx in range(n_layers+1):
        # The first and last layers have an extra unit.
        if layer_idx == 0:
            degrees.append(np.arange(first_layer_dim))
        elif layer_idx == n_layers:
            degrees.append(np.arange(dimension_conditioning, dimension_conditioning+output_layer_dim))
        else:
            degrees.append(np.arange(inner_layer_dim))

    # Build masks for all layers.
    masks = [MADE.create_mask(degrees[i], degrees[i+1], is_output_layer=(i==n_layers-1))
             for i in range(n_layers)]

    for layer_idx, mask in enumerate(masks):
        is_output_layer = (layer_idx == n_layers-1)

        # Check that they are all lower triangular.
        if is_output_layer:
            assert torch.all(mask == torch.tril(mask, diagonal=dimension_conditioning-1))
        else:
            assert torch.all(mask == torch.tril(mask))

        # In the first layer, the last input unit must have no
        # connection with the first hidden layer.
        if layer_idx == 0:
            assert torch.all(mask[:,-1] == False)
            assert mask.shape == (inner_layer_dim, first_layer_dim)
        # In the last layer, the first output unit must be attached
        # only to conditioning node or be constant.
        elif is_output_layer:
            assert torch.all(mask[0, dimension_conditioning:] == False)
            assert mask.shape == (output_layer_dim, inner_layer_dim)
        else:
            assert mask.shape == (inner_layer_dim, inner_layer_dim)


@pytest.mark.parametrize('degrees_in', ['input', 'reversed', 'random'])
def test_MADE_get_dimensions(degrees_in, dimensions):
    """Test the method MADE._get_dimensions without blocks.

    The dimensions should be independent of the degrees_in option.
    """
    if degrees_in == 'random':
        degrees_in = generate_random_degrees_in(dimensions[0], dimensions[3])
    check_MADE_get_dimensions(1, degrees_in, *dimensions)


def test_MADE_get_dimensions_blocks(blocked_dimensions):
    """Test the method MADE._get_dimensions with blocks."""
    check_MADE_get_dimensions(*blocked_dimensions)


def check_MADE_get_dimensions(
        blocks, degrees_in,
        dimension_in, dimensions_hidden, out_per_dimension, dimension_conditioning,
        expected_dimensions_hidden, expected_out_dimensions
):
    """Used by test_MADE_get_dimensions and test_MADE_get_dimensions_blocks."""
    n_hidden_layers, dimensions_hidden, out_dimension, expanded_blocks = MADE._get_dimensions(
        dimension_in, dimensions_hidden, out_per_dimension, dimension_conditioning,
        degrees_in, blocks, shorten_last_block=True)
    assert n_hidden_layers == len(expected_dimensions_hidden)
    assert dimensions_hidden == expected_dimensions_hidden
    assert out_dimension == expected_out_dimensions


@pytest.mark.parametrize(('dimension_in,dimension_conditioning,degrees_in,degrees_hidden_motif,blocks,'
                          'expected_degrees_in,expected_degrees_hidden_motif'), [
    (5, 0, 'input', None, [3, 2], [0, 0, 0, 1, 1], [0, 0, 0]),
    (7, 2, 'input', None, [3, 2], [-1, -1, 0, 0, 0, 1, 1], [-1, -1, 0, 0, 0]),
    (5, 0, 'reversed', None, [3, 2], [1, 1, 1, 0, 0], [0, 0]),
    (7, 2, 'reversed', None, [3, 2], [-1, -1, 1, 1, 1, 0, 0], [-1, -1, 0, 0]),
    (6, 0, [2, 0, 1], None, [1, 3, 2], [2, 0, 0, 0, 1, 1], [0, 0, 0, 1, 1]),
    (7, 1, [2, 0, 1], None, [1, 3, 2], [-1, 2, 0, 0, 0, 1, 1], [-1, 0, 0, 0, 1, 1]),
])
def test_MADE_generate_degrees(dimension_in, dimension_conditioning, degrees_in,
                               degrees_hidden_motif, blocks,
                               expected_degrees_in, expected_degrees_hidden_motif):
    """Test that the input degrees and the motif for the hidden nodes are correct."""
    # Create a mock MADE class with the blocks attribute.
    mock_made = mock.Mock(blocks=blocks)
    mock_made.degrees_in = MADE._assign_degrees_in(mock_made, dimension_in, dimension_conditioning, degrees_in)
    motif = MADE._generate_degrees_hidden_motif(mock_made, degrees_hidden_motif)
    assert np.all(mock_made.degrees_in == np.array(expected_degrees_in))
    assert np.all(motif == np.array(expected_degrees_hidden_motif))


@pytest.mark.parametrize('weight_norm', [False, True])
def test_MADE_mask_dimensions(weight_norm, dimensions):
    """Test that the dimension of the hidden layers without blocks follow the init options correctly."""
    check_MADE_mask_dimensions(1, 'input', *dimensions[:-2], weight_norm=weight_norm)


@pytest.mark.parametrize('weight_norm', [False, True])
def test_MADE_mask_dimensions_blocks(weight_norm, blocked_dimensions):
    """Test that the dimension of the hidden layers with blocks follow the init options correctly."""
    check_MADE_mask_dimensions(*blocked_dimensions[:-2], weight_norm=weight_norm)


def check_MADE_mask_dimensions(blocks, degrees_in, dimension_in, dimensions_hidden,
                               out_per_dimension, dimension_conditioning, weight_norm):
    """Used by test_MADE_mask_dimensions and test_MADE_mask_dimensions_blocks."""
    made = MADE(
        dimension_in=dimension_in,
        dimensions_hidden=dimensions_hidden,
        out_per_dimension=out_per_dimension,
        dimension_conditioning=dimension_conditioning,
        degrees_in=degrees_in,
        weight_norm=weight_norm,
        blocks=blocks,
        shorten_last_block=True
    )

    # Compute the expected dimensions.
    n_hidden_layers, dimensions_hidden, out_dimension, expanded_blocks = made._get_dimensions(
        dimension_in, dimensions_hidden, out_per_dimension, dimension_conditioning,
        degrees_in, blocks, shorten_last_block=True)

    # Masked linear layers are alternated with nonlinearities.
    masked_linear_modules = made.layers[::2]

    # Check all dimensions.
    assert len(masked_linear_modules) == n_hidden_layers + 1
    assert masked_linear_modules[0].in_features == dimension_in
    for layer_idx in range(n_hidden_layers):
        # BUGFIX: these two comparisons were previously bare expressions
        # (missing ``assert``), so the hidden-layer dimensions were never
        # actually verified.
        assert masked_linear_modules[layer_idx].out_features == dimensions_hidden[layer_idx]
        assert masked_linear_modules[layer_idx+1].in_features == dimensions_hidden[layer_idx]
    assert masked_linear_modules[-1].out_features == out_dimension

    # Test correct implementation of the Python properties.
    assert made.dimension_in == dimension_in
    assert made.n_layers == n_hidden_layers + 1
    assert made.dimensions_hidden == dimensions_hidden
    assert made.dimension_conditioning == dimension_conditioning


@pytest.mark.parametrize('weight_norm', [False, True])
@pytest.mark.parametrize('degrees_in', ['input', 'reversed', 'random'])
def test_MADE_autoregressive_property(weight_norm, degrees_in, dimensions):
    """Test that MADE without blocks satisfies the autoregressive property.

    The test creates a random input for a MADE network and then perturbs
    it one a time, making sure that output k changes if and only if input
    with a smaller degrees have changed.
    """
    # Generate a random permutation if requested.
    if degrees_in == 'random':
        degrees_in = generate_random_degrees_in(dimensions[0], dimensions[3])
    check_MADE_autoregressive_property(1, degrees_in, *dimensions, weight_norm=weight_norm)


@pytest.mark.parametrize('weight_norm', [False, True])
def test_MADE_autoregressive_property_blocks(weight_norm, blocked_dimensions):
    """Test that MADE with blocks satisfies the autoregressive property.

    The test creates a random input for a MADE network and then perturbs
    it one a time, making sure that output k changes if and only if input
    with a smaller degrees have changed.
    """
    check_MADE_autoregressive_property(*blocked_dimensions, weight_norm=weight_norm)


def check_MADE_autoregressive_property(blocks, degrees_in, dimension_in, dimensions_hidden,
                                       out_per_dimension, dimension_conditioning, _,
                                       out_dimension, weight_norm):
    """Used by test_MADE_autoregressive_property and test_MADE_autoregressive_property_blocks."""
    made = MADE(
        dimension_in=dimension_in,
        dimensions_hidden=dimensions_hidden,
        out_per_dimension=out_per_dimension,
        dimension_conditioning=dimension_conditioning,
        degrees_in=degrees_in,
        blocks=blocks,
        shorten_last_block=True
    )

    # Create a random input and make it go through the net.
    # (Renamed from ``input`` to avoid shadowing the builtin.)
    x = np.random.randn(1, dimension_in)
    input_tensor = torch.tensor(x, dtype=torch.float, requires_grad=True)
    output = made.forward(input_tensor)
    assert output.shape == (1, out_dimension)

    # Make sure that there are no duplicate degrees in the input/output.
    assert len(set(made.degrees_in)) == len(made.blocks) + int(dimension_conditioning > 0)

    for out_idx in range(out_dimension // out_per_dimension):
        # Compute the gradient of the out_idx-th dimension of the
        # output with respect to the gradient vector.
        loss = torch.sum(output[0, out_idx:out_dimension:out_dimension//out_per_dimension])
        loss.backward(retain_graph=True)

        # In all cases, the conditioning features should affect the whole output.
        grad = input_tensor.grad[0]
        assert torch.all(grad[:dimension_conditioning] != 0.0)

        # Now consider the non-conditioning features only.
        grad = grad[dimension_conditioning:]
        degrees = made.degrees_in[dimension_conditioning:]

        # For the autoregressive property to hold, the k-th output should
        # have non-zero gradient only for the inputs with a smaller degree.
        degree_out = degrees[out_idx]
        for in_idx in range(len(degrees)):
            if degrees[in_idx] < degree_out:
                assert grad[in_idx] != 0
            else:
                assert grad[in_idx] == 0

        # Reset gradients for next iteration.
        made.zero_grad()
        input_tensor.grad.data.zero_()
{ "alphanum_fraction": 0.6148862655, "author": null, "avg_line_length": 38.3756906077, "converted": null, "ext": "py", "file": null, "hexsha": "a679c7eeefb87a97f8de3005b3a20fa7eae56ee5", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-07-22T00:53:56.000Z", "max_forks_repo_forks_event_min_datetime": "2021-07-22T00:53:56.000Z", "max_forks_repo_head_hexsha": "9a9aff61286be3111c4e70136620d0e3aac31318", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "andrrizzi/tfep-revisited-2021", "max_forks_repo_path": "scripts/modules/nets/tests/test_modules_autoregressive.py", "max_issues_count": 2, "max_issues_repo_head_hexsha": "9a9aff61286be3111c4e70136620d0e3aac31318", "max_issues_repo_issues_event_max_datetime": "2021-09-14T08:51:55.000Z", "max_issues_repo_issues_event_min_datetime": "2021-08-24T07:54:55.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "andrrizzi/tfep-revisited-2021", "max_issues_repo_path": "scripts/modules/nets/tests/test_modules_autoregressive.py", "max_line_length": 113, "max_stars_count": 7, "max_stars_repo_head_hexsha": "9a9aff61286be3111c4e70136620d0e3aac31318", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "andrrizzi/tfep-revisited-2021", "max_stars_repo_path": "scripts/modules/nets/tests/test_modules_autoregressive.py", "max_stars_repo_stars_event_max_datetime": "2022-03-11T07:29:36.000Z", "max_stars_repo_stars_event_min_datetime": "2021-07-22T00:53:37.000Z", "num_tokens": 3383, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 13892 }
# Piecewise-linear interpolant over samples `y` taken on a uniform grid
# with spacing `Δx`; the first sample corresponds to x = 0.
struct LinearInterpolant{T,N}
    y::Array{T,N}
    Δx::Float64
end

# Evaluate the interpolant at `a`: locate the two bracketing samples via
# floor/ceil of the fractional grid position and blend them linearly.
# (Linear indexing is used, so multi-dimensional sample arrays are walked
# in column-major order, as in the original implementation.)
function (f::LinearInterpolant)(a::Float64)
    pos = a / f.Δx
    lo = floor(Int64, pos)
    hi = ceil(Int64, pos)
    w = pos - lo
    return (1 - w) * f.y[lo + 1] + w * f.y[hi + 1]
end

# Build interpolants (in the spin parameter) for the mode frequency ω, the
# angular separation constant Alm, and the spherical–spheroidal mixing
# coefficients, from the tabulated output of `GetModes`.  `ϵ` is both the
# requested tabulation step and the grid spacing returned by `GetModes`.
function qnms(; l = 0, m = 0, n = 0, s = 0, amax = 0.99, ϵ = 0.01)
    _, ωs, Almss, Cllss, _, ϵ = GetModes(l, m, n, s; amax = amax, ϵ = ϵ)
    ωfunc = LinearInterpolant(ωs, ϵ)
    Afunc = LinearInterpolant(Almss, ϵ)
    Cfunc = LinearInterpolant(Cllss, ϵ)
    return ωfunc, Afunc, Cfunc
end

# Single-spin query: run `GetModes` up to spin `a` and return the last
# tabulated frequency together with freshly computed Alm / mixing
# coefficients and the parameter object `P`.
function qnm(; l = 0, m = 0, n = 0, s = 0, a = 0.0, ϵ = 0.01)
    _, ωs, _, _, P, _ = GetModes(l, m, n, s; amax = a, ϵ = ϵ)
    Alms, Cll = ComputeAₗₘ(P.s, P.m, P.a * P.ω, P.Alm, P.lmax)
    return ωs[end], Alms, Cll, P
end
{ "alphanum_fraction": 0.5736842105, "author": null, "avg_line_length": 24.5161290323, "converted": null, "ext": "jl", "file": null, "hexsha": "2cfaeb9539a3da73c89337b6bcff4f47d7ea4cd4", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f60d19bb3df8ce10a920d6cd720304ea61af9663", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Potatoasad/QuasinormalModes.jl", "max_forks_repo_path": "src/QnmRotationSeries.jl", "max_issues_count": 1, "max_issues_repo_head_hexsha": "f60d19bb3df8ce10a920d6cd720304ea61af9663", "max_issues_repo_issues_event_max_datetime": "2021-09-24T06:02:36.000Z", "max_issues_repo_issues_event_min_datetime": "2021-09-24T06:02:36.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Potatoasad/QuasinormalModes.jl", "max_issues_repo_path": "src/QnmRotationSeries.jl", "max_line_length": 68, "max_stars_count": 1, "max_stars_repo_head_hexsha": "f60d19bb3df8ce10a920d6cd720304ea61af9663", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Potatoasad/QuasinormalModes.jl", "max_stars_repo_path": "src/QnmRotationSeries.jl", "max_stars_repo_stars_event_max_datetime": "2021-05-22T07:39:08.000Z", "max_stars_repo_stars_event_min_datetime": "2021-05-22T07:39:08.000Z", "num_tokens": 375, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 760 }
# -*- coding: utf-8 -*- """ Created on Wed Apr 5 16:00:11 2017 @author: lracuna """ import numpy as np import cv2 import glob # termination criteria criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*7,3), np.float32) objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d point in real world space imgpoints = [] # 2d points in image plane. images = glob.glob('*.png') for fname in images: img = cv2.imread(fname) gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # Find the chess board corners ret, corners = cv2.findChessboardCorners(gray, (7,6),None) # If found, add object points, image points (after refining them) if ret == True: objpoints.append(objp) corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) imgpoints.append(corners2) # Draw and display the corners img = cv2.drawChessboardCorners(img, (7,6), corners2,ret) cv2.imshow('img',img) cv2.waitKey(50) break cv2.destroyAllWindows() #%% ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None) img = cv2.imread('left-0000.png') h, w = img.shape[:2] newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h)) #%% # undistort dst = cv2.undistort(img, mtx, dist, None, newcameramtx) # crop the image x,y,w,h = roi dst = dst[y:y+h, x:x+w] cv2.imwrite('calibresult.png',dst) #%% mean_error = 0 tot_error = 0 for i in xrange(len(objpoints)): imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist) error = cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2) tot_error += error print "total error: ", tot_error/len(objpoints)
{ "alphanum_fraction": 0.6647307286, "author": null, "avg_line_length": 27.0571428571, "converted": null, "ext": "py", "file": null, "hexsha": "27ea807fbfda4660f366a3144595ea320011170e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-08-07T03:16:47.000Z", "max_forks_repo_forks_event_min_datetime": "2019-08-07T03:16:47.000Z", "max_forks_repo_head_hexsha": "95dc017ef2aec32173e73dc397ba00177d4f92ce", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Oilgrim/ivs_sim", "max_forks_repo_path": "python/helpful_scripts/cv_calibration.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "95dc017ef2aec32173e73dc397ba00177d4f92ce", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Oilgrim/ivs_sim", "max_issues_repo_path": "python/helpful_scripts/cv_calibration.py", "max_line_length": 100, "max_stars_count": null, "max_stars_repo_head_hexsha": "95dc017ef2aec32173e73dc397ba00177d4f92ce", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Oilgrim/ivs_sim", "max_stars_repo_path": "python/helpful_scripts/cv_calibration.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 615, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1894 }
str="Data Science" ;str str1='Data Science';str1 str2="Teacher guide's";str2 str3='Teacher guide"s';str3 str4="Data Science using R" str5='Data Science using python' paste("Hello","World",sep="$") paste(str1,str2,str3,str4,str5,sep=" ") paste(c("something","go's","wrong"),"in LPU",sep="+",collapse="#") format(78.5632892165,digits=5,nsmall=4) format(c(1,-14.63,78.5632892165),scientific=T) format(c(1,-14.63,78.5632892165),nsmall=5) format(c(1,-14.63,78.5632892165),width=10) format(6,width=12,justify="l") format(6) format("Hello",width=8,justify="r")
{ "alphanum_fraction": 0.6846689895, "author": null, "avg_line_length": 30.2105263158, "converted": null, "ext": "r", "file": null, "hexsha": "c402f03ea7fcda1755a9e85ae6c96e88ad9efb75", "include": null, "lang": "R", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "365664003655964b25bf84aee2e1c706efdff83f", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "hackwithabhishek/R-Programming-Knowledge", "max_forks_repo_path": "Codes/string_format.r", "max_issues_count": null, "max_issues_repo_head_hexsha": "365664003655964b25bf84aee2e1c706efdff83f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "hackwithabhishek/R-Programming-Knowledge", "max_issues_repo_path": "Codes/string_format.r", "max_line_length": 67, "max_stars_count": null, "max_stars_repo_head_hexsha": "365664003655964b25bf84aee2e1c706efdff83f", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "hackwithabhishek/R-Programming-Knowledge", "max_stars_repo_path": "Codes/string_format.r", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 201, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 574 }
from collections import OrderedDict
from tempfile import TemporaryDirectory
from typing import (
    Tuple,
    Union,
    List,
    Iterable,
    Dict,
    Any,
    Type,
    Callable,
    Optional,
    Sequence,
)

import numpy as np
import torch

from nebullvm import optimize_torch_model
from nebullvm.api.frontend.utils import ifnone, QUANTIZATION_METRIC_MAP
from nebullvm.base import DataType, ModelCompiler
from nebullvm.inference_learners.base import (
    PytorchBaseInferenceLearner,
    InferenceLearnerWrapper,
    LearnerMetadata,
)
from nebullvm.optimizers.extra import HuggingFaceOptimizer

try:
    from transformers import PreTrainedModel, PretrainedConfig
    from transformers.tokenization_utils import PreTrainedTokenizer
except ImportError:
    # add placeholders for function definition
    PreTrainedModel = None
    PreTrainedTokenizer = None
    # Bug fix: PretrainedConfig is used in annotations below
    # (_try_extraction, _get_extra_optimizer); without this placeholder the
    # module raised NameError whenever transformers was not installed,
    # defeating the purpose of this try/except.
    PretrainedConfig = None


def _flatten_outputs(
    outputs: Union[torch.Tensor, Iterable]
) -> List[torch.Tensor]:
    """Recursively flatten a (possibly nested) iterable of tensors into a
    flat list, preserving iteration order."""
    new_outputs = []
    for output in outputs:
        if isinstance(output, torch.Tensor):
            new_outputs.append(output)
        else:
            flatten_list = _flatten_outputs(output)
            new_outputs.extend(flatten_list)
    return new_outputs


class _TransformerWrapper(torch.nn.Module):
    """Class for wrapping the Transformers and giving them an API compatible
    with nebullvm. The class takes as input of the forward method positional
    arguments and transforms them in the input dictionaries needed by
    transformers classes. At the end it also flattens their output.
    """

    def __init__(
        self,
        core_model: torch.nn.Module,
        encoded_input: Dict[str, torch.Tensor],
    ):
        super().__init__()
        self.core_model = core_model
        # Remember the tokenizer's key order (and dtypes) so positional args
        # can be mapped back to keyword inputs in `forward`.
        self.inputs_types = OrderedDict()
        for key, value in encoded_input.items():
            self.inputs_types[key] = value.dtype

    def forward(self, *args: torch.Tensor):
        # Rebuild the keyword dict expected by the HuggingFace model from the
        # positional tensors (zip pairs them in recorded key order).
        inputs = {
            key: value for key, value in zip(self.inputs_types.keys(), args)
        }
        outputs = self.core_model(**inputs)
        return tuple(_flatten_outputs(outputs.values()))


def _get_size_recursively(
    tensor_tuple: Union[torch.Tensor, Tuple]
) -> List[int]:
    """Return the nesting sizes of a nested tuple of tensors, outermost
    first, e.g. ((t, t), (t, t), (t, t)) -> [3, 2]."""
    if isinstance(tensor_tuple[0], torch.Tensor):
        return [len(tensor_tuple)]
    else:
        inner_size = _get_size_recursively(tensor_tuple[0])
        return [len(tensor_tuple), *inner_size]


def _get_output_structure(
    text: str,
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    tokenizer_args: Dict,
) -> Tuple[OrderedDict, Type]:
    """Function needed for saving in a dictionary the output structure of the
    transformers model.

    Runs the model once on `text`; for each output key, stores None when the
    value is a single tensor, or the nesting sizes (see
    `_get_size_recursively`) when it is a nested tuple. Also returns the
    concrete output class so it can be rebuilt later.
    """
    encoded_input = tokenizer([text], **tokenizer_args)
    output = model(**encoded_input)
    structure = OrderedDict()
    for key, value in output.items():
        if isinstance(value, torch.Tensor):
            structure[key] = None
        else:
            size = _get_size_recursively(value)
            structure[key] = size
    return structure, type(output)


def _restructure_output(
    output: Tuple[torch.Tensor],
    structure: OrderedDict,
    output_type: Any = None,
):
    """Restructure the flatter output using the structure dictionary given as
    input.

    Inverse of `_flatten_outputs`: consumes `np.prod(sizes)` tensors for each
    nested entry and reshapes them back into nested lists; wraps the result
    in `output_type` when given.
    """
    output_dict = {}
    idx = 0
    for key, value in structure.items():
        if value is None:
            output_dict[key] = output[idx]
            idx += 1
        else:
            output_dict[key] = (
                np.array(
                    output[idx : int(np.prod(value)) + idx],  # noqa E203
                    dtype=object,
                )
                .reshape(value)
                .tolist()
            )
            idx += np.prod(value)
    if output_type is not None:
        return output_type(**output_dict)
    return output_dict


class HuggingFaceInferenceLearner(InferenceLearnerWrapper):
    """Class wrapping an InferenceLearner model and giving to it the
    huggingface interface.

    The class fuses both the InterfaceLearner and HuggingFace interfaces,
    giving to the final user a model which can be used whit the prefered
    API without the need of adapting the previous code.

    Attributes:
        network_parameters (ModelParams): Model parameters of the model.
        core_inference_learner (PytorchBaseInferenceLearner): Inference
            learner built using the Pytorch interface.
        output_structure (Dict): Original output structure of the HuggingFace
            model.
        input_names (List[str]): List of all the input keys used for the
            original HuggingFace model.
        output_type (Any, optional): Original output type of the HuggingFace
            model.
    """

    def __init__(
        self,
        core_inference_learner: PytorchBaseInferenceLearner,
        output_structure: OrderedDict,
        input_names: List[str],
        output_type: Any = None,
    ):
        super().__init__(core_inference_learner)
        self.output_structure = output_structure
        self.input_names = input_names
        self.output_type = output_type

    def _save_wrapper_extra_info(self):
        # All extra info is serialized via metadata kwargs; nothing to save.
        pass

    @staticmethod
    def _load_wrapper_extra_info(builder_inputs: Dict) -> Dict:
        # Builder inputs are already in the right form; pass them through.
        return builder_inputs

    def run(self, *args, **kwargs) -> Any:
        """Run the underlying optimized model for getting a prediction.

        The method has an hybrid interface. It accepts inputs either as
        positional or keyword arguments. If only positional arguments are
        given the method expects the inputs to be in the canonical
        nebullvm interface. If only keyword arguments are given the method
        expects them to be in the HuggingFace interface. Mixed
        representation is not allowed and will result in an error.
        """
        if len(args) > 0 and len(kwargs) > 0:
            raise RuntimeError(
                "Not allowed usage of the predict method. "
                "Either the positional or the keyword arguments must be given."
            )
        if len(args) > 0:
            # Canonical nebullvm interface: positional tensors, raw output.
            return self.core_inference_learner(*args)
        # HuggingFace interface: pull inputs by name, then rebuild the
        # original output structure.
        inputs = (kwargs.pop(name) for name in self.input_names)
        outputs = self.core_inference_learner(*inputs)
        return _restructure_output(
            outputs, self.output_structure, self.output_type
        )

    def _get_extra_metadata_kwargs(self) -> Dict:
        """Collect the wrapper-specific fields to be stored in the
        LearnerMetadata (output structure/keys, input names, output type)."""
        metadata_kwargs = {
            "output_structure": self.output_structure,
            "output_structure_keys": list(self.output_structure.keys()),
            "input_names": self.input_names,
        }
        if self.output_type is not None:
            metadata_kwargs.update(
                {
                    "output_type": self.output_type.__name__,
                    "output_type_module": self.output_type.__module__,
                }
            )
        return metadata_kwargs

    @staticmethod
    def _convert_metadata_to_inputs(metadata: LearnerMetadata) -> Dict:
        """Rebuild the constructor kwargs from a saved LearnerMetadata."""
        # we need to guarantee the preservation of the output structure
        # elements order.
        output_structure = OrderedDict()
        for key in metadata["output_structure_keys"]:
            output_structure[key] = metadata["output_structure"][key]
        inputs = {
            "output_structure": output_structure,
            "input_names": metadata["input_names"],
        }
        if metadata["output_type"] is not None:
            # NOTE: exec/eval on metadata content — safe only as long as the
            # metadata file comes from a trusted save of this same library.
            exec(
                f"from {metadata['output_type_module']} "
                f"import {metadata['output_type']}"
            )
            inputs["output_type"] = eval(metadata["output_type"])
        return inputs


def _get_dynamic_axis(
    text: str,
    tokenizer: PreTrainedTokenizer,
    model: PreTrainedModel,
    tokenizer_args: Dict,
) -> Dict[str, List[Dict[int, str]]]:
    """Detect the dynamic tensor dimensions by tokenizing two texts of
    different lengths and diffing the resulting input/output shapes.

    Returns, for every input and output tensor, a mapping from the index of
    each dimension that changed to a symbolic tag ``val_<i>_<j>``.
    """
    input_1 = tokenizer([text], **tokenizer_args)
    input_2 = tokenizer([text + text], **tokenizer_args)
    input_dicts = []
    for key in input_1.keys():
        input_dict = {}
        for idx, (i, j) in enumerate(
            zip(input_1[key].shape, input_2[key].shape)
        ):
            if i != j:
                input_dict[idx] = f"val_{i}_{j}"
        input_dicts.append(input_dict)
    output_dicts = []
    outputs_1 = _flatten_outputs(model(**input_1).values())
    outputs_2 = _flatten_outputs(model(**input_2).values())
    for o1, o2 in zip(outputs_1, outputs_2):
        output_dict = {}
        for idx, (i, j) in enumerate(zip(o1.shape, o2.shape)):
            if i != j:
                output_dict[idx] = f"val_{i}_{j}"
        output_dicts.append(output_dict)
    return {"inputs": input_dicts, "outputs": output_dicts}


def _extract_input_type(input_value: torch.Tensor):
    """Map a tensor's dtype to the nebullvm DataType enum (float/int only)."""
    if input_value.dtype is torch.float:
        return DataType.FLOAT
    elif input_value.dtype is torch.long:
        return DataType.INT
    else:
        raise NotImplementedError(
            f"Unsupported data format {input_value.dtype}."
        )


def _try_extraction(model_config: PretrainedConfig, keys: List[str]):
    """Return the first attribute in `keys` present on `model_config`,
    or None when none of them exists."""
    for key in keys:
        if hasattr(model_config, key):
            return getattr(model_config, key)
    return


def _get_extra_optimizer(
    model_config: PretrainedConfig,
) -> List[HuggingFaceOptimizer]:
    """Build the HuggingFace-specific optimizer for the given model config.

    Matches the config class name against the architectures known to
    HuggingFaceOptimizer; when found, forwards the hidden size and number of
    attention heads (config attribute names differ across architectures).
    Falls back to a default optimizer when the architecture is unknown.
    """
    config_name = model_config.__class__.__name__.lower()
    for key in HuggingFaceOptimizer.get_accepted_types():
        if key in config_name:
            input_dict = {"model_type": key, "opt_level": 2}
            hidden_dim = _try_extraction(
                model_config, ["n_embd", "d_model", "hidden_size"]
            )
            if hidden_dim is not None:
                input_dict["hidden_size"] = hidden_dim
            n_heads = _try_extraction(
                model_config,
                ["n_head", "num_attention_heads", "encoder_attention_heads"],
            )
            if n_heads is not None:
                input_dict["num_heads"] = n_heads
            return [HuggingFaceOptimizer(hugging_face_params=input_dict)]
    return [HuggingFaceOptimizer(hugging_face_params={})]


class _HFDataset(Sequence):
    """Sequence of tokenized mini-batches built from raw input texts.

    Item `i` is the i-th mini-batch: a tuple of input tensors (ordered by
    `keywords`) plus the corresponding slice of labels (or None).

    NOTE(review): `__len__` returns the number of *texts*, not the number of
    batches; iteration still terminates because `__getitem__` raises
    IndexError once `item * batch_size` passes the text count — confirm
    consumers rely only on iteration, not on len().
    """

    def __init__(
        self,
        input_texts: List,
        ys: Optional[List],
        keywords: List[str],
        batch_size: int,
        tokenizer: PreTrainedTokenizer,
        tokenizer_args: Dict,
    ):
        self._input_texts = input_texts
        self._ys = ys
        self._bs = batch_size
        self._keys = keywords
        self._tokenizer = tokenizer
        # Some tokenizers (e.g. GPT-2) have no pad token; reuse EOS so that
        # padding to a common batch length is possible.
        if self._tokenizer.pad_token is None:
            self._tokenizer.pad_token = self._tokenizer.eos_token
        _tokenizer_args = {"truncation": True, "padding": True}
        _tokenizer_args.update(tokenizer_args)
        self._tokenizer_args = _tokenizer_args

    def __getitem__(self, item: int):
        pointer = self._bs * item
        if pointer >= len(self):
            raise IndexError
        mini_batch = self._input_texts[
            pointer : pointer + self._bs  # noqa E203
        ]
        if self._ys is not None:
            mini_batch_y = self._ys[pointer : pointer + self._bs]  # noqa E203
        else:
            mini_batch_y = None
        encoded_inputs = self._tokenizer(mini_batch, **self._tokenizer_args)
        return tuple(encoded_inputs[key] for key in self._keys), mini_batch_y

    def __len__(self):
        return len(self._input_texts)


def optimize_huggingface_model(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    input_texts: List[str],
    batch_size: int,
    max_input_sizes: List[Tuple[int, ...]],
    save_dir: str,
    extra_input_info: List[Dict] = None,
    use_static_shape: bool = False,
    use_torch_api: bool = False,
    tokenizer_args: Dict = None,
    ignore_compilers: List[str] = None,
    perf_loss_ths: float = None,
    perf_metric: Union[str, Callable] = None,
    ys: List = None,
):
    """Optimize the HuggingFace model.

    This function saves the output model as well in a nebuly-readable format
    in order to avoid temporary-files corruptions which would prevent the
    model saving later in the process.

    Note that TensorRT compiler is currently disabled for Hugginface models
    since in some cases it can cause an untreatable error in the C++ code
    causing the interruption of the optimization.

    Args:
        model (PreTrainedModel): HuggingFace transformers model.
        tokenizer (PreTrainedTokenizer): Tokenizer used for building model's
            inputs.
        input_texts (List[str]): Texts either from the training set or
            similar to the ones contained in the text. If the perf_loss_ths
            is passed the input_text will be used for computing the drop in
            precision and for setting the quantization parameters. If you
            selected a quantization metric needing the input labels you need
            to provide them for each input in the `ys` argument.
        batch_size (int): Batch size needed for the model.
        max_input_sizes (List[Tuple[int]]): List containing the maximum size
            of all the input tensors of the model. Note that even just a
            single tensor is needed as model input, this field must be a list
            containing (in the exposed case) a single element). The tuple
            must contain the maximum value for all the input tensor
            dimensions excluding the batch size. This means that the final
            input tensor size will be considered as
            `(batch_size, *input_tensor_size)`, where `input_tensor_size` is
            one list element of `max_input_sizes`.
        save_dir (str): Path to the directory where saving the final model.
        extra_input_info (List[Dict], optional): List of extra information
            needed for defining the input tensors, e.g. max_value and
            min_value the tensors can get.
        use_static_shape (bool): Parameter for fixing the accepted input
            shape.
        use_torch_api (bool): Parameter for using the torch api of compilers
            when available. The actual implementation supports only the torch
            interface for TVM. Note that when running the torch interface
            nebullvm will ignore the ONNX one once the torch implementation
            succeeds. Clearly, in case of failure of the torch API, a second
            tentative will be done with the ONNX interface.
        tokenizer_args (Dict, optional): Extra args needed for the tokenizer.
        ignore_compilers (List[str], optional): List of DL compilers we want
            to ignore while running the optimization. Compiler name should be
            one between "tvm", "tensor RT", "openvino" and "onnxruntime".
        perf_loss_ths (float, optional): Tolerated relative error for
            performing approximation techniques before compiling the model.
            If no value is given, no optimization will be performed. Note
            that it will not be used for compilers using the torch API when
            `use_torch_api` is `True`. Just dynamic quantization will be
            performed, since no data is given as input.
        perf_metric (Union[Callable, str], optional): The metric to be used
            for accepting or refusing a precision-reduction optimization
            proposal. If none is given but a `perf_loss_ths` is received, the
            `nebullvm.measure.compute_relative_difference` metric will be
            used as default one. A user-defined metric can be passed as
            function accepting as inputs two tuples of tensors (produced by
            the baseline and the quantized model) and the related original
            labels. For more information see
            `nebullvm.measure.compute_relative_difference` and
            `nebullvm.measure.compute_accuracy_drop`. `perf_metric` accepts
            as value also a string containing the metric name. At the current
            stage the supported metrics are `"precision"` and `"accuracy"`.
        ys: List of target labels. For each input in `input_texts` there
            should be the corresponding label. Note that this feature is just
            used for estimating the accuracy drop while running
            precision-reduction techniques. It will be ignored if these
            techniques are not activated.
    """
    if perf_loss_ths is not None and ys is None and perf_metric == "accuracy":
        raise ValueError(
            "You cannot select the accuracy as quantization metric without "
            "providing valid labels!"
        )
    if isinstance(perf_metric, str):
        # Resolve named metrics to their callable implementation.
        perf_metric = QUANTIZATION_METRIC_MAP.get(perf_metric)
    tokenizer_args = tokenizer_args or {}
    # The wrapper works with torch tensors, so force the tokenizer to emit
    # them.
    tokenizer_args.update({"return_tensors": "pt"})
    output_structure, output_type = _get_output_structure(
        text=input_texts[0],
        model=model,
        tokenizer=tokenizer,
        tokenizer_args=tokenizer_args,
    )
    input_example = tokenizer(input_texts[0], **tokenizer_args)
    input_types = [_extract_input_type(v) for v in input_example.values()] or [
        "int"
    ] * len(input_example)
    # The wrapper model is needed for adapt the huggingface transformers API
    # to the one adopted by the nebullvm optimization.
    wrapper_model = _TransformerWrapper(
        core_model=model, encoded_input=input_example
    )
    with TemporaryDirectory() as tmp_dir:
        optimized_model = optimize_torch_model(
            wrapper_model,
            batch_size=batch_size,
            input_sizes=max_input_sizes,
            save_dir=tmp_dir,
            input_types=input_types,
            extra_input_info=extra_input_info,
            use_torch_api=use_torch_api,
            # Dynamic axes are only detected when variable shapes are wanted.
            dynamic_axis=_get_dynamic_axis(
                text=input_texts[0],
                tokenizer=tokenizer,
                model=model,
                tokenizer_args=tokenizer_args,
            )
            if not use_static_shape
            else None,
            perf_loss_ths=perf_loss_ths,
            perf_metric=perf_metric,
            dataloader=_HFDataset(
                input_texts,
                ys,
                list(wrapper_model.inputs_types.keys()),
                batch_size,
                tokenizer,
                tokenizer_args,
            ),
            # TensorRT is always skipped (see note in the docstring); TVM is
            # additionally skipped when dynamic shapes are requested.
            ignore_compilers=list(
                set(
                    (
                        [ModelCompiler.TENSOR_RT.value]
                        if use_static_shape
                        else [
                            ModelCompiler.TENSOR_RT.value,
                            ModelCompiler.APACHE_TVM.value,
                        ]
                    )
                    + ifnone(ignore_compilers, [])
                )
            ),
            custom_optimizers=_get_extra_optimizer(model.config),
        )
        final_model = HuggingFaceInferenceLearner(
            core_inference_learner=optimized_model,
            output_structure=output_structure,
            input_names=list(wrapper_model.inputs_types.keys()),
            output_type=output_type,
        )
        # Save before the temporary dir vanishes, then reload from the
        # persistent location so the returned learner is self-contained.
        final_model.save(save_dir)
    return final_model.load(save_dir)
{ "alphanum_fraction": 0.6392632291, "author": null, "avg_line_length": 38.0574257426, "converted": null, "ext": "py", "file": null, "hexsha": "f12581dc5bdaf922df320cb722ab7f5e9cd4e087", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 36, "max_forks_repo_forks_event_max_datetime": "2022-03-24T12:20:00.000Z", "max_forks_repo_forks_event_min_datetime": "2022-02-21T15:02:59.000Z", "max_forks_repo_head_hexsha": "5cbee58d63c0942b85cbf863bcc55fd92104756a", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "emilecourthoud/nebullvm", "max_forks_repo_path": "nebullvm/api/frontend/huggingface.py", "max_issues_count": 21, "max_issues_repo_head_hexsha": "5cbee58d63c0942b85cbf863bcc55fd92104756a", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:08:14.000Z", "max_issues_repo_issues_event_min_datetime": "2022-02-22T14:31:07.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "emilecourthoud/nebullvm", "max_issues_repo_path": "nebullvm/api/frontend/huggingface.py", "max_line_length": 79, "max_stars_count": 821, "max_stars_repo_head_hexsha": "5cbee58d63c0942b85cbf863bcc55fd92104756a", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "emilecourthoud/nebullvm", "max_stars_repo_path": "nebullvm/api/frontend/huggingface.py", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:09:21.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-21T13:21:24.000Z", "num_tokens": 4029, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 19219 }
C$PRAGMA SUN OPT=2
      subroutine tabgen
c $Id$
c
c ***** computes and tabulates f0(x) to f5(x) *****
c ***** in range x = -0.24 to x = 26.4 *****
c ***** in units of x = 0.08 *****
c ***** the two electron integral sp routines *****
c ***** the table is generated only once for each entry *****
c
c     fills common /tabint/ c(1000,6): for each m = 0..5 (column mm),
c     rows 1-333 hold f_m at grid points, rows 334-666 hold first
c     differences, rows 667-999 hold a smoothed second-difference
c     combination used for interpolation.
c
      implicit none
      double precision c, ppp(350)
      common/tabint/ c(1000,6)
      double precision pt184, pt5, six, tenm7, four, two, done, pt886
      integer mm, l, i, m, j, notrms
      double precision q, qqq, a, term, ptlsum, b, temp1, temp2
      double precision approx, fimult, fiprop
      data pt184,pt5/ 0.184d0,0.50d0/
c     note: despite its name, tenm7 holds 1.0d-20 (series cutoff)
      data six,tenm7/6.0d0,1.0d-20 /
      data four,two,done/4.0d0,2.0d0,1.0d0/
c     pt886 = sqrt(pi)/2 (leading factor of the asymptotic expansion)
      data pt886/0.8862269254527d0/
c
c     outer loop: q = m = 0..5 selects the function f_m;
c     inner loop: qqq steps the argument x from -0.16 by 0.08
      q=-done
      do 30 mm=1,6
      m=mm-1
      q=q+done
      qqq = -0.24d0
      do 20 i=1,340
      qqq = qqq+0.08d0
      a=q
c ***** change limit of approximate solution. *****
c     small x (<= 15): convergent ascending series, up to 50 terms,
c     stopped when the relative term drops below the cutoff
      if(qqq-15.0d0) 1,1,10
    1 a=a+pt5
      term=done/a
      ptlsum=term
      do 2 l=2,50
      a=a+done
      term=term*qqq/a
      ptlsum=ptlsum+term
      if( dabs(term/ptlsum)-tenm7)3,2,2
    2 continue
    3 ppp(i)=pt5*ptlsum* dexp(-qqq)
      go to 20
c     large x (> 15): asymptotic form minus a short correction series
   10 b=a+pt5
      a=a-pt5
      approx=pt886/(dsqrt(qqq)*qqq**m)
      if(m.eq.0) go to 13
      do 12 l=1,m
      b=b-done
   12 approx=approx*b
   13 fimult=pt5* dexp(-qqq)/qqq
      fiprop=fimult/approx
      term=done
      ptlsum=term
c     number of correction terms grows with x and m
      notrms=qqq
      notrms=notrms+m
      do 14 l=2,notrms
      term=term*a/qqq
      ptlsum=ptlsum+term
      if( dabs(term*fiprop/ptlsum)-tenm7)15,15,14
   14 a=a-done
   15 ppp(i)=approx-fimult*ptlsum
   20 continue
c     pack values, first differences, and the smoothed second-difference
c     combination into the shared table (j = i+2 skips the two points
c     below x = 0 kept only for the central differences)
      do 30 i=1,333
      j=i+2
      c(i,mm)=ppp(j)
      c(i+333,mm)=ppp(j+1)-ppp(j)
      temp1=-two*ppp(j)+ppp(j+1)+ppp(j-1)
      temp2=six*ppp(j)-four*ppp(j+1)-four*ppp(j-1)+ppp(j-2)+ppp(j+2)
   30 c(i+666,mm) = (temp1-pt184*temp2)/six
      return
      end
{ "alphanum_fraction": 0.5271580989, "author": null, "avg_line_length": 28.2465753425, "converted": null, "ext": "f", "file": null, "hexsha": "94fc027a80e7b6b10cb813e6bffff07f9e1a23d0", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 135, "max_forks_repo_forks_event_max_datetime": "2022-03-31T02:28:49.000Z", "max_forks_repo_forks_event_min_datetime": "2017-11-19T18:36:44.000Z", "max_forks_repo_head_hexsha": "21cb07ff634475600ab687882652b823cad8c0cd", "max_forks_repo_licenses": [ "ECL-2.0" ], "max_forks_repo_name": "dinisAbranches/nwchem", "max_forks_repo_path": "src/NWints/ints_sp/tabgen.f", "max_issues_count": 356, "max_issues_repo_head_hexsha": "21cb07ff634475600ab687882652b823cad8c0cd", "max_issues_repo_issues_event_max_datetime": "2022-03-31T02:28:21.000Z", "max_issues_repo_issues_event_min_datetime": "2017-12-05T01:38:12.000Z", "max_issues_repo_licenses": [ "ECL-2.0" ], "max_issues_repo_name": "dinisAbranches/nwchem", "max_issues_repo_path": "src/NWints/ints_sp/tabgen.f", "max_line_length": 69, "max_stars_count": 317, "max_stars_repo_head_hexsha": "21cb07ff634475600ab687882652b823cad8c0cd", "max_stars_repo_licenses": [ "ECL-2.0" ], "max_stars_repo_name": "dinisAbranches/nwchem", "max_stars_repo_path": "src/NWints/ints_sp/tabgen.f", "max_stars_repo_stars_event_max_datetime": "2022-03-28T11:48:24.000Z", "max_stars_repo_stars_event_min_datetime": "2017-11-20T21:29:11.000Z", "num_tokens": 836, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2062 }
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Interactive walkthrough of the QUBO lifecycle on a 3-variable problem:
# QUBO -> Ising -> embedded/scaled QMI -> QPU sample -> back to QUBO.
# Each stage pauses with input() so the user steps through with <enter>.

from collections import defaultdict
from dimod import BinaryQuadraticModel as BQM
import networkx as nx
import dwave.embedding
from dwave.system import DWaveSampler, EmbeddingComposite

# Create Q matrix (upper-triangular QUBO coefficients for variables 1..3)
Q = defaultdict(int)
Q[(1,1)] = -91
Q[(1,2)] = 72
Q[(1,3)] = 72
Q[(2,2)] = -87
Q[(2,3)] = 72
Q[(3,3)] = -89

print("\nQUBO:\n")
for i in range(1,4):
    row = ''
    for j in range(1,4):
        if (i,j) in Q:
            row += str(Q[(i,j)])+'\t'
        else:
            row += str(0) + '\t'
    print(row)

qubo_model = BQM.from_qubo(Q)
# to_ising() returns (h, J, offset): linear biases, quadratic couplers.
ising_model = qubo_model.to_ising()

# Pause for the user to hint <enter> to continue
input()
print("\nConverting QUBO to Ising ...")

print("\nIsing:\n")
for i in range(1,4):
    row = ''
    for j in range(1,4):
        if j<i:
            row += str(0) + '\t'
        elif j==i:
            row += str(ising_model[0][i]) + '\t'
        else:
            # J couplers are keyed by the order dimod produced them in
            row += str(ising_model[1][(j,i)]) + '\t'
    print(row)

input()
print("\nEmbedding logical problem into physical layout ...")

# Construct logical problem graph (triangle on variables 1, 2, 3)
prob_graph = nx.Graph()
prob_graph.add_edges_from([(1,2),(2,3),(1,3)])

# Construct an embedding: logical variable 3 becomes the chain [3, 4]
embedding = {1:[1], 2:[2], 3:[3,4]}

# Map our Ising model onto the embedding (target topology: 4-qubit cycle)
qubits = list(i for x in embedding.values() for i in x)
target = nx.cycle_graph(qubits)
th, tJ = dwave.embedding.embed_ising(ising_model[0], ising_model[1], embedding, target)

print("\nQMI (unscaled):\n")
for i in range(1,5):
    row = ''
    for j in range(1,5):
        if j==i:
            row += str(th[i]) + '\t'
        elif (i,j) in tJ:
            row += str(tJ[(i,j)]) + '\t'
        else:
            row += str(0) + '\t'
    print(row)

# J range is -1, +1
max_j = max(list(map(abs, tJ.values())))

# h range is -2, +2 (hence the division by 2 below)
max_h = max(list(map(abs, th.values()))) / 2

# Find our scale factor so both h and J fit the hardware ranges
scale_factor = max(max_j, max_h)

input()
print("\nScaling physical problem by", scale_factor, "...")
print("\nQMI (scaled):\n")

# Scale QMI (th and tJ are divided in place while printing)
for i in range(1,5):
    row = ''
    for j in range(1,5):
        if j==i:
            th[i] = th[i]/scale_factor
            row += str(round(th[i], 2)) + '\t'
        elif (i,j) in tJ:
            tJ[(i,j)] = tJ[(i,j)]/scale_factor
            row += str(round(tJ[(i,j)], 2)) + '\t'
        else:
            row += str(0) + '\t'
    print(row)

input()
print("\nSending problem to QPU...")

sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))

# Use EmbeddingComposite to work around any missing qubits
sampleset = sampler.sample_ising(th, tJ, num_reads=10, label='Training - QUBO Lifecycle')

print("\nBest QMI solution found:\n")
best_QMI_solution = sampleset.first.sample
print(best_QMI_solution)

input()
print("\nConverting QMI solution to Ising ...")
best_Ising_solution = dict(best_QMI_solution)
# Resolve a potential chain break
# NOTE(review): qubit 4 is simply dropped, keeping qubit 3's value for the
# chained variable — no majority vote is performed here.
del best_Ising_solution[4]
print("\nBest Ising solution found:\n")
print(best_Ising_solution)

input()
print("\nConverting Ising solution to QUBO ...")
# Spin -1 maps to binary 0; spin +1 stays 1.
best_QUBO_solution = dict(best_Ising_solution)
for key, val in best_QUBO_solution.items():
    if val == -1:
        best_QUBO_solution[key] = 0
print("\nBest QUBO solution found:\n")
print(best_QUBO_solution)
{ "alphanum_fraction": 0.6283302559, "author": null, "avg_line_length": 25.7891156463, "converted": null, "ext": "py", "file": null, "hexsha": "d23e858d234635e53e88fe8e93559c0a3484a719", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-06-11T03:58:31.000Z", "max_forks_repo_forks_event_min_datetime": "2020-12-28T16:49:14.000Z", "max_forks_repo_head_hexsha": "3aaaf2afb3214a3d70e2730378da44e2d0efc070", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "dwave-training/QUBO-lifecycle", "max_forks_repo_path": "qubo-lifecycle.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "3aaaf2afb3214a3d70e2730378da44e2d0efc070", "max_issues_repo_issues_event_max_datetime": "2020-03-11T07:49:10.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-11T07:49:10.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "dwave-training/QUBO-lifecycle", "max_issues_repo_path": "qubo-lifecycle.py", "max_line_length": 123, "max_stars_count": null, "max_stars_repo_head_hexsha": "3aaaf2afb3214a3d70e2730378da44e2d0efc070", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "dwave-training/QUBO-lifecycle", "max_stars_repo_path": "qubo-lifecycle.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1122, "path": null, "reason": "import networkx", "repo": null, "save_path": null, "sha": null, "size": 3791 }
# ===========================================
#
# mian Heatmap Library
# @author: tbj128
#
# ===========================================

#
# Imports
#

from scipy import stats  # was "from scipy.stats import stats" (legacy alias removed in SciPy 1.11)
from mian.model.otu_table import OTUTable
import numpy as np

from mian.analysis.alpha_diversity import AlphaDiversity


class Heatmap(object):
    """
    Builds a correlation heatmap between two variable sets derived from an
    OTU table: taxonomy abundances ("Taxonomy"), numeric sample metadata
    ("Metadata"), or alpha diversity ("Alpha").
    """

    def run(self, user_request):
        """
        Load the filtered/aggregated OTU table, the sample metadata and the
        phylogenetic tree for this request, then delegate to analyse().

        :param user_request: project request object (provides user_id, pid,
            level and custom attributes) -- project type, not constructed here
        :return: dict with "row_headers", "col_headers", "data" (see analyse)
        """
        table = OTUTable(user_request.user_id, user_request.pid)
        base, headers, sample_labels = table.get_table_after_filtering_and_aggregation_and_low_count_exclusion(user_request)
        metadata = table.get_sample_metadata()
        phylogenetic_tree = table.get_phylogenetic_tree()
        return self.analyse(user_request, base, headers, sample_labels, metadata, phylogenetic_tree)

    def get_numeric_metadata_table(self, metadata, metadata_headers):
        """
        Keep only the metadata columns in which every value is numeric.

        NOTE(review): str.isnumeric() is False for values such as "3.5",
        "-1" or "1e3", so float/negative columns are also dropped -- this
        preserves the original behavior; confirm it is intended before
        widening the check.

        :param metadata: 2D sequence (samples x columns) of strings
        :param metadata_headers: sequence of column names
        :return: (filtered metadata ndarray, filtered headers ndarray)
        """
        metadata = np.array(metadata)
        metadata_headers = np.array(metadata_headers)
        cols_to_keep = []
        j = 0
        while j < len(metadata_headers):
            all_are_numeric = True
            i = 0
            while i < len(metadata):
                if not metadata[i][j].isnumeric():
                    all_are_numeric = False
                    break  # one non-numeric value disqualifies the column
                i += 1
            if all_are_numeric:
                cols_to_keep.append(j)
            j += 1
        new_metadata = metadata[:, cols_to_keep]
        new_metadata_headers = metadata_headers[cols_to_keep]
        return new_metadata, new_metadata_headers

    @staticmethod
    def _dense(base):
        # OTU tables arrive as a scipy CSR matrix; densify at most once.
        # Guarding on toarray() fixes a crash in the original code, which
        # reassigned `base = base.toarray()` in the Alpha branch and then
        # called .toarray() on the resulting ndarray for the second corrvar.
        return base.toarray() if hasattr(base, "toarray") else base

    def _variable_set(self, selector, alpha_attr_key, user_request, base, headers,
                      sample_labels, numeric_metadata, numeric_metadata_headers,
                      phylogenetic_tree, alpha):
        """
        Build (matrix, headers) for one side of the correlation.
        Unknown selectors yield ([], []) exactly like the original code.
        """
        if selector == "Taxonomy":
            return self._dense(base), headers
        if selector == "Metadata":
            return numeric_metadata, numeric_metadata_headers.tolist()
        if selector == "Alpha":
            alpha_params = user_request.get_custom_attr(alpha_attr_key)
            # OTU tables are returned as a CSR matrix when level == -1
            table = self._dense(base) if int(user_request.level) == -1 else base
            alpha_vals = alpha.calculate_alpha_diversity(table, sample_labels, headers,
                                                         phylogenetic_tree,
                                                         alpha_params[1], alpha_params[0])
            # one-column matrix: one alpha value per sample
            return [[v] for v in alpha_vals], ["Alpha Diversity"]
        return [], []

    @staticmethod
    def _correlate(X, method):
        """Square correlation matrix over the columns of X."""
        if method == "spearman":
            correlations, _ = stats.spearmanr(X)
            return correlations
        if method == "pearson":
            return np.corrcoef(X, rowvar=False)
        raise NotImplementedError("Correlation method not implemented")

    def analyse(self, user_request, base, headers, sample_labels, metadata, phylogenetic_tree):
        """
        Correlate the corrvar1 variable set against the corrvar2 set.

        Custom attributes read from user_request: corrvar1, corrvar2
        ("Taxonomy"/"Metadata"/"Alpha"), corrMethod ("spearman"/"pearson"),
        cluster ("Yes" to sort rows/cols by correlation sums), and
        minSamplesPresent (columns with fewer non-zero samples are dropped).

        :return: {"row_headers": [...], "col_headers": [...], "data": [[...]]};
            when corrvar1 == corrvar2 only the upper triangle (rounded to 2
            decimals) is emitted, row i starting at column i.
        """
        corrvar1 = user_request.get_custom_attr("corrvar1")
        corrvar2 = user_request.get_custom_attr("corrvar2")
        corrMethod = user_request.get_custom_attr("corrMethod")
        cluster = user_request.get_custom_attr("cluster")
        minSamplesPresent = int(user_request.get_custom_attr("minSamplesPresent"))

        metadata_otu_order, metadata_headers, _ = metadata.get_as_table_in_table_order(sample_labels)
        numeric_metadata, numeric_metadata_headers = self.get_numeric_metadata_table(metadata_otu_order, metadata_headers)
        numeric_metadata = numeric_metadata.astype(float)

        alpha = AlphaDiversity()
        corrvar1Base, corrvar1Headers = self._variable_set(
            corrvar1, "corrvar1Alpha", user_request, base, headers, sample_labels,
            numeric_metadata, numeric_metadata_headers, phylogenetic_tree, alpha)
        corrvar2Base, corrvar2Headers = self._variable_set(
            corrvar2, "corrvar2Alpha", user_request, base, headers, sample_labels,
            numeric_metadata, numeric_metadata_headers, phylogenetic_tree, alpha)

        if corrvar1 != corrvar2:
            X = np.array(corrvar1Base)
            x_kept = np.count_nonzero(X, axis=0) >= minSamplesPresent
            X = X[:, x_kept]
            x_headers = np.array(corrvar1Headers)[x_kept]

            Y = np.array(corrvar2Base)
            y_kept = np.count_nonzero(Y, axis=0) >= minSamplesPresent
            Y = Y[:, y_kept]
            y_headers = np.array(corrvar2Headers)[y_kept]

            combined = np.concatenate((X, Y), axis=1)
            correlations = self._correlate(combined, corrMethod)
            # keep only the cross-block: rows = corrvar2 vars, cols = corrvar1 vars
            correlations = correlations[len(x_headers):len(x_headers) + len(y_headers), 0:len(x_headers)]
            row_headers = y_headers.tolist()
            col_headers = x_headers.tolist()
        else:
            X = np.array(corrvar1Base)
            kept = np.count_nonzero(X, axis=0) >= minSamplesPresent
            X = X[:, kept]
            correlations = self._correlate(X, corrMethod)
            # BUG FIX: the original used the unfiltered taxonomy `headers`
            # parameter here, which was wrong for Metadata/Alpha and whose
            # length mismatched the matrix whenever columns were dropped.
            filtered_headers = np.array(corrvar1Headers)[kept].tolist()
            row_headers = filtered_headers
            col_headers = filtered_headers

        if cluster == "Yes":
            # Perform some simple clustering by ordering by the col sums
            col_sums = np.sum(correlations, axis=0).tolist()
            col_order = sorted(range(len(col_sums)), key=col_sums.__getitem__, reverse=True)
            if corrvar1 != corrvar2:
                row_sums = np.sum(correlations, axis=1).tolist()
                row_order = sorted(range(len(row_sums)), key=row_sums.__getitem__, reverse=True)
            else:
                # square symmetric case: keep rows and columns aligned
                row_order = col_order
            correlations = correlations[:, col_order]
            correlations = correlations[row_order, :]
            row_headers = np.array(row_headers)[row_order].tolist()
            col_headers = np.array(col_headers)[col_order].tolist()

        if corrvar1 == corrvar2:
            # symmetric matrix: emit only the upper triangle (incl. diagonal)
            full = correlations.tolist()
            correlations_list = [[round(v, 2) for v in full[i][i:]] for i in range(len(full))]
        else:
            correlations_list = correlations.tolist()

        abundances_obj = {
            "row_headers": row_headers,
            "col_headers": col_headers,
            "data": correlations_list,
        }
        return abundances_obj
{ "alphanum_fraction": 0.5802758621, "author": null, "avg_line_length": 39.8351648352, "converted": null, "ext": "py", "file": null, "hexsha": "ec5bad93306e12afb4ba34a29afded81189e5f27", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2d1487fe8bf55a30ba983694ab5edaac39ae3e22", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tbj128/mian", "max_forks_repo_path": "mian/analysis/heatmap.py", "max_issues_count": 8, "max_issues_repo_head_hexsha": "2d1487fe8bf55a30ba983694ab5edaac39ae3e22", "max_issues_repo_issues_event_max_datetime": "2020-12-12T06:35:09.000Z", "max_issues_repo_issues_event_min_datetime": "2019-04-03T05:37:44.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tbj128/mian", "max_issues_repo_path": "mian/analysis/heatmap.py", "max_line_length": 124, "max_stars_count": 1, "max_stars_repo_head_hexsha": "2d1487fe8bf55a30ba983694ab5edaac39ae3e22", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tbj128/mian", "max_stars_repo_path": "mian/analysis/heatmap.py", "max_stars_repo_stars_event_max_datetime": "2021-11-24T08:06:39.000Z", "max_stars_repo_stars_event_min_datetime": "2021-11-24T08:06:39.000Z", "num_tokens": 1526, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 7250 }
# This file consists of the functions which are used in SLMC/Restricted-SLMC training.

import numpy as np
import numpy.random as rnd
from sklearn import linear_model

from Configuration import Configuration
from Hamiltonian import first_NN_interaction, second_NN_interaction, third_NN_interaction
from LocalUpdate import LocalUpdate
from SelfLearningUpdate import SelfLearningUpdate
from RestrictedSelfLearningUpdate import RestrictedSelfLearningUpdate


def _random_spins(size):
    """A fresh size x size lattice of spins, each either +1 or -1."""
    return rnd.choice([-1, 1], size=(size, size))


def _measure(spins, size, J, K, T):
    """
    Measure one configuration.
    Returns [energy, C1, C2, C3], where Ck is the k-th NN interaction sum.
    """
    config = Configuration(spins, size, J, K, T)
    return [config.energy,
            first_NN_interaction(spins),
            second_NN_interaction(spins),
            third_NN_interaction(spins)]


def _report_progress(n, Nsamples, every):
    """Print progress every `every` samples (sampling can be slow)."""
    if (n + 1) % every == 0:
        print("Sample %.0f: %.2f percent done."%(n+1, 100*(n+1)/Nsamples))


def _local_sweep(spins, size, J, K, T):
    """One lattice sweep: size*size single-site local updates."""
    for _ in range(size * size):
        update = LocalUpdate(spins, J, K, T)
        spins = update.local_update()
    return spins


def _slmc_step(spins, J, K, T, eff_param):
    """One self-learning (cluster) update step."""
    update = SelfLearningUpdate(spins, J, K, T, eff_param)
    return update.SLMC_Update()


def _restricted_slmc_step(spins, J, K, T, eff_param, restriction):
    """One restricted self-learning (bounded-cluster) update step."""
    update = RestrictedSelfLearningUpdate(spins, J, K, T, eff_param, restriction)
    return update.Restricted_SLMC_Update()


def Make_Samples_Local(size, J, K, T, Nsamples, Nsteps):
    """Generate independent samples with the local update method.

    Each sample is drawn from its own freshly-initialized Markov chain.

    Input parameters:
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        Nsteps: sweeps taken in MC simulation to reach equilibrium;
    Output:
        A list of samples, each sample has the form:
        [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
        (the original docstring listed the energy last, but the code has
        always appended it first).
    """
    samples = []
    for n in range(Nsamples):
        spins = _random_spins(size)
        for _ in range(Nsteps):
            spins = _local_sweep(spins, size, J, K, T)
        samples.append(_measure(spins, size, J, K, T))
        _report_progress(n, Nsamples, 100)
    return samples


def Make_tSamples_Local(size, J, K, T, Nsamples, warmup, interval):
    """Generate samples based on local update method, all the samples are
    taken from one Markov chain.

    Input parameters:
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        warmup: sweeps taken in MC simulation to reach equilibrium;
        interval: sweeps between two cuts of samples;
    Output:
        A list of samples, each sample has the form:
        [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
    """
    samples = []
    spins = _random_spins(size)
    for _ in range(warmup):
        spins = _local_sweep(spins, size, J, K, T)
    for n in range(Nsamples):
        for _ in range(interval):
            spins = _local_sweep(spins, size, J, K, T)
        samples.append(_measure(spins, size, J, K, T))
        _report_progress(n, Nsamples, 10)
    return samples


def Make_Samples_SelfLearning(size, J, K, T, Nsamples, Nsteps, eff_param):
    """Generate independent samples based on self learning update method.

    Input parameters:
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        Nsteps: update steps taken in MC simulation to reach equilibrium;
        eff_param: parameters of effective Hamiltonian.
    Output:
        A list of samples, each sample has the form:
        [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
    """
    samples = []
    for n in range(Nsamples):
        spins = _random_spins(size)
        for _ in range(Nsteps):
            spins = _slmc_step(spins, J, K, T, eff_param)
        samples.append(_measure(spins, size, J, K, T))
        _report_progress(n, Nsamples, 100)
    return samples


def Make_tSamples_SelfLearning(size, J, K, T, Nsamples, warmup, interval, eff_param):
    """Generate samples based on self learning update method, all the samples
    are taken from one Markov chain.

    Input parameters:
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        warmup: update steps taken in MC simulation to reach equilibrium;
        interval: update steps between two cuts of samples;
        eff_param: parameters of effective Hamiltonian.
    Output:
        A list of samples, each sample has the form:
        [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
    """
    samples = []
    spins = _random_spins(size)
    for _ in range(warmup):
        spins = _slmc_step(spins, J, K, T, eff_param)
    for n in range(Nsamples):
        for _ in range(interval):
            spins = _slmc_step(spins, J, K, T, eff_param)
        samples.append(_measure(spins, size, J, K, T))
        _report_progress(n, Nsamples, 100)
    return samples


def Make_Samples_Restricted_SelfLearning(size, J, K, T, Nsamples, Nsteps, eff_param, restriction):
    """Generate independent samples from restricted self learning update method.

    Input parameters:
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        Nsteps: update steps taken in MC simulation to reach equilibrium;
        eff_param: parameters of effective Hamiltonian;
        restriction: restricted size of the cluster.
    Output:
        A list of samples, each sample has the form:
        [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
    """
    samples = []
    for n in range(Nsamples):
        spins = _random_spins(size)
        for _ in range(Nsteps):
            spins = _restricted_slmc_step(spins, J, K, T, eff_param, restriction)
        samples.append(_measure(spins, size, J, K, T))
        _report_progress(n, Nsamples, 100)
    return samples


def Make_tSamples_Restricted_SelfLearning(size, J, K, T, Nsamples, warmup, interval, eff_param, restriction):
    """Generate samples based on restricted self learning update method, all
    the samples are taken from one Markov chain.

    Input parameters:
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        warmup: update steps taken in MC simulation to reach equilibrium;
        interval: update steps between two cuts of samples;
        eff_param: parameters of effective Hamiltonian;
        restriction: restricted size of the cluster.
    Output:
        A list of samples, each sample has the form:
        [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
    """
    samples = []
    spins = _random_spins(size)
    for _ in range(warmup):
        spins = _restricted_slmc_step(spins, J, K, T, eff_param, restriction)
    for n in range(Nsamples):
        for _ in range(interval):
            spins = _restricted_slmc_step(spins, J, K, T, eff_param, restriction)
        samples.append(_measure(spins, size, J, K, T))
        _report_progress(n, Nsamples, 100)
    return samples


def train_eff_Hamil(samples, n):
    """Train effective Hamiltonian from the samples.

    Input parameters:
        samples: a list of samples, each sample has the form:
            [energy, 1st NN interaction sum, 2nd NN interaction sum, 3rd NN interaction sum]
        n: the order of interactions that is considered in H_eff.
    Output:
        eff_param: parameters of effective Hamiltonian, a list with the
        first term being E0 and next terms being J coefficients (the fitted
        coefficients are negated per the sign convention of H_eff).
    """
    samples = np.array(samples)
    energy = samples[:, 0:1]
    interaction = samples[:, 1:n+1]
    # use linear model to get E0 and Js
    reg = linear_model.LinearRegression()
    reg.fit(interaction, energy)
    return np.append(reg.intercept_, -reg.coef_)


def self_optimization(Iter, size, J, K, T, Nsamples, warmup, interval, eff_param):
    """Complete the self optimization procedure in the self-learning Monte Carlo.

    Input:
        Iter: the number of iteration steps to optimize effective Hamiltonian;
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        warmup: steps taken in MC simulation to reach equilibrium;
        interval: steps between two cuts of samples;
        eff_param: effective Hamiltonian obtained from local update monte carlo at T > Tc.
    Output:
        optimized parameters of effective Hamiltonian.
    """
    # the order of interactions in the effective Hamiltonian
    n = len(eff_param) - 1
    for k in range(Iter):
        # resample with the eff_param obtained in the previous iteration,
        # then refit the effective Hamiltonian on the fresh samples
        samples = Make_tSamples_SelfLearning(size, J, K, T, Nsamples, warmup, interval, eff_param)
        eff_param = train_eff_Hamil(samples, n)
        print('Iteration %.0f, %.2f percent done.'%(k+1, 100*(k+1)/Iter))
        print('eff_param is:', eff_param)
    return eff_param


def Temp_descend_opt(Temp, size, J, K, Nsamples, warmup, interval, eff_param):
    """Self optimization while annealing the temperature downwards.

    Input:
        Temp: a list of temperature, namely the way how temperature is descending;
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        Nsamples: the number of the samples trained at each temperature;
        warmup: steps taken in MC simulation to reach equilibrium;
        interval: steps between two cuts of samples;
        eff_param: effective Hamiltonian obtained from local update monte carlo at T > Tc.
    Return:
        optimized parameters of effective Hamiltonian at the last temperature.
    Output:
        intermediate temp and corresponding eff_param (printed).
    """
    n = len(eff_param) - 1
    for i in range(len(Temp)):
        samples = Make_tSamples_SelfLearning(size, J, K, Temp[i], Nsamples, warmup, interval, eff_param)
        eff_param = train_eff_Hamil(samples, n)
        print('Temp %.3f, %.2f percent done.'%(Temp[i], 100*(i+1)/len(Temp)))
        print('eff_param is:', eff_param)
    return eff_param


def restricted_self_optimization(Iter, size, J, K, T, Nsamples, warmup, interval, eff_param, restriction):
    """Complete the self optimization procedure in the restricted
    self-learning Monte Carlo.

    Input:
        Iter: the number of iteration steps to optimize effective Hamiltonian;
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        T: temperature;
        Nsamples: the number of the samples;
        warmup: steps taken in MC simulation to reach equilibrium;
        interval: steps between two cuts of samples;
        eff_param: effective Hamiltonian obtained from local update monte carlo at T > Tc;
        restriction: restricted size of the cluster.
    Output:
        optimized parameters of effective Hamiltonian.
    """
    n = len(eff_param) - 1
    for k in range(Iter):
        samples = Make_tSamples_Restricted_SelfLearning(size, J, K, T, Nsamples, warmup, interval, eff_param, restriction)
        eff_param = train_eff_Hamil(samples, n)
        print('Iteration %.0f, %.2f percent done.'%(k+1, 100*(k+1)/Iter))
        print('eff_param is', eff_param)
    return eff_param


def Restricted_Temp_descend_opt(Temp, size, J, K, Nsamples, warmup, interval, eff_param, restriction):
    """Restricted self optimization while annealing the temperature downwards.

    Input:
        Temp: a list of temperature, namely the way how temperature is descending;
        size: size of the lattice;
        J, K: parameters of the Hamiltonian;
        Nsamples: the number of the samples trained at each temperature;
        warmup: steps taken in MC simulation to reach equilibrium;
        interval: steps between two cuts of samples;
        eff_param: effective Hamiltonian obtained from local update monte carlo at T > Tc;
        restriction: restricted size of the cluster.
    Return:
        optimized parameters of effective Hamiltonian at the last temperature.
    Output:
        intermediate temp and corresponding eff_param (printed).
    """
    n = len(eff_param) - 1
    for i in range(len(Temp)):
        samples = Make_tSamples_Restricted_SelfLearning(size, J, K, Temp[i], Nsamples, warmup, interval, eff_param, restriction)
        eff_param = train_eff_Hamil(samples, n)
        print('Temp %.3f, %.2f percent done.'%(Temp[i], 100*(i+1)/len(Temp)))
        print('eff_param is:', eff_param)
    return eff_param
{ "alphanum_fraction": 0.6694571969, "author": null, "avg_line_length": 42.32, "converted": null, "ext": "py", "file": null, "hexsha": "46701c3fa0f21cfc7f180e36d59567caef34ad47", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c9715ff5fa1e70800ed810556d5d82651d3bf370", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JaySchon/PHY571-Project", "max_forks_repo_path": "Codes/SLMC_Training_Lib.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "c9715ff5fa1e70800ed810556d5d82651d3bf370", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JaySchon/PHY571-Project", "max_issues_repo_path": "Codes/SLMC_Training_Lib.py", "max_line_length": 138, "max_stars_count": null, "max_stars_repo_head_hexsha": "c9715ff5fa1e70800ed810556d5d82651d3bf370", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JaySchon/PHY571-Project", "max_stars_repo_path": "Codes/SLMC_Training_Lib.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3729, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 14812 }
ctp 7/27/02 reverse the arguments in arg
c ----------------------------------------------------------------------
c ANGULAR_ARRAY_LE_QG(massin, NSANG)
c
c Tabulates the two-denominator angular integrals ANG2_EXT for the qg
c channel into NSANG(i,j,k,l): denominator indices i,j (0 = none, see
c the mapping table above MAND_TO_ANG_LE) raised to powers k,l, with
c the reference frame 1/2/3 = (A)/(B)/(C) chosen per entry as marked
c in the comments below.  Only the entries assigned here are
c meaningful; in debug mode all others are pre-set to 9.99999D+99 so
c that stray reads show up immediately.
c NOTE(review): the massin(..) layout is taken from MAND_TO_ANG_LE
c below (s,t2,s4 in 1..3, masses in 6/7/10, width in 25) -- confirm
c against the caller before relying on further slots.
c ----------------------------------------------------------------------
      subroutine ANGULAR_ARRAY_LE_QG(massin,NSANG)

      implicit none

      integer n,n1,n2,n3,n4,n5,ndim
      real*8 theta_s3
      real*8 arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
      real*8 NSANG(0:9,0:9,-2:2,-2:2),massin(1:30)
      real*8 ANG2_EXT,ANG2
      logical ldebug
      parameter( ldebug=.false. )

c to make the conversion simple
c (statement function: pick the coefficients for denominators n4,n5 in
c  frame n3 and integrate them to powers n1,n2)
      ANG2(n4,n5,n1,n2,n3)=ANG2_EXT(n1,n2,arg(1,n4,n3),arg(1,n5,n3))

c n=0 fixes the mandelstam variables to u7j/t1j,u6j/u1j
      n = 0
      ndim = 2
      theta_s3 = 0.D0

c fill the angular-coefficient array arg for all frames
      call MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

c poison every entry so unassigned reads are visible in debug mode
      if (ldebug) then
         do n1=0,9,1
            do n2=0,9,1
               do n3=-2,2,1
                  do n4=-2,2,1
                     NSANG(n1,n2,n3,n4) = 9.99999D+99
                  end do
               end do
            end do
         end do
      end if

c normalization
      NSANG(0,0, 0, 0) = ANG2(0,0, 0, 0,1)

c tp[1] -> (A)
      NSANG(1,0,-1, 0) = 0.D0
      NSANG(1,0,-2, 0) = ANG2(1,0,-2, 0,1)

c tp[1], up[2] -> (A)
      NSANG(1,2,-1,-1) = ANG2(1,2,-1,-1,1)

c tp[1], s5[5] -> (A)
      NSANG(1,5,-1,-1) = ANG2(1,5,-1,-1,1)
      NSANG(1,5,-1,-2) = ANG2(1,5,-1,-2,1)
      NSANG(1,5,-2,-1) = ANG2(1,5,-2,-1,1)
      NSANG(1,5,-2,-2) = ANG2(1,5,-2,-2,1)

c tp[1], u7[7] -> (A)
      NSANG(1,7,-1, 1) = ANG2(1,7,-1, 1,1)

c up[2]
      NSANG(2,0,-1, 0) = 0.D0

c up[2], s5[5] -> (B)
      NSANG(2,5,-1,-1) = ANG2(2,5,-1,-1,2)
      NSANG(2,5,-1,-2) = ANG2(2,5,-1,-2,2)

c up[2], u6[6] -> (B)
      NSANG(2,6,-1,-1) = ANG2(2,6,-1,-1,2)
      NSANG(2,6,-1,-2) = ANG2(2,6,-1,-2,2)

c s5[5] -> (C)
      NSANG(5,0,-1, 0) = ANG2(5,0,-1, 0,3)
      NSANG(5,0,-2, 0) = ANG2(5,0,-2, 0,3)

c s5[5], u6[6] -> (C)
      NSANG(5,6,-1,-1) = ANG2(5,6,-1,-1,3)

c s5[5], u7[7] -> (C)
      NSANG(5,7,-1, 1) = ANG2(5,7,-1, 1,3)
      NSANG(5,7,-2, 1) = ANG2(5,7,-2, 1,3)

c u6[6] -> (A)
      NSANG(6,0,-1, 0) = ANG2(6,0,-1, 0,1)
      NSANG(6,0,-2, 0) = ANG2(6,0,-2, 0,1)

c u6[6], u7[7] -> (A)
      NSANG(6,7,-1, 1) = ANG2(6,7,-1, 1,1)

      return
      end

c ----------------------------------------------------------------------
c ANGULAR_ARRAY_LE_GG(massin, NSANG)
c
c Same as ANGULAR_ARRAY_LE_QG but for the gg channel; additionally
c fills entries involving the subtracted propagator s3[8].  Some
c entries are computed with the two denominators swapped (and the
c powers swapped accordingly) in a different frame, as flagged by the
c "->" comments.
c ----------------------------------------------------------------------
      subroutine ANGULAR_ARRAY_LE_GG(massin,NSANG)

      implicit none

      integer n,n1,n2,n3,n4,n5,ndim
      real*8 theta_s3
      real*8 arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
      real*8 NSANG(0:9,0:9,-2:2,-2:2),massin(1:30)
      real*8 ANG2_EXT,ANG2
      logical ldebug
      parameter( ldebug=.false. )

c to make the conversion simple
      ANG2(n4,n5,n1,n2,n3)=ANG2_EXT(n1,n2,arg(1,n4,n3),arg(1,n5,n3))

c n should not matter
      n = 0
      ndim = 2
      theta_s3 = 0.D0

      call MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

c poison every entry so unassigned reads are visible in debug mode
      if (ldebug) then
         do n1=0,9,1
            do n2=0,9,1
               do n3=-2,2,1
                  do n4=-2,2,1
                     NSANG(n1,n2,n3,n4) = 9.99999D+99
                  end do
               end do
            end do
         end do
      end if

c tp[1]
      NSANG(1,0,-1, 0) = 0.D0

c tp[1], up[2] -> (A)
      NSANG(1,2,-1,-1) = ANG2(1,2,-1,-1,1)

c tp[1], s5[5] -> (A)
      NSANG(1,5,-1,-1) = ANG2(1,5,-1,-1,1)
      NSANG(1,5,-1,-2) = ANG2(1,5,-1,-2,1)

c tp[1], u7[7] -> (A)
      NSANG(1,7,-1,-1) = ANG2(1,7,-1,-1,1)
      NSANG(1,7,-1,-2) = ANG2(1,7,-1,-2,1)

c tp[1], s3[8] -> (A)
      NSANG(1,8,-1,-1) = ANG2(1,8,-1,-1,1)

c up[2]
      NSANG(2,0,-1, 0) = 0.D0

c up[2], s5[5] -> (B)
      NSANG(2,5,-1,-1) = ANG2(2,5,-1,-1,2)
      NSANG(2,5,-1,-2) = ANG2(2,5,-1,-2,2)

c up[2], u6[6] -> (B)
      NSANG(2,6,-1,-1) = ANG2(2,6,-1,-1,2)
      NSANG(2,6,-1,-2) = ANG2(2,6,-1,-2,2)

c up[2], s3[8] -> (B)
      NSANG(2,8,-1,-1) = ANG2(2,8,-1,-1,2)

c s5[5] -> (C)
      NSANG(5,0,-1, 0) = ANG2(5,0,-1, 0,3)
      NSANG(5,0,-2, 0) = ANG2(5,0,-2, 0,3)

c s5[5], u6[6] -> (C)
      NSANG(5,6,-1,-1) = ANG2(5,6,-1,-1,3)
      NSANG(5,6,-1, 1) = ANG2(5,6,-1, 1,3)
      NSANG(5,6,-2, 1) = ANG2(5,6,-2, 1,3)
      NSANG(5,6,-2, 2) = ANG2(5,6,-2, 2,3)

c s5[5], u6[6] -> u6[6], s5[5] -> (A)
c (denominators and powers swapped, evaluated in frame A)
      NSANG(5,6,-1, 2) = ANG2(6,5, 2,-1,1)

c s5[5], u7[7] -> (C)
      NSANG(5,7,-1,-1) = ANG2(5,7,-1,-1,3)

c u6[6] -> (A)
      NSANG(6,0,-1, 0) = ANG2(6,0,-1, 0,1)

c u6[6], u7[7] -> (A)
      NSANG(6,7,-1,-1) = ANG2(6,7,-1,-1,1)

c u6[6], s3[8] -> s3[8], u6[6] -> (C)
      NSANG(6,8,-1,-1) = ANG2(8,6,-1,-1,3)
      NSANG(6,8,-2,-1) = ANG2(8,6,-1,-2,3)
      NSANG(6,8, 1,-1) = ANG2(8,6,-1, 1,3)

c u6[6], s3[8] -> (A)
      NSANG(6,8, 2,-1) = ANG2(6,8, 2,-1,1)

c u7[7] -> (B)
      NSANG(7,0,-1, 0) = ANG2(7,0,-1, 0,2)

c u7[7], s3[8] -> (B)
      NSANG(7,8,-1,-1) = ANG2(7,8,-1,-1,2)
      NSANG(7,8,-2,-1) = ANG2(7,8,-2,-1,2)
      NSANG(7,8, 1,-1) = ANG2(7,8, 1,-1,2)

c s3[8] -> (C)
      NSANG(8,0,-1, 0) = ANG2(8,0,-1, 0,3)

      return
      end

c ----------------------------------------------------------------------
c ANGULAR_ARRAY_LE_GGOS(massin, theta_s3, NSANG)
c
c On-shell-subtraction variant for the gg channel: one-denominator
c angular integrals ANG1_EXT (ndim=1), divided by the regularized
c s3 propagator |s3 + m2^2 - ms^2 + i ms gams| squared, for a
c numerical s3 integration at fixed theta_s3.
c ----------------------------------------------------------------------
      subroutine ANGULAR_ARRAY_LE_GGOS(massin,theta_s3,NSANG)

      implicit none

      integer n,n1,n2,n3,n4,ndim
      real*8 theta_s3,s3,s3s,ms,m2,gams
      real*8 arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
      real*8 NSANG(0:9,0:9,-2:2,-2:2),massin(1:30)
      real*8 ANG1_EXT,ANG1
      logical ldebug
      parameter( ldebug=.false. )

c to make the conversion simple
      ANG1(n1,n2) =ANG1_EXT(n2,arg_x(n1),arg_y(n1))

c n should not matter
      n = 0
      ndim = 1

      call MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

      s3 = massin(4)
      ms = massin(6)
      m2 = massin(7)
      gams = massin(25)

c poison every entry so unassigned reads are visible in debug mode
      if (ldebug) then
         do n1=0,9,1
            do n2=0,9,1
               do n3=-2,2,1
                  do n4=-2,2,1
                     NSANG(n1,n2,n3,n4) = 9.99999D+99
                  end do
               end do
            end do
         end do
      end if

c the correct denominator for a numerical s3 integration
      s3s = s3 + m2**2 - ms**2
      s3s = sign(1.D0,s3s) * sqrt( s3s**2 + ms**2*gams**2 )

c n.b. no fixed cms for ANG1

c u6[6], s3[8]
      NSANG(6,8,-2,-2) = ANG1(6,-2)/s3s**2
      NSANG(6,8,-1,-2) = ANG1(6,-1)/s3s**2

c u7[7], s3[8]
      NSANG(7,8,-2,-2) = ANG1(7,-2)/s3s**2
      NSANG(7,8,-1,-2) = ANG1(7,-1)/s3s**2
      NSANG(7,8, 1,-2) = ANG1(7, 1)/s3s**2
      NSANG(7,8, 2,-2) = ANG1(7, 2)/s3s**2

c s3[8]
      NSANG(8,0,-2, 0) = ANG1(0, 0)/s3s**2

      return
      end

c ----------------------------------------------------------------------
c ANGULAR_ARRAY_LE_QQ(massin, NSANG)
c
c Angular integrals for the qq channel; only tp, up, u7g and s5
c denominators appear.
c ----------------------------------------------------------------------
      subroutine ANGULAR_ARRAY_LE_QQ(massin,NSANG)

      implicit none

      integer n,n1,n2,n3,n4,n5,ndim
      real*8 theta_s3
      real*8 arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
      real*8 NSANG(0:9,0:9,-2:2,-2:2),massin(1:30)
      real*8 ANG2_EXT,ANG2
      logical ldebug
      parameter( ldebug=.false. )

c to make the conversion simple
      ANG2(n4,n5,n1,n2,n3)=ANG2_EXT(n1,n2,arg(1,n4,n3),arg(1,n5,n3))

c n should not matter
      n = 0
      ndim = 2
      theta_s3 = 0.D0

      call MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

c poison every entry so unassigned reads are visible in debug mode
      if (ldebug) then
         do n1=0,9,1
            do n2=0,9,1
               do n3=-2,2,1
                  do n4=-2,2,1
                     NSANG(n1,n2,n3,n4) = 9.99999D+99
                  end do
               end do
            end do
         end do
      end if

c tp[1] -> (A)
      NSANG(1,0,-1, 0) = 0.D0
      NSANG(1,0,-2, 0) = ANG2(1,0,-2, 0,1)

c tp[1], up[2] -> (A)
      NSANG(1,2,-1,-1) = ANG2(1,2,-1,-1,1)

c tp[1], u7g[4] -> (A)
      NSANG(1,4,-1,-1) = ANG2(1,4,-1,-1,1)

c tp[1], s5[5] -> (A)
      NSANG(1,5,-1,-1) = ANG2(1,5,-1,-1,1)
      NSANG(1,5,-1,-2) = ANG2(1,5,-1,-2,1)
      NSANG(1,5,-2,-1) = ANG2(1,5,-2,-1,1)
      NSANG(1,5,-2,-2) = ANG2(1,5,-2,-2,1)

c up[2] -> (B)
      NSANG(2,0,-1, 0) = 0.D0
      NSANG(2,0,-2, 0) = ANG2(2,0,-2, 0,2)

c up[2], s5[5] -> (B)
      NSANG(2,5,-1,-1) = ANG2(2,5,-1,-1,2)
      NSANG(2,5,-1,-2) = ANG2(2,5,-1,-2,2)
      NSANG(2,5,-2,-1) = ANG2(2,5,-2,-1,2)
      NSANG(2,5,-2,-2) = ANG2(2,5,-2,-2,2)

c s5[5] -> (C)
      NSANG(5,0,-1, 0) = ANG2(5,0,-1, 0,3)
      NSANG(5,0,-2, 0) = ANG2(5,0,-2, 0,3)

      return
      end

c ----------------------------------------------------------------------
c ANGULAR_ARRAY_LE_QB(massin, NSANG)
c
c Angular integrals for the q-qbar channel, including the subtracted
c s3[8] propagator entries.
c NOTE(review): theta_s3 is set with the integer literal 0 here while
c the sibling routines use 0.D0 -- harmless implicit conversion to
c real*8, but inconsistent.
c ----------------------------------------------------------------------
      subroutine ANGULAR_ARRAY_LE_QB(massin,NSANG)

      implicit none

      integer n,n1,n2,n3,n4,n5,ndim
      real*8 theta_s3
      real*8 arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
      real*8 NSANG(0:9,0:9,-2:2,-2:2),massin(1:30)
      real*8 ANG2_EXT,ANG2
      logical ldebug
      parameter( ldebug=.false. )

c to make the conversion simple
      ANG2(n4,n5,n1,n2,n3)=ANG2_EXT(n1,n2,arg(1,n4,n3),arg(1,n5,n3))

c n should not matter
      n = 0
      ndim = 2
      theta_s3 = 0

      call MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

c poison every entry so unassigned reads are visible in debug mode
      if (ldebug) then
         do n1=0,9,1
            do n2=0,9,1
               do n3=-2,2,1
                  do n4=-2,2,1
                     NSANG(n1,n2,n3,n4) = 9.99999D+99
                  end do
               end do
            end do
         end do
      end if

c normalization
      NSANG(0,0, 0, 0) = ANG2(0,0, 0, 0,1)

c tp[1] -> (A)
      NSANG(1,0,-1, 0) = 0.D0
      NSANG(1,0,-2, 0) = ANG2(1,0,-2, 0,1)

c tp[1], s5[5] -> (A)
      NSANG(1,5,-1,-1) = ANG2(1,5,-1,-1,1)
      NSANG(1,5,-1,-2) = ANG2(1,5,-1,-2,1)
      NSANG(1,5,-2,-1) = ANG2(1,5,-2,-1,1)
      NSANG(1,5,-2,-2) = ANG2(1,5,-2,-2,1)

c tp[1], s3[8] -> (A)
      NSANG(1,8,-1,-1) = ANG2(1,8,-1,-1,1)

c s5[5] -> (C)
      NSANG(5,0,-1, 0) = ANG2(5,0,-1, 0,3)
      NSANG(5,0,-2, 0) = ANG2(5,0,-2, 0,3)

c s5[5], u7[7] -> (C)
      NSANG(5,7,-1, 1) = ANG2(5,7,-1, 1,3)
      NSANG(5,7,-2, 2) = ANG2(5,7,-2, 2,3)
      NSANG(5,7,-2, 1) = ANG2(5,7,-2, 1,3)

c s5[5], u7[7] -> u7[7], s5[5] -> (B)
c (denominators and powers swapped, evaluated in frame B)
      NSANG(5,7,-1, 2) = ANG2(7,5, 2,-1,2)

c u7[7] -> (B)
      NSANG(7,0, 1, 0) = ANG2(7,0, 1, 0,2)

c u7[7], s3[8] -> (B)
      NSANG(7,8, 1,-1) = ANG2(7,8, 1,-1,2)
      NSANG(7,8, 2,-1) = ANG2(7,8, 2,-1,2)

c s3[8] -> (C)
      NSANG(8,0,-1, 0) = ANG2(8,0,-1, 0,3)

      return
      end

c ----------------------------------------------------------------------
c ANGULAR_ARRAY_LE_QBOS(massin, theta_s3, NSANG)
c
c On-shell-subtraction variant for the q-qbar channel, analogous to
c ANGULAR_ARRAY_LE_GGOS: one-denominator integrals divided by the
c regularized s3 propagator (to the first or second power).
c ----------------------------------------------------------------------
      subroutine ANGULAR_ARRAY_LE_QBOS(massin,theta_s3,NSANG)

      implicit none

      integer n,n1,n2,n3,n4,ndim
      real*8 theta_s3,s3,s3s,ms,m2,gams
      real*8 arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
      real*8 NSANG(0:9,0:9,-2:2,-2:2),massin(1:30)
      real*8 ANG1_EXT,ANG1
      logical ldebug
      parameter( ldebug=.false. )

c to make the conversion simple
      ANG1(n1,n2) =ANG1_EXT(n2,arg_x(n1),arg_y(n1))

c n should not matter
      n = 0
      ndim = 1

      call MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

      s3 = massin(4)
      ms = massin(6)
      m2 = massin(7)
      gams = massin(25)

c poison every entry so unassigned reads are visible in debug mode
      if (ldebug) then
         do n1=0,9,1
            do n2=0,9,1
               do n3=-2,2,1
                  do n4=-2,2,1
                     NSANG(n1,n2,n3,n4) = 9.99999D+99
                  end do
               end do
            end do
         end do
      end if

c the correct denominator for a numerical s3 integration
      s3s = s3 + m2**2 - ms**2
      s3s = sign(1.D0,s3s) * sqrt( s3s**2 + ms**2*gams**2 )

c n.b. no fixed cms for ANG1
      NSANG(0,0, 0, 0) = ANG1(0, 0)
      NSANG(7,8, 1,-2) = ANG1(7, 1)/s3s**2
      NSANG(7,8, 2,-2) = ANG1(7, 2)/s3s**2

c s3[8] -> (C)
      NSANG(8,0,-1, 0) = ANG1(0, 0)/s3s
      NSANG(8,0,-2, 0) = ANG1(0, 0)/s3s**2

      return
      end

cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
c                                                                      c
c   new? me wim definition                                             c
c                                                                      c
c   for qg and gg channel [n=0]                                        c
c    1  tp  = tp  = (k2-k3)^2                                          c
c    2  up  = up  = (k1-k3)^2                                          c
c    3  s3  = s3j = (k3-p2)^2 - m2^2                                   c
c  * 4  t1j = u7j = (k1-p1)^2 - m1^2                                   c
c    5  s5  = s5  = (p1+p2)^2          is cm connected to s3[3]        c
c    6  u1  = u6  = (k2-p1)^2 - m1^2   is cm connected to tp[1]        c
c    7  t1  = u7  = (k1-p1)^2 - m1^2   is cm connected to up[2]        c
c  * 8  s3s = s3  = (k3-p2)^2 - m1^2                                   c
c  * 9  u1j = u6j = (k2-p1)^2 - m1^2                                   c
c                                                                      c
c   different for crossed channels [n=1]                               c
c  * 4  t1g = u7g = (k1-p1)^2 - mg^2   is never needed                 c
c  * 9  u1g = u6g = (k1-p1)^2 - mg^2   is never needed                 c
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc

c ----------------------------------------------------------------------
c MAND_TO_ANG_LE(n, ndim, massin, arg, theta_s3, arg_x, arg_y)
c
c Converts the Mandelstam variables in massin(..) into the angular
c polynomials  a + b*cos(th) + c*sin(th)*cos(ph)  for every
c denominator (index 0..9, see table above) in the three reference
c frames 1/2/3 = (A)/(B)/(C):
c   (A) k2||z, cm(k3,p1);  (B) k1||z, cm(k3,p1);  (C) p2||z, cm(k3,p1)
c The coefficients are returned as arg(1:3, denom, frame).
c For ndim=1 the frame-(C) polynomials are additionally projected onto
c the fixed s3 cosine c1 and returned in arg_x/arg_y for the
c one-dimensional ANG1 integrals.
c Inputs: n selects the m2 (n=0) or mg (n=1) mass shift for the
c derived u6j/u6g and u7j/u7g entries; theta_s3 flags the s3
c regularization (see the guarded branch below).
c NOTE(review): this routine has no "implicit none", unlike its
c siblings; all used variables are declared, but adding it would be a
c worthwhile (code) cleanup.
c ----------------------------------------------------------------------
      subroutine MAND_TO_ANG_LE(n,ndim,massin,arg,theta_s3,arg_x,arg_y)

      integer n,n1,n2,n3,ndim
      real*8 massin(1:30),arg(1:3,0:9,1:3),arg_x(0:9),arg_y(0:9)
     &      ,theta_s3,s,t2,s4,m1,m2,mg,s3,s3s
     &      ,gams
     &      ,m12,m22,u2,s42,root
     &      ,norm,w1,w2,w3,e1,e2,p,cpa,cpb,spa,spb,c1
     &      ,dum1,dum2
      logical ldebug
      parameter( ldebug=.false. )

c initialize the output array
      do n1=1,3
         do n2=0,9
            do n3=1,3
               arg(n1,n2,n3) = 0.D0
            end do
         end do
      end do

      do n3=0,9
         arg_x(n3) = 0.D0
         arg_y(n3) = 0.D0
      end do

c define the different variables
      s  = massin(1)
      t2 = massin(2)
      s4 = massin(3)
      m1 = massin(6)
      m2 = massin(7)
      mg = massin(10)
      gams = massin(25)

c the two outgoing masses set to m1,m2 [see my thesis]
      m12 = m1**2
      m22 = m2**2
      u2  = s4 - s - t2 + m1**2 - m2**2 !tp u2 = u1 + m1**2 - m2**2
      s42 = s4 + m1**2 - m2**2

c define in the three reference frames (A,B,C):
c (1) and calculate the angular polynomial for tp,up,s3
c numerically-guarded square roots: tiny negative arguments (rounding)
c are clamped to 1.D-8 with a warning
      root = (t2+u2)**2 - 4.D0*m22*s
      if (root.ge.0.D0) then
         root = sqrt(root)
      else
         print*," MAND_TO_ANG_LE: fixing problem, root= ",root
         root = 1.D-8
      end if

      norm = s4+m12
      if (norm.ge.0.D0) then
         norm = 2.D0*sqrt(norm)
      else
         print*, " MAND_TO_ANG_LE: serious problem with s4 ",s4
         call HARD_STOP
      end if

c energies/momenta in units of the common normalization
      w1 = (s + u2)              /norm
      w2 = (s + t2)              /norm
      w3 = s4                    /norm
      e1 = (s4 + 2.D0*m12)       /norm
      e2 = -(t2 + u2 + 2.D0*m22) /norm
      p  = root                  /norm

c$$$      if (w1.lt.0.D0) then
c$$$         print*, " MAND_TO_ANG_LE: w1 < 0 ",w1
c$$$         stop
c$$$      end if
c$$$      if (w2.lt.0.D0) then
c$$$         print*, " MAND_TO_ANG_LE: w2 < 0 ",w2
c$$$         stop
c$$$      end if
c$$$      if (w3.lt.0.D0) then
c$$$         print*, " MAND_TO_ANG_LE: w3 < 0 ",w3
c$$$         stop
c$$$      end if

c reference frames (A,B): cosines of the rotation angles
ctp      cpa =(t2*s42 -s*(u2+2.D0*m22))/(s+t2)/sqrt((t2+u2)**2-4.D0*m22*s)
ctp      cpb =(u2*s42 -s*(t2+2.D0*m22))/(s+u2)/sqrt((u2+t2)**2-4.D0*m22*s)
      cpa =(t2*s42 -s*(u2+2.D0*m22))/(s+t2)/root
      cpb =(u2*s42 -s*(t2+2.D0*m22))/(s+u2)/root

      spa = 1.D0 - cpa**2
      if (spa.ge.0.D0) then
         spa = sqrt(spa)
      else
         print*," MAND_TO_ANG_LE: fixing problem, spa**2= ",spa
         spa = 1.D-8
      end if

      spb = 1.D0 - cpb**2
      if (spb.ge.0.D0) then
         spb = sqrt(spb)
      else
         print*," MAND_TO_ANG_LE: fixing problem, spb**2= ",spb
         spb = 1.D-8
      end if

c reference frame (A): k2||z, cm(k3,p1)
c  two terms     : tp,u6,u6s,u6j,u6g,...
c  a=-b          : tp
c  A^2=B^2+C^2   : up
      arg(1,1,1) = -2.D0*w2*w3
      arg(2,1,1) = -arg(1,1,1)
      arg(3,1,1) = 0.D0

      arg(1,2,1) = -2.D0*w1*w3
      arg(2,2,1) = +2.D0*w3*p*cpa-2.D0*w2*w3
      arg(3,2,1) = +2.D0*w3*p*spa

      arg(1,3,1) = +2.D0*w3*e2
      arg(2,3,1) = -2.D0*w3*p*cpa
      arg(3,3,1) = -2.D0*w3*p*spa

c reference frame (B): k1||z, cm(k3,p1)
c  two terms     : up,u7,u7s,u7j,u7g,...
c  a=-b          : up
c  A^2=B^2+C^2   : tp
      arg(1,1,2) = -2.D0*w2*w3
      arg(2,1,2) = +2.D0*w3*p*cpb-2.D0*w1*w3
      arg(3,1,2) = +2.D0*w3*p*spb

      arg(1,2,2) = -2.D0*w1*w3
      arg(2,2,2) = -arg(1,2,2)
      arg(3,2,2) = 0.D0

      arg(1,3,2) = +2.D0*w3*e2
      arg(2,3,2) = -2.D0*w3*p*cpb
      arg(3,3,2) = -2.D0*w3*p*spb

c reference frame (C): p2||z, cm(k3,p1)
c  two terms     : s3,s5,s3s
c  a=-b          : -
c  A^2=B^2+C^2   : tp,up
      arg(1,1,3) = -2.D0*w2*w3
      arg(2,1,3) = +2.D0*w2*w3*cpa
      arg(3,1,3) = +2.D0*w2*w3*spa

      arg(1,2,3) = -2.D0*w1*w3
      arg(2,2,3) = -2.D0*w2*w3*cpa+2.D0*w3*p
      arg(3,2,3) = -2.D0*w2*w3*spa

      arg(1,3,3) = +2.D0*w3*e2
      arg(2,3,3) = -2.D0*w3*p
      arg(3,3,3) = 0.D0

c calculate all the other angular polynomials
      do n1=1,3

c cm(k3,p1) system s3 -> s5
         arg(1,5,n1) = +2.D0*e1*e2 + m12 + m22
         arg(2,5,n1) = -arg(2,3,n1)
         arg(3,5,n1) = -arg(3,3,n1)

c cm(k3,p1) system tp -> u6
         arg(1,6,n1) = -2.D0*w2*e1
         arg(2,6,n1) = -arg(2,1,n1)
         arg(3,6,n1) = -arg(3,1,n1)

c cm(k3,p1) system up -> u7
         arg(1,7,n1) = -2.D0*w1*e1
         arg(2,7,n1) = -arg(2,2,n1)
         arg(3,7,n1) = -arg(3,2,n1)

c derive u6 -> u6j,u6g  (mass shift m2 for n=0, mg for n=1)
         if (n.eq.0) then
            arg(1,9,n1) = arg(1,6,n1) + m1**2 - m2**2
         else if (n.eq.1) then
            arg(1,9,n1) = arg(1,6,n1) + m1**2 - mg**2
         end if
         arg(2,9,n1) = arg(2,6,n1)
         arg(3,9,n1) = arg(3,6,n1)

c derive u7 -> u7j,u7g  (mass shift m2 for n=0, mg for n=1)
         if (n.eq.0) then
            arg(1,4,n1) = arg(1,7,n1) + m1**2 - m2**2
         else if (n.eq.1) then
            arg(1,4,n1) = arg(1,7,n1) + m1**2 - mg**2
         end if
         arg(2,4,n1) = arg(2,7,n1)
         arg(3,4,n1) = arg(3,7,n1)

c derive s3 -> s3s, includes regularization
ctp CHECK THIS IF STATEMENT!!!
c NOTE(review): n.eq.99 never holds for the callers in this file (they
c all pass n=0), so the regularized branch is currently dead -- confirm
c whether the condition should involve n at all.
         if ((n.eq.99).and.(theta_s3.eq.1.D0)) then
            arg(1,8,n1) = arg(1,3,n1) + m2**2 - m1**2 - gams**2
         else
            arg(1,8,n1) = arg(1,3,n1) + m2**2 - m1**2
         end if
         arg(2,8,n1) = arg(2,3,n1)
         arg(3,8,n1) = arg(3,3,n1)

      end do

c special case of reference frame (C)
c only for theta_s3=1 and asked for s3 subtraction
c only for the integrals including u6s,u7s
      if (ndim.eq.1) then
         s3 = massin(4)
         s3s = s3 + m2**2 - m1**2
         s3s = sqrt( s3s**2 + m1**2*gams**2 )
c cosine of the s3-fixed angle; outside [-1,1] only through rounding
         c1 = ( 2.D0*w3*e2 - s3 )/(2.D0*p*w3)
         do n1=0,9
            arg_x(n1) = arg(1,n1,3) + arg(2,n1,3)*c1
            if (c1**2.lt.1.D0) then
               arg_y(n1) = arg(3,n1,3)*sqrt(1.D0-c1**2)
            else
               print*," MAND_TO_ANG_LE: fixing problem, c1= ",c1
               arg_y(n1) = 1.D-8
            end if
         end do
      end if

c check of the structure in debug mode
      if ( (ndim.eq.2).and.(ldebug) ) then

c entries with no sin(th)cos(ph) term
         do n1=1,3,1
            do n2=1,9,1
               if (abs(arg(3,n2,n1)).le.1.D-16) then
                  print*, " only a,b    : ",n1,n2,arg(3,n2,n1)
               end if
            end do
         end do

c special case a=-b
         do n1=1,3,1
            do n2=1,9,1
               dum1 = arg(1,n2,n1) + arg(2,n2,n1)
               if ((abs(arg(3,n2,n1)).le.1.D-16).and.
     &             (abs(dum1).le.1.D-16)) then
                  print*, " a = -b      : ",n1,n2,dum1
               end if
            end do
         end do

c special case a=+b
         do n1=1,3,1
            do n2=1,9,1
               dum1 = arg(1,n2,n1) - arg(2,n2,n1)
               if ((abs(arg(3,n2,n1)).le.1.D-16).and.
     &             (abs(dum1).le.1.D-16)) then
                  print*, " a = +b      : ",n1,n2,dum1
               end if
            end do
         end do

c special case A^2=B^2+C^2
c n.b. the dum2 cutoff has to be bigger than 1.e-12
         do n1=1,3,1
            do n2=1,9,1
               dum1 = arg(1,n2,n1) + arg(2,n2,n1)
               dum2 = (arg(1,n2,n1)**2-arg(2,n2,n1)**2-arg(3,n2,n1)**2)
               dum2 = dum2/arg(1,n2,n1)**2
               if ((abs(dum1).gt.1.D-16).and.
     &             (abs(dum2).le.1.D-12)) then
                  print*, "A^2=B^2+C^2 : ",n1,n2,dum2
               end if
            end do
         end do

      end if

      return
      end
{ "alphanum_fraction": 0.4008169064, "author": null, "avg_line_length": 32.5021645022, "converted": null, "ext": "f", "file": null, "hexsha": "ebe91c040803cb54bb4e79a69634c387d8561592", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "845de4c15e273cb68bc3a70a3bac1255474631be", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "sliem/docker-hep", "max_forks_repo_path": "prospino/on_the_web_10_17_14/Pro2_subroutines/Xangular_array_le.f", "max_issues_count": null, "max_issues_repo_head_hexsha": "845de4c15e273cb68bc3a70a3bac1255474631be", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "sliem/docker-hep", "max_issues_repo_path": "prospino/on_the_web_10_17_14/Pro2_subroutines/Xangular_array_le.f", "max_line_length": 74, "max_stars_count": null, "max_stars_repo_head_hexsha": "845de4c15e273cb68bc3a70a3bac1255474631be", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "sliem/docker-hep", "max_stars_repo_path": "prospino/on_the_web_10_17_14/Pro2_subroutines/Xangular_array_le.f", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 9570, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 22524 }
from second_step_on_vertex_visit import second_step_on_vertex_visit import numpy as np import pandas as pd from initialize_graph import Vertex, build_graph, find_shortest_path from initialize_graph import Robot from collections import defaultdict import networkx as nx import matplotlib.pyplot as plt from first_step_on_vertex_visit import Id,what_to_do_if_next_node_known,first_step_on_arriving_at_vertex from get_incidence_matrix import get_incidence_matrix import pprint pp = pprint.PrettyPrinter(indent=8) ''' A standalone implementation of the algorithm on big graph 10 Leaf Nodes 24 Nodes ''' #Topography count = 0 #local flag to declare overall completion K = 10 #Number of Leaf nodes = Number of Robots J = 24 # Number of nodes vertex = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X"]# Vertex Node name list edges = ["AC","BC","CD","DE","EF","EK","FG","GH","HI","HJ","KL","KO","LM","LN","OP","PQ","PS","QR","ST","SU","UV","VW","VX"] # Edge name list robo_vertex = ["A","B","I","J","M","N","R","T","W","X"] # Spawn Locations of robots XData = [2.50, 3.50,3,3,3,1.50,1.50,1.50,1,2,3.50,3,2.50,3.50,4.50,4.50,3.50,3.50,5,4.50,5.50,5.50,5,6] #Robot Spawn X cordinates YData = [1,1,2,3,4,5,6,7,8,8,5,6,7,7,6,7,8,9,8,9,9,10,11,11] #Robot Spwn Y cordinates [graph,edges_decomp] = build_graph(edges) ''' Graph making for visualization ''' G = nx.Graph() for i in range(J): G.add_node(chr(i+65)) for ed in edges_decomp: print(*ed) G.add_edge(*ed) nx.draw(G,with_labels = True, font_weight = 'bold') plt.show # print(G.nodes) # print(G.edges) ''' Incidence Matrix Making ''' incidence_matrix = get_incidence_matrix(XData,YData,G) pp.pprint(incidence_matrix) ''' Initializations ''' #initializing of R : list of Robot objects R = [] for j in range(K): R.append(Robot(j,robo_vertex,incidence_matrix)) #initializing of V : list of Vertex objects V = [] for j in range(J): V.append(Vertex(vertex[j],edges,incidence_matrix))#asdf ''' CORE ALGORITHM : Each 
robot explores until its personal count flag turns ON. And the loop runs till the overall count flag turns ON. ''' ''' As the robots are placed on leaf nodes thus the vertex will have only one neighbour at that vertex Thus all the robots are given an initial puch to there immediate neighbors ''' print("The first mandatory push:") print('') for k in range(K): start = R[k].present_location end = V[ord(R[k].present_location) - 65].neighbors[0] top = np.array([-1*incidence_matrix[ord(start) - 65,ord(end)-65]]) bottom = np.array([-1*incidence_matrix[ord(end) - 65,ord(start)-65]]) col_vector = np.vstack((top,bottom)) R[k].path.append('Base Station') # hovering over the launchpad R[k].path.append(R[k].present_location) # leaf node name designated R[k].path.append(V[ord(R[k].present_location) - 65].neighbors[0]) print("The {e} robot is currently at {f}".format(e=k,f=R[k].present_location)) print("The next node chosen is {}".format(V[ord(R[k].present_location) - 65].neighbors[0])) id,R,V = what_to_do_if_next_node_known(R,k,V,1,R[k].present_location,V[ord(R[k].present_location) - 65].neighbors[0],incidence_matrix=incidence_matrix) R[k].next_edge_decided,count = second_step_on_vertex_visit(graph, V,R,k,count) print('The next edge selected by - ' + str(k) + '- robot is' + str(R[k].next_edge_decided)) print('') print("This is the loop part which continues till the declaration of completion") while(count != K): for z in range(K): if R[z].count != 1: print("{z} robot Travelling to the selected edge :{e}".format(z = z , e = R[z].next_edge_decided) ) R[z].path.append(R[z].next_edge_decided.replace(R[z].present_location,'')) if R[z].next_edge_decided !=0: id,R,V = what_to_do_if_next_node_known(R,z,V,2,R[z].present_location, R[z].next_edge_decided.replace(R[z].present_location,''),incidence_matrix = incidence_matrix) R[z].next_edge_decided,count = second_step_on_vertex_visit(graph, V,R,z,count) if R[z].next_edge_decided != 0: print('The next edge selected by : ' + str(z) + ' : robot 
is :' + str(R[z].next_edge_decided)) print('') ''' To get an overall view of the setpoints decided for each robot ''' for k in range(K): R[k].path.append('Base Station') print("Setpoint list of : " +str(k) + " robot is : " ) print((R[k].path))
{ "alphanum_fraction": 0.6719964468, "author": null, "avg_line_length": 39.8495575221, "converted": null, "ext": "py", "file": null, "hexsha": "900a8edbe308980f7ae27c4734f41fcdf6f1c53b", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f6e19f0593760061c561e627c6868ed9f352323e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Ayush8120/MR-DFS", "max_forks_repo_path": "Python Implementation/Python Implementation_General/main.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "f6e19f0593760061c561e627c6868ed9f352323e", "max_issues_repo_issues_event_max_datetime": "2021-09-01T06:30:26.000Z", "max_issues_repo_issues_event_min_datetime": "2021-08-31T13:49:52.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Ayush8120/MR-DFS", "max_issues_repo_path": "Python Implementation/Python Implementation_General/main.py", "max_line_length": 178, "max_stars_count": null, "max_stars_repo_head_hexsha": "f6e19f0593760061c561e627c6868ed9f352323e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Ayush8120/MR-DFS", "max_stars_repo_path": "Python Implementation/Python Implementation_General/main.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1316, "path": null, "reason": "import numpy,import networkx", "repo": null, "save_path": null, "sha": null, "size": 4503 }
/* * Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. */ #include "bgp/routing-instance/service_chaining.h" #include <boost/foreach.hpp> #include <algorithm> #include "base/task_annotations.h" #include "base/task_trigger.h" #include "bgp/bgp_config.h" #include "bgp/bgp_log.h" #include "bgp/bgp_peer_membership.h" #include "bgp/extended-community/load_balance.h" #include "bgp/extended-community/site_of_origin.h" #include "bgp/inet6vpn/inet6vpn_route.h" #include "bgp/l3vpn/inetvpn_route.h" #include "bgp/origin-vn/origin_vn.h" #include "bgp/routing-instance/routing_instance.h" #include "bgp/routing-instance/service_chaining_types.h" #include "net/community_type.h" using boost::bind; using boost::system::error_code; using std::make_pair; using std::sort; using std::string; using std::vector; template<> int ServiceChainMgr<ServiceChainInet>::service_chain_task_id_ = -1; template<> int ServiceChainMgr<ServiceChainInet6>::service_chain_task_id_ = -1; static int GetOriginVnIndex(const BgpTable *table, const BgpRoute *route) { const BgpPath *path = route->BestPath(); if (!path) return 0; const BgpAttr *attr = path->GetAttr(); const ExtCommunity *ext_community = attr->ext_community(); if (ext_community) { BOOST_FOREACH(const ExtCommunity::ExtCommunityValue &comm, ext_community->communities()) { if (!ExtCommunity::is_origin_vn(comm)) continue; OriginVn origin_vn(comm); return origin_vn.vn_index(); } } if (path->IsVrfOriginated()) return table->routing_instance()->virtual_network_index(); return 0; } template <typename T> ServiceChain<T>::ServiceChain(ServiceChainMgrT *manager, RoutingInstance *src, RoutingInstance *dest, RoutingInstance *connected, const vector<string> &subnets, AddressT addr) : manager_(manager), src_(src), dest_(dest), connected_(connected), connected_route_(NULL), service_chain_addr_(addr), connected_table_unregistered_(false), dest_table_unregistered_(false), aggregate_(false), src_table_delete_ref_(this, src_table()->deleter()) { for 
(vector<string>::const_iterator it = subnets.begin(); it != subnets.end(); ++it) { error_code ec; PrefixT ipam_subnet = PrefixT::FromString(*it, &ec); if (ec != 0) continue; prefix_to_routelist_map_[ipam_subnet] = RouteList(); } } template <typename T> BgpTable *ServiceChain<T>::src_table() const { return src_->GetTable(GetFamily()); } template <typename T> BgpTable *ServiceChain<T>::connected_table() const { return connected_->GetTable(GetFamily()); } template <typename T> BgpTable *ServiceChain<T>::dest_table() const { return dest_->GetTable(GetFamily()); } // // Compare this ServiceChain against the ServiceChainConfig. // Return true if the configuration has not changed, false otherwise. // template <typename T> bool ServiceChain<T>::CompareServiceChainConfig( const ServiceChainConfig &config) { if (dest_->name() != config.routing_instance) return false; if (connected_->name() != config.source_routing_instance) return false; if (service_chain_addr_.to_string() != config.service_chain_address) return false; if (prefix_to_routelist_map_.size() != config.prefix.size()) return false; for (vector<string>::const_iterator it = config.prefix.begin(); it != config.prefix.end(); ++it) { error_code ec; PrefixT ipam_subnet = PrefixT::FromString(*it, &ec); if (prefix_to_routelist_map_.find(ipam_subnet) == prefix_to_routelist_map_.end()) { return false; } } return true; } // // Match function called from BgpConditionListener // Concurrency : db::DBTable // For the purpose of route aggregation, two condition needs to be matched // 1. More specific route present in any of the Dest BgpTable partition // 2. 
Connected route(for nexthop) present in Src BgpTable // template <typename T> bool ServiceChain<T>::Match(BgpServer *server, BgpTable *table, BgpRoute *route, bool deleted) { CHECK_CONCURRENCY("db::DBTable"); typename ServiceChainRequestT::RequestType type; PrefixT aggregate_match; if (table == dest_table() && !dest_table_unregistered()) { if (IsConnectedRoute(route)) return false; if (aggregate_enable() && IsAggregate(route)) return false; if (aggregate_enable() && IsMoreSpecific(route, &aggregate_match)) { // More specific if (deleted) { type = ServiceChainRequestT::MORE_SPECIFIC_DELETE; } else { type = ServiceChainRequestT::MORE_SPECIFIC_ADD_CHG; } } else { // External connecting routes if (!deleted) { if (!route->BestPath() || !route->BestPath()->IsFeasible()) { deleted = true; } else { const BgpAttr *attr = route->BestPath()->GetAttr(); const Community *comm = attr ? attr->community() : NULL; if (comm) { if ((comm->ContainsValue(CommunityType::NoAdvertise)) || (comm->ContainsValue(CommunityType::NoReOriginate))) deleted = true; } int vn_index = GetOriginVnIndex(table, route); int src_vn_index = src_->virtual_network_index(); int dest_vn_index = dest_->virtual_network_index(); if (!vn_index || dest_vn_index != vn_index) { if (src_vn_index == vn_index) deleted = true; if (!dest_->virtual_network_allow_transit()) deleted = true; if (!dest_vn_index) deleted = true; } OriginVn src_origin_vn( server->autonomous_system(), src_vn_index); const OriginVnPath *ovnpath = attr ? 
attr->origin_vn_path() : NULL; if (ovnpath && ovnpath->Contains(src_origin_vn.GetExtCommunity())) { deleted = true; } } } if (deleted) { type = ServiceChainRequestT::EXT_CONNECT_ROUTE_DELETE; } else { type = ServiceChainRequestT::EXT_CONNECT_ROUTE_ADD_CHG; } } } else if ((table == connected_table()) && !connected_table_unregistered() && IsConnectedRoute(route)) { if (!deleted) { if (!route->IsValid() || route->BestPath()->GetSource() != BgpPath::BGP_XMPP) { deleted = true; } } // Connected route for service chain if (deleted) { type = ServiceChainRequestT::CONNECTED_ROUTE_DELETE; } else { type = ServiceChainRequestT::CONNECTED_ROUTE_ADD_CHG; } } else { return false; } BgpConditionListener *listener = server->condition_listener(GetFamily()); ServiceChainState *state = static_cast<ServiceChainState *>( listener->GetMatchState(table, route, this)); if (!deleted) { // MatchState is added to the Route to ensure that DBEntry is not // deleted before the ServiceChain module processes the WorkQueue // request. if (!state) { state = new ServiceChainState(ServiceChainPtr(this)); listener->SetMatchState(table, route, this, state); } } else { // MatchState is set on all the Routes that matches the conditions // Retrieve to check and ignore delete of unseen Add Match if (state == NULL) { // Not seen ADD ignore DELETE return false; } } // The MatchState reference is taken to ensure that the route is not // deleted when request is still present in the queue // This is to handle the case where MatchState already exists and // deleted entry gets reused or reused entry gets deleted. 
state->IncrementRefCnt(); // Post the Match result to ServiceChain task to take Action // More_Specific_Present + Connected_Route_exists ==> Add Aggregate Route // and stitch the nexthop from connected route ServiceChainRequestT *req = new ServiceChainRequestT( type, table, route, aggregate_match, ServiceChainPtr(this)); manager_->Enqueue(req); return true; } template <typename T> string ServiceChain<T>::ToString() const { return (string("ServiceChain " ) + service_chain_addr_.to_string()); } template <typename T> void ServiceChain<T>::SetConnectedRoute(BgpRoute *connected) { connected_route_ = connected; connected_path_ids_.clear(); if (!connected_route_) return; for (Route::PathList::iterator it = connected->GetPathList().begin(); it != connected->GetPathList().end(); ++it) { BgpPath *path = static_cast<BgpPath *>(it.operator->()); // Infeasible paths are not considered. if (!path->IsFeasible()) break; // Bail if it's not ECMP with the best path. if (connected_route_->BestPath()->PathCompare(*path, true)) break; // Use nexthop attribute of connected path as path id. 
uint32_t path_id = path->GetAttr()->nexthop().to_v4().to_ulong(); connected_path_ids_.insert(path_id); } } template <typename T> bool ServiceChain<T>::IsConnectedRouteValid() const { return (connected_route_ && connected_route_->IsValid()); } template <typename T> bool ServiceChain<T>::IsMoreSpecific(BgpRoute *route, PrefixT *aggregate_match) const { const RouteT *ip_route = static_cast<RouteT *>(route); const PrefixT &ip_prefix = ip_route->GetPrefix(); for (typename PrefixToRouteListMap::const_iterator it = prefix_to_route_list_map()->begin(); it != prefix_to_route_list_map()->end(); ++it) { if (ip_prefix.IsMoreSpecific(it->first)) { *aggregate_match = it->first; return true; } } return false; } template <typename T> bool ServiceChain<T>::IsAggregate(BgpRoute *route) const { RouteT *ip_route = dynamic_cast<RouteT *>(route); for (typename PrefixToRouteListMap::const_iterator it = prefix_to_route_list_map()->begin(); it != prefix_to_route_list_map()->end(); ++it) { if (it->first == ip_route->GetPrefix()) return true; } return false; } template <typename T> bool ServiceChain<T>::IsConnectedRoute(BgpRoute *route) const { RouteT *ip_route = dynamic_cast<RouteT *>(route); return (service_chain_addr() == ip_route->GetPrefix().addr()); } template <typename T> void ServiceChain<T>::RemoveMatchState(BgpRoute *route, ServiceChainState *state) { if (deleted() || route->IsDeleted()) { // At this point we are ready to release the MatchState on the DBEntry // So mark it as deleted.. 
Actual removal of the state is done when // ref count is 0 state->set_deleted(); } } // RemoveServiceChainRoute template <typename T> void ServiceChain<T>::RemoveServiceChainRoute(PrefixT prefix, bool aggregate) { CHECK_CONCURRENCY("bgp::ServiceChain"); BgpTable *bgptable = src_table(); RouteT rt_key(prefix); DBTablePartition *partition = static_cast<DBTablePartition *>(bgptable->GetTablePartition(&rt_key)); BgpRoute *service_chain_route = static_cast<BgpRoute *>(partition->Find(&rt_key)); if (!service_chain_route || service_chain_route->IsDeleted()) return; for (ConnectedPathIdList::const_iterator it = GetConnectedPathIds().begin(); it != GetConnectedPathIds().end(); ++it) { uint32_t path_id = *it; service_chain_route->RemovePath(BgpPath::ServiceChain, NULL, path_id); BGP_LOG_STR(BgpMessage, SandeshLevel::SYS_DEBUG, BGP_LOG_FLAG_TRACE, "Removed " << (aggregate ? "Aggregate" : "ExtConnected") << " ServiceChain path " << service_chain_route->ToString() << " path_id " << BgpPath::PathIdString(path_id) << " in table " << bgptable->name()); } if (!service_chain_route->BestPath()) { partition->Delete(service_chain_route); } else { partition->Notify(service_chain_route); } } // AddServiceChainRoute template <typename T> void ServiceChain<T>::AddServiceChainRoute(PrefixT prefix, const RouteT *orig_route, const ConnectedPathIdList &old_path_ids, bool aggregate) { CHECK_CONCURRENCY("bgp::ServiceChain"); BgpTable *bgptable = src_table(); RouteT rt_key(prefix); DBTablePartition *partition = static_cast<DBTablePartition *>(bgptable->GetTablePartition(&rt_key)); BgpRoute *service_chain_route = static_cast<BgpRoute *>(partition->Find(&rt_key)); if (service_chain_route == NULL) { service_chain_route = new RouteT(prefix); partition->Add(service_chain_route); } else { service_chain_route->ClearDelete(); } int vn_index = dest_routing_instance()->virtual_network_index(); BgpServer *server = dest_routing_instance()->server(); OriginVn origin_vn(server->autonomous_system(), vn_index); 
SiteOfOrigin soo; ExtCommunity::ExtCommunityList sgid_list; LoadBalance load_balance; bool load_balance_present = false; const Community *orig_community = NULL; const OriginVnPath *orig_ovnpath = NULL; if (orig_route) { const BgpPath *orig_path = orig_route->BestPath(); const BgpAttr *orig_attr = NULL; const ExtCommunity *ext_community = NULL; if (orig_path) orig_attr = orig_path->GetAttr(); if (orig_attr) { orig_community = orig_attr->community(); ext_community = orig_attr->ext_community(); orig_ovnpath = orig_attr->origin_vn_path(); } if (ext_community) { BOOST_FOREACH(const ExtCommunity::ExtCommunityValue &comm, ext_community->communities()) { if (ExtCommunity::is_security_group(comm)) sgid_list.push_back(comm); if (ExtCommunity::is_site_of_origin(comm) && soo.IsNull()) soo = SiteOfOrigin(comm); if (ExtCommunity::is_load_balance(comm)) { load_balance = LoadBalance(comm); load_balance_present = true; } } } } BgpAttrDB *attr_db = server->attr_db(); CommunityDB *comm_db = server->comm_db(); CommunityPtr new_community = comm_db->AppendAndLocate( orig_community, CommunityType::AcceptOwnNexthop); ExtCommunityDB *extcomm_db = server->extcomm_db(); PeerRibMembershipManager *membership_mgr = server->membership_mgr(); OriginVnPathDB *ovnpath_db = server->ovnpath_db(); OriginVnPathPtr new_ovnpath = ovnpath_db->PrependAndLocate(orig_ovnpath, origin_vn.GetExtCommunity()); ConnectedPathIdList new_path_ids; for (Route::PathList::iterator it = connected_route()->GetPathList().begin(); it != connected_route()->GetPathList().end(); ++it) { BgpPath *connected_path = static_cast<BgpPath *>(it.operator->()); // Infeasible paths are not considered if (!connected_path->IsFeasible()) break; // take snapshot of all ECMP paths if (connected_route()->BestPath()->PathCompare(*connected_path, true)) break; // Skip paths with duplicate forwarding information. 
This ensures // that we generate only one path with any given next hop and label // when there are multiple connected paths from the original source // received via different peers e.g. directly via XMPP and via BGP. if (connected_route()->DuplicateForwardingPath(connected_path)) continue; const BgpAttr *attr = connected_path->GetAttr(); ExtCommunityPtr new_ext_community; // Strip any RouteTargets from the connected attributes. new_ext_community = extcomm_db->ReplaceRTargetAndLocate( attr->ext_community(), ExtCommunity::ExtCommunityList()); // Replace the SGID list with the list from the original route. new_ext_community = extcomm_db->ReplaceSGIDListAndLocate( new_ext_community.get(), sgid_list); // Replace SiteOfOrigin with value from original route if any. if (soo.IsNull()) { new_ext_community = extcomm_db->RemoveSiteOfOriginAndLocate( new_ext_community.get()); } else { new_ext_community = extcomm_db->ReplaceSiteOfOriginAndLocate( new_ext_community.get(), soo.GetExtCommunity()); } // Inherit load balance attribute of orig_route if connected path // does not have one already. if (!LoadBalance::IsPresent(connected_path) && load_balance_present) { new_ext_community = extcomm_db->AppendAndLocate( new_ext_community.get(), load_balance.GetExtCommunity()); } // Replace the OriginVn with the value from the original route // or the value associated with the dest routing instance. new_ext_community = extcomm_db->ReplaceOriginVnAndLocate( new_ext_community.get(), origin_vn.GetExtCommunity()); // Replace extended community, community and origin vn path. BgpAttrPtr new_attr = attr_db->ReplaceExtCommunityAndLocate( attr, new_ext_community); new_attr = attr_db->ReplaceCommunityAndLocate(new_attr.get(), new_community); new_attr = attr_db->ReplaceOriginVnPathAndLocate(new_attr.get(), new_ovnpath); // Strip aspath. This is required when the connected route is // learnt via BGP. 
new_attr = attr_db->ReplaceAsPathAndLocate(new_attr.get(), AsPathPtr()); // If the connected path is learnt via XMPP, construct RD based on // the id registered with source table instead of connected table. // This allows chaining of multiple in-network service instances // that are on the same compute node. const IPeer *peer = connected_path->GetPeer(); if (src_ != connected_ && peer && peer->IsXmppPeer()) { int instance_id = -1; bool is_registered = membership_mgr->GetRegistrationInfo(peer, bgptable, &instance_id); if (!is_registered) continue; RouteDistinguisher connected_rd = attr->source_rd(); if (connected_rd.Type() != RouteDistinguisher::TypeIpAddressBased) continue; RouteDistinguisher rd(connected_rd.GetAddress(), instance_id); new_attr = attr_db->ReplaceSourceRdAndLocate(new_attr.get(), rd); } // Replace the source rd if the connected path is a secondary path // of a primary path in the l3vpn table. Use the RD of the primary. if (connected_path->IsReplicated()) { const BgpSecondaryPath *spath = static_cast<const BgpSecondaryPath *>(connected_path); const RoutingInstance *ri = spath->src_table()->routing_instance(); if (ri->IsMasterRoutingInstance()) { const VpnRouteT *vpn_route = static_cast<const VpnRouteT *>(spath->src_rt()); new_attr = attr_db->ReplaceSourceRdAndLocate(new_attr.get(), vpn_route->GetPrefix().route_distinguisher()); } } // Check whether we already have a path with the associated path id. uint32_t path_id = connected_path->GetAttr()->nexthop().to_v4().to_ulong(); BgpPath *existing_path = service_chain_route->FindPath(BgpPath::ServiceChain, NULL, path_id); bool is_stale = false; bool path_updated = false; if (existing_path != NULL) { // Existing path can be reused. if ((new_attr.get() == existing_path->GetAttr()) && (connected_path->GetLabel() == existing_path->GetLabel())) { new_path_ids.insert(path_id); continue; } // Remove existing path, new path will be added below. 
path_updated = true; is_stale = existing_path->IsStale(); service_chain_route->RemovePath( BgpPath::ServiceChain, NULL, path_id); } BgpPath *new_path = new BgpPath(path_id, BgpPath::ServiceChain, new_attr.get(), connected_path->GetFlags(), connected_path->GetLabel()); if (is_stale) new_path->SetStale(); new_path_ids.insert(path_id); service_chain_route->InsertPath(new_path); partition->Notify(service_chain_route); BGP_LOG_STR(BgpMessage, SandeshLevel::SYS_DEBUG, BGP_LOG_FLAG_TRACE, (path_updated ? "Updated " : "Added ") << (aggregate ? "Aggregate" : "ExtConnected") << " ServiceChain path " << service_chain_route->ToString() << " path_id " << BgpPath::PathIdString(path_id) << " in table " << bgptable->name()); } // Remove stale paths. for (ConnectedPathIdList::const_iterator it = old_path_ids.begin(); it != old_path_ids.end(); ++it) { uint32_t path_id = *it; if (new_path_ids.find(path_id) != new_path_ids.end()) continue; service_chain_route->RemovePath(BgpPath::ServiceChain, NULL, path_id); partition->Notify(service_chain_route); BGP_LOG_STR(BgpMessage, SandeshLevel::SYS_DEBUG, BGP_LOG_FLAG_TRACE, "Removed " << (aggregate ? "Aggregate" : "ExtConnected") << " ServiceChain path " << service_chain_route->ToString() << " path_id " << BgpPath::PathIdString(path_id) << " in table " << bgptable->name()); } // Delete the route if there's no paths. 
if (!service_chain_route->BestPath()) partition->Delete(service_chain_route); } template <typename T> bool ServiceChain<T>::AddMoreSpecific(PrefixT aggregate, BgpRoute *more_specific) { typename PrefixToRouteListMap::iterator it = prefix_to_routelist_map_.find(aggregate); assert(it != prefix_to_routelist_map_.end()); bool ret = false; if (it->second.empty()) { // Add the aggregate for the first time ret = true; } it->second.insert(more_specific); return ret; } template <typename T> bool ServiceChain<T>::DeleteMoreSpecific(PrefixT aggregate, BgpRoute *more_specific) { typename PrefixToRouteListMap::iterator it = prefix_to_routelist_map_.find(aggregate); assert(it != prefix_to_routelist_map_.end()); it->second.erase(more_specific); return it->second.empty(); } template <typename T> void ServiceChain<T>::FillServiceChainInfo(ShowServicechainInfo *info) const { info->set_src_rt_instance(src_routing_instance()->name()); info->set_connected_rt_instance(connected_routing_instance()->name()); info->set_dest_rt_instance(dest_routing_instance()->name()); info->set_state(deleted() ? 
"deleted" : "active"); ConnectedRouteInfo connected_rt_info; connected_rt_info.set_service_chain_addr( service_chain_addr().to_string()); if (connected_route()) { ShowRoute show_route; connected_route()->FillRouteInfo(connected_table(), &show_route); connected_rt_info.set_connected_rt(show_route); } info->set_connected_route(connected_rt_info); vector<PrefixToRouteListInfo> more_vec; for (typename PrefixToRouteListMap::const_iterator it = prefix_to_route_list_map()->begin(); it != prefix_to_route_list_map()->end(); ++it) { PrefixToRouteListInfo prefix_list_info; prefix_list_info.set_prefix(it->first.ToString()); BgpTable *bgptable = src_table(); RouteT rt_key(it->first); BgpRoute *aggregate = static_cast<BgpRoute *>(bgptable->Find(&rt_key)); if (aggregate) { prefix_list_info.set_aggregate(true); ShowRoute show_route; aggregate->FillRouteInfo(bgptable, &show_route); prefix_list_info.set_aggregate_rt(show_route); } else { prefix_list_info.set_aggregate(false); } vector<string> rt_list; for (RouteList::iterator rt_it = it->second.begin(); rt_it != it->second.end(); ++rt_it) { rt_list.push_back((*rt_it)->ToString()); } prefix_list_info.set_more_specific_list(rt_list); more_vec.push_back(prefix_list_info); } info->set_more_specifics(more_vec); vector<ExtConnectRouteInfo> ext_connecting_rt_info_list; for (ExtConnectRouteList::const_iterator it = ext_connecting_routes().begin(); it != ext_connecting_routes().end(); ++it) { ExtConnectRouteInfo ext_rt_info; ext_rt_info.set_ext_rt_prefix((*it)->ToString()); BgpTable *bgptable = src_table(); RouteT *ext_route = static_cast<RouteT *>(*it); RouteT rt_key(ext_route->GetPrefix()); BgpRoute *ext_connecting = static_cast<BgpRoute *>(bgptable->Find(&rt_key)); if (ext_connecting) { ShowRoute show_route; ext_connecting->FillRouteInfo(bgptable, &show_route); ext_rt_info.set_ext_rt_svc_rt(show_route); } ext_connecting_rt_info_list.push_back(ext_rt_info); } info->set_ext_connecting_rt_info_list(ext_connecting_rt_info_list); 
info->set_aggregate_enable(aggregate_enable()); } template <typename T> bool ServiceChainMgr<T>::RequestHandler(ServiceChainRequestT *req) { CHECK_CONCURRENCY("bgp::ServiceChain"); BgpTable *table = NULL; BgpRoute *route = NULL; PrefixT aggregate_match = req->aggregate_match_; ServiceChainT *info = NULL; table = req->table_; route = req->rt_; info = static_cast<ServiceChainT *>(req->info_.get()); // Table where the aggregate route needs to be added aggregate_match = req->aggregate_match_; ServiceChainState *state = NULL; if (route) { state = static_cast<ServiceChainState *> (listener_->GetMatchState(table, route, info)); } switch (req->type_) { case ServiceChainRequestT::MORE_SPECIFIC_ADD_CHG: { assert(state); if (state->deleted()) { state->reset_deleted(); } if (info->AddMoreSpecific(aggregate_match, route) && info->IsConnectedRouteValid()) { // Add the aggregate route typename ServiceChainT::ConnectedPathIdList path_ids; info->AddServiceChainRoute( aggregate_match, NULL, path_ids, true); } break; } case ServiceChainRequestT::MORE_SPECIFIC_DELETE: { assert(state); if (info->DeleteMoreSpecific(aggregate_match, route)) { // Delete the aggregate route info->RemoveServiceChainRoute(aggregate_match, true); } info->RemoveMatchState(route, state); break; } case ServiceChainRequestT::CONNECTED_ROUTE_ADD_CHG: { assert(state); if (route->IsDeleted() || !route->BestPath() || !route->BestPath()->IsFeasible()) { break; } if (state->deleted()) { state->reset_deleted(); } // Store the old path id list and populate the new one. typename ServiceChainT::ConnectedPathIdList path_ids = info->GetConnectedPathIds(); info->SetConnectedRoute(route); typename ServiceChainT::PrefixToRouteListMap *vnprefix_list = info->prefix_to_route_list_map(); for (typename ServiceChainT::PrefixToRouteListMap::iterator it = vnprefix_list->begin(); it != vnprefix_list->end(); ++it) { // Add aggregate route.. 
Or if the route exists // sync the path and purge old paths if (!it->second.empty()) info->AddServiceChainRoute(it->first, NULL, path_ids, true); } for (typename ServiceChainT::ExtConnectRouteList::iterator it = info->ext_connecting_routes()->begin(); it != info->ext_connecting_routes()->end(); ++it) { // Add ServiceChain route for external connecting route RouteT *ext_route = static_cast<RouteT *>(*it); info->AddServiceChainRoute( ext_route->GetPrefix(), ext_route, path_ids, false); } break; } case ServiceChainRequestT::CONNECTED_ROUTE_DELETE: { assert(state); // Delete ServiceChain route for aggregate. typename ServiceChainT::PrefixToRouteListMap *vnprefix_list = info->prefix_to_route_list_map(); for (typename ServiceChainT::PrefixToRouteListMap::iterator it = vnprefix_list->begin(); it != vnprefix_list->end(); ++it) { info->RemoveServiceChainRoute(it->first, true); } // Delete ServiceChain routes for external connecting routes. for (typename ServiceChainT::ExtConnectRouteList::iterator it = info->ext_connecting_routes()->begin(); it != info->ext_connecting_routes()->end(); ++it) { RouteT *ext_route = static_cast<RouteT *>(*it); info->RemoveServiceChainRoute(ext_route->GetPrefix(), false); } info->RemoveMatchState(route, state); info->SetConnectedRoute(NULL); break; } case ServiceChainRequestT::EXT_CONNECT_ROUTE_ADD_CHG: { assert(state); if (state->deleted()) { state->reset_deleted(); } info->ext_connecting_routes()->insert(route); if (info->IsConnectedRouteValid()) { RouteT *ext_route = dynamic_cast<RouteT *>(route); typename ServiceChainT::ConnectedPathIdList path_ids; info->AddServiceChainRoute( ext_route->GetPrefix(), ext_route, path_ids, false); } break; } case ServiceChainRequestT::EXT_CONNECT_ROUTE_DELETE: { assert(state); if (info->ext_connecting_routes()->erase(route)) { RouteT *inet_route = dynamic_cast<RouteT *>(route); info->RemoveServiceChainRoute(inet_route->GetPrefix(), false); } info->RemoveMatchState(route, state); break; } case 
ServiceChainRequestT::UPDATE_ALL_ROUTES: { if (info->dest_table_unregistered()) break; if (info->connected_table_unregistered()) break; if (!info->connected_route()) break; typename ServiceChainT::ConnectedPathIdList path_ids = info->GetConnectedPathIds(); typename ServiceChainT::PrefixToRouteListMap *vnprefix_list = info->prefix_to_route_list_map(); for (typename ServiceChainT::PrefixToRouteListMap::iterator it = vnprefix_list->begin(); it != vnprefix_list->end(); ++it) { // Add aggregate route.. Or if the route exists // sync the path and purge old paths if (!it->second.empty()) info->AddServiceChainRoute( it->first, NULL, path_ids, true); } for (typename ServiceChainT::ExtConnectRouteList::iterator it = info->ext_connecting_routes()->begin(); it != info->ext_connecting_routes()->end(); ++it) { // Add ServiceChain route for external connecting route RouteT *ext_route = static_cast<RouteT *>(*it); info->AddServiceChainRoute( ext_route->GetPrefix(), ext_route, path_ids, false); } break; } case ServiceChainRequestT::STOP_CHAIN_DONE: { if (table == info->connected_table()) { info->set_connected_table_unregistered(); if (!info->num_matchstate()) { listener_->UnregisterMatchCondition(table, info); } } if (table == info->dest_table()) { info->set_dest_table_unregistered(); if (!info->num_matchstate()) { listener_->UnregisterMatchCondition(table, info); } } if (info->unregistered()) { chain_set_.erase(info->src_routing_instance()); StartResolve(); } break; } default: { assert(false); break; } } if (state) { state->DecrementRefCnt(); if (state->refcnt() == 0 && state->deleted()) { listener_->RemoveMatchState(table, route, info); delete state; if (!info->num_matchstate()) { if (info->dest_table_unregistered()) { listener_->UnregisterMatchCondition( info->dest_table(), info); } if (info->connected_table_unregistered()) { listener_->UnregisterMatchCondition( info->connected_table(), info); } if (info->unregistered()) { chain_set_.erase(info->src_routing_instance()); 
StartResolve(); } } } } delete req; return true; } template <typename T> ServiceChainMgr<T>::ServiceChainMgr(BgpServer *server) : server_(server), listener_(server_->condition_listener(GetFamily())), resolve_trigger_(new TaskTrigger( bind(&ServiceChainMgr::ResolvePendingServiceChain, this), TaskScheduler::GetInstance()->GetTaskId("bgp::Config"), 0)), aggregate_host_route_(false) { if (service_chain_task_id_ == -1) { TaskScheduler *scheduler = TaskScheduler::GetInstance(); service_chain_task_id_ = scheduler->GetTaskId("bgp::ServiceChain"); } process_queue_ = new WorkQueue<ServiceChainRequestT *>(service_chain_task_id_, 0, bind(&ServiceChainMgr::RequestHandler, this, _1)); id_ = server->routing_instance_mgr()->RegisterInstanceOpCallback( bind(&ServiceChainMgr::RoutingInstanceCallback, this, _1, _2)); PeerRibMembershipManager *membership_mgr = server->membership_mgr(); registration_id_ = membership_mgr->RegisterPeerRegistrationCallback( bind(&ServiceChainMgr::PeerRegistrationCallback, this, _1, _2, _3)); } template <typename T> ServiceChainMgr<T>::~ServiceChainMgr() { delete process_queue_; server_->routing_instance_mgr()->UnregisterInstanceOpCallback(id_); PeerRibMembershipManager *membership_mgr = server_->membership_mgr(); membership_mgr->UnregisterPeerRegistrationCallback(registration_id_); } template <> Address::Family ServiceChainMgr<ServiceChainInet>::GetFamily() const { return Address::INET; } template <> Address::Family ServiceChainMgr<ServiceChainInet6>::GetFamily() const { return Address::INET6; } template <typename T> void ServiceChainMgr<T>::Enqueue(ServiceChainRequestT *req) { process_queue_->Enqueue(req); } template <typename T> bool ServiceChainMgr<T>::IsPending(RoutingInstance *rtinstance) const { return pending_chains_.find(rtinstance) != pending_chains_.end(); } template <typename T> bool ServiceChainMgr<T>::FillServiceChainInfo(RoutingInstance *rtinstance, ShowServicechainInfo *info) const { if (IsPending(rtinstance)) { info->set_state("pending"); 
return true; } const ServiceChain<T> *service_chain = FindServiceChain(rtinstance); if (!service_chain) return false; service_chain->FillServiceChainInfo(info); return true; } template <typename T> bool ServiceChainMgr<T>::LocateServiceChain(RoutingInstance *rtinstance, const ServiceChainConfig &config) { CHECK_CONCURRENCY("bgp::Config"); // Verify whether the entry already exists ServiceChainMap::iterator it = chain_set_.find(rtinstance); if (it != chain_set_.end()) { ServiceChainT *chain = static_cast<ServiceChainT *>(it->second.get()); if (chain->CompareServiceChainConfig(config)) { BGP_LOG_STR(BgpMessage, SandeshLevel::SYS_DEBUG, BGP_LOG_FLAG_TRACE, "No update in ServiceChain config : " << rtinstance->name()); return true; } // Entry already exists. Update of match condition // The routing instance to pending resolve such that // service chain is created after stop done cb AddPendingServiceChain(rtinstance); if (it->second->deleted()) { // Wait for the delete complete cb return false; } BgpConditionListener::RequestDoneCb callback = bind(&ServiceChainMgr::StopServiceChainDone, this, _1, _2); listener_->RemoveMatchCondition( chain->dest_table(), it->second.get(), callback); listener_->RemoveMatchCondition( chain->connected_table(), it->second.get(), callback); return true; } RoutingInstanceMgr *mgr = server_->routing_instance_mgr(); RoutingInstance *dest = mgr->GetRoutingInstance(config.routing_instance); // // Destination routing instance is not yet created. 
// Or Destination routing instance is deleted Or // virtual network index is not yet calculated (due missing virtual network // link) // if (dest == NULL || dest->deleted() || !dest->virtual_network_index()) { // Wait for the creation of RoutingInstance AddPendingServiceChain(rtinstance); return false; } RoutingInstance *connected_ri = NULL; if (config.source_routing_instance == "") { connected_ri = rtinstance; assert(!rtinstance->deleted()); } else { connected_ri = mgr->GetRoutingInstance(config.source_routing_instance); } // routing instance to search for connected route is not yet created. if (connected_ri == NULL || connected_ri->deleted()) { // Wait for the creation of RoutingInstance where connected route // will be published AddPendingServiceChain(rtinstance); return false; } // Add to pending queue if the service chain address is invalid. error_code ec; AddressT chain_addr = AddressT::from_string(config.service_chain_address, ec); if (ec != 0) { AddPendingServiceChain(rtinstance); return false; } // Get the BGP Tables to add condition BgpTable *connected_table = connected_ri->GetTable(GetFamily()); assert(connected_table); BgpTable *dest_table = dest->GetTable(GetFamily()); assert(dest_table); // Allocate the new service chain and verify whether one already exists ServiceChainPtr chain = ServiceChainPtr(new ServiceChainT( this, rtinstance, dest, connected_ri, config.prefix, chain_addr)); if (aggregate_host_route()) { ServiceChainT *obj = static_cast<ServiceChainT *>(chain.get()); obj->set_aggregate_enable(); } // Add the new service chain request chain_set_.insert(make_pair(rtinstance, chain)); listener_->AddMatchCondition( connected_table, chain.get(), BgpConditionListener::RequestDoneCb()); listener_->AddMatchCondition( dest_table, chain.get(), BgpConditionListener::RequestDoneCb()); // Delete from the pending list. 
The instance would already have been // removed from the pending list if this method is called when trying // to resolve items in the pending list. However, if this method is // called when processing a change in the service chain config, then // we may need to remove it from the pending list. DeletePendingServiceChain(rtinstance); return true; } template <typename T> ServiceChain<T> *ServiceChainMgr<T>::FindServiceChain( const string &instance) const { RoutingInstance *rtinstance = server_->routing_instance_mgr()->GetRoutingInstance(instance); if (!rtinstance) return NULL; ServiceChainMap::const_iterator it = chain_set_.find(rtinstance); if (it == chain_set_.end()) return NULL; ServiceChainT *chain = static_cast<ServiceChainT *>(it->second.get()); return chain; } template <typename T> ServiceChain<T> *ServiceChainMgr<T>::FindServiceChain( RoutingInstance *rtinstance) const { ServiceChainMap::const_iterator it = chain_set_.find(rtinstance); if (it == chain_set_.end()) return NULL; ServiceChainT *chain = static_cast<ServiceChainT *>(it->second.get()); return chain; } template <typename T> bool ServiceChainMgr<T>::ResolvePendingServiceChain() { CHECK_CONCURRENCY("bgp::Config"); for (PendingServiceChainList::iterator it = pending_chains_.begin(), next; it != pending_chains_.end(); it = next) { next = it; ++next; RoutingInstance *rtinstance = *it; pending_chains_.erase(it); const ServiceChainConfig *sc_config = rtinstance->config()->service_chain_info(GetFamily()); if (sc_config) LocateServiceChain(rtinstance, *sc_config); } return true; } template <typename T> void ServiceChainMgr<T>::RoutingInstanceCallback(string name, int op) { if (op != RoutingInstanceMgr::INSTANCE_DELETE) StartResolve(); } template <typename T> void ServiceChainMgr<T>::StartResolve() { if (pending_chains_.empty() == false) { resolve_trigger_->Set(); } } template <typename T> void ServiceChainMgr<T>::StopServiceChainDone(BgpTable *table, ConditionMatch *info) { // Post the RequestDone event to 
ServiceChain task to take Action ServiceChainRequestT *req = new ServiceChainRequestT(ServiceChainRequestT::STOP_CHAIN_DONE, table, NULL, PrefixT(), ServiceChainPtr(info)); Enqueue(req); return; } template <typename T> void ServiceChainMgr<T>::StopServiceChain(RoutingInstance *rtinstance) { // Remove the routing instance from pending chains list. pending_chains_.erase(rtinstance); ServiceChainMap::iterator it = chain_set_.find(rtinstance); if (it == chain_set_.end()) return; if (it->second->deleted()) return; BgpConditionListener::RequestDoneCb callback = bind(&ServiceChainMgr::StopServiceChainDone, this, _1, _2); ServiceChainT *obj = static_cast<ServiceChainT *>(it->second.get()); listener_->RemoveMatchCondition(obj->dest_table(), obj, callback); listener_->RemoveMatchCondition(obj->connected_table(), obj, callback); } template <typename T> void ServiceChainMgr<T>::PeerRegistrationCallback(IPeer *peer, BgpTable *table, bool unregister) { CHECK_CONCURRENCY("bgp::PeerMembership"); // Bail if it's not an XMPP peer. if (!peer->IsXmppPeer()) return; // Bail if there's no service chain for the instance. ServiceChainT *chain = FindServiceChain(table->routing_instance()); if (!chain) return; // Post event to ServiceChain task to update all routes. 
ServiceChainRequestT *req = new ServiceChainRequestT(ServiceChainRequestT::UPDATE_ALL_ROUTES, NULL, NULL, PrefixT(), ServiceChainPtr(chain)); Enqueue(req); } template <typename T> void ServiceChainMgr<T>::DisableResolveTrigger() { resolve_trigger_->set_disable(); } template <typename T> void ServiceChainMgr<T>::EnableResolveTrigger() { resolve_trigger_->set_enable(); } template <typename T> uint32_t ServiceChainMgr<T>::GetDownServiceChainCount() const { uint32_t count = 0; for (ServiceChainMap::const_iterator it = chain_set_.begin(); it != chain_set_.end(); ++it) { const ServiceChainT *chain = static_cast<const ServiceChainT *>(it->second.get()); if (!chain->IsConnectedRouteValid()) count++; } return count; } // Explicit instantiation of ServiceChainMgr for INET and INET6. template class ServiceChainMgr<ServiceChainInet>; template class ServiceChainMgr<ServiceChainInet6>;
{ "alphanum_fraction": 0.6259728806, "author": null, "avg_line_length": 38.0494966443, "converted": null, "ext": "cc", "file": null, "hexsha": "287c625f6108f6c68119cdaef83c308a2808876d", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "80c4a7e8515f7296b18ba4c21a439bd3daefcc4a", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "biswajit-mandal/contrail-controller", "max_forks_repo_path": "src/bgp/routing-instance/service_chaining.cc", "max_issues_count": null, "max_issues_repo_head_hexsha": "80c4a7e8515f7296b18ba4c21a439bd3daefcc4a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "biswajit-mandal/contrail-controller", "max_issues_repo_path": "src/bgp/routing-instance/service_chaining.cc", "max_line_length": 80, "max_stars_count": null, "max_stars_repo_head_hexsha": "80c4a7e8515f7296b18ba4c21a439bd3daefcc4a", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "biswajit-mandal/contrail-controller", "max_stars_repo_path": "src/bgp/routing-instance/service_chaining.cc", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 9917, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 45355 }
import numpy as np from second.core.anchor_generator import ( AnchorGeneratorStride, AnchorGeneratorRange) def build(anchor_config): """Create optimizer based on config. Args: optimizer_config: A Optimizer proto message. Returns: An optimizer and a list of variables for summary. Raises: ValueError: when using an unsupported input data type. """ ag_type = anchor_config.WhichOneof('anchor_generator') if ag_type == 'anchor_generator_stride': config = anchor_config.anchor_generator_stride ag = AnchorGeneratorStride( sizes=list(config.sizes), anchor_strides=list(config.strides), anchor_offsets=list(config.offsets), rotations=list(config.rotations), match_threshold=config.matched_threshold, unmatch_threshold=config.unmatched_threshold, class_name=config.class_name) return ag elif ag_type == 'anchor_generator_range': config = anchor_config.anchor_generator_range ag = AnchorGeneratorRange( sizes=list(config.sizes), anchor_ranges=list(config.anchor_ranges), rotations=list(config.rotations), match_threshold=config.matched_threshold, unmatch_threshold=config.unmatched_threshold, class_name=config.class_name) return ag else: raise ValueError(" unknown anchor generator type")
{ "alphanum_fraction": 0.6830449827, "author": null, "avg_line_length": 32.8409090909, "converted": null, "ext": "py", "file": null, "hexsha": "13b828363dfa816cecf2e182ca5e1d0dfc9b24a0", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d10f0994cdefd035c9830a3119b0b264d8ddbd1e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "yukitsuji/second_tmp", "max_forks_repo_path": "second/builder/anchor_generator_builder.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "d10f0994cdefd035c9830a3119b0b264d8ddbd1e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "yukitsuji/second_tmp", "max_issues_repo_path": "second/builder/anchor_generator_builder.py", "max_line_length": 58, "max_stars_count": null, "max_stars_repo_head_hexsha": "d10f0994cdefd035c9830a3119b0b264d8ddbd1e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "yukitsuji/second_tmp", "max_stars_repo_path": "second/builder/anchor_generator_builder.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 264, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1445 }
# ------------------------------------------------------------------------------- # Copyright IBM Corp. 2016 # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------- import pixiedust_flightpredict.training as training from pixiedust.display.chart.renderers.baseChartDisplay import BaseChartDisplay import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from pyspark.sql import Row from functools import reduce import pixiedust myLogger = pixiedust.getLogger(__name__) def makeList(l): return l if isinstance(l, list) else [l] class VizualizeFeatures(BaseChartDisplay): def doRender(self, handlerId): f1="departureWeather.temp" f2="arrivalWeather.temp" f1=f1.split(".") f2=f2.split(".") handler=training.getTrainingHandler() darr=self.entity.rdd.map(lambda s: ( handler.computeClassification(s),(\ reduce(lambda x,y: getattr(x,y) if isinstance(x, Row) else getattr(getattr(s,x),y), f1) if len(f1)>1 else getattr(s,f1[0]),\ reduce(lambda x,y: getattr(x,y) if isinstance(x, Row) else getattr(getattr(s,x),y), f2) if len(f2)>1 else getattr(s,f2[0])\ )))\ .reduceByKey(lambda x,y: makeList(x) + makeList(y))\ .collect() numClasses=handler.numClasses() citer=iter(cm.rainbow(np.linspace(0, 1, numClasses))) colors = [next(citer) for i in range(0, numClasses)] legends= [handler.getClassLabel(i) for i in range(0,numClasses)] sets=[] fig, ax = plt.subplots(figsize=(12,8)) for t in darr: 
sets.append((ax.scatter([x[0] for x in t[1]],[x[1] for x in t[1]],color=colors[t[0]],alpha=0.5),legends[t[0]])) ax.set_ylabel("Departure Airport Temp") ax.set_xlabel("Arrival Airport Temp") ax.legend([x[0] for x in sets], [x[1] for x in sets], scatterpoints=1, loc='lower left', ncol=numClasses, fontsize=12) def doRenderChart(self): pass
{ "alphanum_fraction": 0.6155021, "author": null, "avg_line_length": 40.921875, "converted": null, "ext": "py", "file": null, "hexsha": "deb809048daeb51e3f4e837d64980c194a21a757", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "aa810580787d88dd3bb8785c1f01d69bc69827bb", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "ibm-watson-data-lab/simple-data-pipe-connector-flightstats", "max_forks_repo_path": "pixiedust_flightpredict/pixiedust_flightpredict/vizFeatures.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "aa810580787d88dd3bb8785c1f01d69bc69827bb", "max_issues_repo_issues_event_max_datetime": "2017-08-31T14:44:12.000Z", "max_issues_repo_issues_event_min_datetime": "2017-08-31T14:44:12.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "ibm-watson-data-lab/simple-data-pipe-connector-flightstats", "max_issues_repo_path": "pixiedust_flightpredict/pixiedust_flightpredict/vizFeatures.py", "max_line_length": 136, "max_stars_count": 2, "max_stars_repo_head_hexsha": "aa810580787d88dd3bb8785c1f01d69bc69827bb", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "ibm-watson-data-lab/simple-data-pipe-connector-flightstats", "max_stars_repo_path": "pixiedust_flightpredict/pixiedust_flightpredict/vizFeatures.py", "max_stars_repo_stars_event_max_datetime": "2017-08-14T17:17:39.000Z", "max_stars_repo_stars_event_min_datetime": "2017-07-28T07:05:43.000Z", "num_tokens": 626, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2619 }
!***************************************************************************************** !> author: GasinAn ! ! Simplified Modern Fortran Edition of the DOP853 ODE Solver. ! !### License ! ! Simplified Modern Fortran Edition of the DOP853 ODE Solver ! https://github.com/GasinAn/easydop853 ! ! Copyright (c) 2020, GasinAn ! All rights reserved. ! ! Redistribution and use in source and binary forms, with or without modification, ! are permitted provided that the following conditions are met: ! ! * Redistributions of source code must retain the above copyright notice, this ! list of conditions and the following disclaimer. ! ! * Redistributions in binary form must reproduce the above copyright notice, this ! list of conditions and the following disclaimer in the documentation and/or ! other materials provided with the distribution. ! ! * The names of its contributors may not be used to endorse or promote products ! derived from this software without specific prior written permission. ! ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ! ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ! WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ! ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ! (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ! LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ! ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ! SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ! 
!***************************************************************************************** module easydop853_module use dop853_module use dop853_constants implicit none contains subroutine easydop853(fcn,x,xf,y) implicit none procedure(deriv_func) :: fcn !! subroutine computing the value of \(dy/dx=f(x,y)\) real(wp),intent(inout) :: x !! `x` value (input is initial value and output is final value) real(wp),intent(in) :: xf !! endpoint of integration (final value of `x`) real(wp),dimension(:),intent(inout) :: y !! `y` value (input is initial value and output is final value) integer :: nstiff = 1 !! nstiff parameter for stiffness detection, !! which will occur at step 1*nstiff, 2*nstiff, 3*nstiff ... if nstiff>0 !! and will not occur if nstiff<=0 integer :: nmax = 2250000 !! maximal number of allowed steps real(wp),dimension(1) :: rtol = 1.0e-12_wp !! relative tolerance real(wp),dimension(1) :: atol = 1.0e-24_wp !! absolute tolerance type(dop853_class) :: prop logical :: status_ok integer :: idid call prop%initialize(n=size(y),fcn=fcn,nstiff=nstiff,nmax=nmax,& status_ok=status_ok) if (.not.status_ok) error stop 'initialization error' call prop%integrate(x,y,xf,rtol,atol,iout=0,idid=idid) if (idid<0) error stop 'integration failure' end subroutine easydop853 end module easydop853_module
{ "alphanum_fraction": 0.6192596716, "author": null, "avg_line_length": 41.2988505747, "converted": null, "ext": "f90", "file": null, "hexsha": "a98b8edd6eec2f778978e13c75f560638b6f4173", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b65193dbe43c1000958b77c1050296ef916d17f1", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "GasinAn/easydop853", "max_forks_repo_path": "easydop853_module.f90", "max_issues_count": null, "max_issues_repo_head_hexsha": "b65193dbe43c1000958b77c1050296ef916d17f1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "GasinAn/easydop853", "max_issues_repo_path": "easydop853_module.f90", "max_line_length": 91, "max_stars_count": 1, "max_stars_repo_head_hexsha": "b65193dbe43c1000958b77c1050296ef916d17f1", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "GasinAn/easydop853", "max_stars_repo_path": "easydop853_module.f90", "max_stars_repo_stars_event_max_datetime": "2021-03-29T23:19:51.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-29T23:19:51.000Z", "num_tokens": 811, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 3593 }
struct TikzFigure <: AbstractTikzFigure axes::Vector{AbstractTikzAxis} end function TikzFigure() return TikzFigure([EmptyTikzAxis()]) end function EmptyTikzFigure() return TikzFigure() end
{ "alphanum_fraction": 0.7948717949, "author": null, "avg_line_length": 15, "converted": null, "ext": "jl", "file": null, "hexsha": "20b4ac9370813cb68f6a20fa2172b5f14886eacd", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "569eb74d1be4c55991913b3f2c2a592b7b4ad70b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "fergu/Julia2Tikz", "max_forks_repo_path": "src/TikzFigure.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "569eb74d1be4c55991913b3f2c2a592b7b4ad70b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "fergu/Julia2Tikz", "max_issues_repo_path": "src/TikzFigure.jl", "max_line_length": 39, "max_stars_count": null, "max_stars_repo_head_hexsha": "569eb74d1be4c55991913b3f2c2a592b7b4ad70b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "fergu/Julia2Tikz", "max_stars_repo_path": "src/TikzFigure.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 56, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 195 }
[STATEMENT] lemma times_inf [simp]: "x * y = x \<sqinter> y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. x * y = x \<sqinter> y [PROOF STEP] by simp
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Stone_Relation_Algebras_Relation_Algebras", "hexsha": null, "include": null, "lang": null, "length": 1, "llama_tokens": 71, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
abstract type AbstractBoolDomain <: AbstractDomain end

"""
    struct BoolDomain <: AbstractBoolDomain

Boolean domain, backed by an `IntDomain` (true is 1 and false is 0).
"""
struct BoolDomain <: AbstractBoolDomain
    inner::IntDomain

    function BoolDomain(trailer::Trailer)
        # Inner IntDomain holds {0, 1} via size 2 and offset -1.
        return new(IntDomain(trailer, 2, -1))
    end
end

"""
    reset_domain!(dom::BoolDomain)

Used in `reset_model!`. Delegates to the inner `IntDomain`.
"""
reset_domain!(dom::BoolDomain) = reset_domain!(dom.inner)

function Base.show(io::IO, dom::BoolDomain)
    print(io, "[", join(dom, " "), "]")
end

function Base.show(io::IO, ::MIME"text/plain", dom::BoolDomain)
    print(io, typeof(dom), ": [", join(dom, " "), "]")
end

"""
    isempty(dom::BoolDomain)

Return `true` iff `dom` is an empty set. Done in constant time.
"""
Base.isempty(dom::BoolDomain) = Base.isempty(dom.inner)

"""
    length(dom::BoolDomain)

Return the size of `dom`. Done in constant time.
"""
Base.length(dom::SeaPearl.BoolDomain) = Base.length(dom.inner)

"""
    Base.in(value::Bool, dom::BoolDomain)

Check if a boolean is in the domain. Done in constant time.
"""
function Base.in(value::Bool, dom::BoolDomain)
    intValue = convert(Int, value)
    return Base.in(intValue, dom.inner)
end

"""
    remove!(dom::BoolDomain, value::Bool)

Remove `value` from `dom`. Return the removed values. Done in constant time.
"""
function remove!(dom::BoolDomain, value::Bool)
    if !(value in dom)
        return Bool[]
    end
    intValue = convert(Int, value)
    remove!(dom.inner, intValue)
    return [value]
end

"""
    removeAll!(dom::BoolDomain)

Remove every value from `dom`. Return the removed values. Done in constant time.
"""
removeAll!(dom::BoolDomain) = convert.(Bool, removeAll!(dom.inner))

"""
    assign!(dom::BoolDomain, value::Bool)

Remove everything from the domain but `value`. Return the pruned values.
Done in *constant* time.
"""
function assign!(dom::BoolDomain, value::Bool)
    @assert value in dom
    return convert.(Bool, assign!(dom.inner, convert(Int, value)))
end

"""
    Base.iterate(dom::BoolDomain, state=1)

Iterate over the domain in an efficient way. The order may not be consistent.
WARNING: Do **NOT** update the domain you are iterating on.
"""
function Base.iterate(dom::BoolDomain, state=1)
    returned = iterate(dom.inner, state)
    if isnothing(returned)
        return nothing
    end
    value, newState = returned
    return convert(Bool, value), newState
end
{ "alphanum_fraction": 0.6486908949, "author": null, "avg_line_length": 23.4770642202, "converted": null, "ext": "jl", "file": null, "hexsha": "bcef5df773a7f0322d15b686162609329fbc7f27", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-02-15T02:44:34.000Z", "max_forks_repo_forks_event_min_datetime": "2021-05-10T23:32:49.000Z", "max_forks_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "pitmonticone/SeaPearl.jl", "max_forks_repo_path": "src/CP/variables/BoolDomain.jl", "max_issues_count": 65, "max_issues_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:42:24.000Z", "max_issues_repo_issues_event_min_datetime": "2021-04-23T17:20:56.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "pitmonticone/SeaPearl.jl", "max_issues_repo_path": "src/CP/variables/BoolDomain.jl", "max_line_length": 100, "max_stars_count": 44, "max_stars_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "pitmonticone/SeaPearl.jl", "max_stars_repo_path": "src/CP/variables/BoolDomain.jl", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:17:03.000Z", "max_stars_repo_stars_event_min_datetime": "2021-04-20T16:29:52.000Z", "num_tokens": 622, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2559 }
(* NOTE(review): this is a proof transcript (lemma statement, goal state, and the
   discharging proof step) rather than plain theory text — the [STATEMENT] /
   [PROOF STATE] / [PROOF STEP] markers come from the capture tooling. The lemma
   appears to be a derived rule for the starr_applied bookkeeping over ternary
   separation judgements h1,h2 |> h0 — confirm against the Separata theory. *)
[STATEMENT] lemma lspasl_starr_der: "(h1,h2\<triangleright>h0) \<Longrightarrow> \<not> ((A ** B) h0) \<Longrightarrow> ((h1,h2\<triangleright>h0) \<and> \<not> ((A h1) \<or> ((A ** B) h0)) \<and> (starr_applied h1 h2 h0 (A ** B))) \<or> ((h1,h2\<triangleright>h0) \<and> \<not> ((B h2) \<or> ((A ** B) h0)) \<and> (starr_applied h1 h2 h0 (A ** B)))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>h1,h2\<triangleright>h0; \<not> (A \<and>* B) h0\<rbrakk> \<Longrightarrow> (h1,h2\<triangleright>h0) \<and> \<not> (A h1 \<or> (A \<and>* B) h0) \<and> starr_applied h1 h2 h0 (A \<and>* B) \<or> (h1,h2\<triangleright>h0) \<and> \<not> (B h2 \<or> (A \<and>* B) h0) \<and> starr_applied h1 h2 h0 (A \<and>* B) [PROOF STEP] by (simp add: lspasl_starl_eq starr_applied_def)
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Separata_Separata", "hexsha": null, "include": null, "lang": null, "length": 1, "llama_tokens": 394, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
# Reconstructed from whitespace-collapsed source: formatting/indentation restored,
# comments translated to English. Logic is kept as written; apparent defects are
# flagged with NOTE(review) rather than silently changed.
import os
import sys
import json
import pickle
import random

import torch
# from torch.utils.tensorboard.summary import image
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import torchvision.transforms.functional as F
from torchvision.transforms import RandomCrop as RandomCrop
import cv2
import albumentations
from PIL import Image


class Random_Dropout:
    """With probability `probility`, black out up to 2 rectangular holes in a PIL image."""

    def __init__(self, probility):
        self.probility = probility

    def __call__(self, image):
        if random.uniform(0, 1) > self.probility:
            return image
        w, h = image.size
        # PIL (RGB) -> OpenCV (BGR), apply CoarseDropout, then convert back.
        # NOTE(review): max_height/max_width are passed as floats (h/2, w/2) —
        # confirm the installed albumentations version accepts non-integers here.
        return Image.fromarray(cv2.cvtColor(albumentations.CoarseDropout(max_holes=2, max_height=h / 2, max_width=w / 2, p=self.probility)(image=cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR))['image'], cv2.COLOR_BGR2RGB))


class CLAHE:
    """With probability `prob`, apply Contrast-Limited Adaptive Histogram Equalization."""

    def __init__(self, prob):
        self.probility = prob

    def __call__(self, image):
        if random.uniform(0, 1) > self.probility:
            return image
        return Image.fromarray(cv2.cvtColor(albumentations.CLAHE(clip_limit=4.0, tile_grid_size=(8, 8), always_apply=False, p=self.probility)(image=cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR))['image'], cv2.COLOR_BGR2RGB))


class Random_blur:
    """With probability `prob`, blur the image (kernel size up to 5)."""

    def __init__(self, prob):
        self.probility = prob

    def __call__(self, image):
        if random.uniform(0, 1) > self.probility:
            return image
        return Image.fromarray(cv2.cvtColor(albumentations.Blur(blur_limit=5, always_apply=False, p=self.probility)(image=cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR))['image'], cv2.COLOR_BGR2RGB))


class random_paste:
    """With probability `prob`, shrink the image and paste it at a random spot
    on a randomly chosen background image resized to `image_size` x `image_size`.

    NOTE(review): assumes a directory named 'background_image' exists relative
    to the current working directory — confirm before use.
    """

    def __init__(self, image_size, prob):
        self.prob = prob
        g = os.walk(r'background_image')
        self.background_image_path = []
        self.image_size = image_size
        for path, dir_list, file_list in g:
            for file_name in file_list:
                self.background_image_path.append(os.path.join(path, file_name))

    def __call__(self, image):
        if random.uniform(0, 1) > self.prob:
            return image
        # select_image_path = background_image_path[random.randint(0,len(self.background_image_path)-1)]
        background_image = Image.open(self.background_image_path[random.randint(0, len(self.background_image_path) - 1)])
        background_image = Image.fromarray(np.array(background_image)).resize((self.image_size, self.image_size), 0)
        w, h = image.size
        max_wh = np.max([w, h])
        # Scale factor > 1 so the pasted image always fits inside the background.
        ratial = (max_wh / self.image_size) * random.uniform(1, 2)
        new_w = int(w / ratial)
        new_h = int(h / ratial)
        image = image.resize((new_w, new_h), 0)
        new_x = int(random.uniform(0, self.image_size - new_w))
        new_y = int(random.uniform(0, self.image_size - new_h))
        background_image.paste(image, (new_x, new_y))
        return background_image


class letterbox:
    """Pad the image to a square. `color == 0` pads with black, otherwise a random RGB color."""

    def __init__(self, color):
        self.color = color

    def __call__(self, image):
        w, h = image.size
        max_wh = np.max([w, h])
        hp = int((max_wh - w) / 2)
        vp = int((max_wh - h) / 2)
        padding = (hp, vp, hp, vp)
        self.color2 = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        if self.color == 0:
            self.color2 = (0, 0, 0)
        return F.pad(image, padding, self.color2, 'constant')


class cropping_image_ramdomly:
    """With probability `p` percent, take a random `size` x `size` crop of the image."""

    def __init__(self, size, p):
        self.size = size
        self.prob = p
        self.cropping_fucntion = RandomCrop(size=(self.size, self.size))

    def __call__(self, image):
        # `prob` is expressed in percent (0-100), hence the 0-99 draw.
        if random.randint(0, 99) <= self.prob:
            return self.cropping_fucntion(image)
        else:
            return image


def read_split_data(root: str, val_rate: float = 0.2):
    """Scan `root` (one sub-directory per class), write class_indices.json,
    and randomly split the images into train/validation sets.

    Returns (train_paths, train_labels, val_paths, val_labels).
    """
    random.seed(0)  # make the random split reproducible
    assert os.path.exists(root), "dataset root: {} does not exist.".format(root)

    # Each sub-directory of root corresponds to one class.
    flower_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]
    # Sort for a deterministic class ordering.
    flower_class.sort()
    # Map class name -> numeric index, and dump the inverse map to JSON.
    class_indices = dict((k, v) for v, k in enumerate(flower_class))
    json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    train_images_path = []   # paths of all training images
    train_images_label = []  # class index of each training image
    val_images_path = []     # paths of all validation images
    val_images_label = []    # class index of each validation image
    every_class_num = []     # sample count per class
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # accepted file extensions
    # Walk every class directory.
    for cla in flower_class:
        cla_path = os.path.join(root, cla)
        images = [os.path.join(root, cla, i) for i in os.listdir(cla_path) if os.path.splitext(i)[-1] in supported]
        image_class = class_indices[cla]
        every_class_num.append(len(images))
        # Randomly sample a fraction `val_rate` of this class for validation.
        val_path = random.sample(images, k=int(len(images) * val_rate))
        for img_path in images:
            if img_path in val_path:
                val_images_path.append(img_path)
                val_images_label.append(image_class)
            else:
                train_images_path.append(img_path)
                train_images_label.append(image_class)

    print("{} images were found in the dataset.".format(sum(every_class_num)))
    print("{} images for training.".format(len(train_images_path)))
    print("{} images for validation.".format(len(val_images_path)))

    plot_image = False
    if plot_image:
        # Bar chart of per-class sample counts.
        plt.bar(range(len(flower_class)), every_class_num, align='center')
        plt.xticks(range(len(flower_class)), flower_class)
        for i, v in enumerate(every_class_num):
            plt.text(x=i, y=v + 5, s=str(v), ha='center')
        plt.xlabel('image class')
        plt.ylabel('number of images')
        plt.title('flower class distribution')
        plt.show()

    return train_images_path, train_images_label, val_images_path, val_images_label


def plot_data_loader_image(data_loader):
    """Show up to 4 de-normalized images per batch with their class names as labels."""
    batch_size = data_loader.batch_size
    plot_num = min(batch_size, 4)

    json_path = './class_indices.json'
    assert os.path.exists(json_path), json_path + " does not exist."
    json_file = open(json_path, 'r')
    class_indices = json.load(json_file)

    for data in data_loader:
        images, labels = data
        for i in range(plot_num):
            # [C, H, W] -> [H, W, C]
            img = images[i].numpy().transpose(1, 2, 0)
            # Undo the ImageNet normalization.
            img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
            label = labels[i].item()
            plt.subplot(1, plot_num, i + 1)
            plt.xlabel(class_indices[str(label)])
            plt.xticks([])  # hide x-axis ticks
            plt.yticks([])  # hide y-axis ticks
            plt.imshow(img.astype('uint8'))
        plt.show()


def write_pickle(list_info: list, file_name: str):
    """Pickle `list_info` to `file_name`."""
    with open(file_name, 'wb') as f:
        pickle.dump(list_info, f)


def read_pickle(file_name: str) -> list:
    """Load and return the pickled list stored in `file_name`."""
    with open(file_name, 'rb') as f:
        info_list = pickle.load(f)
    return info_list


class LabelSmoothingCrossEntropy(torch.nn.Module):
    """Cross-entropy with label smoothing: mixes the NLL of the target with the
    mean negative log-probability over all classes, weighted by `eps`."""

    def __init__(self, eps=0.1, reduction='mean'):
        super(LabelSmoothingCrossEntropy, self).__init__()
        self.eps = eps
        self.reduction = reduction

    def forward(self, output, target):
        c = output.size()[-1]  # number of classes
        log_preds = torch.nn.functional.log_softmax(output, dim=-1)
        if self.reduction == 'sum':
            loss = -log_preds.sum()
        else:
            loss = -log_preds.sum(dim=-1)
            if self.reduction == 'mean':
                loss = loss.mean()
        return loss * self.eps / c + (1 - self.eps) * torch.nn.functional.nll_loss(log_preds, target, reduction=self.reduction)


def train_siamese_network_one_epoch(model1, model2, optimizer, dataloader, device, epoch, label_smothing_eps=0.1):
    """Copy model1's weights into model2's backbone, freeze everything except the
    head, and train model2 for one epoch. Returns (mean_loss, accuracy)."""
    model1.eval()
    # Transfer backbone weights; strict=False tolerates head-shape mismatches.
    model2.efficientnet_module.load_state_dict(model1.state_dict(), strict=False)
    for name, para in model2.efficientnet_module.named_parameters():
        if "head" not in name:
            para.requires_grad_(False)
    model2.train()
    if label_smothing_eps != 0:
        loss_function = LabelSmoothingCrossEntropy(eps=label_smothing_eps)
    else:
        loss_function = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    optimizer.zero_grad()
    sample_num = 0
    dataloader = tqdm(dataloader)
    for step, data in enumerate(dataloader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model2(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_function(pred, labels.to(device))
        loss.backward()
        accu_loss += loss.detach()
        dataloader.desc = "[train siamese_network epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch, accu_loss.item() / (step + 1), accu_num.item() / sample_num)
        optimizer.step()
        optimizer.zero_grad()
    model2.eval()
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num


def val_the_dataset(model2, dataloader, device):
    """Run inference over `dataloader` and return statistics on the softmax
    confidences: (mean, var, std, count of confidences <= 80, raw outputs)."""
    model2.eval()
    prob_of_classes_list = []
    prediction_cls_list = []
    lables_list = []
    output_list = []
    for step, data in enumerate(dataloader):
        images, labels = data
        with torch.no_grad():
            output = torch.squeeze(model2(images.to(device)))
            prediction = torch.softmax(output, dim=1)
            prob_of_classes = torch.max(prediction, dim=1)[0] * 100  # confidence in percent
            prediction_cls = torch.max(prediction, dim=1)[1]
            prob_of_classes_list += prob_of_classes.cpu().numpy().tolist()
            prediction_cls_list += prediction_cls.cpu().numpy().tolist()
            lables_list += labels.cpu().numpy().tolist()
            # NOTE(review): this appends a *generator object* per batch, not the
            # materialized triples — callers get unevaluated generators. Likely a
            # bug (missing list(...)); kept as-is to preserve behavior.
            output_list.append([prediction_cls_list[i], prob_of_classes_list[i], lables_list[i]] for i in range(len(lables_list)))
    return np.mean(prob_of_classes_list), np.var(prob_of_classes_list), np.std(prob_of_classes_list), np.sum(list(map(lambda funct: funct <= 80, prob_of_classes_list))), output_list


class vector_loss(torch.nn.Module):
    """Contrastive-style loss on a pair of embeddings: pull together when the two
    targets are equal, push apart (up to margin `th`) when they differ."""

    def __init__(self, th=20):
        super(vector_loss, self).__init__()
        self.thresh = th

    def forward(self, output, target):
        if torch.equal(target[0], target[1]):
            # Same class: minimize the pairwise distance directly.
            return torch.nn.functional.pdist(output, p=2)
        else:
            dis = torch.nn.functional.pdist(output, p=2)
            if self.thresh - dis <= 0:
                # Beyond the margin: return a vanishing loss instead of zero.
                return dis * (10 ** -10)
            else:
                return self.thresh - dis


def train_one_epoch_vector_loss(model, optimizer, data_loader, data_loader_shuffle, device, epoch, label_smothing_eps=0.1):
    """Train one epoch with `vector_loss` on the features hooked from the model's
    penultimate layer, over two loaders (paired then shuffled batches of 2).

    NOTE(review): `accu_num` is never updated here, so the returned accuracy is
    always 0 — confirm whether callers rely on it.
    """
    model.train()
    # loss_function = torch.nn.CrossEntropyLoss()
    feature_map = {}

    def forward_hook(module, inp, outp):
        feature_map['feature'] = outp

    # Hook the feature layer; the index assumes a specific model layout — TODO confirm.
    extract_feature_layers = list(model.children())[-1][3]
    extract_feature_layers.register_forward_hook(forward_hook)
    # NOTE(review): both branches construct the same vector_loss(); the eps flag
    # currently has no effect here.
    if label_smothing_eps != 0:
        loss_func = vector_loss()
    else:
        loss_func = vector_loss()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    optimizer.zero_grad()
    sample_num = 0
    pos_sample = 0
    nag_sample = 0
    data_loader = tqdm(data_loader)
    data_loader_shuffle = tqdm(data_loader_shuffle)
    for step, data in enumerate(data_loader):
        images, labels = data
        # NOTE(review): appears intended to stop on batches whose size != 2
        # (vector_loss needs exactly a pair) — confirm the comparison semantics.
        if np.array(labels.size()) != 2:
            break
        if torch.equal(labels[0], labels[1]):
            pos_sample += 1
        else:
            nag_sample += 1
        sample_num += images.shape[0]
        pred = model(images.to(device))
        # Loss is computed on the hooked features, not the model output.
        loss = loss_func(feature_map['feature'], labels.to(device))
        loss.backward()
        accu_loss += loss.detach()
        data_loader.desc = "[train epoch {}] loss: {:.3f}, pos_sample: {:.3f}".format(epoch, accu_loss.item() / (step + 1), pos_sample / (step + 1))
        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)
        optimizer.step()
        optimizer.zero_grad()
    # Second pass over the shuffled loader, same procedure.
    for step, data in enumerate(data_loader_shuffle):
        images, labels = data
        if np.array(labels.size()) != 2:
            break
        if torch.equal(labels[0], labels[1]):
            pos_sample += 1
        else:
            nag_sample += 1
        sample_num += images.shape[0]
        pred = model(images.to(device))
        loss = loss_func(feature_map['feature'], labels.to(device))
        loss.backward()
        accu_loss += loss.detach()
        data_loader_shuffle.desc = "[train shuffle epoch {}] loss: {:.3f}, pos_sample: {:.3f}".format(epoch, accu_loss.item() / (step + 1), pos_sample / (step + 1))
        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)
        optimizer.step()
        optimizer.zero_grad()
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num


def train_one_epoch(model, optimizer, data_loader, device, epoch, label_smothing_eps=0.1):
    """Standard supervised training loop for one epoch. Returns (mean_loss, accuracy)."""
    model.train()
    if label_smothing_eps != 0:
        loss_func = LabelSmoothingCrossEntropy(eps=label_smothing_eps)
    else:
        loss_func = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    optimizer.zero_grad()
    sample_num = 0
    data_loader = tqdm(data_loader)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_func(pred, labels.to(device))
        loss.backward()
        accu_loss += loss.detach()
        data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch, accu_loss.item() / (step + 1), accu_num.item() / sample_num)
        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)
        optimizer.step()
        optimizer.zero_grad()
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num


def compare_feature_vectors(total_feature_list, total_label_list):
    """Group feature vectors by label and return the mean pairwise Euclidean
    distance within each class (one value per distinct label)."""
    total_featuremap_in_classes = []
    mean_vector_dis = []
    for i in set(total_label_list):
        feature_map_in_cls = []
        for j in [index for index, x in enumerate(total_label_list) if x == i]:
            feature_map_in_cls.append(total_feature_list[j])
        total_featuremap_in_classes.append(feature_map_in_cls)
    for item in total_featuremap_in_classes:
        dis = []
        # All unordered pairs within the class.
        for index1 in range(len(item) - 1):
            for index2 in range(index1 + 1, len(item)):
                dis.append(np.linalg.norm(np.array(item[index1]) - np.array(item[index2])))
        mean_vector_dis.append(np.mean(dis))
    return mean_vector_dis
@torch.no_grad()
def evaluate_feature(model, data_loader, device, epoch):
    """Collect the hooked penultimate-layer features for the whole loader and
    return the per-class mean pairwise distances via compare_feature_vectors."""
    model.eval()
    feature_map = {}

    def forward_hook(module, inp, outp):
        feature_map['feature'] = outp

    # Hook the feature layer; the index assumes a specific model layout — TODO confirm.
    extract_feature_layers = list(model.children())[-1][3]
    extract_feature_layers.register_forward_hook(forward_hook)
    total_feature_list = []
    total_label_list = []
    sample_num = 0
    data_loader = tqdm(data_loader)
    for step, data in enumerate(data_loader):
        images, labels = data
        # Forward pass only to trigger the hook; the output itself is discarded.
        model(images.to(device))
        feature = feature_map['feature'].cpu().numpy().tolist()
        total_feature_list += feature
        total_label_list += labels.cpu().numpy().tolist()
        data_loader.desc = "[valid epoch {}] step: {:.3f}".format(epoch, step)
    return compare_feature_vectors(total_feature_list, total_label_list)


@torch.no_grad()
def evaluate(model, data_loader, device, epoch):
    """Standard evaluation loop. Returns (mean_loss, accuracy)."""
    loss_function = torch.nn.CrossEntropyLoss()
    model.eval()
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    sample_num = 0
    data_loader = tqdm(data_loader)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_function(pred, labels.to(device))
        accu_loss += loss
        data_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch, accu_loss.item() / (step + 1), accu_num.item() / sample_num)
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num


def small_obj_dataset_reading(train_csv_file, val_csv_file):
    """Read train/val CSV files (header row skipped; columns: path, 1-based label)
    and return (train_paths, train_labels, val_paths, val_labels) with 0-based labels.

    Paths are joined onto the directory containing `train_csv_file`. Entries whose
    first column contains two whitespace-separated names are joined individually
    and re-concatenated with a space (presumably paired images — TODO confirm).
    """
    import csv
    train_csv_file_reader = csv.reader(open(train_csv_file, 'r'))
    val_csv_file_reader = csv.reader(open(val_csv_file, 'r'))
    train_image_path_list = []
    train_image_class_list = []
    val_image_path_list = []
    val_image_class_list = []
    train_every_class_count = {}
    root_path = os.path.split(train_csv_file)[0]
    for item in train_csv_file_reader:
        if train_csv_file_reader.line_num == 1:
            continue  # skip header row
        if len(item[0].split()) == 1:
            train_image_path_list.append(os.path.join(root_path, item[0]))
        else:
            train_image_path_list.append(os.path.join(root_path, item[0].split()[0]) + ' ' + os.path.join(root_path, item[0].split()[1]))
        train_image_class_list.append(int(item[1]) - 1)  # labels in the CSV are 1-based
    for item in val_csv_file_reader:
        if val_csv_file_reader.line_num == 1:
            continue  # skip header row
        val_image_path_list.append(os.path.join(root_path, item[0]))
        val_image_class_list.append(int(item[1]) - 1)
    from collections import Counter
    intotal_class = train_image_class_list + val_image_class_list
    train_every_class_count = Counter(intotal_class)
    print(train_every_class_count)
    return train_image_path_list, train_image_class_list, val_image_path_list, val_image_class_list


# def plot_figure(nums,name):
#     fig = plt.figure(figsize = (10,10))
#     ax1 = fig.add_subplot(1, 1, 1)
#     pl.plot(nums,'g-',label=u'Dense_Unet(block layer=5)')
#     plt.title(name)
#     fig.savefig(name + '.png')
#     plt.close(fig)


def plot_figure(nums, name):
    """Plot `nums` against their indices, annotate each point, and save to '<name>.png'."""
    x1, y1 = [], []
    for i, j in enumerate(nums):
        x1.append(i)
        y1.append(j)
    plt.plot(x1, y1, color='r', markerfacecolor='blue', marker='.')
    for a, b in zip(x1, y1):
        plt.text(a, b, (a, b), ha='center', va='bottom', fontsize=10)
    plt.title(name)
    plt.savefig(name + '.png')
    plt.close()
{ "alphanum_fraction": 0.6143327068, "author": null, "avg_line_length": 40, "converted": null, "ext": "py", "file": null, "hexsha": "95bf2cc99700a54292e148ea4070c366ae1bb889", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3132950f629fcaf9fb92dca9104c3f99c2d6a9b1", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "tengyunlai/SWIN_transformer_pytorch", "max_forks_repo_path": "data/utils.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "3132950f629fcaf9fb92dca9104c3f99c2d6a9b1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "tengyunlai/SWIN_transformer_pytorch", "max_issues_repo_path": "data/utils.py", "max_line_length": 229, "max_stars_count": null, "max_stars_repo_head_hexsha": "3132950f629fcaf9fb92dca9104c3f99c2d6a9b1", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "tengyunlai/SWIN_transformer_pytorch", "max_stars_repo_path": "data/utils.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5399, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 21280 }
# Reconstructed from whitespace-collapsed source: formatting/indentation restored.
# Logic is kept exactly as written.
import argparse
import asyncio
import functools
import io
import json
import re
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from threading import Thread

import janus
import numpy as np
import tesserocr
import websockets
from skimage.color import rgb2gray
from skimage import filters
from skimage import util
from scipy import ndimage as ndi
from PIL import Image

WEBSOCKET_HOST = 'localhost'
WEBSOCKET_PORT = 8779
TESSDATA = '/usr/share/tesseract-ocr/tessdata'


def _normalize_whitespace(string):
    """Collapse runs of the same whitespace character and strip the ends."""
    return re.sub(r'(\s)\1{1,}', r'\1', string).strip()


def invert_button_colors(img):
    """ Find the buttons, invert their colors """
    # Thanks, Andras
    options = util.invert(img)
    label, num_features = ndi.label(options)
    for feat in range(1, num_features + 1):
        inds = np.where(label == feat)
        # Drop connected components that touch the image border (not buttons).
        if (0 in inds[0] or options.shape[0] - 1 in inds[0] or
                0 in inds[1] or options.shape[1] - 1 in inds[1]):
            options[inds] = 0
    return options


def optimize(img):
    """ Convert to grayscale and apply the threshold """
    img = rgb2gray(img)
    return img >= filters.threshold_minimum(img)


def ocr(question, answers):
    """ Perform the OCR """
    start = time.perf_counter()
    question = Image.fromarray((question * 255).astype(np.uint8))
    answers = Image.fromarray((answers * 255).astype(np.uint8))
    # Run both OCR calls concurrently; psm 6 = single block, psm 4 = column of text.
    with ThreadPoolExecutor() as executor:
        a = executor.submit(tesserocr.image_to_text, question,
                            lang='rus+eng', path=TESSDATA, psm=6)
        b = executor.submit(tesserocr.image_to_text, answers,
                            lang='rus+eng', path=TESSDATA, psm=4)
        question, answers = a.result(), b.result()

    question = _normalize_whitespace(question.lower())
    # The first line is noise
    try:
        _, question = question.split('\n', 1)
    except ValueError:
        pass
    # Strip the Russian negation word and normalize quotes/newlines.
    question = re.sub(r'\bне\b', '', question, flags=re.I)
    question = question.translate(str.maketrans('«»\n', '"" '))
    answers = _normalize_whitespace(answers.lower())
    answers = answers.split('\n')
    print('OCR completed in', time.perf_counter() - start)
    print(f'Clean question: {question!r}')
    print('Answers:', answers)
    return question, answers


def frame_processor(queue, done):
    """Consume frames from `queue`, OCR each fully-loaded question once, and
    pass the (question, answers) result to `done`."""
    prev_loaded = None
    while True:
        frame = queue.get()
        frame = np.asarray(frame)
        height, width, _ = frame.shape
        # Once the bottom part of the third button is white, we know
        # the answers (and the question) have finished loading
        if np.any(frame[int(0.54 * height), width // 4:width // 4 * 3] != 255):
            continue
        # Excludes the viewer count and the countdown timer
        question = optimize(frame[int(0.11 * height):int(0.32 * height)])
        # Check similarity
        # Each question should be processed once
        if prev_loaded is None or np.sum(prev_loaded == question) / question.size <= 0.99:
            prev_loaded = question
            # Empty the queue
            for _ in range(queue.qsize()):
                try:
                    queue.get_nowait()
                except janus.SyncQueueEmpty:
                    break
            buttons = optimize(frame[int(0.32 * height):int(0.56 * height)])
            answers = invert_button_colors(buttons)
            result = ocr(question, answers)
            done(result)


async def ws_handler(queues, websocket, path):
    """ Handle WebSocket connections """
    result_queue = janus.Queue()
    queues.append(result_queue)
    try:
        while True:
            question, answers = await result_queue.async_q.get()
            # Generate search queries
            queries = [question]
            queries += [f'{question} {a}' for a in answers]
            asyncio.ensure_future(websocket.send(json.dumps(queries)))
    finally:
        queues.remove(result_queue)


def notify_all(queues, result):
    """ Send the result to all connected clients """
    for x in queues:
        x.sync_q.put_nowait(result)


def create_stream(queue):
    """ Start the stream, extract JPEG frames, send them to the queue """
    script = Path(__file__).with_name('stream.sh')
    stream = subprocess.Popen(['sh', str(script)], stdout=subprocess.PIPE)
    content = b''
    frame_count = 0
    last_frame = time.perf_counter()
    while True:
        chunk = stream.stdout.read(8_192)
        content += chunk
        # JPEG start-of-image / end-of-image markers.
        soi = content.find(b'\xFF\xD8')
        eoi = content.find(b'\xFF\xD9')
        if soi != -1 and eoi != -1:
            frame_count += 1
            end = time.perf_counter()
            print(f'[#{frame_count:>5}]', 'Since last frame:', end - last_frame)
            last_frame = end
            img = Image.open(io.BytesIO(content[soi:eoi+2]))
            queue.put(img)
            content = content[eoi+2:]


async def main():
    frame_queue = janus.Queue(maxsize=100)
    client_queues = []

    # Wait for frames in another thread
    on_done = functools.partial(notify_all, client_queues)
    Thread(target=frame_processor, args=(frame_queue.sync_q, on_done)).start()

    # Actually start the stream
    Thread(target=create_stream, args=(frame_queue.sync_q,)).start()

    # Start the WS server
    ws = functools.partial(ws_handler, client_queues)
    server = await websockets.serve(ws, WEBSOCKET_HOST, WEBSOCKET_PORT)

    # Keep it running
    await server.wait_closed()


if __name__ == '__main__':
    asyncio.run(main())
{ "alphanum_fraction": 0.6319582057, "author": null, "avg_line_length": 29.8440860215, "converted": null, "ext": "py", "file": null, "hexsha": "131d84a848ca0f6032fc44895d3af45614514888", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "84226c2906045a3bb88c315ea7dea40d3a77f881", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "vaultah/smart", "max_forks_repo_path": "smart.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "84226c2906045a3bb88c315ea7dea40d3a77f881", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "vaultah/smart", "max_issues_repo_path": "smart.py", "max_line_length": 90, "max_stars_count": null, "max_stars_repo_head_hexsha": "84226c2906045a3bb88c315ea7dea40d3a77f881", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "vaultah/smart", "max_stars_repo_path": "smart.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1316, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 5551 }
# Pipeline.py # Author: Marcus D. Bloice <https://github.com/mdbloice> # Licensed under the terms of the MIT Licence. """ The Pipeline module is the user facing API for the Augmentor package. It contains the :class:`~Augmentor.Pipeline.Pipeline` class which is used to create pipeline objects, which can be used to build an augmentation pipeline by adding operations to the pipeline object. For a good overview of how to use Augmentor, along with code samples and example images, can be seen in the :ref:`mainfeatures` section. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import * from .Operations import * from .ImageUtilities import scan_directory, scan, AugmentorImage import os import sys import random import uuid import warnings import numbers import numpy as np from tqdm import tqdm from PIL import Image class Pipeline(object): """ The Pipeline class handles the creation of augmentation pipelines and the generation of augmented data by applying operations to this pipeline. """ # Some class variables we use often _probability_error_text = "The probability argument must be between 0 and 1." _threshold_error_text = "The value of threshold must be between 0 and 255." _valid_formats = ["PNG", "BMP", "GIF", "JPEG"] _legal_filters = ["NEAREST", "BICUBIC", "ANTIALIAS", "BILINEAR"] def __init__(self, source_directory=None, output_directory="output", save_format="JPEG"): """ Create a new Pipeline object pointing to a directory containing your original image dataset. Create a new Pipeline object, using the :attr:`source_directory` parameter as a source directory where your original images are stored. This folder will be scanned, and any valid file files will be collected and used as the original dataset that should be augmented. The scan will find any image files with the extensions JPEG/JPG, PNG, and GIF (case insensitive). :param source_directory: A directory on your filesystem where your original images are stored. 
:param output_directory: Specifies where augmented images should be saved to the disk. Default is the directory **source** relative to the path where the original image set was specified. If it does not exist it will be created. :param save_format: The file format to use when saving newly created, augmented images. Default is JPEG. Legal options are BMP, PNG, and GIF. :return: A :class:`Pipeline` object. """ random.seed() # TODO: Allow a single image to be added when initialising. # Initialise some variables for the Pipeline object. self.image_counter = 0 self.augmentor_images = [] self.distinct_dimensions = set() self.distinct_formats = set() self.save_format = save_format self.operations = [] self.class_labels = [] # Now we populate some fields, which we may need to do again later if another # directory is added, so we place it all in a function of its own. if source_directory is not None: self._populate(source_directory=source_directory, output_directory=output_directory, ground_truth_directory=None, ground_truth_output_directory=output_directory) def _populate(self, source_directory, output_directory, ground_truth_directory, ground_truth_output_directory): """ Private method for populating member variables with AugmentorImage objects for each of the images found in the source directory specified by the user. It also populates a number of fields such as the :attr:`output_directory` member variable, used later when saving images to disk. This method is used by :func:`__init__`. :param source_directory: The directory to scan for images. :param output_directory: The directory to set for saving files. Defaults to a directory named output relative to :attr:`source_directory`. :param ground_truth_directory: A directory containing ground truth files for the associated images in the :attr:`source_directory` directory. :param ground_truth_output_directory: A path to a directory to store the output of the operations on the ground truth data set. 
:type source_directory: String :type output_directory: String :type ground_truth_directory: String :type ground_truth_output_directory: String :return: None """ # Check if the source directory for the original images to augment exists at all if not os.path.exists(source_directory): raise IOError("The source directory you specified does not exist.") # If a ground truth directory is being specified we will check here if the path exists at all. if ground_truth_directory: if not os.path.exists(ground_truth_directory): raise IOError("The ground truth source directory you specified does not exist.") # Get absolute path for output abs_output_directory = os.path.join(source_directory, output_directory) # Scan the directory that user supplied. self.augmentor_images, self.class_labels = scan(source_directory, abs_output_directory) # Make output directory/directories if len(self.class_labels) <= 1: # This may be 0 in the case of a folder generated if not os.path.exists(abs_output_directory): try: os.makedirs(abs_output_directory) except IOError: print("Insufficient rights to read or write output directory (%s)" % abs_output_directory) else: for class_label in self.class_labels: if not os.path.exists(os.path.join(abs_output_directory, str(class_label[0]))): try: os.makedirs(os.path.join(abs_output_directory, str(class_label[0]))) except IOError: print("Insufficient rights to read or write output directory (%s)" % abs_output_directory) # Check the images, read their dimensions, and remove them if they cannot be read # TODO: Do not throw an error here, just remove the image and continue. for augmentor_image in self.augmentor_images: try: with Image.open(augmentor_image.image_path) as opened_image: self.distinct_dimensions.add(opened_image.size) self.distinct_formats.add(opened_image.format) except IOError: print("There is a problem with image %s in your source directory. " "It is unreadable and will not be included when augmenting." 
% augmentor_image.image_path) self.augmentor_images.remove(augmentor_image) # Finally, we will print some informational messages. sys.stdout.write("Initialised with %s image(s) found.\n" % len(self.augmentor_images)) sys.stdout.write("Output directory set to %s." % abs_output_directory) #print("Initialised with %s image(s) found in selected directory." % len(self.augmentor_images)) #print("Output directory set to %s." % abs_output_directory) def _execute(self, augmentor_image, save_to_disk=True): """ Private method. Used to pass an image through the current pipeline, and return the augmented image. The returned image can then either be saved to disk or simply passed back to the user. Currently this is fixed to True, as Augmentor has only been implemented to save to disk at present. :param augmentor_image: The image to pass through the pipeline. :param save_to_disk: Whether to save the image to disk. Currently fixed to true. :type augmentor_image: :class:`ImageUtilities.AugmentorImage` :type save_to_disk: Boolean :return: The augmented image. """ self.image_counter += 1 # TODO: See if I can remove this... if augmentor_image.image_path is not None: image = Image.open(augmentor_image.image_path) else: image = augmentor_image.image_PIL for operation in self.operations: r = round(random.uniform(0, 1), 1) if r <= operation.probability: image = operation.perform_operation(image) if save_to_disk: file_name = str(uuid.uuid4()) + "." + self.save_format try: # A strange error is forcing me to do this at the moment, but will fix later properly # TODO: Fix this! if image.mode != "RGB": image = image.convert("RGB") image.save(os.path.join(augmentor_image.output_directory, file_name), self.save_format) except IOError: print("Error writing %s." % file_name) return image def sample(self, n): """ Generate :attr:`n` number of samples from the current pipeline. This function samples from the pipeline, using the original images defined during instantiation. 
All images generated by the pipeline are by default stored in an ``output`` directory, relative to the path defined during the pipeline's instantiation. :param n: The number of new samples to produce. :type n: Integer :return: None """ if len(self.augmentor_images) == 0: raise IndexError("There are no images in the pipeline. " "Add a directory using add_directory(), " "pointing it to a directory containing images.") if len(self.operations) == 0: raise IndexError("There are no operations associated with this pipeline.") sample_count = 1 progress_bar = tqdm(total=n, desc="Executing Pipeline", unit=' Samples', leave=False) while sample_count <= n: for augmentor_image in self.augmentor_images: if sample_count <= n: self._execute(augmentor_image) file_name_to_print = os.path.basename(augmentor_image.image_path) # This is just to shorten very long file names which obscure the progress bar. if len(file_name_to_print) >= 30: file_name_to_print = file_name_to_print[0:10] + "..." + \ file_name_to_print[-10: len(file_name_to_print)] progress_bar.set_description("Processing %s" % file_name_to_print) progress_bar.update(1) sample_count += 1 progress_bar.close() def apply_current_pipeline(self, image_path, save_to_disk=False): """ .. warning:: This function has been deprecated in favour of :func:`apply_from_path()`. Apply the current pipeline to a single image, returning the transformed image. By default, the transformed image is not saved to disk, and is returned to the user. This method can be used to pass a single image through the pipeline, but will not save the transformed image to disk by default. To save to disk, supply a :attr:`save_to_disk` argument set to True. :param image_path: The path to the image to pass through the current pipeline. :param save_to_disk: Whether to save the image to disk. Defaults to False. :type image_path: String :type save_to_disk: Boolean :return: The transformed image. 
""" warnings.warn("This function has been deprecated in favour of sample_with_path() and sample_with_array().", DeprecationWarning) return self.apply_from_path(image_path=image_path, save_to_disk=save_to_disk) def sample_with_path(self, image_path, save_to_disk=False): raise NotImplementedError("This method is currently not implemented.") def sample_with_array(self, image_array, save_to_disk=False): a = AugmentorImage(image_path=None, output_directory=None) a.image_PIL = Image.fromarray(image_array) return self._execute(a, save_to_disk) def sample_with_image(self, image, save_to_disk=False): raise NotImplementedError("This method is currently not implemented.") def categorical_labels(self): class_labels_np = np.array([x.class_label_int for x in self.augmentor_images]) one_hot_encoding = np.zeros((class_labels_np.size, class_labels_np.max() + 1)) one_hot_encoding[np.arange(class_labels_np.size), class_labels_np] = 1 one_hot_encoding = one_hot_encoding.astype(np.uint) return one_hot_encoding def image_generator(self): while True: im_index = random.randint(0, len(self.augmentor_images)) yield self._execute(self.augmentor_images[im_index], save_to_disk=False), \ self.augmentor_images[im_index].class_label_int def image_generator_with_replacement(self): while True: batch_indices = list(range(0, len(self.augmentor_images))) for i in range(0, len(self.augmentor_images)): im_index = random.choice(batch_indices) batch_indices.remove(im_index) yield self._execute(self.augmentor_images[im_index], save_to_disk=False), self.augmentor_images[im_index].class_label_int def keras_image_generator(self, image_format='channels_first'): """ Returns an image generator that will sample from the current pipeline indefinitely, as long as it is called. .. warning:: This function returns images from the current pipleline **without replacement**. .. seealso:: See :func:`keras_image_generator_with_replacement()` for a generator that samples with replacement. 
You must configure the generator to provide data in the same format that Keras is configured for. You can use the functions :func:`keras.backend.image_data_format()` and :func:`keras.backend.set_image_data_format()` to get and set Keras' image format at runtime. .. code-block:: python >>> from keras import backend as K >>> K.image_data_format() 'channels_first' >>> K.set_image_data_format('channels_last') >>> K.image_data_format() 'channels_last' By default, Augmentor uses ``'channels_first'``. :param image_format: Either ``'channels_first'`` (default) or ``'channels_last'``. :type image_format: String :return: An image generator. """ # TODO: Always return at least the original dataset as well as the augmented dataset while True: batch_indices = list(range(0, len(self.augmentor_images))) for i in range(0, len(self.augmentor_images)): im_index = random.choice(batch_indices) batch_indices.remove(im_index) im_PIL = self._execute(self.augmentor_images[im_index], save_to_disk=False) im_array = np.asarray(im_PIL) if image_format == 'channels_first': num_of_channels = len(im_PIL.getbands()) im_array = im_array.reshape(1, num_of_channels, im_PIL.width, im_PIL.height) yield im_array, self.augmentor_images[im_index].categorical_label elif image_format == 'channels_last': num_of_channels = len(im_PIL.getbands()) im_array = im_array.reshape(1, im_PIL.width, im_PIL.height, num_of_channels) yield im_array, self.augmentor_images[im_index].categorical_label def keras_image_generator_with_replacement(self, image_format='channels_first'): raise NotImplementedError("This method is currently not implemented. Use keras_image_generator().") #while True: # im_index = random.randint(0, len(self.augmentor_images)) # im_PIL = self._execute(self.augmentor_images[im_index], save_to_disk=False) # im_array = np.asarray(im_PIL) # yield im_array, self.augmentor_images[im_index].class_label_int def add_operation(self, operation): """ Add an operation directly to the pipeline. 
Can be used to add custom operations to a pipeline. To add custom operations to a pipeline, subclass from the Operation abstract base class, overload its methods, and insert the new object into the pipeline using this method. .. seealso:: The :class:`.Operation` class. :param operation: An object of the operation you wish to add to the pipeline. Will accept custom operations written at run-time. :type operation: Operation :return: None """ if isinstance(operation, Operation): self.operations.append(operation) else: raise TypeError("Must be of type Operation to be added to the pipeline.") def remove_operation(self, operation_index=-1): """ Remove the operation specified by :attr:`operation_index`, if supplied, otherwise it will remove the latest operation added to the pipeline. .. seealso:: Use the :func:`status` function to find an operation's index. :param operation_index: The index of the operation to remove. :type operation_index: Integer :return: The removed operation. You can reinsert this at end of the pipeline using :func:`add_operation` if required. """ # Python's own List exceptions can handle erroneous user input. self.operations.pop(operation_index) def add_further_directory(self, new_source_directory, new_output_directory="output"): """ Add a further directory containing images you wish to scan for augmentation. :param new_source_directory: The directory to scan for images. :param new_output_directory: The directory to use for outputted, augmented images. :type new_source_directory: String :type new_output_directory: String :return: None """ if not os.path.exists(new_source_directory): raise IOError("The path does not appear to exist.") self._populate(source_directory=new_source_directory, output_directory=new_output_directory, ground_truth_directory=None, ground_truth_output_directory=new_output_directory) def status(self): """ Prints the status of the pipeline to the console. 
If you want to remove an operation, use the index shown and the :func:`remove_operation` method. .. seealso:: The :func:`remove_operation` function. .. seealso:: The :func:`add_operation` function. The status includes the number of operations currently attached to the pipeline, each operation's parameters, the number of images in the pipeline, and a summary of the images' properties, such as their dimensions and formats. :return: None """ # TODO: Return this as a dictionary of some kind and print from the dict if in console print("Operations: %s" % len(self.operations)) if len(self.operations) != 0: operation_index = 0 for operation in self.operations: print("\t%s: %s (" % (operation_index, operation), end="") for operation_attribute, operation_value in operation.__dict__.items(): print("%s=%s " % (operation_attribute, operation_value), end="") print(")") operation_index += 1 print("Images: %s" % len(self.augmentor_images)) label_pairs = sorted(set([x.label_pair for x in self.augmentor_images])) print("Classes: %s" % len(label_pairs)) for label_pair in label_pairs: print ("\tClass index: %s Class label: %s " % (label_pair[0], label_pair[1])) if len(self.augmentor_images) != 0: print("Dimensions: %s" % len(self.distinct_dimensions)) for distinct_dimension in self.distinct_dimensions: print("\tWidth: %s Height: %s" % (distinct_dimension[0], distinct_dimension[1])) print("Formats: %s" % len(self.distinct_formats)) for distinct_format in self.distinct_formats: print("\t %s" % distinct_format) print("\nYou can remove operations using the appropriate index and the remove_operation(index) function.") @staticmethod def set_seed(seed): """ Set the seed of Python's internal random number generator. :param seed: The seed to use. Strings or other objects will be hashed. 
:type seed: Integer :return: None """ random.seed(seed) # TODO: Add this feature ASAP def subtract_mean(self, probability=1): # For implementation example, see bottom of: # https://patrykchrabaszcz.github.io/Imagenet32/ self.add_operation(Mean(probability=probability)) def rotate90(self, probability): """ Rotate an image by 90 degrees. The operation will rotate an image by 90 degrees, and will be performed with a probability of that specified by the :attr:`probability` parameter. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. :type probability: Float :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) else: self.add_operation(Rotate(probability=probability, rotation=90)) def rotate180(self, probability): """ Rotate an image by 180 degrees. The operation will rotate an image by 180 degrees, and will be performed with a probability of that specified by the :attr:`probability` parameter. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. :type probability: Float :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) else: self.add_operation(Rotate(probability=probability, rotation=180)) def rotate270(self, probability): """ Rotate an image by 270 degrees. The operation will rotate an image by 270 degrees, and will be performed with a probability of that specified by the :attr:`probability` parameter. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. :type probability: Float :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) else: self.add_operation(Rotate(probability=probability, rotation=270)) def rotate_random_90(self, probability): """ Rotate an image by either 90, 180, or 270 degrees, selected randomly. 
        This function will rotate by either 90, 180, or 270 degrees. This is
        useful to avoid scenarios where images may be rotated back to their
        original positions (such as a :func:`rotate90` and a :func:`rotate270`
        being performed directly afterwards. The random rotation is chosen
        uniformly from 90, 180, or 270 degrees.

        The probability controls the chance of the operation being performed
        at all, and does not affect the rotation degree.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :type probability: Float
        :return:
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            # rotation=-1 signals the Rotate operation to choose 90/180/270 itself.
            self.add_operation(Rotate(probability=probability, rotation=-1))

    def rotate_alt(self, probability, rotation):
        # NOTE(review): unused stub — empty body, no callers visible in this file.
        pass

    def rotate(self, probability, max_left_rotation, max_right_rotation):
        """
        Rotate an image by an arbitrary amount.

        The operation will rotate an image by an random amount, within a
        range specified. The parameters :attr:`max_left_rotation` and
        :attr:`max_right_rotation` allow you to control this range. If you
        wish to rotate the images by an exact number of degrees, set both
        :attr:`max_left_rotation` and :attr:`max_right_rotation` to the same
        value.

        .. note:: This function will rotate **in place**, and crop the
         largest possible rectangle from the rotated image.

        In practice, angles larger than 25 degrees result in images that do
        not render correctly, therefore there is a limit of 25 degrees for
        this function.

        If this function returns images that are not rendered correctly,
        then you must reduce the :attr:`max_left_rotation` and
        :attr:`max_right_rotation` arguments!

        :param max_left_rotation: The maximum number of degrees the image can
         be rotated to the left.
        :param max_right_rotation: The maximum number of degrees the image
         can be rotated to the right.
        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :type max_left_rotation: Integer
        :type max_right_rotation: Integer
        :type probability: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        if not 0 <= max_left_rotation <= 25:
            raise ValueError("The max_left_rotation argument must be between 0 and 25.")
        if not 0 <= max_right_rotation <= 25:
            raise ValueError("The max_right_rotation argument must be between 0 and 25.")
        else:
            # NOTE(review): ceil is assumed to come from the Operations
            # star-import (math.ceil) — confirm.
            self.add_operation(RotateRange(probability=probability, max_left_rotation=ceil(max_left_rotation),
                                           max_right_rotation=ceil(max_right_rotation)))

    def flip_top_bottom(self, probability):
        """
        Flip (mirror) the image along its vertical axis, i.e. from top to
        bottom.

        .. seealso:: The :func:`flip_left_right` function.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :type probability: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            self.add_operation(Flip(probability=probability, top_bottom_left_right="TOP_BOTTOM"))

    def flip_left_right(self, probability):
        """
        Flip (mirror) the image along its horizontal axis, i.e. from left to
        right.

        .. seealso:: The :func:`flip_top_bottom` function.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :type probability: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            self.add_operation(Flip(probability=probability, top_bottom_left_right="LEFT_RIGHT"))

    def flip_random(self, probability):
        """
        Flip (mirror) the image along **either** its horizontal or vertical
        axis.

        This function mirrors the image along either the horizontal axis or
        the vertical access. The axis is selected randomly.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :type probability: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            self.add_operation(Flip(probability=probability, top_bottom_left_right="RANDOM"))

    def random_distortion(self, probability, grid_width, grid_height, magnitude):
        """
        Performs a random, elastic distortion on an image.

        This function performs a randomised, elastic distortion controlled by
        the parameters specified. The grid width and height controls how fine
        the distortions are. Smaller sizes will result in larger, more
        pronounced, and less granular distortions. Larger numbers will result
        in finer, more granular distortions. The magnitude of the distortions
        can be controlled using magnitude. This can be random or fixed.

        *Good* values for parameters are between 2 and 10 for the grid width
        and height, with a magnitude of between 1 and 10. Using values
        outside of these approximate ranges may result in unpredictable
        behaviour.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :param grid_width: The number of rectangles in the grid's horizontal
         axis.
        :param grid_height: The number of rectangles in the grid's vertical
         axis.
        :param magnitude: The magnitude of the distortions.
        :type probability: Float
        :type grid_width: Integer
        :type grid_height: Integer
        :type magnitude: Integer
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            self.add_operation(Distort(probability=probability, grid_width=grid_width,
                                       grid_height=grid_height, magnitude=magnitude))

    def gaussian_distortion(self, probability, grid_width, grid_height, magnitude, corner, method, mex=0.5, mey=0.5, sdx=0.05, sdy=0.05):
        """
        Performs a random, elastic gaussian distortion on an image.

        This function performs a randomised, elastic gaussian distortion
        controlled by the parameters specified. The grid width and height
        controls how fine the distortions are.
        Smaller sizes will result in larger, more pronounced, and less
        granular distortions. Larger numbers will result in finer, more
        granular distortions. The magnitude of the distortions can be
        controlled using magnitude. This can be random or fixed.

        *Good* values for parameters are between 2 and 10 for the grid width
        and height, with a magnitude of between 1 and 10. Using values
        outside of these approximate ranges may result in unpredictable
        behaviour.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :param grid_width: The number of rectangles in the grid's horizontal
         axis.
        :param grid_height: The number of rectangles in the grid's vertical
         axis.
        :param magnitude: The magnitude of the distortions.
        :param corner: which corner of picture to distort. Possible values:
         "bell"(circular surface applied), "ul"(upper left), "ur"(upper
         right), "dl"(down left), "dr"(down right).
        :param method: possible values: "in"(apply max magnitude to the
         chosen corner), "out"(inverse of method in).
        :param mex: used to generate 3d surface for similar distortions.
         Surface is based on normal distribution.
        :param mey: used to generate 3d surface for similar distortions.
         Surface is based on normal distribution.
        :param sdx: used to generate 3d surface for similar distortions.
         Surface is based on normal distribution.
        :param sdy: used to generate 3d surface for similar distortions.
         Surface is based on normal distribution.
        :type probability: Float
        :type grid_width: Integer
        :type grid_height: Integer
        :type magnitude: Integer
        :type corner: String
        :type method: String
        :type mex: Float
        :type mey: Float
        :type sdx: Float
        :type sdy: Float
        :return: None

        For values :attr:`mex`, :attr:`mey`, :attr:`sdx`, and :attr:`sdy` the
        surface is based on the normal distribution:

        ..
        math:: e^{- \Big( \\frac{(x-\\text{mex})^2}{\\text{sdx}} + \\frac{(y-\\text{mey})^2}{\\text{sdy}} \Big) }
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            self.add_operation(GaussianDistortion(probability=probability, grid_width=grid_width,
                                                  grid_height=grid_height,
                                                  magnitude=magnitude, corner=corner,
                                                  method=method, mex=mex,
                                                  mey=mey, sdx=sdx, sdy=sdy))

    def zoom(self, probability, min_factor, max_factor):
        """
        Zoom in to an image, while **maintaining its size**. The amount by
        which the image is zoomed is a randomly chosen value between
        :attr:`min_factor` and :attr:`max_factor`.

        Typical values may be ``min_factor=1.1`` and ``max_factor=1.5``.

        To zoom by a constant amount, set :attr:`min_factor` and
        :attr:`max_factor` to the same value.

        .. seealso:: See :func:`zoom_random` for zooming into random areas
         of the image.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :param min_factor: The minimum factor by which to zoom the image.
        :param max_factor: The maximum factor by which to zoom the image.
        :type probability: Float
        :type min_factor: Float
        :type max_factor: Float
        :return: None
        """
        # NOTE(review): max_factor is not validated here (e.g. against
        # max_factor >= min_factor) — confirm whether the Zoom operation
        # handles that case.
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif min_factor < 1:
            raise ValueError("The min_factor argument must be greater than 1.")
        else:
            self.add_operation(Zoom(probability=probability, min_factor=min_factor, max_factor=max_factor))

    def zoom_random(self, probability, percentage_area, randomise_percentage_area=False):
        """
        Zooms into an image at a random location within the image.

        You can randomise the zoom level by setting the
        :attr:`randomise_percentage_area` argument to true.

        .. seealso:: See :func:`zoom` for zooming into the centre of images.

        :param probability: The probability that the function will execute
         when the image is passed through the pipeline.
        :param percentage_area: The area, as a percentage of the current
         image's area, to crop.
        :param randomise_percentage_area: If True, will use
         :attr:`percentage_area` as an upper bound and randomise the crop
         from between 0 and :attr:`percentage_area`.
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif not 0.1 <= percentage_area < 1:
            raise ValueError("The percentage_area argument must be greater than 0.1 and less than 1.")
        elif not isinstance(randomise_percentage_area, bool):
            raise ValueError("The randomise_percentage_area argument must be True or False.")
        else:
            self.add_operation(ZoomRandom(probability=probability, percentage_area=percentage_area,
                                          randomise=randomise_percentage_area))

    def crop_by_size(self, probability, width, height, centre=True):
        """
        Crop an image according to a set of dimensions.

        Crop each image according to :attr:`width` and :attr:`height`, by
        default at the centre of each image, otherwise at a random location
        within the image.

        .. seealso:: See :func:`crop_random` to crop a random, non-centred
         area of the image.

        If the crop area exceeds the size of the image, this function will
        crop the entire area of the image.

        :param probability: The probability that the function will execute
         when the image is passed through the pipeline.
        :param width: The width of the desired crop.
        :param height: The height of the desired crop.
        :param centre: If **True**, crops from the centre of the image,
         otherwise crops at a random location within the image, maintaining
         the dimensions specified.
        :type probability: Float
        :type width: Integer
        :type height: Integer
        :type centre: Boolean
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif width <= 1:
            raise ValueError("The width argument must be greater than 1.")
        elif height <= 1:
            raise ValueError("The height argument must be greater than 1.")
        elif not isinstance(centre, bool):
            raise ValueError("The centre argument must be True or False.")
        else:
            self.add_operation(Crop(probability=probability, width=width, height=height, centre=centre))

    def crop_centre(self, probability, percentage_area, randomise_percentage_area=False):
        """
        Crops the centre of an image as a percentage of the image's area.

        :param probability: The probability that the function will execute
         when the image is passed through the pipeline.
        :param percentage_area: The area, as a percentage of the current
         image's area, to crop.
        :param randomise_percentage_area: If True, will use
         :attr:`percentage_area` as an upper bound and randomise the crop
         from between 0 and :attr:`percentage_area`.
        :type probability: Float
        :type percentage_area: Float
        :type randomise_percentage_area: Boolean
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif not 0.1 <= percentage_area < 1:
            raise ValueError("The percentage_area argument must be greater than 0.1 and less than 1.")
        elif not isinstance(randomise_percentage_area, bool):
            raise ValueError("The randomise_percentage_area argument must be True or False.")
        else:
            self.add_operation(CropPercentage(probability=probability, percentage_area=percentage_area, centre=True,
                                              randomise_percentage_area=randomise_percentage_area))

    def crop_random(self, probability, percentage_area, randomise_percentage_area=False):
        """
        Crop a random area of an image, based on the percentage area to be
        returned.

        This function crops a random area from an image, based on the area
        you specify using :attr:`percentage_area`.
        :param probability: The probability that the function will execute
         when the image is passed through the pipeline.
        :param percentage_area: The area, as a percentage of the current
         image's area, to crop.
        :param randomise_percentage_area: If True, will use
         :attr:`percentage_area` as an upper bound and randomise the crop
         from between 0 and :attr:`percentage_area`.
        :type probability: Float
        :type percentage_area: Float
        :type randomise_percentage_area: Boolean
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif not 0.1 <= percentage_area < 1:
            raise ValueError("The percentage_area argument must be greater than 0.1 and less than 1.")
        elif not isinstance(randomise_percentage_area, bool):
            raise ValueError("The randomise_percentage_area argument must be True or False.")
        else:
            self.add_operation(CropPercentage(probability=probability, percentage_area=percentage_area, centre=False,
                                              randomise_percentage_area=randomise_percentage_area))

    def histogram_equalisation(self, probability=1.0):
        """
        Apply histogram equalisation to the image.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed. For histogram,
         equalisation it is recommended that the probability be set to 1.
        :type probability: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        else:
            self.add_operation(HistogramEqualisation(probability=probability))

    def scale(self, probability, scale_factor):
        """
        Scale (enlarge) an image, while maintaining its aspect ratio. This
        returns an image with larger dimensions than the original image.

        Use :func:`resize` to resize an image to absolute pixel values.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :param scale_factor: The factor to scale by, which must be greater
         than 1.0.
        :type probability: Float
        :type scale_factor: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif scale_factor <= 1.0:
            raise ValueError("The scale_factor argument must be greater than 1.")
        else:
            self.add_operation(Scale(probability=probability, scale_factor=scale_factor))

    def resize(self, probability, width, height, resample_filter="BICUBIC"):
        """
        Resize an image according to a set of dimensions specified by the
        user in pixels.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed. For resizing,
         it is recommended that the probability be set to 1.
        :param width: The new width that the image should be resized to.
        :param height: The new height that the image should be resized to.
        :param resample_filter: The resampling filter to use. Must be one of
         BICUBIC, BILINEAR, ANTIALIAS, or NEAREST.
        :type probability: Float
        :type width: Integer
        :type height: Integer
        :type resample_filter: String
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif not width > 1:
            raise ValueError("Width must be greater than 1.")
        elif not height > 1:
            raise ValueError("Height must be greater than 1.")
        elif resample_filter not in Pipeline._legal_filters:
            # NOTE(review): the error message says "save_filter" but the
            # argument is named resample_filter — confirm before changing the
            # user-facing string.
            raise ValueError("The save_filter argument must be one of %s." % Pipeline._legal_filters)
        else:
            self.add_operation(Resize(probability=probability, width=width, height=height,
                                      resample_filter=resample_filter))

    def skew_left_right(self, probability, magnitude=1):
        """
        Skew an image by tilting it left or right by a random amount. The
        magnitude of this skew can be set to a maximum using the magnitude
        parameter. This can be either a scalar representing the maximum
        tilt, or vector representing a range.

        To see examples of the various skews, see :ref:`perspectiveskewing`.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :param magnitude: The maximum tilt, which must be value between 0.1
         and 1.0, where 1 represents a tilt of 45 degrees.
        :type probability: Float
        :type magnitude: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif not 0 < magnitude <= 1:
            raise ValueError("The magnitude argument must be greater than 0 and less than or equal to 1.")
        else:
            self.add_operation(Skew(probability=probability, skew_type="TILT_LEFT_RIGHT", magnitude=magnitude))

    def skew_top_bottom(self, probability, magnitude=1):
        """
        Skew an image by tilting it forwards or backwards by a random
        amount. The magnitude of this skew can be set to a maximum using the
        magnitude parameter. This can be either a scalar representing the
        maximum tilt, or vector representing a range.

        To see examples of the various skews, see :ref:`perspectiveskewing`.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
        :param magnitude: The maximum tilt, which must be value between 0.1
         and 1.0, where 1 represents a tilt of 45 degrees.
        :type probability: Float
        :type magnitude: Float
        :return: None
        """
        if not 0 < probability <= 1:
            raise ValueError(Pipeline._probability_error_text)
        elif not 0 < magnitude <= 1:
            raise ValueError("The magnitude argument must be greater than 0 and less than or equal to 1.")
        else:
            self.add_operation(Skew(probability=probability, skew_type="TILT_TOP_BOTTOM", magnitude=magnitude))

    def skew_tilt(self, probability, magnitude=1):
        """
        Skew an image by tilting in a random direction, either forwards,
        backwards, left, or right, by a random amount. The magnitude of this
        skew can be set to a maximum using the magnitude parameter. This can
        be either a scalar representing the maximum tilt, or vector
        representing a range.

        To see examples of the various skews, see :ref:`perspectiveskewing`.

        :param probability: A value between 0 and 1 representing the
         probability that the operation should be performed.
:param magnitude: The maximum tilt, which must be value between 0.1 and 1.0, where 1 represents a tilt of 45 degrees. :type probability: Float :type magnitude: Float :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) elif not 0 < magnitude <= 1: raise ValueError("The magnitude argument must be greater than 0 and less than or equal to 1.") else: self.add_operation(Skew(probability=probability, skew_type="TILT", magnitude=magnitude)) def skew_corner(self, probability, magnitude=1): """ Skew an image towards one corner, randomly by a random magnitude. To see examples of the various skews, see :ref:`perspectiveskewing`. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. :param magnitude: The maximum skew, which must be value between 0.1 and 1.0. :return: """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) elif not 0 < magnitude <= 1: raise ValueError("The magnitude argument must be greater than 0 and less than or equal to 1.") else: self.add_operation(Skew(probability=probability, skew_type="CORNER", magnitude=magnitude)) def skew(self, probability, magnitude=1): """ Skew an image in a random direction, either left to right, top to bottom, or one of 8 corner directions. To see examples of all the skew types, see :ref:`perspectiveskewing`. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. :param magnitude: The maximum skew, which must be value between 0.1 and 1.0. 
:type probability: Float :type magnitude: Float :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) elif not 0 < magnitude <= 1: raise ValueError("The magnitude argument must be greater than 0 and less than or equal to 1.") else: self.add_operation(Skew(probability=probability, skew_type="RANDOM", magnitude=magnitude)) def shear(self, probability, max_shear_left, max_shear_right): """ Shear the image by a specified number of degrees. In practice, shear angles of more than 25 degrees can cause unpredictable behaviour. If you are observing images that are incorrectly rendered (e.g. they do not contain any information) then reduce the shear angles. :param probability: The probability that the operation is performed. :param max_shear_left: The max number of degrees to shear to the left. Cannot be larger than 25 degrees. :param max_shear_right: The max number of degrees to shear to the right. Cannot be larger than 25 degrees. :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) elif not 0 < max_shear_left <= 25: raise ValueError("The max_shear_left argument must be between 0 and 25.") elif not 0 < max_shear_right <= 25: raise ValueError("The max_shear_right argument must be between 0 and 25.") else: self.add_operation(Shear(probability=probability, max_shear_left=max_shear_left, max_shear_right=max_shear_right)) def greyscale(self, probability): """ Convert images to greyscale. For this operation, setting the :attr:`probability` to 1.0 is recommended. .. seealso:: The :func:`black_and_white` function. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. For resizing, it is recommended that the probability be set to 1. 
:type probability: Float :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) else: self.add_operation(Greyscale(probability=probability)) def black_and_white(self, probability, threshold=128): """ Convert images to black and white. In other words convert the image to use a 1-bit, binary palette. The threshold defaults to 128, but can be controlled using the :attr:`threshold` parameter. .. seealso:: The :func:`greyscale` function. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. For resizing, it is recommended that the probability be set to 1. :param threshold: A value between 0 and 255 which controls the threshold point at which each pixel is converted to either black or white. Any values above this threshold are converted to white, and any values below this threshold are converted to black. :type probability: Float :type threshold: Integer :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) elif not 0 <= threshold <= 255: raise ValueError("The threshold must be between 0 and 255.") else: self.add_operation(BlackAndWhite(probability=probability, threshold=threshold)) def invert(self, probability): """ Invert an image. For this operation, setting the :attr:`probability` to 1.0 is recommended. .. warning:: This function will cause errors if used on binary, 1-bit palette images (e.g. black and white). :param probability: A value between 0 and 1 representing the probability that the operation should be performed. For resizing, it is recommended that the probability be set to 1. :return: None """ if not 0 < probability <= 1: raise ValueError(Pipeline._probability_error_text) else: self.add_operation(Invert(probability=probability))
{ "alphanum_fraction": 0.6418237941, "author": null, "avg_line_length": 44.9273182957, "converted": null, "ext": "py", "file": null, "hexsha": "76ff447ed1660b7fdca3e6b2265273554a44afd4", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-07-11T08:34:44.000Z", "max_forks_repo_forks_event_min_datetime": "2019-07-11T08:34:44.000Z", "max_forks_repo_head_hexsha": "c83d6620eaf522d636e5a55c3cce4ca6df5e1f14", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "xy0806/Augmentor", "max_forks_repo_path": "Augmentor/Pipeline.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "c83d6620eaf522d636e5a55c3cce4ca6df5e1f14", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "xy0806/Augmentor", "max_issues_repo_path": "Augmentor/Pipeline.py", "max_line_length": 137, "max_stars_count": null, "max_stars_repo_head_hexsha": "c83d6620eaf522d636e5a55c3cce4ca6df5e1f14", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "xy0806/Augmentor", "max_stars_repo_path": "Augmentor/Pipeline.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 11137, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 53778 }
import numpy as np import cv2 import matplotlib.pyplot as plt from scipy.fftpack import fft2, ifft2, fftshift, ifftshift from scipy.stats import multivariate_normal from scipy.ndimage import rotate from .image_io import crop_patch from .utils import pre_process, rotateImage, plot ######################################################################################################################## # RGB Mosse ######################################################################################################################## class MultiMosseTracker(): def __init__(self, learning_rate = 0.125, epsilon=1e-3, search_size = 1.0, sigma = 2.0, save_img=False, name='rgb', save_frame = 10): self.epsilon = epsilon self.learning_rate = learning_rate self.sigma = sigma/search_size self.search_size = search_size self.save_img = save_img self.name = name self.save_frame = save_frame def get_patch(self, features): region = self.search_region return [crop_patch(c, region) for c in features] def preprocess_data(self, features): return np.asarray([pre_process(c) for c in features]) def start(self, image, region): self.frame = 0 features = [image[...,i] for i in range(image.shape[-1])] assert len(features) == 3, print(len(features)) # Image is the first frame # Region is the bounding box around target in first frame self.region = region self.region_shape = (region.height, region.width) self.region_center = (region.height // 2, region.width // 2) self.search_region = region.rescale(self.search_size,round_coordinates=True) self.search_region_shape = (self.search_region.height, self.search_region.width) self.search_region_center = (self.search_region.height // 2, self.search_region.width // 2) # Extract patches from image f = self.get_patch(features) f = self.preprocess_data(f) F = fft2(f) # Create desired response Sigma = np.eye(2) * self.sigma ** 2 mu = [self.search_region_center[0], self.search_region_center[1]] x, y = np.mgrid[0:self.search_region.height:1, 
0:self.search_region.width:1] pos = np.dstack((x, y)) r = multivariate_normal(mu, Sigma) g = r.pdf(pos) self.G = np.expand_dims(fft2(g), axis=0) # using same desired response for all channels, P is organized (channels, height, width) A = self.G * np.conj(F) B = F * np.conj(F) image_center = (self.region.xpos + self.region_center[1], self.region.ypos + self.region_center[0]) for angle in np.arange(-20,20,5): img_tmp = rotateImage(image, angle, image_center) # Rotate features = [img_tmp[...,i] for i in range(img_tmp.shape[-1])] f = self.get_patch(features) f = self.preprocess_data(f) F = fft2(f) A += self.G * np.conj(F) B += F * np.conj(F) self.A = A self.B = B self.H_conj = self.A / (self.B + self.epsilon) if self.save_img and self.frame % self.save_frame == 0: plot(image, g, self.search_region, "{0}_{1}".format(self.name, self.frame)) def detect(self, image): self.frame += 1 features = [image[...,i] for i in range(image.shape[-1])] assert len(features) == 3, print(len(features)) f = self.get_patch(features) f = self.preprocess_data(f) F = fft2(f) R = F * self.H_conj responses = ifft2(R) response = responses.sum(axis=0) # .real r, c = np.unravel_index(np.argmax(response), response.shape) if self.save_img and self.frame % self.save_frame == 0: plot(image, response, self.search_region, "{0}_{1}".format(self.name, self.frame)) # Keep for visualisation self.last_response = response r_offset = r - self.region_center[0] c_offset = c - self.region_center[1] self.region.xpos += c_offset self.region.ypos += r_offset self.search_region.xpos += c_offset self.search_region.ypos += r_offset return self.region def update(self, image): features = [image[...,i] for i in range(image.shape[-1])] assert len(features) == 3, print(len(features)) f = self.get_patch(features) f = self.preprocess_data(f) F = fft2(f) self.A = self.learning_rate * self.G * np.conj(F) + (1-self.learning_rate) * self.A self.B = self.learning_rate * F * np.conj(F) + (1-self.learning_rate) * self.B self.H_conj = 
self.A / (self.B + self.epsilon)
{ "alphanum_fraction": 0.5663734915, "author": null, "avg_line_length": 39.4274193548, "converted": null, "ext": "py", "file": null, "hexsha": "4c3479b2d76a8a8df041a730ca2f64e730bb73b9", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "68263ec3f67e91e438fdde837dce4d7f038635c8", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ngunnar/VOT", "max_forks_repo_path": "cvl/rgb_mosse.py", "max_issues_count": 3, "max_issues_repo_head_hexsha": "68263ec3f67e91e438fdde837dce4d7f038635c8", "max_issues_repo_issues_event_max_datetime": "2020-12-17T23:05:20.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-17T07:45:03.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ngunnar/VOT", "max_issues_repo_path": "cvl/rgb_mosse.py", "max_line_length": 138, "max_stars_count": null, "max_stars_repo_head_hexsha": "68263ec3f67e91e438fdde837dce4d7f038635c8", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ngunnar/VOT", "max_stars_repo_path": "cvl/rgb_mosse.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1145, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 4889 }
// example/stopwatch_example.cpp ---------------------------------------------------// // Copyright Beman Dawes 2006, 2008 // Copyright 2009-2011 Vicente J. Botet Escriba // Distributed under the Boost Software License, Version 1.0. // See http://www.boost.org/LICENSE_1_0.txt // See http://www.boost.org/libs/chrono/stopwatches for documentation. //#include <iostream> #include <boost/chrono/stopwatches/strict_stopwatch.hpp> #include <boost/chrono/chrono_io.hpp> #include <boost/chrono/process_cpu_clocks.hpp> #include <cmath> using namespace boost::chrono; #ifdef BOOST_CHRONO_HAS_PROCESS_CLOCKS typedef process_cpu_clock clock_type; #else typedef high_resolution_clock clock_type; #endif namespace ex { template<class Rep, class Period> void sleep_for(const duration<Rep, Period>& d) { typedef high_resolution_clock Clock; typename Clock::time_point go = Clock::now() + d; while (Clock::now() < go) { } } } int f1(long j) { strict_stopwatch<clock_type> sw; for ( long i = 0; i < j; ++i ) std::sqrt( 123.456L ); // burn some time ex::sleep_for(milliseconds(100)); std::cout << "f1("<< j <<") Elapsed time: " << sw.elapsed() << std::endl; return 0; } int main() { strict_stopwatch<clock_type> sw; f1(1000); f1(2000); f1(3000); #ifdef BOOST_CHRONO_HAS_PROCESS_CLOCKS2 std::cout << "main() Elapsed time: " << duration_cast<duration<process_times<double>,boost::ratio<1> > >(sw.elapsed()) << std::endl; std::cout << "main() Elapsed time: " << duration_cast<duration<process_times<nanoseconds::rep>,boost::milli> >(sw.elapsed()) << std::endl; #endif std::cout << "main() Elapsed time: " << sw.elapsed() << std::endl; return 0; }
{ "alphanum_fraction": 0.6587622903, "author": null, "avg_line_length": 28.3442622951, "converted": null, "ext": "cpp", "file": null, "hexsha": "2c775332a51e4c99530331b11e7e951cb405c6ba", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "32dcafec6fc63bf826e2310822496472ed9d4467", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "danieljames/chrono", "max_forks_repo_path": "stopwatches/example/stopwatch_example.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "32dcafec6fc63bf826e2310822496472ed9d4467", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "danieljames/chrono", "max_issues_repo_path": "stopwatches/example/stopwatch_example.cpp", "max_line_length": 140, "max_stars_count": null, "max_stars_repo_head_hexsha": "32dcafec6fc63bf826e2310822496472ed9d4467", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "danieljames/chrono", "max_stars_repo_path": "stopwatches/example/stopwatch_example.cpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 488, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1729 }
[STATEMENT] lemma antisymPI: "(\<And>x y. \<lbrakk> r x y; r y x \<rbrakk> \<Longrightarrow> x = y) \<Longrightarrow> antisymp r" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>x y. \<lbrakk>r x y; r y x\<rbrakk> \<Longrightarrow> x = y) \<Longrightarrow> antisymp r [PROOF STEP] by (fact antisympI)
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "JinjaThreads_MM_Orders", "hexsha": null, "include": null, "lang": null, "length": 1, "llama_tokens": 132, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
[STATEMENT] lemma assign_eval\<^sub>w_const\<^sub>C: shows "(\<langle>x \<leftarrow> Const c, mds, mem\<rangle>, \<langle>Stop, mds, mem (x := c)\<rangle>) \<in> C.eval\<^sub>w" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := c)\<rangle>\<^sub>C [PROOF STEP] using C.unannotated[OF C.assign, where E="[]", simplified] [PROOF STATE] proof (prove) using this: eval_abv\<^sub>C \<langle>?x1 \<leftarrow> ?e1, ?mds, ?mem\<rangle>\<^sub>C \<langle>Stop, ?mds, ?mem(?x1 := ev\<^sub>A\<^sub>C ?mem ?e1)\<rangle>\<^sub>C goal (1 subgoal): 1. eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := c)\<rangle>\<^sub>C [PROOF STEP] apply(drule_tac x=x in meta_spec) [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>e mds mem. eval_abv\<^sub>C \<langle>x \<leftarrow> e, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := ev\<^sub>A\<^sub>C mem e)\<rangle>\<^sub>C) \<Longrightarrow> eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := c)\<rangle>\<^sub>C [PROOF STEP] apply(drule_tac x="Const c" in meta_spec) [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>mds mem. eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := ev\<^sub>A\<^sub>C mem (aexp\<^sub>C.Const c))\<rangle>\<^sub>C) \<Longrightarrow> eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := c)\<rangle>\<^sub>C [PROOF STEP] apply(drule_tac x=mds in meta_spec) [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>mem. 
eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := ev\<^sub>A\<^sub>C mem (aexp\<^sub>C.Const c))\<rangle>\<^sub>C) \<Longrightarrow> eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := c)\<rangle>\<^sub>C [PROOF STEP] apply(drule_tac x=mem in meta_spec) [PROOF STATE] proof (prove) goal (1 subgoal): 1. eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := ev\<^sub>A\<^sub>C mem (aexp\<^sub>C.Const c))\<rangle>\<^sub>C \<Longrightarrow> eval_abv\<^sub>C \<langle>x \<leftarrow> aexp\<^sub>C.Const c, mds, mem\<rangle>\<^sub>C \<langle>Stop, mds, mem(x := c)\<rangle>\<^sub>C [PROOF STEP] apply clarsimp [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Dependent_SIFUM_Refinement_Examples_Eg1Eg2", "hexsha": null, "include": null, "lang": null, "length": 7, "llama_tokens": 1113, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import numpy as np from numba import cuda, int32, float32 from numba.cuda.testing import unittest, CUDATestCase from numba.core.config import ENABLE_CUDASIM def useless_sync(ary): i = cuda.grid(1) cuda.syncthreads() ary[i] = i def simple_smem(ary): N = 100 sm = cuda.shared.array(N, int32) i = cuda.grid(1) if i == 0: for j in range(N): sm[j] = j cuda.syncthreads() ary[i] = sm[i] def coop_smem2d(ary): i, j = cuda.grid(2) sm = cuda.shared.array((10, 20), float32) sm[i, j] = (i + 1) / (j + 1) cuda.syncthreads() ary[i, j] = sm[i, j] def dyn_shared_memory(ary): i = cuda.grid(1) sm = cuda.shared.array(0, float32) sm[i] = i * 2 cuda.syncthreads() ary[i] = sm[i] def use_threadfence(ary): ary[0] += 123 cuda.threadfence() ary[0] += 321 def use_threadfence_block(ary): ary[0] += 123 cuda.threadfence_block() ary[0] += 321 def use_threadfence_system(ary): ary[0] += 123 cuda.threadfence_system() ary[0] += 321 def use_syncthreads_count(ary_in, ary_out): i = cuda.grid(1) ary_out[i] = cuda.syncthreads_count(ary_in[i]) def use_syncthreads_and(ary_in, ary_out): i = cuda.grid(1) ary_out[i] = cuda.syncthreads_and(ary_in[i]) def use_syncthreads_or(ary_in, ary_out): i = cuda.grid(1) ary_out[i] = cuda.syncthreads_or(ary_in[i]) class TestCudaSync(CUDATestCase): def test_useless_sync(self): compiled = cuda.jit("void(int32[::1])")(useless_sync) nelem = 10 ary = np.empty(nelem, dtype=np.int32) exp = np.arange(nelem, dtype=np.int32) compiled[1, nelem](ary) self.assertTrue(np.all(ary == exp)) def test_simple_smem(self): compiled = cuda.jit("void(int32[::1])")(simple_smem) nelem = 100 ary = np.empty(nelem, dtype=np.int32) compiled[1, nelem](ary) self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32))) def test_coop_smem2d(self): compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d) shape = 10, 20 ary = np.empty(shape, dtype=np.float32) compiled[1, shape](ary) exp = np.empty_like(ary) for i in range(ary.shape[0]): for j in range(ary.shape[1]): exp[i, j] = (i + 1) / (j + 1) 
self.assertTrue(np.allclose(ary, exp)) def test_dyn_shared_memory(self): compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory) shape = 50 ary = np.empty(shape, dtype=np.float32) compiled[1, shape, 0, ary.size * 4](ary) self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32))) def test_threadfence_codegen(self): # Does not test runtime behavior, just the code generation. compiled = cuda.jit("void(int32[:])")(use_threadfence) ary = np.zeros(10, dtype=np.int32) compiled[1, 1](ary) self.assertEqual(123 + 321, ary[0]) if not ENABLE_CUDASIM: self.assertIn("membar.gl;", compiled.ptx) def test_threadfence_block_codegen(self): # Does not test runtime behavior, just the code generation. compiled = cuda.jit("void(int32[:])")(use_threadfence_block) ary = np.zeros(10, dtype=np.int32) compiled[1, 1](ary) self.assertEqual(123 + 321, ary[0]) if not ENABLE_CUDASIM: self.assertIn("membar.cta;", compiled.ptx) def test_threadfence_system_codegen(self): # Does not test runtime behavior, just the code generation. 
compiled = cuda.jit("void(int32[:])")(use_threadfence_system) ary = np.zeros(10, dtype=np.int32) compiled[1, 1](ary) self.assertEqual(123 + 321, ary[0]) if not ENABLE_CUDASIM: self.assertIn("membar.sys;", compiled.ptx) def test_syncthreads_count(self): compiled = cuda.jit("void(int32[:], int32[:])")(use_syncthreads_count) ary_in = np.ones(72, dtype=np.int32) ary_out = np.zeros(72, dtype=np.int32) ary_in[31] = 0 ary_in[42] = 0 compiled[1, 72](ary_in, ary_out) self.assertTrue(np.all(ary_out == 70)) def test_syncthreads_and(self): compiled = cuda.jit("void(int32[:], int32[:])")(use_syncthreads_and) nelem = 100 ary_in = np.ones(nelem, dtype=np.int32) ary_out = np.zeros(nelem, dtype=np.int32) compiled[1, nelem](ary_in, ary_out) self.assertTrue(np.all(ary_out == 1)) ary_in[31] = 0 compiled[1, nelem](ary_in, ary_out) self.assertTrue(np.all(ary_out == 0)) def test_syncthreads_or(self): compiled = cuda.jit("void(int32[:], int32[:])")(use_syncthreads_or) nelem = 100 ary_in = np.zeros(nelem, dtype=np.int32) ary_out = np.zeros(nelem, dtype=np.int32) compiled[1, nelem](ary_in, ary_out) self.assertTrue(np.all(ary_out == 0)) ary_in[31] = 1 compiled[1, nelem](ary_in, ary_out) self.assertTrue(np.all(ary_out == 1)) if __name__ == '__main__': unittest.main()
{ "alphanum_fraction": 0.6040400078, "author": null, "avg_line_length": 30.1715976331, "converted": null, "ext": "py", "file": null, "hexsha": "163976e072605bd1631cd1ba862a5d6647e8c843", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 11, "max_forks_repo_forks_event_max_datetime": "2022-02-05T16:48:35.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-12T16:18:07.000Z", "max_forks_repo_head_hexsha": "3b9647d17d653abac15363da604eeb804dbdd15a", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "blair1306/numba", "max_forks_repo_path": "numba/cuda/tests/cudapy/test_sync.py", "max_issues_count": 11, "max_issues_repo_head_hexsha": "3b9647d17d653abac15363da604eeb804dbdd15a", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:50:14.000Z", "max_issues_repo_issues_event_min_datetime": "2020-08-09T02:30:14.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "blair1306/numba", "max_issues_repo_path": "numba/cuda/tests/cudapy/test_sync.py", "max_line_length": 79, "max_stars_count": 76, "max_stars_repo_head_hexsha": "3b9647d17d653abac15363da604eeb804dbdd15a", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "blair1306/numba", "max_stars_repo_path": "numba/cuda/tests/cudapy/test_sync.py", "max_stars_repo_stars_event_max_datetime": "2022-02-14T15:30:21.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-06T14:44:05.000Z", "num_tokens": 1519, "path": null, "reason": "import numpy,from numba", "repo": null, "save_path": null, "sha": null, "size": 5099 }
from flearn.servers.server import Server
import numpy as np


class qFFL(Server):
    """q-Fair Federated Learning (q-FFL) server.

    Aggregation re-weights each client's parameter delta by that client's
    pre-training loss raised to the power ``q``; larger ``q`` puts more
    weight on badly-served (high-loss) clients.
    """

    def __init__(self, q, L, train_data, ids, Learner, initial_params, learning_rate):
        # q: fairness exponent; L: Lipschitz-style constant used in the
        # step-size denominator h_ of aggregate().
        self.L = L
        self.q = q
        super(qFFL, self).__init__(train_data, ids, Learner, initial_params, learning_rate)

    def train(self, epoch, batch_size, select_rate=1):
        """Run one communication round; returns the sum of post-training client losses."""
        self.send_model()  # push current global parameters to all clients
        self.select_clients = self.select_client(select_rate)
        self.start_losses = []  # per-client loss BEFORE local training (used by aggregate)
        losses = []
        for client in self.select_clients:
            start_loss = client.model.solve_loss(client.client_data)
            self.start_losses.append(start_loss)
            _, client_loss = client.train(epoch, batch_size)
            losses.append(client_loss)
            # print('Client: {}, Local_loss: {:f}'.format(client.id, client_loss))
        self.aggregate()
        return np.sum(losses)

    def aggregate(self):
        """Combine client updates into new global parameters (q-FFL rule).

        new_params = start_params - (sum_k F_k^q * delta_k) / h_
        where h_ accumulates q * F_k^(q-1) * ||delta||^2 + L * F_k^q.

        Returns the list of aggregated parameter arrays (also assigned to
        ``self.model``).
        """
        # assumes each parameter is a 1-D array (np.zeros(len(param))) — TODO confirm
        total_params = [np.zeros(len(param)) for param in self.model.print_params()]
        delta_ = [np.zeros(len(param)) for param in self.model.print_params()]
        start_params = [param for param in self.model.print_params()]
        h_ = 0
        for k, c in enumerate(self.select_clients):
            # loss measured before local training, captured in train()
            loss = self.start_losses[k]
            client_params = c.model.print_params()
            for i in range(len(total_params)):
                # weight this client's delta by loss^q
                delta_[i] += np.power(loss, self.q) * (start_params[i] - client_params[i])
            # NOTE(review): flatten_deltas is recomputed from the CUMULATIVE
            # delta_ (all clients processed so far), so h_ mixes earlier
            # clients' deltas into this client's norm term. The q-FFL paper
            # uses the per-client delta here — verify whether this is
            # intentional before changing.
            flatten_deltas = np.concatenate(delta_).ravel().tolist()
            h_ += self.q * np.power(loss, self.q - 1) * np.sum(np.square(flatten_deltas)) + self.L * np.power(loss, self.q)
        for i in range(len(total_params)):
            # gradient-style step scaled by the accumulated denominator h_
            total_params[i] = start_params[i] - delta_[i] / h_
        self.model.assign_params(total_params)
        return total_params
{ "alphanum_fraction": 0.6152606126, "author": null, "avg_line_length": 47.7179487179, "converted": null, "ext": "py", "file": null, "hexsha": "13ec6878ba9dd99ffd8b2a17ec2c5bfc193b57ea", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b5ddb26acbee3218b11894fb7ca7ce24677c0b50", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "zhaolotelli/FedLearn", "max_forks_repo_path": "flearn/servers/qFFL.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "b5ddb26acbee3218b11894fb7ca7ce24677c0b50", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "zhaolotelli/FedLearn", "max_issues_repo_path": "flearn/servers/qFFL.py", "max_line_length": 124, "max_stars_count": 6, "max_stars_repo_head_hexsha": "b5ddb26acbee3218b11894fb7ca7ce24677c0b50", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "zhaolotelli/FedLearn", "max_stars_repo_path": "flearn/servers/qFFL.py", "max_stars_repo_stars_event_max_datetime": "2022-01-11T18:23:41.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-30T12:58:08.000Z", "num_tokens": 418, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1861 }
% DEMSPGP1D2 Do a simple 1-D regression after Snelson & Ghahramani's example.
% Sparse (FITC) Gaussian process demo on the 1-D SPGP toy data,
% experiment number 2.
% GP

% Fix seeds so the demo is reproducible.
randn('seed', 2e5);
rand('seed', 2e5);
seedVal = 2e5;

dataSetName = 'spgp1d';
experimentNo = 2;

% load data
[X, y] = mapLoadData(dataSetName, seedVal);

% Set up model: FITC sparse approximation with 9 inducing points.
options = gpOptions('fitc');
options.numActive = 9;

% use the deterministic training conditional.
q = size(X, 2);
d = size(y, 2);
model = gpCreate(q, d, X, y, options);

% Place the inducing inputs around -0.75 with small random jitter.
model.X_u = randn(9, 1)*0.25 - 0.75;
params = gpExtractParam(model);
model = gpExpandParam(model, params);

% Optimise the model.
iters = 1000;
display = 1;

% Heuristic initialisation: noise precision and kernel variance from the
% data variance; inverse width from half the input range.
model.beta = 4/var(y);
model.kern.variance = var(y);
% NOTE(review): the missing semicolon below echoes the value to the
% console — presumably intentional for the demo; confirm before "fixing".
model.kern.inverseWidth = 1./((-min(X)+max(X))'/2).^2
model = gpOptimise(model, display, iters);

% Save the results as demSpgp1d<experimentNo>.mat.
% (The doubled ';;' below is a harmless empty statement.)
capName = dataSetName;;
capName(1) = upper(capName(1));
save(['dem' capName num2str(experimentNo) '.mat'], 'model');

demSpgp1dPlot
{ "alphanum_fraction": null, "author": "SheffieldML", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/MATLAB/SheffieldML-GPmat/GPmat-4b5914a38ecbad9fb7a13a3392970bfc28c9d911/gp/demSpgp1d2.m", "reason": null, "repo": "GPmat", "save_path": "github-repos/MATLAB/SheffieldML-GPmat", "sha": "4b5914a38ecbad9fb7a13a3392970bfc28c9d911", "size": null }
FFT_TYPE = "scipy"

import os
import pathlib
import warnings

import numpy as np
import scipy.signal
from scipy.io import wavfile

from ..parameter import Parameter
from ..processor import Processor
from ..parameter_list import ParameterList

# NOTE(review): fft/ifft are imported but never used in this module.
if FFT_TYPE == "scipy":
    from scipy.fftpack import fft, ifft
else:
    from numpy.fft import fft, ifft

# Impulse responses: parameter "type" option -> wav filename inside ir_dir.
ir_dir = "irs"
src = {"sm-room" : "small_room.wav",
       "md-room" : "medium_room.wav",
       "lg-room" : "large_room.wav",
       "hall"    : "hall.wav",
       "plate"   : "plate.wav"}


class ConvolutionalReverb(Processor):
    """Block-based convolutional reverb using overlap-add FFT convolution.

    Each call to process() convolves one block with the (decay-faded)
    impulse response and carries the convolution tail forward in
    self.overlap.
    """

    def __init__(self, name="reverb", parameters=None, block_size=512, sample_rate=44100):
        super().__init__(name, parameters, block_size, sample_rate)

        if not parameters:
            self.parameters = ParameterList()
            self.parameters.add(Parameter("type", "sm-room", "string", processor=self, options=["sm-room", "md-room", "lg-room", "hall", "plate"]))
            self.parameters.add(Parameter("decay", 1.0, "float", processor=self, minimum=0.1, maximum=1.0))
            self.parameters.add(Parameter("dry_mix", 1.0, "float", processor=self, minimum=0.0, maximum=1.0))
            self.parameters.add(Parameter("wet_mix", 0.0, "float", processor=self, minimum=0.0, maximum=1.0))

        self.impulses = {}   # dict to store numpy array for each impulse response
        self.load()          # load all impulses into the dict
        self.update("type")  # pre-process current impulse ready for application

    def process(self, x):
        """Process one audio block (samples,) or (samples, channels); returns stereo wet/dry mix."""
        if x.ndim < 2:          # if input is mono (samples,) add stereo dim
            x = np.expand_dims(x, 1)

        if x.shape[1] == 1:     # if input is mono copy L to R
            mono = True         # NOTE(review): `mono` is set but never read
            x = np.repeat(x, 2, axis=1)

        if self.parameters.wet_mix.value == 0.0:
            # Fully dry: bypass the convolution entirely.
            return x
        else:
            # perform partitioned convolution
            y = scipy.signal.fftconvolve(x, self.h, axes=0, mode='full')

            # pick out the previous overlap that will be added to output
            overlap = self.overlap[:self.block_size]

            # there may be some overlap left over we need to save again
            eoverlap = self.overlap[self.block_size:]

            if eoverlap.shape[0] == 0:
                # overlap buffer shorter than one block: zero-pad it and
                # reset the carried-over extra overlap.
                padsize = self.block_size - overlap.shape[0]
                overlap = np.pad(overlap, ((0,padsize),(0,0)))
                eoverlap = np.zeros((self.overlap.shape))
            else:
                # pad the extra overlap back to the full overlap length
                padsize = self.overlap.shape[0] - eoverlap.shape[0]
                eoverlap = np.pad(eoverlap, ((0,padsize),(0,0)))

            wet = y[:self.block_size] + overlap   # add the previous overlap to the output
            dry = x[:self.block_size,:]           # grab the dry signal
            # store the overlap for the next frame (with extra overlap)
            self.overlap = y[self.block_size:,:] + eoverlap

            wet *= self.parameters.wet_mix.value  # apply gain to wet signal
            dry *= self.parameters.dry_mix.value  # apply gain to input (dry) signal

            out = wet + dry                       # mix wet and dry signals

            return out

    def load(self):
        """Read all impulse responses from disk into self.impulses (keyed by type)."""
        for reverb in self.parameters.type.options:
            curdir = pathlib.Path(__file__).parent.absolute()
            filename = os.path.join(curdir, "..", ir_dir, src[reverb])
            sr, h = wavfile.read(filename)  # load the audio file for correct impulse response

            # check if the sample rate matches processor
            if sr != self.sample_rate:
                # for now we raise an error. but in the future we would want to automatically resample
                raise RuntimeError(f"Sample rate of impulse {sr} must match sample rate of processor {self.sample_rate}")

            h = h/32767   # convert from 16 bit int to 32 bit float
            h *= 0.125    # perform additional scaling for headroom
            self.impulses[reverb] = h.astype(self.dtype)  # store into dictionary

    def update(self, parameter_name=None):
        """Rebuild the working impulse self.h from the selected type and decay, then reset state."""
        # this should be updated soon so we only update certain parts
        # based on which parameters change

        # load proper impulse from memory
        self.h = self.impulses[self.parameters.type.value].copy()

        # fade out the impulse based on the decay setting
        fstart = int(self.parameters.decay.value * self.h.shape[0])
        # constant 20 ms (0.020 s) fade-out window after fstart
        fstop = np.min((self.h.shape[0], fstart + int(0.020*self.sample_rate)))
        flen = fstop - fstart

        # if there is a fade (i.e. decay < 1.0)
        if flen > 0 and True:
            fade = np.arange(flen, dtype=self.dtype)/flen  # normalized set of indices
            fade = np.power(0.1, (1-fade) * 5)             # fade gain values with 100 dB of atten
            fade = np.expand_dims(fade, 1)                 # add stereo dim
            fade = np.repeat(fade, 2, axis=1)              # copy gain to stereo dim
            self.h[fstart:fstop,:] *= fade                 # apply fade
            self.h = self.h[:fstop]                        # throw away faded samples

        self.reset_state()  # set the internal buffer to zeros

    def reset_state(self):
        """Zero the overlap buffer (length = len(impulse) - 1, matching full convolution tail)."""
        overlap_shape = self.h.shape[0] - 1                        # overlap buffer size
        overlap_init = np.zeros((overlap_shape,self.h.shape[1]))   # time-domain overlap signal
        self.overlap = overlap_init.astype(self.dtype)             # store zeroed buffer
{ "alphanum_fraction": 0.584175671, "author": null, "avg_line_length": 45.5396825397, "converted": null, "ext": "py", "file": null, "hexsha": "432306f4b7fc108577eabddefd8c48e2e119b11e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-01-21T05:58:15.000Z", "max_forks_repo_forks_event_min_datetime": "2020-12-01T08:57:44.000Z", "max_forks_repo_head_hexsha": "49d86aeddfdd0b59c18830b02f63c98f721caf9d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JalleyLin/pymixconsole", "max_forks_repo_path": "pymixconsole/processors/convreverb.py", "max_issues_count": 6, "max_issues_repo_head_hexsha": "49d86aeddfdd0b59c18830b02f63c98f721caf9d", "max_issues_repo_issues_event_max_datetime": "2022-02-22T09:07:04.000Z", "max_issues_repo_issues_event_min_datetime": "2020-10-13T20:10:34.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JalleyLin/pymixconsole", "max_issues_repo_path": "pymixconsole/processors/convreverb.py", "max_line_length": 147, "max_stars_count": 62, "max_stars_repo_head_hexsha": "2e249493fbc1aea96db28d24bd41f14b1a2e9f93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "TE-StefanUhlich/pymixconsole", "max_stars_repo_path": "pymixconsole/processors/convreverb.py", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:38:15.000Z", "max_stars_repo_stars_event_min_datetime": "2020-10-08T06:19:04.000Z", "num_tokens": 1353, "path": null, "reason": "import numpy,from numpy,import scipy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 5738 }
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from tensorflow.keras import backend as K
import tensorflow as tf
from scipy.sparse import csr_matrix

from stellargraph.mapper import SparseFullBatchNodeSequence, FullBatchNodeSequence


class GradientSaliency:
    """
    Class to compute the saliency maps based on the vanilla gradient w.r.t the adjacency and the feature matrix.
    """

    def __init__(self, model, generator):
        """
        Args:
            model (Keras model object): The differentiable graph model object.
                For a dense model, the model.input should contain two tensors:
                    - features: The placeholder of the feature matrix.
                    - adj: The placeholder of the adjacency matrix.
                For a sparse model, the model.input should contain three tensors:
                    - features: The placeholder of the feature matrix.
                    - adj_index: The placeholder of the adjacency matrix.
                    - adj_values: The placeholder of the adjacency matrix.
            generator: A FullBatchNodeSequence or SparseFullBatchNodeSequence that
                supplies the node features and the (sparse or dense) adjacency.

        Raises:
            TypeError: if the generator type does not match its sparsity flag.
            RuntimeError: if the model does not expose the expected number of inputs.
        """
        # Set sparse flag from the generator
        self.is_sparse = generator.use_sparse

        if self.is_sparse:
            if not isinstance(generator, SparseFullBatchNodeSequence):
                raise TypeError(
                    "The generator supplied has to be an object of SparseFullBatchNodeSequence for sparse adjacency matrix."
                )
            if len(model.input) != 4:
                raise RuntimeError(
                    "Keras model for sparse adjacency is expected to have four inputs"
                )
            self.A = generator.A_values
            self.A_indices = generator.A_indices
            features_t, output_indices_t, adj_indices_t, adj_t = model.input
        else:
            if not isinstance(generator, FullBatchNodeSequence):
                raise TypeError(
                    "The generator supplied has to be an object of FullBatchNodeSequence for dense adjacency matrix."
                )
            if len(model.input) != 3:
                raise RuntimeError(
                    "Keras model for dense adjacency is expected to have three inputs"
                )
            self.A = generator.A_dense
            features_t, output_indices_t, adj_t = model.input

        # Extract features from generator
        self.X = generator.features
        self.model = model

    def compute_node_gradients(self, node_mask_tensors):
        """Gradient of the class-of-interest score w.r.t. the node features.

        Args:
            node_mask_tensors (list): model inputs followed by a placeholder 0 and
                the class index; non-tensor entries are converted in place.

        Returns:
            tf.Tensor: gradients with the same shape as the features tensor.
        """
        # Note: converts entries of the caller's list in place.
        for i, x in enumerate(node_mask_tensors):
            if not isinstance(x, tf.Tensor):
                node_mask_tensors[i] = tf.convert_to_tensor(x)

        if self.is_sparse:
            (
                features_t,
                output_indices_t,
                adj_indices_t,
                adj_t,
                _,
                class_of_interest,
            ) = node_mask_tensors
            model_input = [features_t, output_indices_t, adj_indices_t, adj_t]
        else:
            (
                features_t,
                output_indices_t,
                adj_t,
                _,
                class_of_interest,
            ) = node_mask_tensors
            model_input = [features_t, output_indices_t, adj_t]

        with tf.GradientTape() as tape:
            # Watch the features so we can differentiate w.r.t. them.
            tape.watch(features_t)
            output = self.model(model_input)
            cost_value = K.gather(output[0, 0], class_of_interest)

        node_gradients = tape.gradient(cost_value, features_t)
        return node_gradients

    def compute_link_gradients(self, link_mask_tensors):
        """Gradient of the class-of-interest score w.r.t. the adjacency values.

        Args:
            link_mask_tensors (list): model inputs followed by a placeholder 0 and
                the class index; non-tensor entries are converted in place.

        Returns:
            tf.Tensor: gradients with the same shape as the adjacency tensor.
        """
        for i, x in enumerate(link_mask_tensors):
            if not isinstance(x, tf.Tensor):
                link_mask_tensors[i] = tf.convert_to_tensor(x)

        if self.is_sparse:
            (
                features_t,
                output_indices_t,
                adj_indices_t,
                adj_t,
                _,
                class_of_interest,
            ) = link_mask_tensors
            model_input = [features_t, output_indices_t, adj_indices_t, adj_t]
        else:
            (
                features_t,
                output_indices_t,
                adj_t,
                _,
                class_of_interest,
            ) = link_mask_tensors
            model_input = [features_t, output_indices_t, adj_t]

        with tf.GradientTape() as tape:
            # Watch the adjacency so we can differentiate w.r.t. the links.
            tape.watch(adj_t)
            output = self.model(model_input)
            cost_value = K.gather(output[0, 0], class_of_interest)

        link_gradients = tape.gradient(cost_value, adj_t)
        return link_gradients

    def get_node_masks(
        self, node_idx, class_of_interest, X_val=None, A_index=None, A_val=None
    ):
        """
        Args:
            node_idx, class_of_interest: The values to feed while computing the gradients.
            X_val: The value of node features, default is obtained from the generator.
            A_val: The values of adjacency matrix while computing the gradients. When the adjacency
                matrix is sparse, it only contains the non-zero values. The default is obtained
                from the generator.
            A_index: When the adjacency matrix is sparse, it is the indices of the non-zero values.
                The default is obtained from the generator.
        Returns:
            gradients (Numpy array): Returns a vanilla gradient mask for the nodes.
        """
        if X_val is None:
            X_val = self.X
        if A_index is None and self.is_sparse:
            A_index = self.A_indices
        if A_val is None:
            A_val = self.A
        out_indices = np.array([[node_idx]])

        if self.is_sparse:
            gradients = self.compute_node_gradients(
                [X_val, out_indices, A_index, A_val, 0, class_of_interest]
            )
        else:
            gradients = self.compute_node_gradients(
                [X_val, out_indices, A_val, 0, class_of_interest]
            )
        return gradients[0]

    def get_link_masks(
        self, node_idx, class_of_interest, X_val=None, A_index=None, A_val=None
    ):
        """
        Args:
            node_idx, class_of_interest: The values to feed while computing the gradients.
            X_val: The value of node features, default is obtained from the generator.
            A_val: The values of adjacency matrix while computing the gradients. When the adjacency
                matrix is sparse, it only contains the non-zero values. The default is obtained
                from the generator.
            A_index: When the adjacency matrix is sparse, it is the indices of the non-zero values.
                The default is obtained from the generator.
        Returns:
            gradients: A csr_matrix (sparse) or Numpy array (dense) gradient mask for the links.
        """
        if X_val is None:
            X_val = self.X
        if A_index is None and self.is_sparse:
            A_index = self.A_indices
        if A_val is None:
            A_val = self.A
        out_indices = np.array([[node_idx]])

        # Execute the function to compute the gradient
        if self.is_sparse:
            gradients = self.compute_link_gradients(
                [X_val, out_indices, A_index, A_val, 0, class_of_interest]
            )
        else:
            gradients = self.compute_link_gradients(
                [X_val, out_indices, A_val, 0, class_of_interest]
            )
        if self.is_sparse:
            # Re-assemble the flat non-zero gradients into a sparse matrix.
            return csr_matrix(
                (gradients.numpy()[0, :], (A_index[0, :, 0], A_index[0, :, 1]))
            )
        return np.squeeze(gradients, 0)

    def get_node_importance(
        self, node_idx, class_of_interest, X_val=None, A_index=None, A_val=None
    ):
        """
        For nodes, the saliency mask we get gives us the importance of each features. For visualization purpose, we may
        want to see a summary of the importance for the node. The importance of each node can be defined as the sum of
        all the partial gradients w.r.t its features.

        Args:
            X_val, A_index, A_val, node_idx, class_of_interest: The values to feed while computing the gradients.
        Returns:
            (Numpy array): Each element indicates the importance of a node.
        """
        # BUG FIX: previously the explicit X_val/A_index/A_val arguments were
        # discarded (literal Nones were passed), so callers could never override
        # the generator defaults. Forward them through.
        gradients = self.get_node_masks(
            node_idx, class_of_interest, X_val=X_val, A_index=A_index, A_val=A_val
        )
        return np.sum(gradients, axis=1)
{ "alphanum_fraction": 0.6067741935, "author": null, "avg_line_length": 39.0756302521, "converted": null, "ext": "py", "file": null, "hexsha": "dd87420fdd9ed9e6c9dfd668f83dc1e3aaef8be7", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "10e62006907dd5968286f33648d1054e9c961c1b", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "zblumen/stellargraph", "max_forks_repo_path": "stellargraph/utils/saliency_maps/saliency.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "10e62006907dd5968286f33648d1054e9c961c1b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "zblumen/stellargraph", "max_issues_repo_path": "stellargraph/utils/saliency_maps/saliency.py", "max_line_length": 199, "max_stars_count": 3, "max_stars_repo_head_hexsha": "10e62006907dd5968286f33648d1054e9c961c1b", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "zblumen/stellargraph", "max_stars_repo_path": "stellargraph/utils/saliency_maps/saliency.py", "max_stars_repo_stars_event_max_datetime": "2020-03-19T12:49:23.000Z", "max_stars_repo_stars_event_min_datetime": "2020-01-17T10:33:12.000Z", "num_tokens": 1881, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 9300 }
# -*- coding: utf-8 -*-

from ..io.spec import spec
import h5py
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from ..math.utils import logscale
import warnings


def show(
    x,
    y,
    images,
    xp,
    yp,
    xlabel,
    ylabel,
    names,
    transpose=False,
    flipvert=False,
    fliphor=False,
    color="#ffffff",
    defaultorigin=False,
    printpos=False,
    outname=None,
):
    """Render three images as an RGB overlay with annotated marker positions.

    Args:
        x(np.array): horizontal coordinates
        y(np.array): vertical coordinates
        images(np.array): image stack, shape (nimg, len(y), len(x))
        xp(np.array): marker horizontal coord.
        yp(np.array): marker vertical coord.
        xlabel(str): horizontal axis label
        ylabel(str): vertical axis label
        names(list(str)): per-marker annotation text (None to skip)
        transpose/flipvert/fliphor(bool): axis reorientation switches
        defaultorigin(bool): force axes to increase left->right / bottom->top
        printpos(bool): print extent and marker coordinates for debugging
        outname(str|None): save figure to this path instead of showing it
    """
    # Make monotonically increasing (required by interp2d)
    ind = np.argsort(x)
    x = x[ind]
    images = images[:, :, ind]
    ind = np.argsort(y)
    y = y[ind]
    images = images[:, ind, :]
    nimg = images.shape[0]

    # New, uniformly spaced grid spanning the same range
    xnew = np.linspace(x[0], x[-1], len(x))
    ynew = np.linspace(y[0], y[-1], len(y))

    # Interpolate each channel onto the uniform grid
    # NOTE(review): interp2d is deprecated in recent SciPy; consider
    # RegularGridInterpolator when upgrading.
    for i in range(nimg):
        f = interpolate.interp2d(x, y, images[i, ...], kind="cubic")
        images[i, ...] = np.clip(f(xnew, ynew), 0, 1)

    # Plot range: extend by half a pixel on each side
    dx = (xnew[1] - xnew[0]) / 2.0
    dy = (ynew[1] - ynew[0]) / 2.0
    extent = (x[0] - dx, x[-1] + dx, y[0] - dy, y[-1] + dy)
    origin = "lower"

    # Transpose: swap image axes, marker coords and labels together
    if transpose:
        extent = (extent[2], extent[3], extent[0], extent[1])
        images = images.transpose((0, 2, 1))
        xp, yp = yp, xp
        xlabel, ylabel = ylabel, xlabel

    # Flip vertical
    if flipvert:
        extent = (extent[0], extent[1], extent[3], extent[2])
        images = images[:, ::-1, :]

    # Flip horizontal
    if fliphor:
        extent = (extent[1], extent[0], extent[2], extent[3])
        images = images[:, :, ::-1]

    # Origin left bottom: mirror markers when an axis runs backwards
    if defaultorigin:
        ind = [0, 1, 2, 3]
        if extent[1] < extent[0]:
            # extent[0] ... xp .......... extent[1]
            # extent[1] ... xp .......... extent[0]
            xp = extent[1] + extent[0] - xp
            ind[0] = 1
            ind[1] = 0
        if extent[3] < extent[2]:
            ind[2] = 3
            ind[3] = 2
            yp = extent[3] + extent[2] - yp
        extent = (extent[ind[0]], extent[ind[1]], extent[ind[2]], extent[ind[3]])

    # Debug output
    if printpos:
        print(extent)
        print(np.vstack((xp, yp)).T)

    # RGB for plotting: channels 0..2 map to R, G, B
    if transpose:
        rgb = np.zeros((len(xnew), len(ynew), 3))
    else:
        rgb = np.zeros((len(ynew), len(xnew), 3))
    for i in range(3):
        rgb[..., i] = images[i, ...]
    # rgb = images[0:3,...].transpose((1,2,0))

    # Plot
    plt.figure(1)
    plt.clf()
    im = plt.imshow(
        rgb, extent=extent, origin=origin, interpolation="nearest", aspect=1
    )  # ,cmap=plt.get_cmap("gray")
    axes = plt.gca()
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    xlim, ylim = axes.get_xlim(), axes.get_ylim()

    fontsize = 12
    s = fontsize / 2
    axes.scatter(xp, yp, marker="o", s=s, color=color)
    for i in range(len(names)):
        # (disabled experiment: pick annotation color from the local RGB
        # brightness instead of the fixed `color` argument)
        # try:
        #     rgbi = rgb[int(np.round(xp[i])),int(np.round(yp[i])),:]*255
        #     if (rgbi[0]*0.299 + rgbi[1]*0.587 + rgbi[2]*0.114) > 100:
        #         color = '#000000'
        #     else:
        #         color = '#ffffff'
        # except:
        #     color = '#ffffff'
        if names[i] is not None:
            axes.annotate(
                names[i], xy=(xp[i], yp[i]), xytext=(xp[i] + dx, yp[i]), color=color
            )

    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if outname is None:
        plt.show()
    else:
        plt.savefig(outname, bbox_inches="tight", dpi=300)


def plot(
    hdf5filename,
    grps,
    specfilename,
    specnumbers,
    offsamy,
    offsamz,
    transpose=False,
    flipvert=True,
    fliphor=False,
    defaultorigin=False,
    showlabels=False,
    color="#ffffff",
    printpos=False,
    outname=None,
    log=False,
):
    """Overlay three HDF5 map channels as RGB with XANES positions from a spec file.

    Args:
        hdf5filename(str): stack file with per-channel groups
        grps(dict): keys must be 0, 1 or 2 (r, g, b); each value holds
            "path", "ind", "lo", "hi" for channel selection and scaling
        specfilename(str): spec file with motor positions
        specnumbers(list(int)): scan numbers to mark on the overlay
        offsamy(float|list): horizontal marker offset(s) in micron
        offsamz(float|list): vertical marker offset(s) in micron
        log(bool): apply logscale() to each channel before normalisation
    """
    oh5 = h5py.File(hdf5filename)

    # Prepare global coordinates: figure out which motor pair is stored and
    # which must be converted mm -> micron (factor 1000).
    dim1off = 0.0
    dim1name = "samz"
    dim1mult = 1
    dim2off = 0.0
    dim2name = "samy"
    dim2mult = 1
    try:
        ocoord = oh5["stackinfo"]
    except KeyError:
        warnings.warn(
            '"coordinates" is deprecated and should be replaced by "stackinfo"',
            DeprecationWarning,
        )
        ocoord = oh5["coordinates"]
    # NOTE(review): dim1off/dim2off start as scalars but are indexed with
    # grps[i]["ind"] below, so the loop is expected to replace them with
    # arrays — confirm the file always carries one of samz/sampz (and
    # samy/sampy).
    for f in ocoord:
        if f == "samz":
            dim1off = ocoord[f].value * 1000
            dim1name = "sampz"
            dim1mult = 1
        if f == "sampz":
            dim1off = ocoord[f].value
            dim1name = "samz"
            dim1mult = 1000
        if f == "samy":
            dim2off = ocoord[f].value * 1000
            dim2name = "sampy"
            dim2mult = 1
        if f == "sampy":
            dim2off = ocoord[f].value
            dim2name = "samy"
            dim2mult = 1000

    # Get image with axes in micron; normalise each channel into [0, 1]
    for i in grps:
        ogrp = oh5[grps[i]["path"]]
        odset = ogrp[ogrp.attrs["signal"]]

        dim1 = dim1off[grps[i]["ind"]] + ogrp[dim1name].value * dim1mult
        dim2 = dim2off[grps[i]["ind"]] + ogrp[dim2name].value * dim2mult

        # Slice the stack along whichever axis is NOT a scan dimension
        idim1 = ogrp.attrs[dim1name + "_indices"]
        idim2 = ogrp.attrs[dim2name + "_indices"]
        if idim2 != 0 and idim1 != 0:
            img = odset[grps[i]["ind"], ...]
        elif idim2 != 1 and idim1 != 1:
            img = odset[:, grps[i]["ind"], :]
        else:
            img = odset[..., grps[i]["ind"]]
        img[np.isnan(img)] = np.nanmin(img)
        if idim1 > idim2:
            img = img.T

        if i == 0:
            images = np.zeros((3,) + img.shape, dtype=img.dtype)

        if log:
            img = logscale(img)

        # Window the channel between its lo/hi fractions, then clip
        mi = np.min(img)
        ma = np.max(img)
        d = ma - mi
        mi += d * grps[i]["lo"]
        ma -= d * (1 - grps[i]["hi"])
        img -= mi
        img /= ma
        img = np.clip(img, 0, 1)
        images[i, ...] = img

    oh5.close()

    # XANES positions: combine coarse (mm) and piezo (micron) motor values
    ospec = spec(specfilename)
    motors = ["samz", "sampz", "samy", "sampy"]
    n = len(specnumbers)
    pdim1 = np.empty(n)
    pdim2 = np.empty(n)
    if not hasattr(offsamz, "__len__"):
        offsamz = [offsamz] * n
    if not hasattr(offsamy, "__len__"):
        offsamy = [offsamy] * n
    for i in range(n):
        v = ospec.getmotorvalues(specnumbers[i], motors)
        if printpos:
            print("Spec number {}".format(i))
            for a, b in zip(motors, v):
                print(" {} = {}".format(a, b))
        pdim1[i] = v[0] * 1000 + v[1] + offsamz[i]
        pdim2[i] = v[2] * 1000 + v[3] + offsamy[i]

    # Make axes values readable: shift origin to the minimum coordinate
    m1 = min(dim1)
    m2 = min(dim2)
    dim1 -= m1
    dim2 -= m2
    pdim1 -= m1
    pdim2 -= m2

    # Plot
    if showlabels:
        names = [str(i) for i in specnumbers]
    else:
        names = [None] * n
    if defaultorigin:
        dim2label = "x ($\mu$m)"
        dim1label = "y ($\mu$m)"
    else:
        dim2label = "y ($\mu$m)"
        dim1label = "z ($\mu$m)"
    show(
        dim2,
        dim1,
        images,
        pdim2,
        pdim1,
        dim2label,
        dim1label,
        names,
        transpose=transpose,
        flipvert=flipvert,
        fliphor=fliphor,
        color=color,
        defaultorigin=defaultorigin,
        printpos=printpos,
        outname=outname,
    )
{ "alphanum_fraction": 0.5020799193, "author": null, "avg_line_length": 25.7564935065, "converted": null, "ext": "py", "file": null, "hexsha": "cafa40005ee431747842024f1a01a763d9993d4d", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "fde4b6e0f462f464ce7af6a942b355d3d8f39f77", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "woutdenolf/spectrocrunch", "max_forks_repo_path": "spectrocrunch/visualization/id21_scanoverlap.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "fde4b6e0f462f464ce7af6a942b355d3d8f39f77", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "woutdenolf/spectrocrunch", "max_issues_repo_path": "spectrocrunch/visualization/id21_scanoverlap.py", "max_line_length": 84, "max_stars_count": 3, "max_stars_repo_head_hexsha": "fde4b6e0f462f464ce7af6a942b355d3d8f39f77", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "woutdenolf/spectrocrunch", "max_stars_repo_path": "spectrocrunch/visualization/id21_scanoverlap.py", "max_stars_repo_stars_event_max_datetime": "2019-12-16T11:21:05.000Z", "max_stars_repo_stars_event_min_datetime": "2018-04-16T15:51:36.000Z", "num_tokens": 2434, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 7933 }
Require Import ssr.
Require Import lib.
Require Import withzero.

Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.

Open Scope dnat_scope.

(* Axiomatized development of basic commutative algebra (rings, domains,
   fields, subrings, ideals, quotients, polynomials) over an eqType carrier
   extended with an explicit zero (withzero). *)
Module Type GALOIS.

(* -------------------------------------------------------------------------- *)
(*   Rings                                                                    *)
(* -------------------------------------------------------------------------- *)

Section Ring.

Section Axioms.

Variable d' : eqType.
Notation d := (withzeroData d').
Notation "0" := (@Zero _).

(* Lift operations on the nonzero carrier d' to the carrier-with-zero d. *)
Definition lift_opp (f:d'->d') x :=
  match x with
  | Zero => 0
  | Nz x => Nz (f x)
  end.

Definition lift_add (add:d'->d'->d) x y :=
  match x, y with
  | Zero, _ => y
  | _, Zero => x
  | Nz x, Nz y => add x y
  end.

Definition lift_mul (mul:d'->d'->d) x y :=
  match x, y with
  | Nz x, Nz y => mul x y
  | _, _ => 0
  end.

Variable addr' : d'->d'->d.
Variable mulr' : d'->d'->d.
Variable oppr' : d'->d'.
Variable oner' : d'.

Notation "x1 + x2" := (lift_add addr' x1 x2).
Notation "x1 * x2" := (lift_mul mulr' x1 x2).
Notation "- x" := (lift_opp oppr' x).
Notation "1" := (Nz oner').

(* Commutative unital ring axioms, stated over the lifted operations. *)
Structure ring_axioms : Type := Ring_axioms {
  addC' : forall x1 x2, x1 + x2 = x2 + x1;
  addA' : forall x1 x2 x3, x1 + (x2 + x3) = (x1 + x2) + x3;
  oppL' : forall x, - x + x = 0;
  mulC' : forall x1 x2, x1 * x2 = x2 * x1;
  mulA' : forall x1 x2 x3, x1 * (x2 * x3) = x1 * x2 * x3;
  mul1r' : forall x, 1 * x = x;
  distPM' : forall x1 x2 x3, (x1 + x2) * x3 = x1 * x3 + x2 * x3;
  distMP' : forall x1 x2 x3, x1 * (x2 + x3) = x1 * x2 + x1 * x3
}.

End Axioms.

(* A ring packages a nonzero carrier with its operations and axioms. *)
Structure ring : Type := Ring {
  rbase' : eqType;
  addr' : rbase' -> rbase' -> withzero rbase';
  oppr' : rbase' -> rbase';
  oner' : rbase';
  mulr' : rbase' -> rbase' -> withzero rbase';
  axioms : ring_axioms addr' mulr' oppr' oner'
}.

(* The full carrier of a ring is the nonzero carrier plus Zero. *)
Definition rbase r := withzeroData (rbase' r).
Coercion rbase : ring >-> eqType.

Variable r:ring.

Definition addr (x y:r) := lift_add (@addr' r) x y.
Definition mulr (x y:r) := lift_mul (@mulr' r) x y.
Definition oppr (x:r) := lift_opp (@oppr' r) x.
Definition oner := Nz (oner' r).

Notation "x1 + x2" := (addr x1 x2).
Notation "x1 * x2" := (mulr x1 x2).
Notation "- x" := (oppr x).
Notation "x - y" := (x + (- y)).
Notation "1" := (oner).
Notation "0" := (@Zero _).

(* Divisibility: a |` b iff b is a multiple of a. *)
Definition divides (a b:r) := exists a', a * a' = b.
Notation "x |` y" := (divides x y) (at level 55).

(* d is a gcd of f and g: a common divisor divisible by every common divisor. *)
CoInductive gcd (f g d:r) : Type :=
  Gcd : (d |` f) -> (d |` g) ->
        (forall d', (d' |` f) -> (d' |` g) -> (d' |` d)) -> gcd f g d.

Definition unit (x:r) := exists x', (x * x' = 1).

Definition associates x y := exists u : r, unit u /\ x = u * y.

Definition irreducible p := forall x y, x * y = p -> (unit x \/ unit y).

Definition prime (p:r) := ~ (unit p) /\ irreducible p.

Definition rel_prime x y := forall d:r, gcd x y d -> unit d.

(* x^n, with x^0 = 1. *)
Fixpoint pow (x:r) (n:nat) {struct n} : r :=
  if n is S n' then x * pow x n' else 1.

(* n-fold sum of a.
   NOTE(review): the base case gives cmul 0 a = 1 (the ring one), not 0 —
   verify this is intended before relying on it. *)
Fixpoint cmul (n:nat) (a:r) {struct n} : r :=
  if n is S n' then a + cmul n' a else 1.

(* Dot product of two sequences; mismatched lengths yield 0.
   NOTE(review): the empty-empty case yields 1, not 0 — confirm. *)
Fixpoint dot (s1 s2:seq r) {struct s1} : r :=
  match s1,s2 with
  | seq0, seq0 => 1
  | Adds h1 t1, Adds h2 t2 => h1 * h2 + dot t1 t2
  | _, _ => 0
  end.

End Ring.

Notation "x1 + x2" := (addr x1 x2) : ring_scope.
Notation "x1 * x2" := (mulr x1 x2) : ring_scope.
Notation "- x" := (oppr x) : ring_scope.
Notation "0" := (@Zero _) : ring_scope.
Notation "1" := (oner _) : ring_scope.
Notation "x - y" := (x + oppr y) : ring_scope.
Notation addrr := (fun x y => y + x).
Notation mulrr := (fun x y => y * x).

Open Scope ring_scope.

(* -------------------------------------------------------------------------- *)
(*   Domains                                                                  *)
(* -------------------------------------------------------------------------- *)

Section Domain.

(* A domain is a ring with no zero divisors among nonzero elements.
   NOTE(review): the axiom quantifies over x2 but only uses x1
   (mulr' x1 x1 <> 0) — likely intended as mulr' x1 x2 <> 0; confirm. *)
Structure domain : Type := Domain {
  dbase :> ring;
  domainP : forall x1 x2:rbase' dbase, mulr' x1 x1 <> 0
}.

End Domain.

(* -------------------------------------------------------------------------- *)
(*   Fields                                                                   *)
(* -------------------------------------------------------------------------- *)

Section Field.

(* A field is a domain in which every nonzero element has an inverse. *)
Structure field : Type := Field {
  fbase :> domain;
  invr' : rbase' fbase -> rbase' fbase;
  unitPL0 : forall x, mulr' x (invr' x) = 1
}.

(* Inverse on the full carrier; 0^-1 is (arbitrarily) 0. *)
Definition invr (f:field) (x:f) := if x is Nz x' then Nz(invr' x') else 0.

End Field.

Notation "x '^-1'" := (invr x) (at level 9, format "x '^-1'") : ring_scope.

(* -------------------------------------------------------------------------- *)
(*   Subrings                                                                 *)
(* -------------------------------------------------------------------------- *)

Section Subring.

Variable u:ring.

(* A subring is a subset of u closed under 0, 1, +, * and negation. *)
Structure subring : Type := Subring {
  srbase :> set u;
  zeroP : srbase 0;
  oneP : srbase 1;
  addP : forall x y, srbase x -> srbase y -> srbase (x + y);
  mulP : forall x y, srbase x -> srbase y -> srbase (x * y);
  oppP : forall x, srbase x -> srbase (- x)
}.

End Subring.

(* -------------------------------------------------------------------------- *)
(*   Subfields                                                                *)
(* -------------------------------------------------------------------------- *)

Section Subfield.

Variable f:field.

(* A subfield is a subring additionally closed under inversion. *)
Structure subfield : Type := Subfield {
  sfbase :> subring f;
  invP : forall x, sfbase x -> sfbase (invr x)
}.

End Subfield.

(* -------------------------------------------------------------------------- *)
(*   Homomorphisms                                                            *)
(* -------------------------------------------------------------------------- *)

Section Homomorphism.

Variable u v:ring.
Variable r:subring u.
Variable s:subring v.

(* Ring homomorphism between the subrings r and s; outside r the map is
   forced to 0 (homoJunk), making it total on u. *)
Structure homo : Type := Homo {
  hbase :> u->v;
  homoP : forall x, r x -> s (hbase x);
  homoAddP : forall x y, r x -> r y -> hbase (x + y) = hbase x + hbase y;
  homoMulP : forall x y, r x -> r y -> hbase (x * y) = hbase x * hbase y;
  homoJunk : forall x, ~ (r x) -> hbase x = 0
}.

(* Kernel: members of r mapped to 0. *)
Definition kernel (h:homo) := fun x => r x && (h x == 0).

(* Isomorphism: an injective (on r) and surjective homomorphism. *)
Structure iso : Type := Iso {
  isbase :> homo;
  imonoP : forall x y, r x -> r y -> isbase x = isbase y -> x = y;
  iontoP : surj r s isbase
}.

End Homomorphism.

(* -------------------------------------------------------------------------- *)
(*   Ideals                                                                   *)
(* -------------------------------------------------------------------------- *)

Section Ideal.

Variable u:ring.
Variable r:subring u.

(* Two-sided ideal of the subring r. *)
Structure ideal : Type := Ideal {
  idbase :> set u;
  id_ss : sub_set idbase r;
  id0 : idbase 0;
  id_add : forall x y, idbase x -> idbase y -> idbase (x + y);
  idPL : forall x y, idbase x -> r y -> idbase (x * y);
  idPR : forall x y, r x -> idbase y -> idbase (x * y)
}.

(* The whole subring viewed as an ideal (axiomatized). *)
Parameter ring_to_ideal : forall r:subring u, ideal.

Variable i:ideal.

(* Maximal ideal: proper, and not contained in any strictly larger proper
   ideal. *)
Definition maximal_ideal :=
  i <> ring_to_ideal r /\
  forall j : ideal, sub_set i j -> j = i \/ j = ring_to_ideal r.

(* The principal ideal generated by a (axiomatized). *)
Parameter principle_ideal : forall a:u, ideal.

(* Principal ideal domain property. *)
Definition pid := forall i:ideal, exists a, i = principle_ideal a.

End Ideal.

(* -------------------------------------------------------------------------- *)
(*   Quotients                                                                *)
(* -------------------------------------------------------------------------- *)

Section Quotient.

Variable U:ring.
Variable R:subring U.
Variable I:ideal R.

(* s is a coset of I: a translate a + I for some representative a. *)
Definition coset_pred (s:set U) :=
  exists a, s a /\ forall x, s x <-> exists i, I i /\ x = a + i.

Structure coset : Type := Coset {
  cosetS :> set U;
  coset_mem : coset_pred cosetS
}.

(* Extensional equality of cosets, reflected to bool via Pb. *)
Definition eqcoset (c1 c2:coset) := Pb (forall x, cosetS c1 x == cosetS c2 x).

Axiom eqcosetPx : reflect_eq eqcoset.
Canonical Structure cosetData := EqType eqcosetPx.

(* Choice of representatives (axiomatized). *)
Parameter elem_of_coset : coset -> U.
Parameter coset_of_elem : U -> coset.

(* Quotient operations defined via representatives. *)
Definition addq c1 c2 := coset_of_elem ((elem_of_coset c1) + (elem_of_coset c2)).
Definition mulq c1 c2 := coset_of_elem ((elem_of_coset c1) * (elem_of_coset c2)).
Definition oppq c := coset_of_elem (- (elem_of_coset c)).
Definition zeroq := coset_of_elem 0.
Definition oneq := coset_of_elem 1.

Notation "x1 +` x2" := (addq x1 x2) (at level 50).
Notation "x1 *` x2" := (mulq x1 x2) (at level 40).
Notation "-` x" := (oppq x) (at level 35).

(* Ring laws for the quotient, assumed rather than proved. *)
Axiom addqC : forall c1 c2:cosetData, c1 +` c2 = c2 +` c1.
Axiom addqA : forall c1 c2 c3, c1 +` (c2 +` c3) = c1 +` c2 +` c3.
Axiom addq0 : forall c, c +` zeroq = c.
Axiom oppqL : forall c, -` c +` c = zeroq.
Axiom mulqC : forall x y, x *` y = y *` x.
Axiom mulqA : forall c1 c2 c3, c1 *` (c2 *` c3) = c1 *` c2 *` c3.
Axiom mul1q : forall x, oneq *` x = x.
Axiom distqPM : forall x1 x2 x3, (x1 +` x2) *` x3 = x1 *` x3 +` x2 *` x3.
Axiom distqMP : forall x1 x2 x3, x1 *` (x2 +` x3) = x1 *` x2 +` x1 *` x3.

Canonical Structure quotient :=
  Ring (Ring_axioms addqC addqA oppqL mulqC mulqA mul1q distqPM distqMP).

End Quotient.

(* -------------------------------------------------------------------------- *)
(*   Polynomials                                                              *)
(* -------------------------------------------------------------------------- *)

Section Poly.

Variable r:domain_z.

(* Nonzero polynomials: Lc c is the (nonzero) leading constant, Pcons h t
   prepends a coefficient (low degree first). *)
Inductive polyz : Type := Lc (c:rbase_z r) | Pcons (h:r) (t:polyz).

Notation "h :: t" := (Pcons h t) (at level 70).

(* Structural equality of nonzero polynomials. *)
Fixpoint eqpolyz (p1 p2:polyz) {struct p1} : bool :=
  match p1, p2 with
  | Lc c1, Lc c2 => c1 == c2
  | c1::t1, c2::t2 => (c1 == c2) && eqpolyz t1 t2
  | _, _ => false
  end.

Axiom eqpolyzPx : reflect_eq eqpolyz.
Canonical Structure polyzData := EqType eqpolyzPx.

(* Polynomials: nonzero polynomials plus the zero polynomial. *)
Definition poly := (withzeroData polyzData).

Definition onep := Lc (oner_z _).
Notation "1" := onep.

(* Embed a ring constant as a polynomial (Zero maps to the zero poly). *)
Definition const c := if c is Nz c' then Nz (Lc c') else Zero.

(* The indeterminate: X = 0 + 1*X. *)
Definition X := Nz(0::onep).

(* horner c p = c + X * p, normalizing when p is zero. *)
Definition horner c p := if p is Nz p' then Nz (c::p') else const c.

(* Polynomial addition, coefficient-wise with zero normalization. *)
Fixpoint addpz (p1 p2:polyz) {struct p2} : poly :=
  match p1, p2 with
  | h::t, Lc c => Nz (addrz h (Nz c) :: t)
  | Lc c, h::t => Nz (addrz (Nz c) h :: t)
  | Lc c1, Lc c2 => const (addr_z c1 c2)
  | h1 :: t1, h2 :: t2 => horner (h1 + h2) (addpz t1 t2)
  end.

Definition addp (p1 p2:poly) : poly := lift_add addpz p1 p2.

(* Scalar multiplication by a nonzero constant c.
   NOTE(review): in the Lc case, when mulr_z c c' is zero the result falls
   back to c' itself — in a domain the product of nonzero elements cannot be
   zero, so this branch should be unreachable; confirm. *)
Fixpoint cmulpz (c:rbase_z r) (p:polyz) {struct p} : polyz :=
  match p with
  | Lc c' => Lc (if mulr_z c c' is Nz c'' then c'' else c')
  | h :: t => (mulrz (Nz c) h)::(cmulpz c t)
  end.
Definition cmulp (c:r) (p:poly) : poly := match c, p with | Zero, _ => Zero | _, Zero => Zero | Nz c', Nz p' => Nz (cmulpz c' p') end. Definition mulpz_aux (c:r) p : poly := if c is Nz c' then Nz (cmulpz c' p) else Zero. Fixpoint mulpz (p1 p2 : polyz) {struct p1} : poly := match p1 with | Lc c => Nz (cmulpz c p2) | h :: t => addp (mulpz_aux h p2) (horner Zero (mulpz t p2)) end. Definition mulp (p1 p2:poly) : poly := lift_mul mulpz p1 p2. Fixpoint opppz (p:polyz) {struct p} : polyz := match p with | h::t => - h::opppz t | Lc c => Lc (oppr_z c) end. Definition oppp (p:poly) : poly := if p is Nz p' then Nz(opppz p') else Zero. Fixpoint coefz (p:polyz) (i:nat) {struct i} : r := match p, i with | h::t, S n => coefz t n | h::t, O => h | Lc c, O => Nz c | Lc c, S n => 0 end. Definition coef (p:poly) i : r := if p is Nz p' then coefz p' i else 0. Notation "x1 + x2" := (addp x1 x2). Notation "x1 * x2" := (mulp x1 x2). Notation "- x" := (oppp x). Notation "x - y" := (x + (- y)). Notation "0" := (Zero). Notation "1" := (Nz onep). Notation "x <= y" := (nati.leq x y). Notation "x < y" := (nati.lt x y). Axiom poly_indh : forall (P:poly->Prop), P Zero -> (forall c p, P p -> P (horner c p)) -> (forall p, P p). Axiom opppL : forall p, - p + p = Zero. Axiom addpA : forall p1 p2 p3, p1 + (p2 + p3) = p1 + p2 + p3. Axiom addpC : forall p1 p2, p1 + p2 = p2 + p1. Axiom mul1p : forall p, 1 * p = p. Axiom distpMP : forall p1 p2 p3, p1 * (p2 + p3) = p1 * p2 + p1 * p3. Axiom distpPM : forall p1 p2 p3, (p1 + p2) * p3 = p1 * p3 + p2 * p3. Axiom mulp1 : forall p, p * 1 = p. Axiom mulpA : forall p1 p2 p3, p1 * (p2 * p3) = p1 * p2 * p3. Axiom mulpC : forall p1 p2, p1 * p2 = p2 * p1. Canonical Structure poly_ring := Ring_z (Ringz_axioms opppL addpA addpC mul1p mulp1 mulpA distpPM distpMP mulpC). Fixpoint degpz (p:polyz) {struct p} : nat := if p is h::t then S (degpz t) else O. Definition degp p := if p is Nz p' then Nat (degpz p') else -oo. Definition constant p := degp p = Nat O \/ degp p = -oo. 
Definition linear p := degp p = Nat 1. Definition quadratic p := degp p = Nat 2. Definition cubic p := degp p = Nat 3. Definition quartic p := degp p = Nat 4. Definition quintic p := degp p = Nat 5. Axiom degp_const : forall c, degp (const c) = if c is Nz _ then Nat O else -oo. Axiom degp_add_unevenL : forall p1 p2, degp p2 < degp p1 -> degp (p1 + p2) = degp p1. Axiom degp_add_unevenR : forall p1 p2, degp p1 < degp p2 -> degp (p1 + p2) = degp p2. Axiom degp_inf : forall p, degp p = -oo -> p = 0. Axiom degp_add : forall p q:poly, nati.leq (degp (p + q)) (maxi (degp p) (degp q)). Axiom degp_opp : forall p, degp (- p) = degp p. Fixpoint lcz (p:polyz) {struct p} : rbase_z r := match p with | Lc c => c | h::t => lcz t end. Definition lc (p:poly) : r := if p is Nz p' then Nz (lcz p') else 0. Definition monic p := lc p = (@onerz _). Definition irreduciblep p := forall p1 p2, p = p1 * p2 -> degp p1 = Nat O \/ degp p2 = Nat O. End Poly. Notation "h :: t" := (Pcons h t) (at level 70) : ring_scope. (* -------------------------------------------------------------------------- *) (* Field Extensions *) (* -------------------------------------------------------------------------- *) Section Fields. Variable U:field. Variable K F : subfield U. Definition extension (K F : subfield U) := sub_set F K. Structure lcomb (vs' : seq U) (x : U) : Prop := Lcomb { fs' : seq U; fsP' : all F fs'; leP : dot fs' vs' = x }. Structure lcomb_ext (vs' : seq U) (x : U) : Prop := Lcomb_ext { fs0 : seq U; fsP0 : all F fs0; leP0 : dot fs0 vs' = x; leP1 : size fs0 = size vs' }. Axiom lcomb_extend : forall vs x, lcomb vs x <-> lcomb_ext vs x. Definition span vs := fun x => Pb (lcomb vs x). Fixpoint linind (vs : seq U) : bool := if vs is Adds v vs' then ~~ (span vs' v) && linind vs' else true. Structure lindep (vs : seq U) : Prop := Linind_spec { fs : seq U; nz : U; nzP : nz != 0; nzM : fs nz; fsP : all F fs; fvP : (size fs) <= (size vs); depP : dot fs vs = 0 }. 
Definition basis bs := linind bs && Pb (span bs = srbase K). Definition finD := exists b, basis b. Axiom inhabit : inhabited (seq U). Definition index := if Pb finD then Nat(size(epsilon inhabit basis)) else -oo. Definition finite_ext := extension K F /\ finD. Structure splits_def (K F : subfield U) (p : poly U) (sseq : seq (polyData U)) : Prop := Splits_def { sseqP : all F p; sseq_lin : all (@linear U) sseq; sseq_k : all (all K \o (@coefs _)) sseq; sseq_mul : foldr (@mul (poly_idom U)) 1 sseq = p }. Definition splits K F p := exists s, splits_def K F p s. Structure splitting_field (K F : subfield U) (p : poly U) : Prop := Splitting_field { sfQ : all F p; sf_spl : splits K F p; sfP : forall K' : subfield U, extension K K' -> splits K' F p -> K = K' }. Structure min_poly (F : subfield U) (p : poly U) (a : U) : Prop := Minp { minpQ : all F p; minp_monic : monic p; minpP : root a p; minpH : forall (p' : poly U), all F p' -> root a p' -> degp p <= degp p' }. Structure algebraic (F : subfield U) (a : U) : Prop := Algebraic_spec { algp : poly U; algP : all F algp; anz : algp <> 0; art : root a algp }. (* Note! one is not a galois automorphism if K is not an extension of F, so take the fixed field to be the intersection of F,K *) Definition galois_auto (a : auto_ty K) := Pb (forall x, F x -> K x -> auto a x = x). Definition galois_fauto := fun (K F : subfield U) (Ekf : finite_ext K F) => let (_, T) := finite_finite Ekf in FinType (proj2 T). Definition galois_group (K F : subfield U) : finGroupType. Canonical Structure galois_group := Subgroup galois_mul galois_inv galois1. Pb(forall a, H a -> (auto a) x = x). Definition fixed_ring (H : subgroup(auto_group U)) : subring U. (* {{{ *) move=> H. exists (fixed H). - abstract( apply/PbP; move=> a Ha /=; rewrite /auto; exact: homo0 (autoP a)). - abstract( apply/PbP; move=> a Ha /=; exact: iso1 (autoP a)). 
- abstract( move=> x y Hx Hy; move/PbP: (Hx) => Hx'; move/PbP: (Hy) => Hy'; apply/PbP; move=> a Ha; rewrite (homoAddP (autoP a)); eauto; rewrite Hx'; eauto; by rewrite Hy'; eauto). - abstract( move=> x y Hx Hy; move/PbP: (Hx) => Hx'; move/PbP: (Hy) => Hy'; apply/PbP; move=> a Ha; rewrite (homoMulP (autoP a)); eauto; rewrite Hx'; eauto; by rewrite Hy'; eauto). abstract( move=> x Hx; move/PbP: (Hx) => Hx'; apply/PbP; move=> a Ha; rewrite (homoOpp (autoP a)); eauto; by rewrite Hx';eauto). (* }}} *) Defined. Definition fixed_field (H : subgroup(auto_group U)) : subfield U. (* {{{ *) move=> H;exists (fixed_ring H). abstract( move=> x Hx; move/PbP: (Hx) => Hx'; apply/PbP; move=> a Ha; move: Hx; rewrite /= => Hx; (case H0 : (x == 0); first by move/eqP: H0 => ->;rewrite inv0 (homo0 (autoP a))); move/eqP: H0 => H0; rewrite (inv_iso (autoP a)) => //; by rewrite (Hx' a Ha)). (* }}} *) Defined. Definition normal_ext := fixed_field galois_group = F. End Fields.
{ "alphanum_fraction": null, "author": "kallol26", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/coq/kallol26-coq-galois-theory/coq-galois-theory-4fff4d1b919d79f4dc4ba5afa126aa995e577489/src/modules/galois_sig.old.v", "reason": null, "repo": "coq-galois-theory", "save_path": "github-repos/coq/kallol26-coq-galois-theory", "sha": "4fff4d1b919d79f4dc4ba5afa126aa995e577489", "size": null }
[STATEMENT] lemma read_point: assumes "point p" and "mapping x" shows "point (x[[p]])" [PROOF STATE] proof (prove) goal (1 subgoal): 1. point (x[[p]]) [PROOF STEP] using assms comp_associative read_injective read_surjective [PROOF STATE] proof (prove) using this: point p coreflexive (x[[x]]) \<and> times_top_class.total x ?x * ?y * ?z = ?x * (?y * ?z) \<lbrakk>injective ?y; coreflexive (?x[[?x]])\<rbrakk> \<Longrightarrow> injective (?x[[?y]]) \<lbrakk>surjective ?y; times_top_class.total ?x\<rbrakk> \<Longrightarrow> surjective (?x[[?y]]) goal (1 subgoal): 1. point (x[[p]]) [PROOF STEP] by auto
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Relational_Disjoint_Set_Forests_Disjoint_Set_Forests", "hexsha": null, "include": null, "lang": null, "length": 2, "llama_tokens": 250, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
#include <Eigen/Dense> #include <iostream> #include <fstream> #include <boost/dynamic_bitset.hpp> #include <boost/container/vector.hpp> #include <boost/unordered_map.hpp> #include <boost/random/uniform_01.hpp> #include <boost/random/niederreiter_base2.hpp> #include <./Timer.cpp> typedef boost::dynamic_bitset<> Vetor; typedef boost::container::vector<Vetor> Matriz; /* * * Ht: Matriz de verificação de paridade original * * Ht_long: Referência de onde guardar o array em que cada elemento é uma linha de Ht * * Erros_1: Referência de onde guardar o array em que cada elemento é um erro de peso=1 (mas só a porção de informação desse erro) * exemplo: se há 5 bits na palavra-código e os 2 primeiros são de informação, então um elemento * de Erros_1 pode ser 3=0b11, significando erro em cada um dos 2 bits de informação * * n_linhas, n_colunas, n_informacao: Quantas linhas e colunas tem Ht, e quantos bits são de informação * na palavra código (assume-se que são correspondem às primeiras linhas de Ht) */ void __gera_auxiliares( const Matriz& Ht, unsigned long* Ht_long, unsigned long* Erros_1, int n_linhas, int n_colunas, int n_informacao ) { /** * Só necessários os ulongs correspondentes a cada síndrome */ for (int i = 0; i < n_linhas; ++i) { Ht_long[i] = Ht[i].to_ulong(); } /** * Porção de informação dos erros associados a cada síndrome * (por isso só importam os erros em bits de informação) */ for (int i = 0; i < n_linhas; ++i) { // Erros depois de n_informacao linhas correspondem somente a erros de bit // de paridade, portanto aparecem como 0 já que só se coletam os erros de informação Erros_1[i] = i < n_informacao ? ((unsigned long)1) << (n_informacao - i - 1) : 0; } } /** * input_csv: Espera-se formato de valores 0 ou 1 separados por vírgula e \n, sem espaços * * M: Matriz onde armazenar a leitura. 
* Restrição: linha/coluna pode ter, no máximo (inclusive), 64 bits (cabe num unsigned long) */ void carrega_matriz( std::ifstream& input_csv, Matriz& M ) { std::string numero_string; while (getline(input_csv, numero_string)) { int commaPos; while ((commaPos = numero_string.find(",")) != std::string::npos) numero_string.erase(commaPos, 1); Vetor numero (numero_string.length(), stoul(numero_string, 0, 2)); // std::cout << numero << "\n"; M.push_back(numero); } } int testa_carrega_matriz() { std::string Htcsv = "./dados/Ht.csv"; std::ifstream input {Htcsv}; // if(!input) // std::error("could not open " + Htcsv); std::string numero_string; Matriz Ht; while (getline(input, numero_string)) { int commaPos; while ((commaPos = numero_string.find(",")) != std::string::npos) numero_string.erase(commaPos, 1); Vetor numero (numero_string.length(), stoul(numero_string, 0, 2)); std::cout << numero << "\n"; Ht.push_back(numero); } std::cout << "Finished loading. Now replicating:" << std::endl; std::for_each(Ht.begin(), Ht.end(), [](Vetor line) { std::cout << line << std::endl; }); } /** * Auxiliar para `popular_dict` * */ void __popular_dict( boost::unordered_map<unsigned long, unsigned long>& dict, unsigned long* Ht_long, unsigned long* Erros_1, int n_linhas, int n_colunas, int n_informacao, unsigned long sindrome_parcial, unsigned long erro_parcial, int linha_inicio, int niveis_restantes ) { if (niveis_restantes == 1) { unsigned long sindrome, erro; for (int i = linha_inicio; i < n_linhas; ++i) { sindrome = sindrome_parcial ^ Ht_long[i]; erro = erro_parcial ^ Erros_1[i]; if (dict.find(sindrome) == dict.end()) dict[sindrome] = erro; } return; } for (int i = linha_inicio; i < n_linhas - (niveis_restantes - 1) /*iterar todas as síndromes*/; ++i) { __popular_dict( dict, Ht_long, Erros_1, n_linhas, n_colunas, n_informacao, sindrome_parcial ^ Ht_long[i], erro_parcial ^ Erros_1[i], i+1, niveis_restantes-1 ); } } /** * * dict: Onde guardar o mapa "síndrome->erro" * * Ht: Matriz de 
verificação de paridade (transposta). Assume-se que suas últimas linhas * correspondem aos bits de informação (identidade em cima, outras linhas embaixo) * * n_linhas, n_colunas, n_informacao: Quantas linhas e colunas tem Ht, e quantos bits são de informação * na palavra código (assume-se que são correspondem às primeiras linhas de Ht) * * peso_maximo: O peso dos maiores erros de informação a serem catalogados em `dict` * */ void popular_dict( boost::unordered_map<unsigned long, unsigned long>& dict, const Matriz& Ht, int n_linhas, int n_colunas, int n_informacao, int peso_maximo ) { unsigned long *Ht_long = new unsigned long[n_linhas]; unsigned long *Erros_1 = new unsigned long[n_linhas]; __gera_auxiliares( Ht, Ht_long, Erros_1, n_linhas, n_colunas, n_informacao ); for (int i = 1; i <= peso_maximo; ++i) { __popular_dict( dict, Ht_long, Erros_1, n_linhas, n_colunas, n_informacao, 0, 0, 0, i ); } } void testa_popular_dict() { std::ifstream Htcsv("dados/Ht.csv"); Matriz Ht; boost::unordered_map<unsigned long, unsigned long> dict; carrega_matriz(Htcsv, Ht); int n_linhas = Ht.size(); int n_colunas = Ht[0].size(); int n_informacao = n_linhas - n_colunas; popular_dict( dict, Ht, n_linhas, n_colunas, n_informacao, 3 ); assert(dict.find(0) == dict.end()); assert(dict.at(1099511595008) == 34359738368); // 1ª linha de Ht -> erro só no 1º bit de info assert(dict.at(1064615018496) == 17179869184); // 2ª linha de Ht -> erro só no 2º bit de info assert(dict.at(34896576512) == 51539607552); // 1ª+2ª linhas de Ht -> erros nos 2 MSB de info assert(dict.at(1052568944640) == 55834574848); // 1ª+2ª+4ª linhas de Ht -> erros nos 1º,2º,4º bits de info assert(dict.at(549755813888) == 0); // erro composto só pela 37ª linha de Ht não aparece nos bits de info assert(dict.at(481038172167) == 1); // mas se erro for de 36ª+37ª linhas de Ht -> erro no último bit de info // (e ignora o bit de paridade) } /** * Retorna nova matriz em que cada linha é uma coluna da matriz original. 
*/ Matriz& transposta( const Matriz& M ) { int antes_colunas = M[0].size(); int antes_linhas = M.size(); Matriz* resultado = new Matriz(antes_colunas); for (int i = 0; i < antes_colunas; ++i) { (*resultado)[i] = Vetor (antes_linhas); // Se não for Vetor&, boost copia implicitamente Vetor& linha = (*resultado)[i]; for (int j = 0; j < antes_linhas; ++j) { linha[antes_linhas - j - 1] = M[j][antes_colunas - i - 1]; }; } return *resultado; } void testa_transposta() { Matriz m = Matriz(3); /* * m == [1 1; 0 1; 0 0] */ m[0] = Vetor(2, 3); m[1] = Vetor(2, 1); m[2] = Vetor(2, 0); Matriz mt = transposta(m); assert(mt[0] == Vetor(3, 4)); assert(mt[1] == Vetor(3, 6)); } /** * Calcula M.v */ Vetor& mult(const Matriz& M, const Vetor& v) { int n_linhas = M.size(), n_colunas = v.size(); Vetor* resultado = new Vetor(n_linhas); for (int i = 0; i < n_linhas; ++i) { (*resultado)[n_linhas - i - 1] = (M[i] & v).count() & 1; } return *resultado; } /** * Calcula M.N */ Matriz& mult(const Matriz& M, const Matriz& N) { int n_linhas = M.size(); int n_colunas = N[0].size(); Matriz pre_resultado = Matriz(n_colunas); Matriz Nt = transposta(N); for (int i = 0; i < n_colunas; ++i) { pre_resultado[i] = mult(M, Nt[i]); } Matriz& resultado = transposta(pre_resultado); return resultado; } void testa_mult() { // M == [1 0 1; 0 0 1] Matriz M = Matriz(2); M[0] = Vetor(3, 5); M[1] = Vetor(3, 1); // v == [0 1 1] Vetor v = Vetor(3, 3); // resultado deve ser [1 1] assert(mult(M, v) == Vetor(2, 3)); // [1 1; 0 0; 1 1] Matriz N = Matriz(3); N[0] = Vetor(2, 3); N[1] = Vetor(2, 0); N[2] = Vetor(2, 3); assert(mult(M, N).size() == 2); assert(mult(M, N)[0] == Vetor(2, 0)); assert(mult(M, N)[1] == Vetor(2, 7)); } /** * Modifica `Transmitido`, com chance `p` de inverter cada bit * de cada palavra código. 
* * Transmitido: Assume-se que cada linha é uma palavra-código */ void canal(Matriz& Transmitido, double p) { int count=0; int n_linhas = Transmitido.size(); int n_colunas = Transmitido[0].size(); Vetor* linha; boost::random::niederreiter_base2 gen(4); boost::random::uniform_01<double> random; for (int i = 0; i < n_linhas; ++i) { linha = &Transmitido[i]; for (int j = 0; j < n_colunas; ++j) { if (random(gen) < p) { (*linha)[j].flip(); count++; } } } // std::cout << "p ≃ " << ((double) count) / (n_linhas*n_colunas) << std::endl; } /** * amostras_informacao: Lista de palavras de informacao a serem enviadas * Espera-se uma palavra por linha, elementos separados por vírgulas (sem espaço) * * Ht_csv: Matriz de verificação de paridade (transposta). Mesmo formato de `amostras_informacao_csv` * * Gt_csv: Matriz de geração do código (transposta). Mesmo formato de `amostras_informacao_csv` * * p: Lista de chances de o canal BSC inverter um bit transmitido * * peso_maximo_memorizado: Caso se encontre síndrome com peso maior que isto, ela não será corrigida * * Retorna lista de chances de erro de bit (uma para cada valor de p) */ boost::container::vector<double> desempenho( std::ifstream& amostras_informacao_csv, std::ifstream& Ht_csv, std::ifstream& Gt_csv, const boost::container::vector<double>& p, int peso_maximo_memorizado ) { boost::container::vector<double>* resultado = new boost::container::vector<double>(p.size()); Matriz Ht; carrega_matriz(Ht_csv, Ht); /** * Exemplo de chave e valor: * - chave: 0b1001 , valor: 0b101 significa uma síndrome [1, 0, 0, 1] com erro associado [1, 0, 1, ...] 
* em que as reticências indicam a parte do erro concernente aos bits de paridade (não importam) */ boost::unordered_map<unsigned long, unsigned long> dict; int n_linhas = Ht.size(); int n_colunas = Ht[0].size(); int n_informacao = n_linhas - n_colunas; popular_dict( dict, Ht, n_linhas, n_colunas, n_informacao, peso_maximo_memorizado ); Matriz Info; carrega_matriz(amostras_informacao_csv, Info); int n_amostras = Info.size(); Matriz Gt; carrega_matriz(Gt_csv, Gt); Matriz G = transposta(Gt); for (int i_p = 0; i_p < p.size(); ++i_p) { Matriz Transmitido = mult(Info, G); canal(Transmitido, p[i_p]); Matriz Sindromes = mult(Transmitido, Ht); Matriz Transmitido_informacao = Matriz(n_amostras); for (int i = 0; i < n_amostras; ++i) { Transmitido_informacao[i] = Vetor(n_informacao); for (int j = 0; j < n_informacao; ++j) { Transmitido_informacao[i][n_informacao - j - 1] = Transmitido[i][n_linhas - j - 1]; } // assert(Transmitido_informacao.size() == Transmitido.size()); // assert(Transmitido_informacao[0].size() == Info[0].size()); // assert(Transmitido_informacao[i] == Info[i]); } int n_erros = 0; int incr=0; unsigned long sindrome; Vetor correcao_nula = Vetor(n_informacao, 0); Vetor correcao; for (int i = 0; i < n_amostras; ++i) { sindrome = Sindromes[i].to_ulong(); if (dict.find(sindrome) == dict.end()) correcao = correcao_nula; else correcao = Vetor(n_informacao, dict.at(sindrome)); incr = (Transmitido_informacao[i] ^ correcao ^ Info[i]).count(); // if (incr != 0) { // std::cout << correcao << ": " << Transmitido_informacao[i] << " versus " << Info[i] << std::endl; // std::cout << "diferença " << (Transmitido_informacao[i] ^ Info[i]) << std::endl; // std::cout << "Originais: " << Transmitido[i] << " versus " << mult(Gt, Info[i]) << std::endl; // std::cout << "diferença: " << (Transmitido[i] ^ mult(Gt, Info[i])) << std::endl; // std::cout << "síndrome: " << Sindromes[i] << std::endl; // std::cout << std::endl; // } n_erros += incr; } (*resultado)[i_p] = n_erros / ((double) 
n_amostras * n_informacao); std::cout << "parcial(" << i_p << "): " << (*resultado)[i_p] << std::endl; } return *resultado; } int main(int argc, char** argv) { int arg_peso_maximo = std::stoi(argv[1]); std::string arg_resultados = argv[2]; std::ifstream Htcsv ("dados/Ht.csv"); std::ifstream amostrasInput("dados/amostra-informacao.csv"); std::ifstream GtInput("dados/Gt.csv"); std::ofstream Resultados(arg_resultados); std::ofstream P("dados/lista-de-p.csv"); boost::container::vector<double> p = boost::container::vector<double>(0); double p0 = 0.5; while(p0 > 1 /((double) 1000000)) { p.push_back(p0); p0 *= 0.5; } boost::container::vector<double> des = desempenho(amostrasInput, Htcsv, GtInput, p, arg_peso_maximo); Resultados << des[0]; P << p[0]; for (int i = 1; i < des.size(); ++i) { Resultados << ","; Resultados << des[i]; P << ","; P << p[i]; } Resultados << std::endl; P << std::endl; return 0; }
{ "alphanum_fraction": 0.5859899329, "author": null, "avg_line_length": 27.9921722114, "converted": null, "ext": "cpp", "file": null, "hexsha": "b70be8a5d9602e682ae5ccd0cc4b8fdead4fd1db", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "70a0818e303f2154862b1772d317fdbcc7dfb5c0", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "megatron0000/ELE-32-codigos-de-bloco", "max_forks_repo_path": "benchmark.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "70a0818e303f2154862b1772d317fdbcc7dfb5c0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "megatron0000/ELE-32-codigos-de-bloco", "max_issues_repo_path": "benchmark.cpp", "max_line_length": 116, "max_stars_count": null, "max_stars_repo_head_hexsha": "70a0818e303f2154862b1772d317fdbcc7dfb5c0", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "megatron0000/ELE-32-codigos-de-bloco", "max_stars_repo_path": "benchmark.cpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4399, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 14304 }
\subsection{Surface integral for vector fields}
{ "alphanum_fraction": 0.7843137255, "author": null, "avg_line_length": 10.2, "converted": null, "ext": "tex", "file": null, "hexsha": "c5162a1d42bd9fbed8682137406040af9b0d5900", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/analysis/multiCalculusIntegration/01-03-surfaceIntegral.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/analysis/multiCalculusIntegration/01-03-surfaceIntegral.tex", "max_line_length": 47, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/analysis/multiCalculusIntegration/01-03-surfaceIntegral.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 10, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 51 }
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2019-12-04 15:13 qiang.zhou <theodoruszq@gmail.com> # # Distributed under terms of the MIT license. """ """ import cv2 from PIL import Image import random import numpy as np import torch import torchvision.transforms.functional as TF def Train_Collatefn(data): all_F, all_L, all_info = [], [], [] for i in range(len(data)): all_F.append(data[i][0]) all_L.append(data[i][1]) all_info.append(data[i][2]) all_F = torch.cat(all_F, dim=0) all_L = torch.cat(all_L, dim=0) return all_F, all_L, all_info # Input is image HxWxC, mask HxW in PIL format """Random affine transformations implemented based on torchvision. The input img&mask can be single PIL.Image instance or [PIL.Image] list. Refer to: https://pytorch.org/docs/stable/torchvision/transforms.html Params: [img] -- [PIL.Image instance or a list of PIL.Image instances] [ANGLE_R] -- [Rotation range based on the center point, from -ANGLE_R~ANGLE_R] [TRANS_R] -- [Spatial translation, -TRANS_R~TRANS_R, e.g. 0.1 -> 0.1*img_height] [SCALE_R] -- [Zoom scale factor, -SCALR_R~SCALE_R, e.g. 
0.1 -> 0.1*img_height] [SHEAR_R] -- [Shear degree, -SHEAR_R~SHEAR_R] [FLIP_B] -- [Flip uniformly for all PIL.Image instances, default is False] """ def Rand_Affine(img, ANGLE_R=10, TRANS_R=0.2, SCALE_R=0.3, SHEAR_R=15, FLIP_B=False): assert isinstance(img, Image.Image) or isinstance(img[0], Image.Image) def affop(img, angle, translate, scale, shear, flip): if flip: img = img.transpose(Image.FLIP_LEFT_RIGHT) _img = TF.affine(img, angle, translate, scale, shear, resample=Image.BILINEAR) return _img if isinstance(img, list): w, h = img[0].size else: w, h = img.size angle = random.randint(-ANGLE_R, ANGLE_R) translate = (random.randint(int(-w*TRANS_R), int(w*TRANS_R)), random.randint(int(-h*TRANS_R), int(h*TRANS_R))) # x, y axis scale = 1 + round(random.uniform(-SCALE_R, SCALE_R), 1) shear = random.randint(-SHEAR_R, SHEAR_R) flip = FLIP_B and random.random() >= 0.5 #print (angle, translate, scale, shear) if isinstance(img, list): img_L = [] for i_img in img: i_img = affop(i_img, angle, translate, scale, shear, flip) img_L.append(i_img) return img_L else: _img = affop(img, angle, translate, scale, shear, flip) return _img # img must be a np.uint8 TxHxW datatype numpy def Rand_Crop(img, crop_size): shape = img.shape[1:] # h, w crop_y = random.randint(0, shape[0] - crop_size[0]) crop_x = random.randint(0, shape[1] - crop_size[1]) crop_img = img[:, crop_y:crop_y+crop_size[0], crop_x:crop_x+crop_size[1]] return crop_img """ imgs must be a TxHxW tensor""" # Affine Transforms # Color Transforms def Rand_Transforms(imgs, ANGLE_R=10, TRANS_R=0.1, SCALE_R=0.2, SHEAR_R=10, BRIGHT_R=0.5, CONTRAST_R=0.3): # To Image.Image instances #imgs = np.asarray(imgs, dtype=np.uint8) #print(imgs.max()) pil_imgs = [Image.fromarray(x*255) for x in imgs] w, h = pil_imgs[0].size #print(w, h) #print(pil_imgs[100]) #pil_imgs[100].convert("L").save('test.jpg') #cv2.imwrite('test.jpg', imgs[100]*255) # Affine Transforms def affop(img, angle, translate, scale, shear): _img = TF.affine(img, angle, 
translate, scale, shear, resample=Image.BILINEAR) return _img angle = random.randint(-ANGLE_R, ANGLE_R) translate = (random.randint(int(-w*TRANS_R), int(w*TRANS_R)), random.randint(int(-h*TRANS_R), int(h*TRANS_R))) # x, y axis scale = 1 + round(random.uniform(-SCALE_R, SCALE_R), 1) shear = random.randint(-SHEAR_R, SHEAR_R) pil_imgs = [affop(x, angle, translate, scale, shear) for x in pil_imgs] # Color Transforms def colorop(img, bright, contrast): _img = TF.adjust_brightness(img, bright) _img = TF.adjust_contrast(_img, contrast) return _img bright = 1 + round(random.uniform(-BRIGHT_R, BRIGHT_R), 1) contrast = 1 + round(random.uniform(-CONTRAST_R, CONTRAST_R), 1) pil_imgs = [colorop(x.convert("L"), bright, contrast) for x in pil_imgs] imgs = np.asarray([np.asarray(x,dtype=np.float32) for x in pil_imgs], dtype=np.float32) #imgs = np.asarray([np.asarray(x, dtype=np.uint8) for x in pil_imgs], dtype=np.uint8) return imgs if __name__ == "__main__": imgs = np.load("../PE-CTA/PE/p0541758.npy") Rand_Transforms(imgs)
{ "alphanum_fraction": 0.6429798197, "author": null, "avg_line_length": 36.9682539683, "converted": null, "ext": "py", "file": null, "hexsha": "dfd4614d0cc9d43846559d9d421f60c0c12c3a2e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-08-17T09:03:49.000Z", "max_forks_repo_forks_event_min_datetime": "2020-08-15T03:54:55.000Z", "max_forks_repo_head_hexsha": "33abbf6810dce79cfedd18650e0e6e7b6d2e7122", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "zhyhan/AD3DMIL", "max_forks_repo_path": "ops/dataset_ops.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "33abbf6810dce79cfedd18650e0e6e7b6d2e7122", "max_issues_repo_issues_event_max_datetime": "2021-01-10T02:16:13.000Z", "max_issues_repo_issues_event_min_datetime": "2021-01-10T02:16:13.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "zhyhan/AD3DMIL", "max_issues_repo_path": "ops/dataset_ops.py", "max_line_length": 139, "max_stars_count": 5, "max_stars_repo_head_hexsha": "33abbf6810dce79cfedd18650e0e6e7b6d2e7122", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "zhyhan/AD3DMIL", "max_stars_repo_path": "ops/dataset_ops.py", "max_stars_repo_stars_event_max_datetime": "2022-02-09T03:28:47.000Z", "max_stars_repo_stars_event_min_datetime": "2020-08-15T06:48:53.000Z", "num_tokens": 1403, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4658 }
using Polyhedra include("simplex.jl") include("permutahedron.jl") include("board.jl") myeq(x::Real, y::Real) = myeq(promote(x, y)...) myeq{T<:Real}(x::T, y::T) = x == y myeq{T<:AbstractFloat}(x::T, y::T) = y < x+1024*eps(T) && x < y+1024*eps(T) myeq{S<:Real,T<:Real}(x::Vector{S}, y::Vector{T}) = myeq(promote(x, y)...) myeq{T<:Real}(x::Vector{T}, y::Vector{T}) = x == y myeq{T<:AbstractFloat}(x::Vector{T}, y::Vector{T}) = myeq(norm(x - y), zero(T)) myeqzero{T<:Real}(x::T) = myeq(x, zero(T)) tomatrix(M::Matrix) = M function tomatrix(v::Vector) M = Matrix{eltype(v)}(length(v), 1) M[:,1] = v M end function inlinspace(x, L) for i in 1:size(L, 1) y = vec(L[i,:]) # remove component x = x * dot(y, y) - y * dot(y, x) end myeqzero(norm(x)) end function inequality_fulltest(p::Polyhedron, A, b, linset) A = tomatrix(A) detecthlinearities!(p) removeredundantinequalities!(p) ine = SimpleHRepresentation(getinequalities(p)) @test size(ine.A) == size(A) @test length(ine.linset) == length(linset) aff = SimpleHRepresentation(getinequalities(affinehull(p))) affAb = [aff.b aff.A] inaff(x) = inlinspace(x, affAb) for i in 1:size(A, 1) found = false for j in 1:size(ine.A, 1) # vec for julia 0.4 if !((i in linset) $ (j in ine.linset)) && inaff([b[i]-ine.b[j];vec(A[i,:]-ine.A[j,:])]) found = true break end end @test found end end function generator_fulltest(p::Polyhedron, V, R=Matrix{eltype(V)}(0, size(V, 2)), Vlinset = IntSet(), Rlinset = IntSet()) V = tomatrix(V) R = tomatrix(R) detectvlinearities!(p) removeredundantgenerators!(p) ext = SimpleVRepresentation(getgenerators(p)) @test size(ext.V) == size(V) @test size(ext.R) == size(R) @test length(ext.Vlinset) == length(Vlinset) @test length(ext.Rlinset) == length(Rlinset) for i in 1:size(V, 1) found = false for j in 1:size(ext.V, 1) if myeq(vec(V[i, :]), vec(ext.V[j, :])) found = true break end end @test found end linspace = ext.R[collect(ext.Rlinset),:] inlin(x) = inlinspace(vec(x), linspace) for i in 1:size(R, 1) found = false for j in 
1:size(ext.R, 1) if !((i in Rlinset) $ (j in ext.Rlinset)) && inlin(R[i,:]-ext.R[j,:]) #if parallel(vec(R[i, :]), vec(ext.R[j, :]), (i in Rlinset) || (j in ext.Rlinset)) found = true break end end @test found end end #generator_fulltest(p::Polyhedron, V) = generator_fulltest(p, V, Matrix{eltype(V)}(0, size(V, 2))) function alltests{Lib<:PolyhedraLibrary}(lib::Lib) simplextest(lib) permutahedrontest(lib) boardtest(lib) end
{ "alphanum_fraction": 0.6054216867, "author": null, "avg_line_length": 27.6666666667, "converted": null, "ext": "jl", "file": null, "hexsha": "7ae7ba711d2ceb53a085cbfca2efd852ec8a4fff", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_forks_repo_path": "test/alltests.jl", "max_issues_count": null, "max_issues_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_issues_repo_path": "test/alltests.jl", "max_line_length": 121, "max_stars_count": null, "max_stars_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_stars_repo_path": "test/alltests.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 966, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 2656 }
import numpy as np import matplotlib.pyplot as plt import matplotlib from matplotlib import rc # TO MANAGE MATPLOTLIB PARAMETERS" rc('font',family='serif') rc('text',usetex = True) import scipy.optimize as optimization logeVe,logr,logAcce,logtcelle,logAdve,logDiffe,logEmaxHe,logSye,logIC,logBr = np.loadtxt('electronLosses.txt',unpack=True,skiprows=1) logeVp,logr,logAccp,logtcellp,logAdvp,logDiffp,logEmaxHp,logSyp,logpp,logpg = np.loadtxt('protonLosses.txt',unpack=True,skiprows=1) x_eVe = [logeVe[0]+.5,12] x_eVp = [logeVp[0]+.5,logeVp[-1]] y_p = [-10,15] y_e = [-10,15] nR = 30 f = 5 nE = 100 colors = np.arange(nR)/nR Cell = np.arange(nR//f) for r1 in np.arange(nR//f): fig, ax1 = plt.subplots() ax1.tick_params(axis='both',labelsize=12) ax1.set_xlim(x_eVe) ax1.set_ylim(y_e) ax1.set_xlabel(r'$\mathrm{Log}(E/\mathrm{eV})$',fontsize=13) ax1.set_ylabel(r'$\mathrm{Log}(t^{-1} ~ [\mathrm{s}^{-1}])$',fontsize=13) ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logSye[f*r1*nE:(f*r1+1)*nE],label='Sy') ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logAdve[f*r1*nE:(f*r1+1)*nE],label='Adv') ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logtcelle[f*r1*nE:(f*r1+1)*nE],label='tCell') ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logDiffe[f*r1*nE:(f*r1+1)*nE],label='Diff') ax1.axvline(logEmaxHe[0],label='Hillas') ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logAcce[f*r1*nE:(f*r1+1)*nE],label='Acc') ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logIC[f*r1*nE:(f*r1+1)*nE],label='IC') ax1.plot(logeVe[f*r1*nE:(f*r1+1)*nE],logBr[f*r1*nE:(f*r1+1)*nE],label='Br') ax1.legend(loc='best',fontsize=8) fig.savefig('electronLosses_'+str(Cell[r1]+1)+'.pdf') for r1 in np.arange(nR//f): fig, ax1 = plt.subplots() ax1.tick_params(axis='both',labelsize=12) ax1.set_xlim(x_eVp) ax1.set_ylim(y_p) ax1.set_xlabel(r'$\mathrm{Log}(E/\mathrm{eV})$',fontsize=13) ax1.set_ylabel(r'$\mathrm{Log}(t^{-1} ~ [\mathrm{s}^{-1}])$',fontsize=13) ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logSyp[f*r1*nE:(f*r1+1)*nE],label='Sy') 
ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logAdvp[f*r1*nE:(f*r1+1)*nE],label='Adv') ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logtcellp[f*r1*nE:(f*r1+1)*nE],label='tCell') ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logDiffp[f*r1*nE:(f*r1+1)*nE],label='Diff') ax1.axvline(logEmaxHp[0],label='Hillas') ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logAccp[f*r1*nE:(f*r1+1)*nE],label='Acc') ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logpp[f*r1*nE:(f*r1+1)*nE],label='pp') ax1.plot(logeVp[f*r1*nE:(f*r1+1)*nE],logpg[f*r1*nE:(f*r1+1)*nE],label=r'p$\gamma$') ax1.legend(loc='best',fontsize=8) fig.savefig('protonLosses_'+str(Cell[r1]+1)+'.pdf')
{ "alphanum_fraction": 0.6415929204, "author": null, "avg_line_length": 41.0909090909, "converted": null, "ext": "py", "file": null, "hexsha": "313c221fc7b1155fbbff10a47e3b459bcb176336", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0e4166f04cce27fed2cbd2c7078023c10e0e8d12", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eduardomgutierrez/RIAF_radproc", "max_forks_repo_path": "src/adaf/python_scripts/plotLossesBXB.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "0e4166f04cce27fed2cbd2c7078023c10e0e8d12", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eduardomgutierrez/RIAF_radproc", "max_issues_repo_path": "src/adaf/python_scripts/plotLossesBXB.py", "max_line_length": 133, "max_stars_count": 1, "max_stars_repo_head_hexsha": "0e4166f04cce27fed2cbd2c7078023c10e0e8d12", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eduardomgutierrez/RIAF_radproc", "max_stars_repo_path": "src/adaf/python_scripts/plotLossesBXB.py", "max_stars_repo_stars_event_max_datetime": "2021-08-30T06:56:03.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-30T06:56:03.000Z", "num_tokens": 1195, "path": null, "reason": "import numpy,import scipy", "repo": null, "save_path": null, "sha": null, "size": 2712 }
""" This module contains the tests for timeconv function """ # Standard library imports # Third party imports from pytest import approx #https://www.scivision.dev/pytest-approx-equal-assert-allclose/ import numpy as np from pathlib import Path import sys from numpy import rad2deg, deg2rad # Local application imports from myorbit.util.timeut import EQX_B1950, EQX_J2000, mjd2str_date import myorbit.data_catalog as dc from myorbit.util.general import my_range, NoConvergenceError from myorbit.kepler.keplerian import KeplerianStateSolver from myorbit.ephemeris_input import EphemrisInput from myorbit.two_body import calc_eph_twobody, calc_eph_minor_body_perturbed, calc_eph_twobody_universal from myorbit.pert_cowels import calc_eph_by_cowells from myorbit.pert_enckes import calc_eph_by_enckes # The configuration file is shared between general config and logging config CONFIG_INI=Path(__file__).resolve().parents[1].joinpath('conf','config.ini') print (CONFIG_INI) # For logging configuration import logging.config logging.config.fileConfig(CONFIG_INI, disable_existing_loggers=False) from common import check_df, TEST_DATA_PATH # The test consist should fail if a NoConvergenceError convergence error is raised def test_almost_parabolical(): delta_days=50 df = dc.DF_COMETS COMETS_NO_CONVERGED = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)'] df = df[df.Name.isin(COMETS_NO_CONVERGED)] for idx, name in enumerate(df['Name']): obj = dc.read_comet_elms_for(name,df) msg = f'Testing Object: {obj.name} with Tp:{mjd2str_date(obj.tp_mjd)}' print (msg) solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, 
tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd, force_orbit='near_parabolical') hs = [] for clock_mjd in my_range(obj.tp_mjd-delta_days, obj.tp_mjd+delta_days, 2): r_xyz, rdot_xyz, r, h_xyz, *others = solver.calc_rv(clock_mjd) hs.append(h_xyz) #print(mjd2str_date(clock_mjd)) if not all(np.allclose(h_xyz, hs[0], atol=1e-12) for h_xyz in hs): msg = f'The angular momentum is NOT constant in the orbit' print (msg) TEST_ENCKES = True from time import process_time def test_speed_C_2011_W3_Lovejoy_for_2011(): fn = TEST_DATA_PATH.joinpath('jpl_C_2011_W3_Lovejoy_2011-Nov-16_2011-Dic-16.csv') exp_df = dc.read_jpl_data(fn) EXP_DIFF = 493.9 EXP_DIFF_NEAR_PARABOLICAL = 493.9 EXP_DIFF_PERT = 279.3 EXP_DIFF_PERT_ENCKES = 290.99 FUNC_NAME=sys._getframe().f_code.co_name obj = dc.C_2011_W3_Lovejoy eph = EphemrisInput(from_date="2011.10.16.0", to_date = "2012.01.16.0", step_dd_hh_hhh = "02 00.0", equinox_name = EQX_J2000) df = calc_eph_twobody(obj,eph) method=FUNC_NAME+":calc_eph_twobody" check_df(df, exp_df, EXP_DIFF, method) df = calc_eph_twobody(obj,eph,force_orbit='near_parabolical') method=FUNC_NAME+":calc_eph_twobody_near_parabolical" check_df(df, exp_df, EXP_DIFF_NEAR_PARABOLICAL,method) df = calc_eph_minor_body_perturbed(obj, eph) method=FUNC_NAME+":calc_eph_minor_body_perturbed" check_df(df, exp_df, EXP_DIFF_PERT,method) df = calc_eph_by_cowells(obj, eph) method=FUNC_NAME+":calc_eph_by_cowells" check_df(df, exp_df, EXP_DIFF_PERT,method) if TEST_ENCKES : t0 = int(round(process_time() * 1000)) df = calc_eph_by_enckes(obj, eph) t1 = int(round(process_time() * 1000)) method=FUNC_NAME+":calc_eph_by_enckes" check_df(df, exp_df, EXP_DIFF_PERT_ENCKES,method) assert t1-t0 < 6200, "Performance problem introduced" def test_speed_C_2007_M5_SOHO_at_perihelion(): fn = TEST_DATA_PATH.joinpath('jpl_C_2007_M5_SOHO_at_perihelion.csv') exp_df = dc.read_jpl_data(fn) EXP_DIFF_NEAR_PARABOLICAL = 1501.1 EXP_DIFF_PARABOLICAL = 1501.1 EXP_DIFF_UNIVERSAL = 1501.1 EXP_DIFF_PERT = 464.3 
EXP_DIFF_PERT_ENCKES = 632.8 FUNC_NAME=sys._getframe().f_code.co_name obj=dc.C_2007_M5_SOHO eph = EphemrisInput(from_date="2007.06.15.0", to_date = "2007.07.15.0", step_dd_hh_hhh = "02 00.0", equinox_name = EQX_J2000) df = calc_eph_twobody(obj, eph) method=FUNC_NAME+":calc_eph_twobody" check_df(df, exp_df, EXP_DIFF_PARABOLICAL,method) df = calc_eph_twobody(obj, eph, force_orbit='near_parabolical') method=FUNC_NAME+":calc_eph_twobody_near_parabolical" check_df(df, exp_df, EXP_DIFF_NEAR_PARABOLICAL,method) df = calc_eph_twobody_universal(obj, eph) method=FUNC_NAME+":calc_eph_twobody_universal" check_df(df, exp_df, EXP_DIFF_UNIVERSAL,method) df = calc_eph_by_cowells(obj, eph) method=FUNC_NAME+":calc_eph_by_cowells" check_df(df, exp_df, EXP_DIFF_PERT,method) if TEST_ENCKES : df = calc_eph_by_enckes(obj, eph) method=FUNC_NAME+":calc_eph_by_enckes" check_df(df, exp_df, EXP_DIFF_PERT_ENCKES,method) t0 = int(round(process_time() * 1000)) df = calc_eph_by_enckes(obj, eph) t1 = int(round(process_time() * 1000)) method=FUNC_NAME+":calc_eph_by_enckes" check_df(df, exp_df, EXP_DIFF_PERT_ENCKES,method) assert t1-t0 < 2500, "Performance problem introduced" def test_speed_C_2007_M5_SOHO_6_months(): fn = TEST_DATA_PATH.joinpath('jpl_C_2007_M5_SOHO_6months.csv') exp_df = dc.read_jpl_data(fn) EXP_DIFF_NEAR_PARABOLICAL = 60849.4 EXP_DIFF_PARABOLICAL = 60849.4 EXP_DIFF_UNIVERSAL = 60849.3 EXP_DIFF_PERT = 18467.9 EXP_DIFF_PERT_ENCKES = 18968.1 FUNC_NAME=sys._getframe().f_code.co_name obj=dc.C_2007_M5_SOHO eph = EphemrisInput(from_date="2007.03.15.0", to_date = "2007.10.15.0", step_dd_hh_hhh = "02 00.0", equinox_name = EQX_J2000) df = calc_eph_twobody(obj, eph) method=FUNC_NAME+":calc_eph_twobody" check_df(df, exp_df, EXP_DIFF_PARABOLICAL,method) df = calc_eph_twobody(obj, eph, force_orbit='near_parabolical') method=FUNC_NAME+":calc_eph_twobody_near_parabolical" check_df(df, exp_df, EXP_DIFF_NEAR_PARABOLICAL,method) df = calc_eph_twobody_universal(obj, eph) 
method=FUNC_NAME+":calc_eph_twobody_universal" check_df(df, exp_df, EXP_DIFF_UNIVERSAL,method) df = calc_eph_by_cowells(obj, eph) method=FUNC_NAME+":calc_eph_by_cowells" check_df(df, exp_df, EXP_DIFF_PERT,method) if TEST_ENCKES : t0 = int(round(process_time() * 1000)) df = calc_eph_by_enckes(obj, eph) t1 = int(round(process_time() * 1000)) method=FUNC_NAME+":calc_eph_by_enckes" check_df(df, exp_df, EXP_DIFF_PERT_ENCKES,method) assert t1-t0 < 17000, "Performance problem introduced"
{ "alphanum_fraction": 0.6850341046, "author": null, "avg_line_length": 38.7409326425, "converted": null, "ext": "py", "file": null, "hexsha": "263498d088831b05d0d60639d4696feedf39cb0b", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a8ca434af3295c8bcb04bb43fc9fb703deda0087", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "benitocm/my-orbits", "max_forks_repo_path": "tests/test_near_parabolic.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "a8ca434af3295c8bcb04bb43fc9fb703deda0087", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "benitocm/my-orbits", "max_issues_repo_path": "tests/test_near_parabolic.py", "max_line_length": 455, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a8ca434af3295c8bcb04bb43fc9fb703deda0087", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "benitocm/my-orbits", "max_stars_repo_path": "tests/test_near_parabolic.py", "max_stars_repo_stars_event_max_datetime": "2021-10-18T13:23:21.000Z", "max_stars_repo_stars_event_min_datetime": "2021-10-18T13:23:21.000Z", "num_tokens": 2337, "path": null, "reason": "import numpy,from numpy", "repo": null, "save_path": null, "sha": null, "size": 7477 }
import sys import json from pathlib import Path import logging import gc import click import numpy as np import torch from data import Vocab, Dataset from model import MultiClassModel from config import Config logger = logging.getLogger(__name__) LOG_FORMAT = '[%(asctime)s] [%(levelname)s] %(message)s (%(funcName)s@%(filename)s:%(lineno)s)' logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) def batch_pad(batch_tokens, vocab): max_len = max(len(tokens) for tokens in batch_tokens) output = np.full((len(batch_tokens), max_len), vocab.PAD_ID) for i, tokens in enumerate(batch_tokens): output[i, :len(tokens)] = tokens return output, (output != vocab.PAD_ID).astype(np.int32) @click.command() @click.argument('model-dir', type=click.Path(exists=True)) @click.option('--ckpt', type=str, default='best.model') @click.option('--device', type=str) @click.option('--emb-path', type=click.Path(exists=True)) def main(model_dir, ckpt, device, emb_path): model_dir = Path(model_dir) config = Config(model_dir / 'config.json') with open(config.vocab.words) as fwords, open(config.vocab.labels) as flabels: vocab = Vocab.load(fwords, flabels) model = MultiClassModel(vocab, config.model) logger.info("Loading parameters") model.load_state_dict(torch.load(model_dir / ckpt)) if emb_path: logger.info("Loading embeddings") with open(emb_path) as femb, open(config.vocab.labels) as flabels: vocab, vecs = Vocab.build_from_emb(femb, flabels) model.set_embedding(vocab, vecs) if device is not None: device = torch.device(device) else: device = torch.device(config.training.device) model = model.to(device) model.eval() logger.info("Start prediction") input_lines = [] batch_tokens = [] for i, line in enumerate(sys.stdin): input_lines.append(line) tokens = line.split() tokens = vocab.idfy(tokens) batch_tokens.append(tokens) if (i + 1) % 32 == 0: batch_padded_tokens, batch_mask = batch_pad(batch_tokens, vocab) batch_padded_tokens = torch.tensor(batch_padded_tokens, dtype=torch.long, 
requires_grad=False).to(device) batch_mask = torch.tensor(batch_mask, requires_grad=False).to(device) result = model(batch_padded_tokens, batch_mask, predict=True) prob = torch.nn.functional.softmax(result['logits'], dim=1) for i in range(len(result['pred'])): print(json.dumps({ 'input': input_lines[i], 'prob': {vocab.id2label(j): float(prob[i,j]) for j in range(prob.shape[1])}, 'prediction': vocab.id2label(int(result['pred'][i])) })) input_lines = [] batch_tokens = [] if not input_lines: return batch_padded_tokens, batch_mask = batch_pad(batch_tokens, vocab) result = model(batch_padded_tokens, batch_mask, predict=True) prob = torch.nn.functional.softmax(result['logits'], dim=1) for i in len(result['pred']): print(json.dumps({ 'input': input_lines[i], 'prob': {vocab.id2label(j): float(prob[i,j]) for j in range(prob.shape[1])}, 'prediction': vocab.id2label(int(result['pred'][i])) })) if __name__ == '__main__': main()
{ "alphanum_fraction": 0.6450471698, "author": null, "avg_line_length": 32.932038835, "converted": null, "ext": "py", "file": null, "hexsha": "c8f5bd519cfa12ee7a74e2d9c1105cc5aa793de4", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5fdeb9090eacc57c87f6bc5a616ed2778e978be1", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "jyori112/task-spec", "max_forks_repo_path": "taskspec/predict.py", "max_issues_count": 4, "max_issues_repo_head_hexsha": "5fdeb9090eacc57c87f6bc5a616ed2778e978be1", "max_issues_repo_issues_event_max_datetime": "2021-08-23T20:31:04.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-24T18:03:36.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "jyori112/task-spec", "max_issues_repo_path": "taskspec/predict.py", "max_line_length": 117, "max_stars_count": null, "max_stars_repo_head_hexsha": "5fdeb9090eacc57c87f6bc5a616ed2778e978be1", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "jyori112/task-spec", "max_stars_repo_path": "taskspec/predict.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 789, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3392 }
import numpy as np import matplotlib.pyplot as plt def plot_nodes(graph, color='r'): for node in range(len(graph.x_of_node)): x, y = graph.x_of_node[node], graph.y_of_node[node] plt.plot(graph.x_of_node[node], graph.y_of_node[node], 'o', color=color) plt.text(x, y, node, color=color, size=16) plt.xlabel('x') plt.ylabel('y') plt.gca().set_aspect(1.) def plot_links(graph, color='b', linestyle='solid'): for link, nodes in enumerate(graph.nodes_at_link): x, y = graph.x_of_node[nodes[0]], graph.y_of_node[nodes[0]] dx, dy = graph.x_of_node[nodes[1]] - x, graph.y_of_node[nodes[1]] - y plt.arrow(x, y, dx, dy, head_width=.1, length_includes_head=True, color=color, linestyle=linestyle) plt.text(x + dx * .5, y + dy * .5, link, size=16, color=color) plt.xlabel('x') plt.ylabel('y') plt.gca().set_aspect(1.) def plot_patches(graph, color='g'): for patch, nodes in enumerate(graph.nodes_at_patch): x, y = np.mean(graph.x_of_node[nodes]), np.mean(graph.y_of_node[nodes]) plt.text(x, y, patch, color=color, size=16) plt.xlabel('x') plt.ylabel('y') plt.gca().set_aspect(1.) def plot_graph(graph, at='node,link,patch'): locs = [loc.strip() for loc in at.split(',')] for loc in locs: if loc not in ('node', 'link', 'patch', 'corner', 'face', 'cell'): raise ValueError( '{at}: "at" element not understood'.format(at=loc)) plt.plot(graph.x_of_node, graph.y_of_node, '.', color='r') if 'node' in locs: plot_nodes(graph) if 'link' in locs: plot_links(graph) if 'patch' in locs: plot_patches(graph) if 'corner' in locs: plot_nodes(graph.dual, color='c') if 'face' in locs: plot_links(graph.dual, linestyle='dotted', color='k') if 'cell' in locs: plot_patches(graph.dual, color='m') plt.show()
{ "alphanum_fraction": 0.598073999, "author": null, "avg_line_length": 30.828125, "converted": null, "ext": "py", "file": null, "hexsha": "7a9df278290b2ba4d3afc908f911a7427c709652", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2018-09-06T23:58:19.000Z", "max_forks_repo_forks_event_min_datetime": "2017-07-03T20:21:13.000Z", "max_forks_repo_head_hexsha": "871151bff814e672b4f09f091b6347367758c764", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "laijingtao/landlab", "max_forks_repo_path": "landlab/plot/graph.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "871151bff814e672b4f09f091b6347367758c764", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "laijingtao/landlab", "max_issues_repo_path": "landlab/plot/graph.py", "max_line_length": 79, "max_stars_count": 1, "max_stars_repo_head_hexsha": "871151bff814e672b4f09f091b6347367758c764", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "laijingtao/landlab", "max_stars_repo_path": "landlab/plot/graph.py", "max_stars_repo_stars_event_max_datetime": "2015-08-17T19:29:50.000Z", "max_stars_repo_stars_event_min_datetime": "2015-08-17T19:29:50.000Z", "num_tokens": 553, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1973 }
import time import numpy as np import vpi import cv2 from threading import Thread from PIL import Image from jetvision.elements import Camera MAX_DISP = 64 WINDOW_SIZE = 10 def get_calibration() -> tuple: fs = cv2.FileStorage( "calibration/rectify_map_imx219_160deg_1080p.yaml", cv2.FILE_STORAGE_READ ) map_l = (fs.getNode("map_l_x").mat(), fs.getNode("map_l_y").mat()) map_r = (fs.getNode("map_r_x").mat(), fs.getNode("map_r_y").mat()) fs.release() return map_l, map_r class CameraThread(Thread): def __init__(self, sensor_id) -> None: super().__init__() self._camera = Camera(sensor_id) self._should_run = True self._image = self._camera.read() self.start() def run(self): while self._should_run: self._image = self._camera.read() @property def image(self): # NOTE: if we care about atomicity of reads, we can add a lock here return self._image def stop(self): self._should_run = False self._camera.stop() if __name__ == "__main__": map_l, map_r = get_calibration() cam_l = CameraThread(1) cam_r = CameraThread(0) try: with vpi.Backend.CUDA: for i in range(100): ts = [] ts.append(time.perf_counter()) # confidenceMap = vpi.Image(vpi_l.size, vpi.Format.U16) arr_l = cam_l.image arr_r = cam_r.image ts.append(time.perf_counter()) # RGB -> GRAY # arr_l = cv2.cvtColor(arr_l, cv2.COLOR_RGB2GRAY) # arr_r = cv2.cvtColor(arr_r, cv2.COLOR_RGB2GRAY) ts.append(time.perf_counter()) # Rectify arr_l_rect = cv2.remap(arr_l, *map_l, cv2.INTER_LANCZOS4) arr_r_rect = cv2.remap(arr_r, *map_r, cv2.INTER_LANCZOS4) ts.append(time.perf_counter()) # Resize arr_l_rect = cv2.resize(arr_l_rect, (480, 270)) arr_r_rect = cv2.resize(arr_r_rect, (480, 270)) ts.append(time.perf_counter()) # Convert to VPI image vpi_l = vpi.asimage(arr_l_rect) vpi_r = vpi.asimage(arr_r_rect) vpi_l_16bpp = vpi_l.convert(vpi.Format.U16, scale=1) vpi_r_16bpp = vpi_r.convert(vpi.Format.U16, scale=1) vpi_l_16bpp = vpi_l.convert(vpi.Format.U16, scale=1) vpi_r_16bpp = vpi_r.convert(vpi.Format.U16, scale=1) ts.append(time.perf_counter()) 
disparity_16bpp = vpi.stereodisp( vpi_l_16bpp, vpi_r_16bpp, out_confmap=None, backend=vpi.Backend.CUDA, window=WINDOW_SIZE, maxdisp=MAX_DISP, ) disparity_8bpp = disparity_16bpp.convert( vpi.Format.U8, scale=255.0 / (32 * MAX_DISP) ) ts.append(time.perf_counter()) disp_arr = disparity_8bpp.cpu() ts.append(time.perf_counter()) disp_arr = cv2.applyColorMap(disp_arr, cv2.COLORMAP_TURBO) ts.append(time.perf_counter()) cv2.imshow("Disparity", disp_arr) cv2.waitKey(1) ts.append(time.perf_counter()) ts = np.array(ts) ts_deltas = np.diff(ts) debug_str = f"Iter {i}\n" for task, dt in zip( [ "Read images", "OpenCV RGB->GRAY", "OpenCV Rectify", "OpenCV 1080p->270p Resize", "VPI conversions", "Disparity calc", ".cpu() mapping", "OpenCV colormap", "Render", ], ts_deltas, ): debug_str += f"{task} {1000*dt:0.2f}\n" print(debug_str) except KeyboardInterrupt as e: print(e) finally: cam_l.stop() cam_r.stop()
{ "alphanum_fraction": 0.5068780602, "author": null, "avg_line_length": 29.3767123288, "converted": null, "ext": "py", "file": null, "hexsha": "1b75e5a59786ebb60325ebed64ad38e6a155314d", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-12-30T18:32:06.000Z", "max_forks_repo_forks_event_min_datetime": "2021-12-21T23:44:47.000Z", "max_forks_repo_head_hexsha": "8e2e950a6a38ac545bc07b35119767faed6412e9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "NVIDIA-AI-IOT/jetson-stereo-depth", "max_forks_repo_path": "depth_pipeline_python/depth_opencv.py", "max_issues_count": 2, "max_issues_repo_head_hexsha": "8e2e950a6a38ac545bc07b35119767faed6412e9", "max_issues_repo_issues_event_max_datetime": "2022-02-01T05:23:36.000Z", "max_issues_repo_issues_event_min_datetime": "2022-01-06T05:28:27.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "NVIDIA-AI-IOT/jetson-stereo-depth", "max_issues_repo_path": "depth_pipeline_python/depth_opencv.py", "max_line_length": 81, "max_stars_count": 15, "max_stars_repo_head_hexsha": "8e2e950a6a38ac545bc07b35119767faed6412e9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "NVIDIA-AI-IOT/jetson-stereo-depth", "max_stars_repo_path": "depth_pipeline_python/depth_opencv.py", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:10:22.000Z", "max_stars_repo_stars_event_min_datetime": "2021-12-12T17:29:49.000Z", "num_tokens": 1009, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4289 }
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. """ signal """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging import numpy as np from onnx import onnx_pb from onnx.numpy_helper import to_array from tf2onnx import utils from tf2onnx.handler import tf_op logger = logging.getLogger(__name__) # pylint: disable=unused-argument,missing-docstring def make_dft_constant(length, dtype, fft_length): n = np.arange(length) k = n.reshape((length, 1)).astype(np.float64) mat = np.exp(-2j * np.pi * k * n / length) mat = mat[:fft_length // 2 + 1] both = np.empty((2,) + mat.shape, dtype=dtype) both[0, :, :] = np.real(mat) both[1, :, :] = np.imag(mat) return both @tf_op("RFFT") class RFFTOp: # support more dtype @classmethod def version_1(cls, ctx, node, **kwargs): """ Inspired from `Python implementation of RFFT <https://jakevdp.github.io/blog/2013/08/28/understanding-the-fft/>`_. Complex version: :: import numpy as np def _DFT_cst(N, fft_length): n = np.arange(N) k = n.reshape((N, 1)).astype(np.float64) M = np.exp(-2j * np.pi * k * n / N) return M[:fft_length // 2 + 1] def DFT(x, fft_length=None): if len(x.shape) == 1: x = x.reshape((-1, 1)) else: x = x.T if fft_length is None: fft_length = x.shape[0] cst = _DFT_cst(x.shape[0], fft_length) return np.dot(cst, x).T Real version, first axis is (real, imag) part: :: import numpy as np def _DFT_real_cst(N, fft_length): n = np.arange(N) k = n.reshape((N, 1)).astype(np.float64) M = np.exp(-2j * np.pi * k * n / N) M = M[:fft_length // 2 + 1] both = np.empty((2,) + M.shape) both[0, :, :] = np.real(M) both[1, :, :] = np.imag(M) return both def DFT_real(x, fft_length=None): if len(x.shape) == 1: x = x.reshape((-1, 1)) else: x = x.T if fft_length is None: fft_length = x.shape[0] cst = _DFT_real_cst(x.shape[0], fft_length) res = np.dot(cst, x) return np.transpose(res, (0, 2, 1)) """ supported_dtypes = [ 
onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE, onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128, ] consumers = ctx.find_output_consumers(node.output[0]) consumer_types = set(op.type for op in consumers) utils.make_sure( consumer_types == {'ComplexAbs'}, "Current implementation of RFFT only allows ComplexAbs as consumer not %r", consumer_types) onnx_dtype = ctx.get_dtype(node.input[0]) utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.") shape = ctx.get_shape(node.input[0]) np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype) shape_n = shape[-1] utils.make_sure(len(node.input) == 2, "Two inputs expected not %r", len(node.input)) # This input should be a constant. fft_length_name = node.input[1] node_fft_length = ctx.get_node_by_output(fft_length_name, search_in_parent_graphs=True) utils.make_sure(node_fft_length.type == 'Const', "fft_length should be a constant, the other case is not implemented yet.") value = node_fft_length.get_attr("value") value_array = to_array(value.t) utils.make_sure(value_array.shape == (1,), "Unexpected shape for fft_length (%r)", value_array.shape) fft_length = value_array[0] # TODO: handle this parameter when onnx.helper.make_node is fixed. 
# Tcomplex = node.get_attr("Tcomplex") if np_dtype == np.float16: res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float16) np_dtype = np.float16 elif np_dtype in (np.float32, np.complex64): res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float32) np_dtype = np.float32 else: res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float64) np_dtype = np.float64 real_imag_part = make_dft_constant(shape_n, np_dtype, fft_length) onx_real_imag_part = ctx.make_const( name=utils.make_name('cst_rfft_%d' % shape_n), np_val=real_imag_part) shapei = list(np.arange(len(shape))) perm = shapei[:-2] + [shapei[-1], shapei[-2]] trx = ctx.make_node( "Transpose", inputs=[node.input[0]], attr=dict(perm=perm), name=utils.make_name(node.name + 'tr')) ctx.remove_node(node.name) mult = ctx.make_node( "MatMul", inputs=[onx_real_imag_part.name, trx.output[0]], name=utils.make_name('CPLX_' + node.name + 'rfft')) new_shape = [2] + list(shape) shapei = list(np.arange(len(new_shape))) perm = shapei[:-2] + [shapei[-1], shapei[-2]] last_node = ctx.make_node( "Transpose", inputs=[mult.output[0]], attr=dict(perm=perm), name=utils.make_name('CPLX_' + node.name + 'rfft'), shapes=[new_shape], dtypes=[res_onnx_dtype]) ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes() @tf_op("ComplexAbs") class ComplexAbsOp: # support more dtype @classmethod def any_version(cls, opset, ctx, node, **kwargs): """ Computes the modules of a complex. If the matrix dtype is not complex64 or complex128, it assumes the first dimension means real part (0) and imaginary part (1, :, :...). 
""" supported_dtypes = [ onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE, onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128, ] onnx_dtype = ctx.get_dtype(node.input[0]) utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.") shape = ctx.get_shape(node.input[0]) np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype) utils.make_sure(shape[0] == 2, "ComplexAbs expected the first dimension to be 2 but shape is %r", shape) ind0 = ctx.make_const(name=utils.make_name('cst0'), np_val=np.array([0], dtype=np.int64)) ind1 = ctx.make_const(name=utils.make_name('cst1'), np_val=np.array([1], dtype=np.int64)) p2 = ctx.make_const(name=utils.make_name('p2'), np_val=np.array([2], dtype=np_dtype)) real_part = ctx.make_node( 'Gather', inputs=[node.input[0], ind0.name], attr=dict(axis=0), name=utils.make_name('Real_' + node.name)) imag_part = ctx.make_node( 'Gather', inputs=[node.input[0], ind1.name], attr=dict(axis=0), name=utils.make_name('Imag_' + node.name)) real_part2 = ctx.make_node( 'Pow', inputs=[real_part.output[0], p2.name], name=utils.make_name(real_part.name + 'p2p')) imag_part2 = ctx.make_node( 'Pow', inputs=[imag_part.output[0], p2.name], name=utils.make_name(imag_part.name + 'p2p')) ctx.remove_node(node.name) add = ctx.make_node( "Add", inputs=[real_part2.output[0], imag_part2.output[0]], name=utils.make_name('ComplexAbs_' + node.name)) if opset == 1: squeezed = ctx.make_node( "Squeeze", inputs=add.output[:1], attr=dict(axes=[0]), name=utils.make_name('ComplexAbs' + node.name)) else: squeezed = ctx.make_node( "Squeeze", inputs=[add.output[0], ind0], name=utils.make_name('ComplexAbsSqr' + node.name)) last_node = ctx.make_node( "Sqrt", inputs=squeezed.output[:1], name=utils.make_name('ComplexAbs' + node.name), shapes=[shape[1:]], dtypes=[onnx_dtype]) ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes() @classmethod def version_1(cls, ctx, node, **kwargs): cls.any_version(1, ctx, 
node, **kwargs) @classmethod def version_13(cls, ctx, node, **kwargs): cls.any_version(11, ctx, node, **kwargs)
{ "alphanum_fraction": 0.5806341241, "author": null, "avg_line_length": 36.9957805907, "converted": null, "ext": "py", "file": null, "hexsha": "0d53ece16420e68c069f6ba34fbf9be4fa60c449", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "214309d44852f5cf086452fbd1a0d045ad502559", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "zzpmiracle/tensorflow-onnx", "max_forks_repo_path": "tf2onnx/onnx_opset/signal.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "214309d44852f5cf086452fbd1a0d045ad502559", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "zzpmiracle/tensorflow-onnx", "max_issues_repo_path": "tf2onnx/onnx_opset/signal.py", "max_line_length": 112, "max_stars_count": null, "max_stars_repo_head_hexsha": "214309d44852f5cf086452fbd1a0d045ad502559", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "zzpmiracle/tensorflow-onnx", "max_stars_repo_path": "tf2onnx/onnx_opset/signal.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2205, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 8768 }
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""The three-parameter Gompertz growth model."""
import dataclasses
import warnings

import numpy as np
from scipy import optimize

from staticchar.basic_types import ArrayLike
from staticchar.models.base import BaseModel, CurveParameters


def _function(ts: np.ndarray, a: float, b: float, c: float) -> np.ndarray:
    """Evaluate the Gompertz curve y(t) = a * exp(-b * exp(-c * t)) at timepoints `ts`.

    Overflow warnings are suppressed: for extreme parameter values the inner
    exponential may overflow to inf, in which case the outer exp(-inf) simply
    saturates to 0 — the result is still well-defined.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "overflow encountered")
        inner = np.exp(-c * ts)
        return a * np.exp(-b * inner)


@dataclasses.dataclass(frozen=True)
class _GompertzParameters:
    """The parameters of the curve y(t) = a * exp(-b * exp(-c * t)).

    They shouldn't probably be used widely (as they are hard to interpret),
    but they are very convenient mathematically.

    See also: CurveParameters, _reparametrize_to_model, _reparametrize_to_curve
    """

    a: float  # carrying capacity (upper asymptote)
    b: float  # horizontal shift term
    c: float  # rate term

def _reparametrize_to_model(params: CurveParameters) -> _GompertzParameters:
    """Map the interpretable curve parametrization onto the raw (a, b, c) one.

    See also: _reparametrize_to_curve (the inverse mapping)
    """
    capacity: float = params.carrying_capacity
    rate_c: float = params.growth_rate * np.e / capacity
    shift_b: float = np.exp(rate_c * params.lag_time + 1.0)
    return _GompertzParameters(a=capacity, b=shift_b, c=rate_c)


def _reparametrize_to_curve(params: _GompertzParameters) -> CurveParameters:
    """Map the raw (a, b, c) parametrization back onto the interpretable one.

    See also: _reparametrize_to_model (the inverse mapping)
    """
    growth = params.a * params.c / np.e
    lag = (np.log(params.b) - 1.0) / params.c
    return CurveParameters(carrying_capacity=params.a, growth_rate=growth, lag_time=lag)


class GompertzModel(BaseModel):
    """Three-parameter Gompertz growth model, parametrized via CurveParameters."""

    @property
    def time_maximal_activity(self) -> float:
        """The time of maximal growth rate"""
        raw: _GompertzParameters = _reparametrize_to_model(self.parameters)
        return np.log(raw.b) / raw.c

    def predict(self, ts: ArrayLike) -> np.ndarray:
        """Gives the values of the model at timepoints `ts`."""
        raw = _reparametrize_to_model(self.parameters)
        return _function(np.asarray(ts), a=raw.a, b=raw.b, c=raw.c)

    @property
    def _log_initial_density(self) -> float:  # pragma: no cover
        """Return the natural logarithm of the initial density"""
        raw: _GompertzParameters = _reparametrize_to_model(self.parameters)
        # Since y(0) = a * exp(-b), we have log(y(0)) = log(a) - b.
        return np.log(raw.a) - raw.b

    @staticmethod
    def _fit(ts: np.ndarray, ys: np.ndarray, initial_guess: CurveParameters, max_iterations: int) -> CurveParameters:
        """Finds optimal parameters of the growth model parameters."""
        guess = _reparametrize_to_model(initial_guess)
        fitted = optimize.curve_fit(
            _function, ts, ys, p0=(guess.a, guess.b, guess.c), maxfev=max_iterations
        )[0]
        best = _GompertzParameters(a=fitted[0], b=fitted[1], c=fitted[2])
        return _reparametrize_to_curve(best)
{ "alphanum_fraction": 0.6587664192, "author": null, "avg_line_length": 36.8631578947, "converted": null, "ext": "py", "file": null, "hexsha": "768e37560dbf4b45264ad4d78d06e177e9ec95d9", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-10-02T17:53:07.000Z", "max_forks_repo_forks_event_min_datetime": "2021-09-27T10:35:20.000Z", "max_forks_repo_head_hexsha": "ea3591837e4a33f0bef789d905467754c27913b3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "BrunoKM/station-b-libraries", "max_forks_repo_path": "PyStationB/libraries/StaticCharacterization/staticchar/models/gompertz.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "ea3591837e4a33f0bef789d905467754c27913b3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "BrunoKM/station-b-libraries", "max_issues_repo_path": "PyStationB/libraries/StaticCharacterization/staticchar/models/gompertz.py", "max_line_length": 117, "max_stars_count": 6, "max_stars_repo_head_hexsha": "ea3591837e4a33f0bef789d905467754c27913b3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "BrunoKM/station-b-libraries", "max_stars_repo_path": "PyStationB/libraries/StaticCharacterization/staticchar/models/gompertz.py", "max_stars_repo_stars_event_max_datetime": "2021-12-14T18:39:51.000Z", "max_stars_repo_stars_event_min_datetime": "2021-09-29T15:46:55.000Z", "num_tokens": 827, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 3502 }
#pragma once #include <Eigen/Dense> #include <Eigen/Sparse> #include <cstdlib> namespace edp { template<typename T, class ColFunc> auto constructMat(size_t dim, ColFunc&& colFunc) -> Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> { Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> res(dim, dim); res.setZero(); for(size_t i = 0; i < dim; i++) { auto m = colFunc(i); for(auto& elt : m) { res(elt.first, i) = elt.second; } } return res; } template<typename T, class ColFunc> auto constructSparseMat(size_t dim, ColFunc&& colFunc) -> Eigen::SparseMatrix<T> { using TripletT = Eigen::Triplet<T>; std::vector<TripletT> tripletList; tripletList.reserve(3 * dim); for(size_t col = 0; col < dim; ++col) { auto m = colFunc(col); for(const auto& v : m) { tripletList.emplace_back(v.first, col, v.second); } } Eigen::SparseMatrix<T> res(dim, dim); res.setFromTriplets(tripletList.begin(), tripletList.end()); return res; } // basis must be sorted template<typename T, typename ColFunc> auto constructSubspaceMat(ColFunc&& t, const std::vector<uint32_t>& basis) -> Eigen::SparseMatrix<T> { const size_t n = basis.size(); using TripletT = Eigen::Triplet<T>; std::vector<TripletT> tripletList; for(size_t i = 0; i < n; i++) { std::map<uint32_t, T> m = t(basis[i]); auto iter = basis.begin(); for(auto& kv : m) { iter = std::lower_bound(iter, basis.end(), kv.first); if(iter == basis.end()) { break; } auto j = std::distance(basis.begin(), iter); { tripletList.emplace_back(i, j, kv.second); } } } Eigen::SparseMatrix<T> res(n, n); res.setFromTriplets(tripletList.begin(), tripletList.end()); return res; } }
{ "alphanum_fraction": 0.5694444444, "author": null, "avg_line_length": 25.2467532468, "converted": null, "ext": "hpp", "file": null, "hexsha": "c436d83efe656b084ed12df20a43f5dc7e96e08e", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a168ed2f60149b1c3e5bd9ae46a5d169aea76773", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "cecri/ExactDiagonalization", "max_forks_repo_path": "include/edlib/EDP/ConstructSparseMat.hpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "a168ed2f60149b1c3e5bd9ae46a5d169aea76773", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "cecri/ExactDiagonalization", "max_issues_repo_path": "include/edlib/EDP/ConstructSparseMat.hpp", "max_line_length": 100, "max_stars_count": null, "max_stars_repo_head_hexsha": "a168ed2f60149b1c3e5bd9ae46a5d169aea76773", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "cecri/ExactDiagonalization", "max_stars_repo_path": "include/edlib/EDP/ConstructSparseMat.hpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 513, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 1944 }
Glacier Point Apartments is a quiet and friendly community located in an optimal area of West Davis. Residents enjoy a fitness center, pool and spa, barbeque spot, media loft and internet lounge with free WiFi. With quick freeway access, walking distance to shopping, restaurants and more, and a bike path and bus lines to campus, Glacier Point is the ultimate location! The complex offers an array of floor plans from single bedroom to spacious four bedroom apartments and every unit features a washer and dryer. There is permanent staff in the office available to help anyone with questions regarding Glacier Point. Prices are competitive compared to other apartment communities in the area: $1060.00 for a 1BD/1BR, $1295.00 for a 2BD/1BR, $1335 for a 2BD/2BR, $1,715 for a 3BD/2BR, and $2,095.00 for a 4BD/2BR. The MU is about 2.5 miles from Glacier Point by car and just 1.5 miles via bike. Glacier Point is on the Unitrans P/Q bus line, which also runs on Saturdays. The Q Line stops directly at Glacier Point. The P Line can be caught on the corner of Shasta Drive, the street before Denali. Many residents opt to take the Q and transfer to the G Bus Line G Line at the Congregation Bet Haverim synagogue stop. However, if transfers are late, Residents have suggested communicating with the drivers. For those who dont mind a short walk, the B line, on Sycamore Lane one block south of the Marketplace, is only 10 minutes away. The Marketplace is just down the street (1 mile), where there is a Safeway and lots of other stores and restaurants. If you go down a little bit further, youll find the Anderson Plaza with its SaveMart (1.3 miles) and Oak Tree Plaza with its Nugget and lots more shops and restaurants (2.6 miles). For more information on rental housing in Davis, take a look at our Housing Guide as well as our Apartments page. Older Reviews /Reviews Before 2009 Reviews Before 2009 Current Reviews 20090821 13:37:43 nbsp We moved in to the complex back in 8/2006. 
On initial movein there was a different manager, nice person, bit of a busy body (gossiped & would try to act like your best buddy). I did not care for her too much but she wasnt there for much longer. Management changed within the same year and made quite a difference. Beverly is one of the nicest and accomadating managers I have met yet. Jerry (maintenance) is very friendly, responds to ANY issues same day and I never felt nervous with him in our apartment with our cat when we are not there very important to me. We had a bad roommate situation and they were completely understanding & did not hold it against us for our misfortune. They told us how to handle it and what they couldnt do and really showed us they cared. Nicole joined there team and she is just as awesome as the other two. This team really makes the effort to show the tenants that they are there for us. As compared to other places I have lived in where it was always at their convenience. The only reason I am moving is because we needed to relocate. If I ever need to move back and this management team is still on board, I would move back in a heartback. Good Luck Beverly, Gerry & Nicole, and Thanks! Users/Shallon 20090901 10:47:15 nbsp Had a great experience living at glacier, didnt deal with management much, but when we did they were very easy to talk to and very nice about dealing with things. The repair guy was great and responded to requests very quickly. Would have continued to live here but we needed a place closer to school. Great place to live, no complaints. Users/rocknice 20100125 09:45:56 nbsp I have been living at the Glacier Point apartments since September. So far, Ive had a great experience. Before moving in I looked at a lot of other apartment complexes and this one was by far the nicest. The property is kept neat and the insides of the apartments are also in great condition...new paint, clean bathrooms, etc. 
The management are very nice either when just saying good morning or when dealing with any concerns that I have. There are always (good) coffee and donuts in the mornings and cookies in the afternoons. Whenever Ive reported something that needed to be fixed, like a light, it was taken care of within that same day. Parking has never been a problem, I can even find spaces in the lot when I come back at 3 a.m. The gym and pool areas are also very nice. They even have my favorite exercise machine from the ARC. The only problem Ive found is that my cell phone reception wavers depending on where I am inside the apartment. Besides that, I love everything about this place and had no hesitation renewing my lease. Users/KatieP 20100125 20:12:55 nbsp Glacier is an okay place to live in, as a freshman who moved in from the dorms, its decent (no more fire hazards!) My roommates and I are currently in a three bedroom suite. As what everyone says, the walls are thin, but as far as I can tell I can only hear locally within my own suite, I sometimes hear the people upstairs but thats usually like a chair falling over and other things. Unfortunately, the way the master bedroom in the three bedroom is connected to the living room, lets just say Ive heard things that Ive never really wanted to hear. I do have nice things to say about the management, they are friendly and pleasant. Maintenance is usually pretty quick about things and are honest about when things would be done, Jerry the main guy is friendly. And Glacier occasionally holds some events like pie day, free breakfast for finals week which is nice. And I will definitely say that Cell phone reception for AT&T is terrible for my spot, I usually only have 12 bars and my roommate has to put his phone by the window. I know this isnt particularly Glaciers problem, its just annoying. 
And currently Glaciers only service provider for internet is comcast, Id definitely recommend calling the davis rep for comcast for a deal were are paying ~$80 for high speed internet and basic cable with free HBO/Starz for 9months. And biking to the UCD campus takes about 1020mins if you take the bike path across 113. We did not renew our lease for this year because of roommates going to different places, but I guess I would still recommend Glacier, yay for free cookies! Users/theyangster 20100220 18:20:26 nbsp i know this is probably just wishful thinking, but does anyone know if were allowed to paint the walls while we live there? then just paint them back to white before we move out? Users/JacquelineD 20100831 16:05:44 nbsp Weve been here for 4 years, and we loved it. Were moving out because our family is growing, otherwise we would have stayed. Management is very nice, maintenance requests are addressed the same day or the next day. The pool area is nice and hardly ever crowded, so is the gym. They added the BBQ area a couple of years ago which is very nice. The place is quiet good for older undergrads/grads and young professionals. The only downside (for lazy me) was that it was a longish bike ride to campus 1520 minutes. Other than that I highly recommend it! Users/Zzz 20100831 16:07:12 nbsp I forgot to add: the morning coffee and donuts and afternoon cookies are great. For the past year or two, they also have a community dinner twice a year with free food. And, my favorite, free continental breakfast during finals week. Loved it! Users/Zzz 20110215 15:09:04 nbsp I have lived at Glacier Point Apartments for 2 years now in a 1 bedroom apartment. The complex was built in the mid2000s if I recall correctly, so it’s fairly new. The apartments are in good working order (nice windows, carpet, appliances) and come with their own washer/dryer as well as central heating and air conditioning, and a kitchen with all of the standard appliances. 
I cant hear my neighbors unless they slam their front door or hammer their walls. Im on the top floor so I cant comment on footsteps, etc., but Ive never had any complaints from the residents below me. There is a pool and hot tub that are wellmaintained, as well as a rec room with treadmills and other exercise machines. The entire property is taken care of very well and looks nice. Parking is available for all residents and finding a spot is never a problem. The complex is off of Covell. There is a bus stop for the Q line right at the complex, and a bus stop for the P line a few minutes away by walking. Additionally, the D and K lines are accessible by a 5minute walk down Denali Dr to Arlington & Shasta. Highway 113 is one block away on Covell, and driving to campus or downtown takes 510 minutes. There is a plaza with a Safeway and CVS about 2 minutes east driving on Covell. There is a bike pathway off of Glacier Dr. that links to the bike loop; biking to campus takes 1020 minutes. The management (Bev and Nicole) are very friendly, along with Jerry (maintenance). They have coffee, donuts, cookies, and candy at their office whenever I stop by. Jerry has responded to my maintenance requests within 12 days and I’ve never had a problem getting things fixed. Management hosts fall and spring socials for residents where they provide delicious hot food. During finals week of each quarter, continental breakfast is offered to residents. There are a few potential drawbacks to living at Glacier Point. It’s far away from downtown or campus, so you have to bike, drive, or take the bus anywhere you want to go. The bus lines nearby aren’t the best – the P/Q have long routes and only stop at the MU but many residents use them. It’s also a quiet complex that’s probably not too partyfriendly. I chose Glacier Point because it was new, quiet, and offered plenty of amenities, especially the washer and dryer. 
Its location and distance from campus didn’t bother me since I enjoy riding my bike everywhere. I don’t take the bus often, but it’s convenient should I ever have to. I would definitely recommend this complex to anyone! Users/jamesd 20110802 16:44:53 nbsp I lived there last year and enjoyed it quite a bit. Reasonable pricing, good perks (donuts every morning, cookies in the afternoon, breakfast during finals, etc.), and decent location. But the best thing about Glacier Point is Jerry. Jerry is the maintenance guy for the complex and he rocks! Super friendly, good attitude and responds very quickly to work orders. I would definitely recommend living here, as long as youre able to put up with the P/Q lines. Users/KieranToovey 20110925 20:48:10 nbsp Glacier point is a fairly nice place to live in, except for how far from campus it is. The worst part comes when you actually move out...so here comes my experiences at Glacier point (over 2 years). The first apartment I was in, was a 3 bed/2 bath with a $600 security deposit. Come the end of the year (AFTER I had already signed a lease on a different apartment in the same complex), the management attempted to withhold a little less than half of my security deposit for cleaning and painting. When we received the moveout paperwork, it said that approximately $200 would automatically be deducted for CARPET cleaning...no matter what.<this is ILLEGAL. Also, my roommate and I (and our parents) cleaned that place for hours...and followed all of their instructions to the letter. So needless to say, we were surprised to have gotten only half of our money back after all of the effort to clean the place. So we had to go after the management, but wouldnt you know it...they gave our money back because they were not allowed to do that. Of course I had signed a lease before I realized that all this crap was going to happen...so come year 2. I had prepared myself and discussed all issues with the management at the beginning of the year. 
This time, they had more specific cleaning outlines, as well as did not deduct for carpet cleaning costs. BUT of course something else had to happen. My roommate and I cleaned the place, once again following the instructions to the letter. Also, the complex is trying to charge me for a 1/2 paint on an apartment that DID NOT need any painting, because it was NOT damaged by me. Again they have tried to weasel me out of half of my security deposit. Even though this complex looks nice, it is nice until you try to get your security deposit back....because you WILL have to fight in order to get your full deposit back. Renter beware because the security deposit is sure a lot of money! Users/EmilyRichards 20120726 14:24:08 nbsp Ive lived in Glacier for the past year and I love the property. I had planned to live here for an additional year, however some things happen with the people that I was supposed to live with. Beverly was very understanding with the issues that we were going through and tried to help us to her best ability. She is the nicest out of all the people in the office. Also, during the time that I lived at Glacier anytime I had any issues with anything in my apartment not working the maintenance man Jerry always came in a timely manner to fix the problem. In all Glacier is a very nice place to live and I would definitely recommend it ! Users/PatriciaCrowder 20120813 10:25:29 nbsp My wife, daughter and I lived in GP for just under 2 years. Bev and Jerry are very friendly and repair service has always been prompt. I believe this is one of the quieter complexes, since I have heard some of my friends complain about undergrad partying where they live. The management sponsors many tenant appreciation events, such as bagel breakfasts during finals week, chili night, and free coffee and donuts on weekdays. The pool and spa are also in good working order and are always a pleasure after a long day at work. 
Upstairs from the exercise room there is also a large screen TV for movies and cable TV where my daughter watched the olympics. Aside from the amenities the apartments are acceptable. The appliances are neither very new nor very old, and the location is neither very good nor very bad. For families with schoolage children, the Patwin school is a 3 minute bike ride down the green belt, and it is an excellent elementary school. The bike commute to UCD campus is 1520 minutes, and there are close bus stops on Covell for those not into biking. The parking lot is all unreserved spots, with some under a shelter. I have only once seen it completely full, so there is never a problem finding a place to park. All in all, our experience has been very positive living here, and I would recommend it to others looking for an attentive management service. Our experience in other places with bad property managers fueled our pursuit of a good staff (using these reviews on Davis wiki) and the reputation for prompt and courteous care taken by Bev and Jerry is welldeserved. (e.g. Jerry helped me when I lost a house key and fixed our washing machine very promptly. Bev gave us a very good reference for our new landlord and helped us set up a sublease agreement for the last month of our lease). While we have not completed our lease yet, I cannot address the comments from other tenants that Yolo property management will try to keep the deposit. However, our past experience is that they have been fair and that they offer a walkthrough before the lease is up give some idea if the apartments condition merits a full refund. Again, because of the management and amenities I highly recommend Glacier Point, especially for students or small families looking for a quieter apartment complex. Users/IsaacGreenhut 20130308 12:57:12 nbsp I absolutely LOVE this apartment complex. My housemates and I moved in three years ago and we never left! The management is extremely nice and helpful. 
They are always willing to answer any questions that we have and assist us on the spot if we ever get locked out of our apartments or need to pick a package up. Jerry, the maintenance guy, is extremely friendly and efficient! If we ever had a problem with our bathroom or apartment light etc. all I would have to do is tell Jerry about it, and he would show up on the same day and fix it. On top of that, we get free donuts and coffee every morning, free cookies every afternoon, and free breakfast during finals week. We also get free printing in the main office. With such an incredible management and so many perks, Im sad to leave Glacier Point Apartments because Ill be leaving Davis. But I would recommend anyone whos looking for a great place to stay to check this place out! Users/manpreetbath 20130311 11:44:34 nbsp We have lived in these apartments for three years and never once had a problem. The management at Glacier Point is outstanding. Whenever there is something wrong ( i.e a light is out or a drain is clogged) Jerry the Maintenance Man fixes it right away. In addition to the excellent service at Glacier Point the people who work here are also very personable ( Jerry!) and willing to talk to you anytime. Though this apartment is farther away from campus than some others might be the management and the perks of living here ( i.e free coffee and donuts in the morning; cupcake and pie day; socials; and free breakfast during finals week) definitely makes up for itLaura R. Users/LauraRichstone 20140318 12:24:29 nbsp GLACIER POINT RESPONSE TO LAST REVIEW: Hello Christine and thank you for bringing this to our managers attention on Saturday I believe you spoke to her the day you reported this and your maintenance request is actively being met. Glacier Point recently welcomed our new Resident Manager, Heather, who is aware of this issue and it currently organizing a bike sweep and the installation of new bike racks. 
Unfortunately it seems our last Resident Manager didn't follow through with the last bike sweep, for which we apologize. Glacier Point and Yolo Property Management have a reputation for being very responsive to our Resident requests and unfortunately a need for additional bike parking was not brought to our attention until Christine's call into our Resident Manager this Saturday — perhaps this was on account of the previous manager not relaying the need to our company, which is unacceptable if that was the case. If any Residents or their guests ever feel for any reason that property needs are being overlooked by a Resident Manager, we urge you to call Yolo Property Management directly. Of course, we are confident that with our new Resident Manager, Heather, this situation and any future resident needs will be met swiftly as extraordinary customer service is our mission and what we are known for around Davis. Thanks again Christine. We understand your frustration and are working to improve bike parking. Users/D.Sparks
{ "alphanum_fraction": 0.7944322029, "author": null, "avg_line_length": 218.9058823529, "converted": null, "ext": "f", "file": null, "hexsha": "37d56945d91fbcc4517edac807c9bd55455da093", "include": null, "lang": "FORTRAN", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "voflo/Search", "max_forks_repo_path": "lab/davisWiki/Glacier_Point_Apartments.f", "max_issues_count": null, "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "voflo/Search", "max_issues_repo_path": "lab/davisWiki/Glacier_Point_Apartments.f", "max_line_length": 1261, "max_stars_count": null, "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "voflo/Search", "max_stars_repo_path": "lab/davisWiki/Glacier_Point_Apartments.f", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4094, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 18607 }
#!/usr/bin/env python # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 ai : """Identify and flag sources as either stellar sources, extended sources or anomalous sources Anomalous sources fall into several categories: - Saturated sources: the pixel values in the cores of these sources are maxed out at the detector maximum. - Hot pixels: These sources have intensity profiles that rise much more sharply than a typical gaussian profile for that intensity should; Usually indicative of cosmic ray events or hot pixels in the detector. - Swam detection: These are false detections that occur in areas surrounding bright sources where structures associated with an actual bright source are mistaken for actual sources (e.g. diffraction spikes) - Edge detections: These false detections are from regions where there are a low (or a null) number of contributing exposures **Flag Value Identification Information** ========== ============ Flag Value Flag Meaning ---------- ------------ 0 Stellar Source 1 Extended Source (Concentration Index > CI Upper Limit) 4 Saturated Source 16 Concentration Index < CI Lower Limit (i.e. Hot Pixels) 32 Swarm Detection 64 Nexp filtered detections, i.e. 
edge detections ========== ============ Where the concentration index (CI) = mag1 - mag2 Dependencies ------------ * ci_table.py """ import glob import json import math import os import sys import time from astropy.io import fits as fits from astropy.table import Table import numpy import scipy import scipy.ndimage from drizzlepac.haputils import ci_table from stsci.tools import logutil from stwcs import wcsutil __taskname__ = 'hla_flag_filter' MSG_DATEFMT = '%Y%j%H%M%S' SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s' log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) def run_source_list_flagging(drizzled_image, flt_list, param_dict, exptime, plate_scale, median_sky, catalog_name, catalog_data, proc_type, drz_root_dir, hla_flag_msk, ci_lookup_file_path, output_custom_pars_file, log_level, diagnostic_mode): """Simple calling subroutine that executes the other flagging subroutines. Parameters ---------- drizzled_image : string drizzled filter product image filename flt_list : list list of calibrated images that were drizzle-combined to produce image specified by input parameter 'drizzled_image' param_dict : dictionary Dictionary of instrument/detector - specific drizzle, source finding and photometric parameters exptime : float drizzled filter product exposure time in seconds plate_scale : float plate scale, in arcseconds/pixel median_sky : float median sky value catalog_name : string drizzled filter product catalog filename catalog_data : astropy.Table object drizzled filter product catalog data proc_type : string sourcelist generation type. drz_root_dir : string Root directory of drizzled images. hla_flag_msk : numpy.ndarray object mask array used by hla_nexp_flags(). 
ci_lookup_file_path : string final path elements of the concentration index lookup file output_custom_pars_file : string name of the output config file log_level : int The desired level of verboseness in the log statements displayed on the screen and written to the .log file. diagnostic_mode : bool write intermediate files? Returns ------- catalog_data : astropy.Table object drizzled filter product catalog data with updated flag values """ # set logging level to user-specified level log.setLevel(log_level) # Relevant equivalent column titles for aperture and segment catalogs all_column_titles = { "aperture": { "x_coltitle": "X-Center", "y_coltitle": "Y-Center", }, "segment": { "x_coltitle": "X-Centroid", "y_coltitle": "Y-Centroid", } } if proc_type not in all_column_titles.keys(): log.error("Unknown proc_type '{}', must be 'aperture' or 'segment'".format(proc_type)) raise ValueError("Unknown proc_type '{}', must be 'aperture' or 'segment'".format(proc_type)) column_titles = all_column_titles[proc_type] # ----------------------- # FLAG FILTER PROCESSING # ----------------------- log.info("************************** * * * HLA_FLAG_FILTER * * * **************************") # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Flag sources based on concentration index. 
log.info("Determining concentration indices for sources.") log.debug("ci_filter({} {} {} {} {} {} {} {} {} {})".format(drizzled_image, catalog_name, "<CATALOG DATA>", proc_type, param_dict, ci_lookup_file_path, output_custom_pars_file, column_titles, log_level, diagnostic_mode)) catalog_data = ci_filter(drizzled_image, catalog_name, catalog_data, proc_type, param_dict, ci_lookup_file_path, output_custom_pars_file, column_titles, log_level, diagnostic_mode) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Flag saturated sources log.info("Flagging saturated sources in the catalogs.") log.debug("hla_saturation_flags({} {} {} {} {} {} {} {} {})".format(drizzled_image, flt_list, catalog_name, "<Catalog Data>", proc_type, param_dict, plate_scale, column_titles, diagnostic_mode)) catalog_data = hla_saturation_flags(drizzled_image, flt_list, catalog_name, catalog_data, proc_type, param_dict, plate_scale, column_titles, diagnostic_mode) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Flag swarm sources # log.info("Flagging possible swarm features in catalogs") # log.debug("hla_swarm_flags({} {} {} {} {} {} {} {} {} {})".format(drizzled_image, catalog_name, "<Catalog Data>", # exptime, plate_scale, median_sky, proc_type, # param_dict, column_titles, diagnostic_mode)) # catalog_data = hla_swarm_flags(drizzled_image, catalog_name, catalog_data, exptime, plate_scale, median_sky, # proc_type, param_dict, column_titles, diagnostic_mode) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Flag sources from regions where there are a low (or a null) number of contributing exposures log.info("Flagging sources from regions observed with only a small number of exposures.") log.debug("hla_nexp_flags({} {} {} {} {} {} {} {} {} {})".format(drizzled_image, flt_list, param_dict, plate_scale, catalog_name, "<Catalog Data>", drz_root_dir, "<MASK_ARRAY>", column_titles, diagnostic_mode)) catalog_data = hla_nexp_flags(drizzled_image, flt_list, param_dict, 
plate_scale, catalog_name, catalog_data, drz_root_dir, hla_flag_msk, column_titles, diagnostic_mode) display_catalog_bit_populations(catalog_data['Flags']) return catalog_data # ====================================================================================================================== def ci_filter(drizzled_image, catalog_name, catalog_data, proc_type, param_dict, ci_lookup_file_path, output_custom_pars_file, column_titles, log_level, diagnostic_mode): """This subroutine flags sources based on concentration index. Sources below the minimum CI value are flagged as hot pixels/CRs (flag=16). Sources above the maximum (for stars) are flagged as extended (flag=1). It also flags sources below the detection limit in mag_aper2 (flag=8). Parameters ---------- drizzled_image : string drizzled filter product image filename catalog_name : string drizzled filter product catalog filename catalog_data : astropy.Table object drizzled filter product catalog data proc_type : string Sourcelist generation type param_dict : dictionary Dictionary of instrument/detector - specific drizzle, source finding and photometric parameters ci_lookup_file_path : string final path elements of the concentration index lookup file output_custom_pars_file : string name of the output config file column_titles : dictionary Relevant column titles log_level : int The desired level of verboseness in the log statements displayed on the screen and written to the .log file. diagnostic_mode : bool write intermediate files? 
Returns ------- catalog_data : astropy.Table object drizzled filter product catalog data with updated flag values """ catalog_name_root = catalog_name.split('.')[0] par_dict = param_dict['quality control']['ci filter'][proc_type] ci_lower_limit = float(par_dict['ci_lower_limit']) ci_upper_limit = float(par_dict['ci_upper_limit']) snr = float(par_dict['bthresh']) if par_dict['lookup_ci_limits_from_table']: # replace CI limits with values from table if possible cidict = ci_table.get_ci_from_file(drizzled_image, ci_lookup_file_path, log_level, diagnostic_mode=diagnostic_mode, ci_lower=ci_lower_limit, ci_upper=ci_upper_limit) # TODO: add values for ACS/SBC ci_lower_limit = cidict['ci_lower_limit'] ci_upper_limit = cidict['ci_upper_limit'] # if an output custom param file was created and the CI values were updated by ci_table.get_ci_from_file, # update output custom param file with new CI values if output_custom_pars_file: if ci_lower_limit != float(par_dict['ci_lower_limit']) or \ ci_upper_limit != float(par_dict['ci_upper_limit']): log.info("CI limits updated.") with open(output_custom_pars_file) as f: json_data = json.load(f) if ci_lookup_file_path.startswith("default"): param_set = "default_values" else: param_set = "parameters" if ci_lower_limit != float(par_dict['ci_lower_limit']): json_data[drizzled_image[:-9]][param_set]["quality control"]["ci filter"][proc_type]["ci_lower_limit"]\ = ci_lower_limit if ci_upper_limit != float(par_dict['ci_upper_limit']): json_data[drizzled_image[:-9]][param_set]["quality control"]["ci filter"][proc_type]["ci_upper_limit"]\ = ci_upper_limit with open(output_custom_pars_file, 'w') as f: json.dump(json_data, f, indent=4) log.info("Updated custom pars file {}".format(output_custom_pars_file)) else: log.info("Using existing concentration index limits from parameter file") log.info(' ') log.info('ci limits for {}'.format(drizzled_image)) log.info('ci_lower_limit = {}'.format(ci_lower_limit)) log.info('ci_upper_limit = 
{}'.format(ci_upper_limit)) log.info(' ') failed_index_list = [] for i, table_row in enumerate(catalog_data): try: table_row["Flags"] = int(table_row["Flags"]) except ValueError: table_row["Flags"] = 0 ci_value = table_row["CI"] if ci_value: ci_value = float(ci_value) merr1 = table_row["MagErrAp1"] if not merr1: merr1 = numpy.nan else: merr1 = float(merr1) merr2 = table_row["MagErrAp2"] if not merr2: merr2 = numpy.nan else: merr2 = float(merr2) good_snr = merr2 <= 2.5 / (snr * numpy.log(10)) ci_err = numpy.sqrt(merr1 ** 2 + merr2 ** 2) if not good_snr: table_row["Flags"] |= 8 if not ci_value or (not numpy.isfinite(ci_err)) or ci_value < ci_lower_limit - ci_err: table_row["Flags"] |= 16 if not ci_value or ci_value > ci_upper_limit: table_row["Flags"] |= 1 if not ci_value and diagnostic_mode: failed_index_list.append(i) if diagnostic_mode: # Write out list of ONLY failed rows to to file catalog_name_failed = catalog_name_root + '_Failed-CI.txt' catalog_data_failed = catalog_data.copy() all_indicies = range(0, len(catalog_data)) rows_to_remove = [z for z in all_indicies if z not in failed_index_list] catalog_data_failed.remove_rows(rows_to_remove) catalog_data_failed.write(catalog_name_failed, delimiter=",", format='ascii') # Write out intermediate catalog with updated flags catalog_name = catalog_name_root + 'CIFILT.txt' catalog_data.write(catalog_name, delimiter=",", format='ascii') return catalog_data # ====================================================================================================================== def hla_saturation_flags(drizzled_image, flt_list, catalog_name, catalog_data, proc_type, param_dict, plate_scale, column_titles, diagnostic_mode): """Identifies and flags saturated sources. 
Parameters ---------- drizzled_image : string drizzled filter product image filename flt_list : list list of calibrated images that were drizzle-combined to produce image specified by input parameter 'drizzled_image' catalog_name : string drizzled filter product catalog filename to process catalog_data : astropy.Table object drizzled filter product catalog data to process proc_type : string sourcelist generation type. param_dict : dictionary Dictionary of instrument/detector - specific drizzle, source finding and photometric parameters plate_scale : float plate scale, in arcseconds/pixel column_titles : dictionary Relevant column titles diagnostic_mode : bool write intermediate files? Returns ------- phot_table_rows : astropy.Table object drizzled filter product catalog data with updated flag values """ image_split = drizzled_image.split('/')[-1] channel = drizzled_image.split("_")[4].upper() if channel == 'IR': # TODO: Test and IR case just to make sure that IR shouldn't be skipped return catalog_data # ------------------------------------------------------------------- # STEP THROUGH EACH APPLICABLE FLT IMAGE, DETERMINE THE COORDINATES # FOR ALL SATURATION FLAGGED PIXELS, AND TRANSFORM THESE COORDINATES # INTO THE DRIZZLED IMAGE REFERENCE FRAME. 
# ------------------------------------------------------------------- num_flts_in_main_driz = len(flt_list) flt_list.sort() log.info(' ') log.info("Current Working Directory: {}".format(os.getcwd())) log.info(' ') log.info('LIST OF FLTS IN {}: {}'.format(drizzled_image.split('/')[-1], flt_list)) log.info(' ') log.info('NUMBER OF FLTS IN {}: {}'.format(drizzled_image.split('/')[-1], num_flts_in_main_driz)) log.info(' ') # ---------------------------------------------------- # EXTRACT DQ DATA FROM FLT IMAGE AND CREATE A LIST # OF "ALL" PIXEL COORDINATES WITH A FLAG VALUE OF 256 # ---------------------------------------------------- if ((channel.lower() != 'wfpc2') and (channel.lower() != 'pc')): if channel.lower() in ['wfc', 'uvis']: image_ext_list = ["[sci,1]", "[sci,2]"] if channel.lower() in ['sbc', 'hrc']: image_ext_list = ["[sci,1]"] dq_sat_bit = 256 if channel.lower() == 'wfpc2': image_ext_list = ["[sci,1]", "[sci,2]", "[sci,3]", "[sci,4]"] dq_sat_bit = 8 if channel.lower() == 'pc': image_ext_list = ["[sci,1]"] dq_sat_bit = 8 # build list of arrays drz_sat_xy_coords_list = [] for flt_cnt, flt_image in enumerate(flt_list): for ext_cnt, image_ext in enumerate(image_ext_list): ext_part = image_ext.split(',')[1].split(']')[0] try: if ((channel.lower() != 'wfpc2') and (channel.lower() != 'pc')): flt_data = fits.getdata(flt_image, 'DQ', int(ext_part)) if ((channel.lower() == 'wfpc2') or (channel.lower() == 'pc')): flt_data = fits.getdata(flt_image.replace("_c0m", "_c1m"), 'SCI', int(ext_part)) except KeyError: log.info(' ') log.info('WARNING: There is only one set of file extensions in {}'.format(flt_image)) log.info(' ') continue # TODO: Should we also look for pixels flagged with DQ value 2048 (A to D saturation) for ACS data? # ---------------------------------------------------- # DETERMINE IF ANY OF THE PIXELS LOCATED IN THE GRID # HAVE A BIT VALUE OF 256, I.E. FULL WELL SATURATION. 
# ---------------------------------------------------- # NOTE: NUMPY ARRAYS REPORT Y COORD VALUES FIRST AND # X COORD VALUES SECOND AS FOLLOWS: # # --> numpy.shape(flt_data) # (2051, 4096) # # WHERE 2051 IS THE NUMBER OF PIXELS IN THE Y # DIRECTION, AND 4096 IS THE NUMBER OF PIXELS # IN THE X DIRECTION. # ---------------------------------------------------- bit_flt_data = dq_sat_bit & flt_data complete_sat_coords = numpy.where(bit_flt_data == dq_sat_bit) if len(complete_sat_coords[0]) == 0: continue # ------------------------------------------------- # RESTRUCTURE THE LIST OF X AND Y COORDINATES FROM # THE FLT FILE THAT HAVE BEEN FLAGGED AS SATURATED # ------------------------------------------------- nsat = len(complete_sat_coords[0]) x_y_array = numpy.empty((nsat, 2), dtype=int) x_y_array[:, 0] = complete_sat_coords[1] x_y_array[:, 1] = complete_sat_coords[0] # --------------------------------------------------- # WRITE FLT COORDS TO A FILE FOR DIAGNOSTIC PURPOSES # --------------------------------------------------- if diagnostic_mode: flt_xy_coord_out = flt_image.split('/')[-1].split('.')[0] + '_sci' + str(ext_cnt + 1) + '.txt' outfile = open(flt_xy_coord_out, 'w') for flt_xy_coord in x_y_array: x = flt_xy_coord[0] y = flt_xy_coord[1] outfile.write(str(x) + ' ' + str(y) + '\n') outfile.close() # ---------------------------------------------------- # CONVERT SATURATION FLAGGED X AND Y COORDINATES FROM # THE FLT IMAGE INTO RA AND DEC # ---------------------------------------------------- flt_ra_dec_coords = xytord(x_y_array, flt_image, image_ext) # ------------------------------------------------- # CONVERT RA & DEC VALUES FROM FLT REFERENCE FRAME # TO THAT OF THE DRIZZLED IMAGE REFERENCE FRAME # ------------------------------------------------- drz_sat_xy_coords_list.append(rdtoxy(flt_ra_dec_coords, drizzled_image, "[sci,1]")) log.info(' ') log.info('FLT IMAGE = {}'.format(flt_image.split('/')[-1])) log.info('IMAGE EXT = {}'.format(image_ext)) log.info(' ') # 
---------------------------------------------------------------- # IF NO SATURATION FLAGS EXIST IN ANY OF THE FLT FILES, THEN SKIP # ---------------------------------------------------------------- if len(drz_sat_xy_coords_list) == 0: log.info(' ') log.info('*******************************************************************************************') log.info('NO SATURATION FLAGGED PIXELS EXIST IN ANY OF THE FLT FILES FOR:') log.info(' --> {}'.format(drizzled_image.split('/')[-1])) log.info('*******************************************************************************************') log.info(' ') return catalog_data # ------------------------------ # now concatenate all the arrays # ------------------------------ full_sat_list = numpy.concatenate(drz_sat_xy_coords_list) # -------------------------------------------- # WRITE RA & DEC FLT CONVERTED X & Y DRIZZLED # IMAGE COORDINATES TO A TEXT FILE # -------------------------------------------- if diagnostic_mode: drz_coord_file = drizzled_image.split('/')[-1].split('.')[0] + '_ALL_FLT_SAT_FLAG_PIX.txt' drz_coord_out = open(drz_coord_file, 'w') for coord in full_sat_list: drz_coord_out.write(str(coord[0]) + ' ' + str(coord[1]) + '\n') drz_coord_out.close() # ---------------------------------------------------- # GET SOURCELIST X AND Y VALUES # ---------------------------------------------------- all_detections = catalog_data nrows = len(all_detections) full_coord_list = numpy.empty((nrows, 2), dtype=float) for row_count, detection in enumerate(all_detections): full_coord_list[row_count, 0] = float(detection[column_titles["x_coltitle"]]) full_coord_list[row_count, 1] = float(detection[column_titles["y_coltitle"]]) """ # This option to determine saturation from the drizzled image alone should complement # the computation based on the DQ array, since the IR (and MAMA?) detectors will not # have saturated sources that 'bleed' or 'top out'... 
# # Extract Ap2 radius from parameter dict # ap2 = param_dict['catalog generation']['aperture_2'] # # Convert source positions into slices # apers = CircularAperture(full_coord_list, ap2) # # Determine whether any source (slice) has more than 3 pixels # within 10% of the max value in the source slice. # If True, flag as saturated. # drz_img = fits.getdata(drizzled_image, ext=1) img_sat = numpy.zeros(len(full_coord_list), dtype=bool) for n,aper in enumerate(apers): if (drz_img[aper.bbox.slices] > drz_img[aper.bbox.slices].max() * 0.9).sum() > 3: img_sat[n] = True del drz_img """ # ---------------------------------------------------- # CREATE SUB-GROUPS OF SATURATION-FLAGGED COORDINATES # ---------------------------------------------------- proc_time1 = time.ctime() log.info(' ') log.info('PROC_TIME_1: {}'.format(proc_time1)) log.info(' ') # ---------------------------------- # Convert aperture radius to pixels # ---------------------------------- ap2 = param_dict['catalog generation']['aperture_2'] radius = round((ap2/plate_scale) + 0.5) * 2. 
log.info(' ') log.info('THE RADIAL DISTANCE BEING USED IS {} PIXELS'.format(str(radius))) log.info(' ') # do the cross-match using xymatch log.info('Matching {} saturated pixels with {} catalog sources'.format(len(full_sat_list), len(full_coord_list))) psat, pfull = xymatch(full_sat_list, full_coord_list, radius, multiple=True, verbose=False) log.info('Found cross-matches (including duplicates)'.format(len(psat))) saturation_flag = numpy.zeros(len(full_coord_list), dtype=bool) saturation_flag[pfull] = True proc_time2 = time.ctime() log.info(' ') log.info('PROC_TIME_2: {}'.format(proc_time2)) log.info(' ') # ------------------------------------------------------------------ # REMOVE DUPLICATE DETECTIONS FROM THE LIST, "group", CREATTED FROM # MATCHING SATURATION FLAGGED FLT PIXELS TO FINAL SOURCE DETECTIONS # ------------------------------------------------------------------ nsaturated = saturation_flag.sum() if nsaturated == 0: log.info(' ') log.info('**************************************************************************************') log.info('NOTE: NO SATURATED SOURCES WERE FOUND FOR: {}'.format(image_split)) log.info('**************************************************************************************') log.info(' ') return catalog_data else: log.info(' ') log.info('FLAGGED {} SOURCES'.format(nsaturated)) log.info(' ') if diagnostic_mode: sat_coord_file = drizzled_image.split('/')[-1].split('.')[0] + '_INTERMEDIATE.txt' sat_coord_out = open(sat_coord_file, 'w') for sat_coord in full_coord_list[saturation_flag, :]: sat_coord_out.write(str(sat_coord[0]) + ' ' + str(sat_coord[1]) + '\n') sat_coord_out.close() # -------------------------------------------------------------------------- # WRITE SAT FLAGS TO OUTPUT PHOT TABLE BASED ON flag_src_central_pixel_list # -------------------------------------------------------------------------- phot_table = catalog_name phot_table_root = phot_table.split('.')[0] phot_table_rows = catalog_data for i, table_row in 
enumerate(phot_table_rows): if saturation_flag[i]: table_row["Flags"] = int(table_row["Flags"]) | 4 phot_table_rows = flag4and8_hunter_killer(phot_table_rows, column_titles) if diagnostic_mode: phot_table_temp = phot_table_root + '_SATFILT.txt' phot_table_rows.write(phot_table_temp, delimiter=",", format='ascii') return phot_table_rows # ====================================================================================================================== def hla_swarm_flags(drizzled_image, catalog_name, catalog_data, exptime, plate_scale, median_sky, proc_type, param_dict, column_titles, diagnostic_mode): """Identifies and flags swarm sources. Parameters ---------- drizzled_image : string Name of drizzled image to process catalog_name : string drizzled filter product catalog filename to process catalog_data : astropy.Table object drizzled filter product catalog data to process exptime : float exposure of the specified drizzled image plate_scale : float plate scale, in arcseconds/pixel median_sky : float median sky value proc_type : string sourcelist generation type. param_dict : dictionary Dictionary of instrument/detector - specific drizzle, source finding and photometric parameters column_titles : dictionary Relevant column titles diagnostic_mode : bool write intermediate files? 
Returns ------- catalog_data : astropy.Table object drizzled filter product catalog data with updated flag values """ drz_img_path_split = drizzled_image.split('/') drz_img_split = drz_img_path_split[-1].split('_') data_type = drz_img_split[4] log.info(' ') log.info('MEDIAN SKY VALUE = {}'.format(median_sky)) log.info(' ') # ========================================== # ------------------------------------------ # ------------------------------------------ # CREATE LIST OF POTENTIAL SWARM DETECTIONS # ------------------------------------------ # ------------------------------------------ # ========================================== phot_table_root = catalog_name.split('/')[-1].split('.')[0] ap2 = param_dict['catalog generation']['aperture_2'] if proc_type not in ('segment', 'aperture'): log.error("Unknown catalog type '{}', must be 'aperture' or 'segment'".format(proc_type)) raise ValueError("Unknown catalog type '%s'" % proc_type) # ---------------------------------------------------------- # Determine plate scale relative to default HLA plate scale # This is required since all swarm parameters were derived # using HLA products and are tuned to the characteristics of # the HLA images (plate scale, gain, ...). This scale factor # will be used to scale the pixel-based parameters to the # specific plate scale of the HAP products. 
# ---------------------------------------------------------- hla_plate_scale = float(param_dict["quality control"]["swarm filter"]["HLA_plate_scale"]) scale_to_hla = hla_plate_scale / plate_scale # ---------------------------------- # Convert aperture radius to pixels # ---------------------------------- radius = ap2 / plate_scale log.info(' ') log.info('Aperture Size = {}'.format(ap2)) log.info('Pixel Scale = {} arcsec per pixel'.format(plate_scale)) log.info(' ') area = math.pi * radius**2 nrows = len(catalog_data) complete_src_list = numpy.empty((nrows, 6), dtype=float) for row_num, row in enumerate(catalog_data[0:]): x_val = float(row[column_titles["x_coltitle"]]) y_val = float(row[column_titles["y_coltitle"]]) flux = row["FluxAp2"] sky = row["MSkyAp2"] if not flux: flux = 0.0 if not sky: sky = 0.0 electronpp = flux / area * exptime eppsky = electronpp / median_sky complete_src_list[row_num, :] = [x_val, y_val, flux, electronpp, sky, eppsky] if len(complete_src_list) == 0: return catalog_data # view into the complete_src_list array for convenience swarm_epp_list_a = complete_src_list[:, 3] # swarm flag array # this will get set as candidates to flag are found swarm_flag = numpy.zeros(nrows, dtype=bool) # ------------------------------------------------------------ # WRITE SUBSET SOURCE LIST TO AN OUTPUT FILE FOR VERIFICATION # ------------------------------------------------------------ if diagnostic_mode: final_complete_source_file = open(phot_table_root + '_SWFILT_COMPLETE_SOURCE_FILE.txt', 'w') final_complete_source_file.write("# {}\n".format("-" * 96)) final_complete_source_file.write("# HAP Plate scale: {}\n".format(plate_scale)) final_complete_source_file.write("# HAP Plate scale relative to HLA: {}\n".format(scale_to_hla)) final_complete_source_file.write("# {}\n".format("-" * 96)) swfilt_table_header = "# X-Center Y-Center Flux ElectronPP Sky EPPSKY_Ratio \n" final_complete_source_file.write(swfilt_table_header) final_complete_source_file.write("# 
{}\n".format("-" * 96)) for i, complete_src_value in enumerate(complete_src_list): final_complete_source_file.write(str(complete_src_value[0]) + ' ' + str(complete_src_value[1]) + ' ' + str(complete_src_value[2]) + ' ' + str(complete_src_value[3]) + ' ' + str(complete_src_value[4]) + ' ' + str(complete_src_value[5]) + '\n') final_complete_source_file.close() # ====================================================================== # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Introduce 2 thresholds: # ----------------------- # A minimum electronpp, and a minimum electronpp/sky. # The thresholds should have different values for IR and UVIS. # # For IR, sources that have electronpp > 100k, OR # ((electronpp > 100*sky) AND (electronpp > 10k)), should be considered. # # For UVIS, I would set the thresholds at (electronpp > 100k) OR # ((electronpp > 1000*sky) AND (electronpp > 10k)). # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ====================================================================== upper_epp_limit = float(param_dict["quality control"]["swarm filter"]["upper_epp_limit"]) lower_epp_limit = float(param_dict["quality control"]["swarm filter"]["lower_epp_limit"]) eppsky_limit_cfg = float(param_dict["quality control"]["swarm filter"]["eppsky_limit"]) selfradius = float(param_dict["quality control"]["swarm filter"]["selfradius"]) # TODO: optimize selfradius values for ACS/HRC, ACS/SBC in quality control param files selfradius *= scale_to_hla eppsky_limit = eppsky_limit_cfg * median_sky # ---------------------------------------------------------- # UVIS --> EPP > 100000. OR (EPP > 1000*sky AND EPP > 10000) # IR --> EPP > 100000. 
OR (EPP > 100*sky AND EPP > 10000) # ---------------------------------------------------------- initial_central_pixel_positions = numpy.where(numpy.logical_or(swarm_epp_list_a > upper_epp_limit, numpy.logical_and(swarm_epp_list_a > eppsky_limit, swarm_epp_list_a > lower_epp_limit)))[0] initial_central_pixel_list = complete_src_list[initial_central_pixel_positions, :] if len(initial_central_pixel_positions) == 0: # no bright objects # copy empty lists so output file is created anyway final_central_pixel_positions = initial_central_pixel_positions final_flag_src_central_pixel_list = initial_central_pixel_list else: # --------------------------------------------------------- # Remove duplicate central pixel position swarm candidates # Keep only objects that are the brightest within 20 pixels # --------------------------------------------------------- # ------------------------------------------- # match initial central pixel list to itself # ------------------------------------------- if data_type.upper() == 'IR': # ------------------------------------------------------- # Define EPP cut values for filtering multiple detections # from a given central positions for a swarm candidate # ------------------------------------------------------- cuts = param_dict["quality control"]["swarm filter"]["cuts_list"] cuts = list(map(float, cuts)) selfradii = param_dict["quality control"]["swarm filter"]["selfradii_list"] selfradii = list(map(float, selfradii)) selfradii = numpy.array(selfradii) * scale_to_hla p1 = [] p2 = [] for cut_cnt, cut in enumerate(cuts): # -------------------------------------------------------------------- # Extract indices of detections that are within the set EPP cut range # -------------------------------------------------------------------- if cut_cnt == 0: cut_value_positions = numpy.where(initial_central_pixel_list[:, 3:4] > cut)[0] else: cut_value_positions = numpy.where(numpy.logical_and(initial_central_pixel_list[:, 3:4] >= cut, 
initial_central_pixel_list[:, 3:4] <= cuts[cut_cnt - 1]))[0] # ----------------------------------------------- # If no detections exist for the specified EPP # cut range, then continue to the next cut range # ----------------------------------------------- if len(cut_value_positions) == 0: continue # ----------------------------------------------------------------------- # Determine all matches for detections in "cut_value_positions" # within the radius value identified for the cut range being implemented # ----------------------------------------------------------------------- p1_sub, p2_sub = xymatch(initial_central_pixel_list[cut_value_positions, :][:, 0:2], initial_central_pixel_list[:, 0:2], selfradii[cut_cnt], multiple=True, stack=False, verbose=False) # ------------------------------------------ # For each cut range, add the corresponding # matches to each detection to a final list # ------------------------------------------ for p1_arr in p1_sub: p1.append(p1_arr) for p2_arr in p2_sub: p2.append(p2_arr) # Not sure if this is still needed??? 
# ------------------------------------ if cut_cnt == len(cuts) - 1: if len(p1) == 0 and len(p2) == 0: p1, p2 = xymatch(initial_central_pixel_list[:, 0:2], initial_central_pixel_list[:, 0:2], selfradius, multiple=True, stack=False, verbose=False) # --------------------------------------------------------------------- # each object is guaranteed to have at least one match (itself) # get brightest of each group of matches by building a list of indices # --------------------------------------------------------------------- exclude_index = None for i1, i2 in zip(p1, p2): flux2 = initial_central_pixel_list[i2, 2] # ------------------------------------------------------------- # Verify that there is more than one detection in a given group # otherwise no detection is added to exclude index because # there is only one detection for the source being evaluated # ------------------------------------------------------------- if len(i2[numpy.where(flux2 < numpy.max(flux2))]) > 0: # ---------------------------------------------------------- # Add all detections in grouping with a flux value less than # that of the maximum flux value to an array to be excluded # ---------------------------------------------------------- if exclude_index is None: exclude_index = i2[numpy.where(flux2 < numpy.max(flux2))] else: exclude_index = numpy.concatenate((exclude_index, i2[numpy.where(flux2 < numpy.max(flux2))]), axis=0) exclude_index = exclude_index.astype(numpy.int32) # ----------------------------------------------------------- # exclude_index can have multiple copies of the same index # use exclude_bool array to get a list of the unique indices # ----------------------------------------------------------- exclude_bool = numpy.ones(len(initial_central_pixel_list), dtype=bool) if not (exclude_index is None): exclude_bool[exclude_index] = False out_values = numpy.where(exclude_bool)[0] # ------------------------------------------------------------------------------- # Create final source 
list based on where the excluded detection indices are not # ------------------------------------------------------------------------------- final_central_pixel_positions = initial_central_pixel_positions[out_values] final_flag_src_central_pixel_list = initial_central_pixel_list[out_values, :] else: p1, p2 = xymatch(initial_central_pixel_list[:, 0:2], initial_central_pixel_list[:, 0:2], selfradius, multiple=True, stack=False, verbose=False) # --------------------------------------------------------------------- # each object is guaranteed to have at least one match (itself) # get brightest of each group of matches by building a list of indices # --------------------------------------------------------------------- keep_index = numpy.arange(len(initial_central_pixel_list), dtype=int) for i1, i2 in zip(p1, p2): flux2 = initial_central_pixel_list[i2, 2] keep_index[i1] = i2[flux2.argmax()] # -------------------------------------------------------- # keep_index can have multiple copies of the same index # use keep_bool array to get a list of the unique indices # -------------------------------------------------------- keep_bool = numpy.zeros(len(initial_central_pixel_list), dtype=bool) keep_bool[keep_index] = True in_values = numpy.where(keep_bool)[0] final_central_pixel_positions = initial_central_pixel_positions[in_values] final_flag_src_central_pixel_list = initial_central_pixel_list[in_values, :] # --------------------------------------------------- # WRITE CENTRAL PIXEL POSITIONS FOR SWARMS TO A FILE # --------------------------------------------------- if diagnostic_mode: cetrl_pix_pos_file = phot_table_root + '_SWFILT_CENTRAL-PIX-POS.txt' drz_coord_out = open(cetrl_pix_pos_file, 'w') for i in range(len(final_flag_src_central_pixel_list)): drz_coord_out.write(str(final_flag_src_central_pixel_list[i, 0]) + ' ' + str(final_flag_src_central_pixel_list[i, 1]) + ' ' + str(final_flag_src_central_pixel_list[i, 2]) + ' ' + str(final_flag_src_central_pixel_list[i, 3]) + ' 
' + str(final_flag_src_central_pixel_list[i, 4]) + ' ' + str(final_flag_src_central_pixel_list[i, 5]) + '\n') drz_coord_out.close() # ========================================================================== # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # EXTRACT THE CENTRAL PIXEL POSITIONS IN final_flag_src_central_pixel_list, # FROM swarm_x_list_b AND swarm_y_list_b # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # ========================================================================== swarm_thresh = float(param_dict["quality control"]["swarm filter"]["swarm_thresh"]) clip_radius_list = param_dict["quality control"]["swarm filter"]["clip_radius_list"] clip_radius_list = numpy.array(list(map(float, clip_radius_list))) * scale_to_hla scale_factor_list = param_dict["quality control"]["swarm filter"]["scale_factor_list"] scale_factor_list = list(map(float, scale_factor_list)) log.info('SWARM FILTER CLIP_RADIUS_LIST: {}'.format(clip_radius_list)) log.info('SWARM FILTER SCALE_FACTOR_LIST: {}'.format(scale_factor_list)) # get list of objects not in the central pixel list keep = numpy.ones(nrows, dtype=bool) keep[final_central_pixel_positions] = False notcentral_index = numpy.where(keep)[0] swarm_list_b = complete_src_list[notcentral_index, :] # views into the swarm_list_b array for convenience swarm_x_list_b = swarm_list_b[:, 0] swarm_y_list_b = swarm_list_b[:, 1] # --------------------------------------------------------------------- # ITERATIVELY CLIP SOURCES CONTAINED WITHIN RINGS AT SPECIFIED RADIUS # VALUES, PROGRESSIVELY MOVING CLOSER TO THE CENTRAL SOURCE # --------------------------------------------------------------------- # do the cross-match using xymatch log.info('Matching {} swarm centers with {} catalog 
sources'.format(len(final_flag_src_central_pixel_list), len(swarm_list_b))) pcentral, pfull = xymatch(final_flag_src_central_pixel_list[:, 0:2], swarm_list_b[:, 0:2], clip_radius_list[0], multiple=True, stack=False, verbose=False) # TODO: RLW: the ring list is needed only for testing, get rid of it when code works if diagnostic_mode: ring_index_list = [] ring_refepp_list = [] ring_thresh_list = [] ring_count = [] for pindex, ii in enumerate(pcentral): central_pixel_value = final_flag_src_central_pixel_list[ii, :] log.debug(' ') log.debug('CENTRAL PIXEL VALUE: {}'.format(central_pixel_value)) log.debug(' ') base_epp = central_pixel_value[3] coords = central_pixel_value[0:2] allmatches = pfull[pindex] if len(allmatches) == 0: # (this should not happen using xymatch) log.info(' ') log.info('------------------------------------------') log.info('NOTE: NO SWARM CANDIDATES FOR THIS SOURCE ') log.info('------------------------------------------') log.info(' ') continue distsq = (swarm_x_list_b[allmatches]-coords[0])**2 + (swarm_y_list_b[allmatches]-coords[1])**2 sind = distsq.argsort() allmatches = allmatches[sind] distsq = distsq[sind] rcut = distsq.searchsorted(numpy.array(clip_radius_list)**2) for radius_cnt in range(1, len(clip_radius_list)): # ------------------------------------------- # ISOLATE THE DETECTIONS WITHIN A GIVEN RING # ------------------------------------------- matches = allmatches[rcut[radius_cnt]:rcut[radius_cnt-1]] if len(matches) == 0: log.debug(' ') log.debug('------------------------------------------') log.debug('NOTE: ALL MATCHES/DETECTIONS IN THIS RING ') log.debug(' HAVE PREVIOUSLY BEEN ACCOUNTED FOR ') log.debug('------------------------------------------') log.debug(' ') continue # ----------------------------------------------------------- # CALCULATE THE MEDIAN SKY VALUE FOR THE GROUP OF DETECTIONS # CONTAINED WITHIN THE SPECIFIED RING BEING PROCESSED # ----------------------------------------------------------- ref_epp = base_epp * 
scale_factor_list[radius_cnt-1] # ----------------------------------------------------------------------------------- # DIFFERENTIATE BETWEEN GOOD DETECTIONS AND SWARM DETECTIONS WITHIN SPECIFIED RINGS # ----------------------------------------------------------------------------------- ring = swarm_list_b[matches, :] w = numpy.where(ring[:, 3]/ref_epp < swarm_thresh) if len(w) > 0: swarm_flag[notcentral_index[matches[w]]] = True # TODO: RLW: following needed only for testing, get rid of it when code works if diagnostic_mode: ring_index_list.append(matches) ring_count.append(len(matches)) ring_refepp_list.append(ring[:, 3]/ref_epp) ring_thresh_list.append(swarm_thresh) # TODO: RLW: following needed only for testing, get rid of it when code works if diagnostic_mode: # ----------------------------------------------------------------------------------------- # WRITE CLIPPED SOURCES CONTAINED WITHIN RINGS TO AN OUTPUT FILE FOR INTERMEDIATE ANALYSIS # ----------------------------------------------------------------------------------------- ring_source_file = phot_table_root+'_SWFILT_RING-SOURCE-INFO.txt' ring_src_outfile = open(ring_source_file, 'w') ring_src_outfile.write("# {}\n".format("-"*96)) swfilt_ring_file_header = "# X-Center Y-Center Flux ElectronPP" swfilt_ring_file_header += " Sky SrcEPP/RefEPP Swarm Thresh \n" ring_src_outfile.write(swfilt_ring_file_header) ring_src_outfile.write("# {}\n".format("-"*96)) if ring_index_list: ring_index_list = numpy.concatenate(ring_index_list) # select just the lowest value of refepp/swarm threshold for each source # create array with extra columns ring_source_list = numpy.empty((len(ring_index_list), 9), dtype=float) ring_source_list[:, 0:6] = swarm_list_b[ring_index_list, :] ring_source_list[:, 6] = numpy.concatenate(ring_refepp_list) ring_source_list[:, 7] = numpy.repeat(ring_thresh_list, ring_count) ring_source_list[:, 8] = ring_source_list[:, 6] / ring_source_list[:, 7] # sort by x, y, and refepp # tricky here: get a 
view with named columns, then specify names as sort items ring_source_list.view(','.join(['f8']*9)).sort(order=['f0', 'f1', 'f8'], axis=0) # keep just first entry when the same source appears more than once keep = numpy.ones(len(ring_index_list), dtype=bool) keep[1:] = numpy.logical_or(ring_source_list[1:, 0] != ring_source_list[:-1, 0], ring_source_list[1:, 1] != ring_source_list[:-1, 1]) ring_source_list = ring_source_list[keep, :] for ring_source in ring_source_list: ring_src_outfile.write(str(ring_source[0]) + ' ' + str(ring_source[1]) + ' ' + str(ring_source[2]) + ' ' + str(ring_source[3]) + ' ' + str(ring_source[4]) + ' ' + str(ring_source[5]) + ' ' + str(ring_source[6]) + ' ' + str(ring_source[7]) + '\n') ring_src_outfile.close() # XXX RLW: end of testing code # =================================================================================== # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # ----- PROXIMITY FILTER ----- # EXTRACT ADDITIONAL SWARM DETECTIONS BASED ON THE SWARM CANDIDATE CENTRAL POSITIONS, # DEFINING THE REMOVAL RADIUS AROUND EACH POSITION BASED ON THAT SOURCE'S EPP # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # =================================================================================== proximity_flag = numpy.zeros(nrows, dtype=bool) proximity_choice = param_dict["quality control"]["swarm filter"]["proximity_binary"] if proximity_choice: if len(final_flag_src_central_pixel_list) > 0: ctr_list_radius_list = param_dict["quality control"]["swarm filter"]["ctrList_radiusList"] # TODO: optimize ctr_list_radius_list for ACS wfc, hrc, sbc in quality control config files ctr_list_radius_list = list(map(int, ctr_list_radius_list)) ctr_list_radius_list = numpy.array(ctr_list_radius_list) * 
scale_to_hla ctr_list_threshold_list = param_dict["quality control"]["swarm filter"]["ctrList_thresholdList"] # TODO: optimize ctr_list_threshold_list for ACS wfc, hrc, sbc in quality control config files ctr_list_threshold_list = list(map(int, ctr_list_threshold_list)) for ctr_list_cnt, (threshold, radius) in enumerate(zip(ctr_list_threshold_list, ctr_list_radius_list)): if ctr_list_cnt == 0: ctr_list_cut = final_flag_src_central_pixel_list[:, 3] > threshold else: ctr_list_cut = numpy.logical_and(final_flag_src_central_pixel_list[:, 3] > threshold, final_flag_src_central_pixel_list[:, 3] <= ctr_list_threshold_list[ctr_list_cnt - 1]) ctr_list_cut1 = final_flag_src_central_pixel_list[ctr_list_cut, :] pcentral, pfull = xymatch(ctr_list_cut1[:, 0:2], swarm_list_b[:, 0:2], radius, multiple=True, verbose=False) proximity_flag[notcentral_index[pfull]] = True log.info("Proximity filter flagged {} sources".format(proximity_flag.sum())) # -------------------------------------------------------------------------- # WRITE NEAR CENTRAL POSITION SWARM LIST TO AN OUTPUT FILE FOR VERIFICATION # -------------------------------------------------------------------------- if diagnostic_mode: near_swm_list = complete_src_list[proximity_flag, :] final_near_swarm_file = open(phot_table_root+'_SWFILT_NEAR_SWARM_FILE.txt', 'w') for swarm_value in near_swm_list: final_near_swarm_file.write(str(swarm_value[0]) + ' ' + str(swarm_value[1]) + ' ' + str(swarm_value[2]) + ' ' + str(swarm_value[3]) + ' ' + str(swarm_value[4]) + '\n') final_near_swarm_file.close() # ------------------------------------------------------------------------- # EXTRACT DETECTIONS FROM THE complete_src_list THAT ARE NOT FLAGGED # ------------------------------------------------------------------------- combined_flag = numpy.logical_or(swarm_flag, proximity_flag) final_swarm_list = complete_src_list[combined_flag, :] final_source_list = complete_src_list[numpy.logical_not(combined_flag), :] log.info(' ') 
log.info('************************************************') log.info('INITIAL LENGTH OF complete_src_list = {}'.format(len(complete_src_list))) log.info(' ') log.info('LENGTH OF final_source_list = {}'.format(len(final_source_list))) log.info('LENGTH OF final_swarm_list = {}'.format(len(final_swarm_list))) log.info('TOTAL LENGTH = {}'.format(len(final_source_list) + len(final_swarm_list))) log.info(' ') log.info('MEDIAN SKY VALUE = {}'.format(median_sky)) log.info('************************************************') log.info(' ') # ---------------------------------------------------- # WRITE SWARM LIST TO AN OUTPUT FILE FOR VERIFICATION # ---------------------------------------------------- if diagnostic_mode: final_swarm_file = open(phot_table_root+'_SWFILT_SWARM_FILE.txt', 'w') for swarm_value in final_swarm_list: final_swarm_file.write(str(swarm_value[0]) + ' ' + str(swarm_value[1]) + ' ' + str(swarm_value[2]) + ' ' + str(swarm_value[3]) + ' ' + str(swarm_value[4]) + '\n') final_swarm_file.close() # ---------------------------------------------------- # WRITE SOURCE LIST TO AN OUTPUT FILE FOR VERIFICATION # ---------------------------------------------------- if diagnostic_mode: final_source_file = open(phot_table_root+'_SWFILT_SOURCE_FILE.txt', 'w') for source_value in final_source_list: final_source_file.write(str(source_value[0]) + ' ' + str(source_value[1]) + ' ' + str(source_value[2]) + ' ' + str(source_value[3]) + ' ' + str(source_value[4]) + '\n') final_source_file.close() # Update catalog_data flag values for i, table_row in enumerate(catalog_data[0:]): if combined_flag[i]: table_row["Flags"] |= 32 if diagnostic_mode: # Write out intermediate catalog with updated flags phot_table_temp = phot_table_root + '_SWFILT.txt' catalog_data.write(phot_table_temp, delimiter=",", format='ascii') return catalog_data # ====================================================================================================================== def 
def hla_nexp_flags(drizzled_image, flt_list, param_dict, plate_scale, catalog_name, catalog_data, drz_root_dir,
                   mask_data, column_titles, diagnostic_mode):
    """flags out sources from regions where there are a low (or a null) number of contributing exposures

    Sources whose aperture touches any pixel with fewer contributing exposures
    than a channel-dependent threshold get flag bit 64 set.

    Parameters
    ----------
    drizzled_image : string
        Name of drizzled image to process

    flt_list : list
        list of calibrated images that were drizzle-combined to produce image
        specified by input parameter 'drizzled_image'

    param_dict : dictionary
        Dictionary of instrument/detector - specific drizzle, source finding
        and photometric parameters.

    plate_scale : float
        plate scale, in arcseconds/pixel

    catalog_name : string
        drizzled filter product catalog filename to process

    catalog_data : astropy.Table object
        drizzled filter product catalog data to process

    drz_root_dir :
        dictionary of source lists keyed by drizzled image name.

    mask_data : numpy.ndarray object
        mask array used by hla_nexp_flags().

    column_titles : dictionary
        Relevant column titles

    diagnostic_mode : bool
        write intermediate files?

    Returns
    -------
    catalog_data : astropy.Table object
        drizzled filter product catalog data with updated flag values
    """
    # ------------------
    # CREATE NEXP IMAGE
    # ------------------
    channel = drizzled_image.split("_")[4].upper()
    # if channel == 'IR':  # TODO: This was commented out in the HLA classic era, prior to adaption to the HAP pipeline. Ask Rick about it.
    #     return catalog_data
    drz_data = fits.getdata(drizzled_image, 1)
    component_drz_img_list = get_component_drz_list(drizzled_image, drz_root_dir, flt_list)

    # NOTE(review): nx is taken from shape[0] (rows) and ny from shape[1]
    # (columns); the names look swapped relative to image x/y, but the
    # indexing below is consistent with this convention.
    nx = drz_data.shape[0]
    ny = drz_data.shape[1]
    nexp_array = numpy.zeros((nx, ny), dtype=numpy.int32)

    # accumulate, per pixel, how many component exposures contribute a nonzero value
    for comp_drz_img in component_drz_img_list:
        comp_drz_data = (fits.getdata(comp_drz_img) != 0).astype(numpy.int32)
        try:
            nexp_array += comp_drz_data
        except ValueError:
            # component image shape can differ by a row/column; trim to fit
            log.info("WARNING: Astrodrizzle added an extra-row/column...")
            nexp_array += comp_drz_data[0:nx, 0:ny]

    # this bit is added to get the mask integrated into the exp map
    mask_array = (mask_data == 0.0).astype(numpy.int32)
    nexp_array = nexp_array * mask_array

    # -------------------------------------------------------
    # EXTRACT FLUX/NEXP INFORMATION FROM NEXP IMAGE BASED ON
    # THE SOURCE DETECTION POSITIONS PREVIOUSLY ESTABLISHED
    # -------------------------------------------------------
    phot_table_root = catalog_name.split('/')[-1].split('.')[0]

    nrows = len(catalog_data)
    cat_coords = numpy.empty((nrows, 2), dtype=float)
    for line_cnt, phot_table_line in enumerate(catalog_data):
        x_coord = phot_table_line[column_titles["x_coltitle"]]
        y_coord = phot_table_line[column_titles["y_coltitle"]]
        cat_coords[line_cnt, :] = [x_coord, y_coord]

    # ----------------------------------
    # Convert aperture radius to pixels
    # ----------------------------------
    ap2 = param_dict['catalog generation']['aperture_2']
    radius = ap2/plate_scale

    num_exp = round(numpy.max(nexp_array))
    if num_exp <= 1 or channel in ('IR', 'SBC'):
        # Keep everything that has an exposure for detectors without CRs or
        # when there is only one exposure
        artifact_filt = 0.5
    elif num_exp > 5:
        # Flag sources with <= 2 exposures when there are > 5 total
        # We are always using the 'imedian' combination in that case, and it
        # does not do a very good job of rejecting CRs with only 2 available
        # exposures
        artifact_filt = 2.5
    else:
        artifact_filt = 1.5

    # +0.5 rounds each coordinate to the nearest pixel before int truncation
    icoords = (cat_coords+0.5).astype(int)
    # note x & y are swapped so they can index the numpy array nexp_array
    # catalog x is second subscript, catalog y is first subscript
    ix = icoords[:, 1]
    iy = icoords[:, 0]

    # get list of neighboring pixels that are within radius
    iradius = int(radius+1)
    idiam = iradius*2+1
    gx, gy = numpy.mgrid[0:idiam, 0:idiam] - iradius
    gx = gx.ravel()
    gy = gy.ravel()
    w = numpy.where(gx**2+gy**2 <= radius**2)[0]
    gx = gx[w]
    gy = gy[w]

    # check the pixel values for low nexp
    # this version uses numpy broadcasting sum gx+ix is [len(gx), nrows]
    gx = (gx[:, numpy.newaxis] + ix).clip(0, nexp_array.shape[0]-1)
    gy = (gy[:, numpy.newaxis] + iy).clip(0, nexp_array.shape[1]-1)
    # a source is flagged when ANY pixel in its aperture has too few exposures
    artifact_flag = nexp_array[gx, gy].min(axis=0) < artifact_filt

    log.info('FLAGGING {} OF {} SOURCES'.format(artifact_flag.sum(), nrows))

    # Add flag bit to appropriate sources
    for i, table_row in enumerate(catalog_data):
        if artifact_flag[i]:
            table_row["Flags"] |= 64

    if diagnostic_mode:
        # Write out intermediate catalog with updated flags
        phot_table_temp = phot_table_root + '_NEXPFILT.txt'
        catalog_data.write(phot_table_temp, delimiter=",", format='ascii')

    return catalog_data
def get_component_drz_list(drizzled_image, drz_root_dir, flt_file_names):
    """Get a list of the drizzled exposure images associated with this combined drizzled image.

    Usually this can just use glob to get a list of all the drizzled exposures
    for this filter, but it also handles the case where some exposures were not
    used (e.g., for scan mode images).

    Parameters
    ----------
    drizzled_image : string
        Name of combined (level 2) drizzled image

    drz_root_dir : string
        Location of drizzled exposures

    flt_file_names : dictionary or list
        dictionary containing lists of calibrated images sorted (also keyed) by
        filter name, or a plain list of calibrated images.

    Returns
    -------
    rv : list
        a list of drizzled exposure images associated with the specified
        combined drizzled image
    """
    # e.g. "drz" from "..._drz.fits"; component exposures share this suffix
    drizzle_file_suffex = drizzled_image[-8:-5]
    drz_img_split = drizzled_image.split('/')[-1].split("_" + drizzle_file_suffex)
    component_drz_img_list = glob.glob(os.path.join(drz_root_dir, drz_img_split[0]) +
                                       '*_{}.fits'.format(drizzle_file_suffex))
    component_drz_img_list.sort()
    # Exclude the combined image itself from the component list.
    # BUG FIX: the original removed items from the list while iterating over
    # it, which can silently skip the element that follows a removal.
    component_drz_img_list = [item for item in component_drz_img_list
                              if not item.endswith(drizzled_image)]

    drz_filter = drizzled_image.split("_")[5]
    if isinstance(flt_file_names, dict):
        list_of_flts = flt_file_names[drz_filter.lower()]
    elif isinstance(flt_file_names, list):
        list_of_flts = flt_file_names
    else:
        # previously this fell through to an opaque NameError on list_of_flts;
        # fail with a clear message instead
        raise ValueError("flt_file_names must be a dict or a list, got {}".format(type(flt_file_names).__name__))

    if len(list_of_flts) == len(component_drz_img_list):
        # length match means we use them all
        return component_drz_img_list
    elif len(list_of_flts) > len(component_drz_img_list):
        # this must be a bug?
        log.info("ERROR: too few drizzled exposures for {}".format(drz_filter))
        log.info("Drizzled exposure list: {}".format("\n".join(component_drz_img_list)))
        log.info("flt exposure list: {}".format("\n".join(list_of_flts)))
        log.info("Plowing ahead with the full drizzled list")
        return component_drz_img_list

    # check the drz headers to see which ipppssoots are included
    ipdict = {}
    for ipname in list_of_flts:
        fname = os.path.split(ipname)[-1]
        fname = fname.split('_')[0].lower()
        ipdict[fname] = 1
    rv = []
    for drzfile in component_drz_img_list:
        fh = fits.open(drzfile)
        rootname = fh[0].header.get('rootname', '')
        fh.close()
        fname = os.path.split(rootname)[-1]
        fname = fname.split('_')[0].lower()
        if fname in ipdict:
            rv.append(drzfile)
    if len(list_of_flts) != len(rv):
        # this must be a bug?
        log.info("ERROR: mismatch after filtering in exposure lists for {}".format(drz_filter))
        log.info("Filtered drizzled exposure list: {}".format("\n".join(rv)))
        log.info("flt exposure list: {}".format("\n".join(list_of_flts)))
        log.info("Plowing ahead with the filtered drizzled list")
    return rv
def xymatch(cat1, cat2, sep, multiple=False, stack=True, verbose=True):
    """Match two lists of objects by position using 2-D Cartesian distances.

    Matches positions in cat1 with positions in cat2 that lie within the
    separation ``sep``.  By default only the nearest match is returned;
    setting multiple=True returns all matching pairs.  Input catalogs need
    not be sorted.

    Parameters
    ----------
    cat1 : numpy.ndarray
        [N, 2] array of x, y source coords to match (cat1[:, 0]=x, cat1[:, 1]=y).

    cat2 : numpy.ndarray
        [M, 2] array of x, y source coords to match.

    sep : float
        maximum separation (in pixels) allowed for source matching.

    multiple : Boolean
        If True, returns a tuple (p1, p2) such that cat1[p1] and cat2[p2] are
        within sep.  p1 and p2 may include multiple pointers to the same
        objects in cat1 or cat2; objects that don't match are simply omitted.
        Default value is 'False'.

    stack : Boolean
        Applies only when multiple is True.  If True, the returned matching
        pointers are stacked into single 1-D arrays of length nmatches.  If
        False, p1 is a list of indices into cat1 and p2 is a list of *array*
        indices into cat2, so p2[k] is all the sources that match p1[k].
        Default value is 'True'.

    verbose : Boolean
        print verbose output?  Default value is 'True'.

    Returns
    -------
    Varies; either just 'p2', or 'p1' and 'p2'.  In nearest-match mode,
    objects in cat1 with no match within sep get index -M-1 (M = len(cat2))
    so that IndexErrors are raised if these indices are used by mistake.

    Notes
    -----
    Marcel Haas, 2012-06-29, after IDL routine xymatch.pro by Rick White,
    with some tweaks by Rick White.
    """
    # ---- validate inputs -------------------------------------------------
    if not (isinstance(cat1, numpy.ndarray) and len(cat1.shape) == 2 and cat1.shape[1] == 2):
        log.error("catalog 1 must be a [N, 2] array")
        raise ValueError("cat1 must be a [N, 2] array")
    if not (isinstance(cat2, numpy.ndarray) and len(cat2.shape) == 2 and cat2.shape[1] == 2):
        log.error("catalog 2 must be a [N, 2] array")
        raise ValueError("cat2 must be a [N, 2] array")

    x1 = cat1[:, 0]
    y1 = cat1[:, 1]
    x2 = cat2[:, 0]
    y2 = cat2[:, 1]

    # Sort the arrays by increasing y-coordinate
    is1 = y1.argsort()
    x1 = x1[is1]
    y1 = y1[is1]
    is2 = y2.argsort()
    x2 = x2[is2]
    y2 = y2[is2]

    # find search limits in y2 for each object in y1
    kvlo = y2.searchsorted(y1 - sep, 'left').clip(0, len(y2))
    kvhi = y2.searchsorted(y1 + sep, 'right').clip(kvlo, len(y2))

    nnomatch = 0
    n1 = len(x1)
    if multiple:
        # build lists of array segments for matches
        p1 = []
        p2 = []
    else:
        # "unmatched" sentinel -len(cat2)-1 raises IndexError if ever used
        p2 = numpy.zeros(n1, dtype='int') - len(x2) - 1
    t0 = time.time()
    sepsq = sep ** 2
    for i in range(n1):
        y = y1[i]
        x = x1[i]
        klo = kvlo[i]
        khi = kvhi[i]
        dx = numpy.abs(x2[klo:khi] - x)
        w = (dx <= sep).nonzero()[0]
        if len(w) == 0:
            # Nothing matched
            nnomatch += 1
        else:
            distsq = (x - x2[klo + w]) ** 2 + (y - y2[klo + w]) ** 2
            if multiple:
                ww = (distsq <= sepsq).nonzero()[0]
                if len(ww) == 0:
                    nnomatch += 1
                else:
                    if stack:
                        p1.append(numpy.zeros(len(ww), dtype='int') + is1[i])
                    else:
                        p1.append(is1[i])
                    p2.append(is2[klo + w[ww]])
            else:
                # BUG FIX: this branch previously referenced an undefined name
                # `dist` (dist.min() / dist.argmin()), raising NameError on
                # first use; compare squared distances instead (equivalent).
                if distsq.min() <= sepsq:
                    p2[is1[i]] = is2[klo + w[distsq.argmin()]]
                else:
                    nnomatch += 1
        if verbose and (i + 1) % 10000 == 0:
            log.info("%.1f s: Finished %d of %d (%d unmatched)" % (time.time() - t0, i + 1, n1, nnomatch))

    if verbose:
        log.info("%.1f s: Finished %d (%d unmatched)" % (time.time() - t0, n1, nnomatch))
    if multiple:
        if stack:
            if len(p1) == 0:
                # no matches found
                # return empty integer arrays that are still usable as indices
                return numpy.array([], dtype=int), numpy.array([], dtype=int)
            else:
                return numpy.concatenate(p1), numpy.concatenate(p2)
        else:
            return (p1, p2)
    else:
        return p2
def rdtoxy(rd_coord_array, image, image_ext, origin=1):
    """Convert RA and dec sky coordinates to x, y image coordinates.

    Parameters
    ----------
    rd_coord_array : numpy.ndarray
        array containing RA and dec values to convert.

    image : string
        drizzled image whose WCS info will be used in the coordinate conversion.

    image_ext : string
        fits image extension to be used in the conversion.

    origin : int, optional
        the coordinate in the upper left corner of the image. In FITS and
        Fortran standards, this is 1. In Numpy and C standards this is 0.
        Default value is 1.

    Returns
    -------
    xy_arr : array
        array of converted x, y coordinate value pairs
    """
    wcs_info = wcsutil.HSTWCS(image + image_ext)
    # different WCS implementations expose different method names;
    # fall back from wcs_sky2pix to wcs_world2pix when the former is absent
    try:
        return wcs_info.wcs_sky2pix(rd_coord_array, origin)
    except AttributeError:
        return wcs_info.wcs_world2pix(rd_coord_array, origin)
def xytord(xy_coord_array, image, image_ext, origin=1):
    """Convert x, y image coordinates to RA and dec sky coordinates.

    Parameters
    ----------
    xy_coord_array : numpy.ndarray
        array containing image x, y coord values to convert.

    image : string
        drizzled image whose WCS info will be used in the coordinate conversion.

    image_ext : string
        fits image extension to be used in the conversion.

    origin : int, optional
        the coordinate in the upper left corner of the image. In FITS and
        Fortran standards, this is 1. In Numpy and C standards this is 0.
        Default value is 1.

    Returns
    -------
    rd_arr : array
        an array of converted RA and dec value pairs
    """
    wcs_info = wcsutil.HSTWCS(image + image_ext)
    # different WCS implementations expose different method names;
    # fall back from all_pix2sky to all_pix2world when the former is absent
    try:
        return wcs_info.all_pix2sky(xy_coord_array, origin)
    except AttributeError:
        return wcs_info.all_pix2world(xy_coord_array, origin)


# ======================================================================================================================


def flag4and8_hunter_killer(catalog_data, column_titles):
    """Search a photometry catalog for sources whose flags contain both bit 4
    (multi-pixel saturation) and bit 8 (faint magnitude limit), and remove the
    "8" bit value from the flags of any such source.

    Parameters
    ----------
    catalog_data : astropy Table object
        catalog data to process

    column_titles : dictionary
        Relevant column titles (not used here; kept for interface compatibility)

    Returns
    -------
    catalog_data : astropy Table object
        input catalog data with updated flags
    """
    conflict_count = 0
    log.info("Searching for flag 4 + flag 8 conflicts....")
    for catalog_line in catalog_data:
        flags = catalog_line["Flags"]
        if (flags & 4 > 0) and (flags & 8 > 0):
            conflict_count += 1
            # strip the faint-limit bit, keep everything else
            catalog_line["Flags"] = int(flags) - 8
    if conflict_count == 0:
        log.info("No conflicts found.")
    elif conflict_count == 1:
        log.info("{} conflict fixed.".format(conflict_count))
    else:
        log.info("{} conflicts fixed.".format(conflict_count))
    return catalog_data
def make_mask_array(drz_image):
    """Create a _msk-style footprint mask: pixel values of 1 outside the
    drizzled image footprint and 0 inside.  Used by hla_nexp_flags().

    Parameters
    ----------
    drz_image : string
        drizzled image filename

    Returns
    -------
    mask : numpy.ndarray object
        mask array
    """
    footprint = fits.open(drz_image)[1].data != 0
    dilate = scipy.ndimage.morphology.binary_dilation
    erode = scipy.ndimage.morphology.binary_erosion
    grow_kernel = numpy.ones((25, 25), dtype=int)
    shrink_kernel = numpy.ones((31, 31), dtype=int)
    # add padding around the edge so pixels close to the image boundary are
    # handled correctly, then strip the padding back off afterwards
    pad = 13
    padded = numpy.pad(footprint, pad, 'constant')
    closed = erode(dilate(padded, grow_kernel), shrink_kernel)
    mask = (closed == 0)[pad:-pad, pad:-pad]
    return mask.astype(numpy.int16)


# ======================================================================================================================


def deconstruct_flag(flagval):
    """Break an integer flag value down into its individual component bit values.

    Parameters
    ----------
    flagval : int
        Flag value to deconstruct

    Returns
    -------
    out_idx_list : numpy.ndarray
        a 9-element array of 0s and 1s.  Element 0 marks a flag value of
        exactly zero; elements 1-8 mark the presence of bits 1, 2, 4, 8,
        16, 32, 64 and 128 respectively.
    """
    bit_values = [1, 2, 4, 8, 16, 32, 64, 128]
    flagval = int(flagval)
    out_idx_list = numpy.zeros(9, dtype=int)
    if flagval == 0:
        out_idx_list[0] = 1
    elif flagval > 0:
        for position, bit in enumerate(bit_values, start=1):
            if flagval & bit > 0:
                out_idx_list[position] = 1
            if bit > flagval:
                # no higher bit can be set; stop early
                break
    return out_idx_list
""" bit_list = [0, 1, 2, 4, 8, 16, 32, 64, 128] flag_meanings = ['Point Source', 'Extended Source', 'Single-Pixel Saturation', 'Multi-Pixel Saturation', 'Faint Magnitude Limit', 'Hot Pixel', 'Swarm Detection', 'Edge and Chip Gap', 'Bleeding and Cosmic Rays'] flag_counts = numpy.zeros(9, dtype=int) n_sources = len(flag_data) for flagval in flag_data: flag_counts += deconstruct_flag(flagval) max_length = 5 for bitval in flag_counts: max_length = max([max_length, len(str(bitval))]) log.info("{}".format("-"*60)) log.info("{}FLAG BIT VALUE POPULATION SUMMARY".format(" "*13)) log.info("Bit Meaning{}Count Percentage".format(" "*20)) fill_char = " " for ctr in range(0, len(bit_list)): bit_val = bit_list[ctr] pct_val = 100.0*(float(flag_counts[ctr])/float(n_sources)) padding1 = 6 - len(str(bit_val)) padding2 = 27 - len(flag_meanings[ctr]) padding3 = max_length-len(str(flag_counts[ctr])) if pct_val == 100.: padding4 = 3 elif pct_val >= 10.: padding4 = 4 else: padding4 = 5 log.info("{}{}{}{}{}{}{}{:.3f}%".format(bit_val, fill_char*padding1, flag_meanings[ctr], padding2*fill_char, fill_char*padding3, flag_counts[ctr], fill_char*padding4, pct_val)) log.info("{}".format(" -- " * 15)) log.info("NOTE: As the flag value for a given source can be composed ") log.info("of multiple bits, the above percentage values need not add") log.info("up to 100%.") log.info("{}\n".format("-" * 60))
{ "alphanum_fraction": 0.5315963275, "author": null, "avg_line_length": 44.1934770591, "converted": null, "ext": "py", "file": null, "hexsha": "b7b95102ce568cc4d0294700e0420cce7afa80bf", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 33, "max_forks_repo_forks_event_max_datetime": "2021-12-27T04:20:44.000Z", "max_forks_repo_forks_event_min_datetime": "2016-03-16T19:18:03.000Z", "max_forks_repo_head_hexsha": "19baaf5a416c72f272889800b13d251f33f76d2c", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "check-spelling/drizzlepac", "max_forks_repo_path": "drizzlepac/haputils/hla_flag_filter.py", "max_issues_count": 822, "max_issues_repo_head_hexsha": "19baaf5a416c72f272889800b13d251f33f76d2c", "max_issues_repo_issues_event_max_datetime": "2022-03-30T20:25:34.000Z", "max_issues_repo_issues_event_min_datetime": "2016-03-10T01:19:28.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "check-spelling/drizzlepac", "max_issues_repo_path": "drizzlepac/haputils/hla_flag_filter.py", "max_line_length": 204, "max_stars_count": 28, "max_stars_repo_head_hexsha": "19baaf5a416c72f272889800b13d251f33f76d2c", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "check-spelling/drizzlepac", "max_stars_repo_path": "drizzlepac/haputils/hla_flag_filter.py", "max_stars_repo_stars_event_max_datetime": "2022-03-27T15:39:29.000Z", "max_stars_repo_stars_event_min_datetime": "2016-08-16T04:16:32.000Z", "num_tokens": 17178, "path": null, "reason": "import numpy,import scipy,from astropy", "repo": null, "save_path": null, "sha": null, "size": 79946 }
/* * Copyright 2014 Antony Polukhin * Copyright 2015 Andrey Semashev * * Distributed under the Boost Software License, Version 1.0. * See http://www.boost.org/LICENSE_1_0.txt */ #ifndef BOOST_WINAPI_CRYPT_HPP_INCLUDED_ #define BOOST_WINAPI_CRYPT_HPP_INCLUDED_ #include <boost/winapi/basic_types.hpp> #include <boost/winapi/detail/cast_ptr.hpp> #if defined( BOOST_USE_WINDOWS_H ) // This header is not always included as part of windows.h #include <wincrypt.h> #endif #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once #endif #if !defined( BOOST_USE_WINDOWS_H ) namespace boost { namespace winapi { typedef ULONG_PTR_ HCRYPTPROV_; }} // Some versions of MinGW contain buggy declarations of CryptEnumProvidersA and CryptEnumProvidersW. // We cannot detect those broken versions, and we can't include the system header because it's incomplete. // So below we duplicate the broken declarations here and work around the problem with cast_ptr. These declarations // will have to be removed when MinGW is fixed. // // @@@ Looks like mingw 5.0-dev has this fixed and possibly in a 3.2x late release as well // See: https://sourceforge.net/p/mingw/bugs/2263/ extern "C" { #if BOOST_WINAPI_PARTITION_DESKTOP || BOOST_WINAPI_PARTITION_SYSTEM #if !defined( BOOST_NO_ANSI_APIS ) #if !defined( BOOST_WINAPI_IS_MINGW ) || !defined( UNICODE ) BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptEnumProvidersA( boost::winapi::DWORD_ dwIndex, boost::winapi::DWORD_ *pdwReserved, boost::winapi::DWORD_ dwFlags, boost::winapi::DWORD_ *pdwProvType, boost::winapi::LPSTR_ szProvName, boost::winapi::DWORD_ *pcbProvName); #else // Broken declaration in MinGW BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptEnumProvidersA( boost::winapi::DWORD_ dwIndex, boost::winapi::DWORD_ *pdwReserved, boost::winapi::DWORD_ dwFlags, boost::winapi::DWORD_ *pdwProvType, boost::winapi::LPWSTR_ szProvName, boost::winapi::DWORD_ *pcbProvName); #endif BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptAcquireContextA( 
boost::winapi::HCRYPTPROV_ *phProv, boost::winapi::LPCSTR_ pszContainer, boost::winapi::LPCSTR_ pszProvider, boost::winapi::DWORD_ dwProvType, boost::winapi::DWORD_ dwFlags); #endif // !defined( BOOST_NO_ANSI_APIS ) #if !defined( BOOST_WINAPI_IS_MINGW ) || defined( UNICODE ) BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptEnumProvidersW( boost::winapi::DWORD_ dwIndex, boost::winapi::DWORD_ *pdwReserved, boost::winapi::DWORD_ dwFlags, boost::winapi::DWORD_ *pdwProvType, boost::winapi::LPWSTR_ szProvName, boost::winapi::DWORD_ *pcbProvName); #else // Broken declaration in MinGW BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptEnumProvidersW( boost::winapi::DWORD_ dwIndex, boost::winapi::DWORD_ *pdwReserved, boost::winapi::DWORD_ dwFlags, boost::winapi::DWORD_ *pdwProvType, boost::winapi::LPSTR_ szProvName, boost::winapi::DWORD_ *pcbProvName); #endif BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptAcquireContextW( boost::winapi::HCRYPTPROV_ *phProv, boost::winapi::LPCWSTR_ szContainer, boost::winapi::LPCWSTR_ szProvider, boost::winapi::DWORD_ dwProvType, boost::winapi::DWORD_ dwFlags); BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptGenRandom( boost::winapi::HCRYPTPROV_ hProv, boost::winapi::DWORD_ dwLen, boost::winapi::BYTE_ *pbBuffer); #endif // BOOST_WINAPI_PARTITION_DESKTOP || BOOST_WINAPI_PARTITION_SYSTEM #if BOOST_WINAPI_PARTITION_APP_SYSTEM #if defined(_MSC_VER) && (_MSC_VER+0) >= 1500 && (_MSC_VER+0) < 1900 && BOOST_USE_NTDDI_VERSION < BOOST_WINAPI_NTDDI_WINXP // Standalone MS Windows SDK 6.0A and later until 10.0 provide a different declaration of CryptReleaseContext for Windows 2000 and older. // This is not the case for (a) MinGW and MinGW-w64, (b) MSVC 7.1 and 8, which are shipped with their own Windows SDK, // and (c) MSVC 14.0 and later, which are used with Windows SDK 10. 
BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptReleaseContext( boost::winapi::HCRYPTPROV_ hProv, boost::winapi::ULONG_PTR_ dwFlags); #else BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI CryptReleaseContext( boost::winapi::HCRYPTPROV_ hProv, boost::winapi::DWORD_ dwFlags); #endif #endif // BOOST_WINAPI_PARTITION_APP_SYSTEM } #endif // !defined( BOOST_USE_WINDOWS_H ) namespace boost { namespace winapi { #if defined( BOOST_USE_WINDOWS_H ) typedef ::HCRYPTPROV HCRYPTPROV_; #if BOOST_WINAPI_PARTITION_APP_SYSTEM const DWORD_ PROV_RSA_FULL_ = PROV_RSA_FULL; const DWORD_ CRYPT_VERIFYCONTEXT_ = CRYPT_VERIFYCONTEXT; const DWORD_ CRYPT_NEWKEYSET_ = CRYPT_NEWKEYSET; const DWORD_ CRYPT_DELETEKEYSET_ = CRYPT_DELETEKEYSET; const DWORD_ CRYPT_MACHINE_KEYSET_ = CRYPT_MACHINE_KEYSET; const DWORD_ CRYPT_SILENT_ = CRYPT_SILENT; #endif #else #if BOOST_WINAPI_PARTITION_APP_SYSTEM const DWORD_ PROV_RSA_FULL_ = 1; const DWORD_ CRYPT_VERIFYCONTEXT_ = 0xF0000000; const DWORD_ CRYPT_NEWKEYSET_ = 8; const DWORD_ CRYPT_DELETEKEYSET_ = 16; const DWORD_ CRYPT_MACHINE_KEYSET_ = 32; const DWORD_ CRYPT_SILENT_ = 64; #endif #endif #if BOOST_WINAPI_PARTITION_DESKTOP || BOOST_WINAPI_PARTITION_SYSTEM #if !defined( BOOST_NO_ANSI_APIS ) using ::CryptAcquireContextA; #endif using ::CryptAcquireContextW; using ::CryptGenRandom; #if !defined( BOOST_NO_ANSI_APIS ) BOOST_FORCEINLINE BOOL_ CryptEnumProvidersA( DWORD_ dwIndex, DWORD_ *pdwReserved, DWORD_ dwFlags, DWORD_ *pdwProvType, LPSTR_ szProvName, DWORD_ *pcbProvName) { return ::CryptEnumProvidersA(dwIndex, pdwReserved, dwFlags, pdwProvType, winapi::detail::cast_ptr(szProvName), pcbProvName); } BOOST_FORCEINLINE BOOL_ crypt_enum_providers( DWORD_ dwIndex, DWORD_ *pdwReserved, DWORD_ dwFlags, DWORD_ *pdwProvType, LPSTR_ szProvName, DWORD_ *pcbProvName) { return ::CryptEnumProvidersA(dwIndex, pdwReserved, dwFlags, pdwProvType, winapi::detail::cast_ptr(szProvName), pcbProvName); } BOOST_FORCEINLINE BOOL_ crypt_acquire_context( HCRYPTPROV_ *phProv, 
LPCSTR_ pszContainer, LPCSTR_ pszProvider, DWORD_ dwProvType, DWORD_ dwFlags) { return ::CryptAcquireContextA(phProv, pszContainer, pszProvider, dwProvType, dwFlags); } #endif BOOST_FORCEINLINE BOOL_ CryptEnumProvidersW( DWORD_ dwIndex, DWORD_ *pdwReserved, DWORD_ dwFlags, DWORD_ *pdwProvType, LPWSTR_ szProvName, DWORD_ *pcbProvName) { return ::CryptEnumProvidersW(dwIndex, pdwReserved, dwFlags, pdwProvType, winapi::detail::cast_ptr(szProvName), pcbProvName); } BOOST_FORCEINLINE BOOL_ crypt_enum_providers( DWORD_ dwIndex, DWORD_ *pdwReserved, DWORD_ dwFlags, DWORD_ *pdwProvType, LPWSTR_ szProvName, DWORD_ *pcbProvName) { return ::CryptEnumProvidersW(dwIndex, pdwReserved, dwFlags, pdwProvType, winapi::detail::cast_ptr(szProvName), pcbProvName); } BOOST_FORCEINLINE BOOL_ crypt_acquire_context( HCRYPTPROV_ *phProv, LPCWSTR_ szContainer, LPCWSTR_ szProvider, DWORD_ dwProvType, DWORD_ dwFlags) { return ::CryptAcquireContextW(phProv, szContainer, szProvider, dwProvType, dwFlags); } #endif // BOOST_WINAPI_PARTITION_DESKTOP || BOOST_WINAPI_PARTITION_SYSTEM #if BOOST_WINAPI_PARTITION_APP_SYSTEM BOOST_FORCEINLINE BOOL_ CryptReleaseContext(HCRYPTPROV_ hProv, DWORD_ dwFlags) { return ::CryptReleaseContext(hProv, dwFlags); } #endif } } #endif // BOOST_WINAPI_CRYPT_HPP_INCLUDED_
{ "alphanum_fraction": 0.7613983783, "author": null, "avg_line_length": 31.0867768595, "converted": null, "ext": "hpp", "file": null, "hexsha": "98c5958b432579bacdb8ac1642d247f79a94d278", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 122, "max_forks_repo_forks_event_max_datetime": "2022-02-22T14:25:49.000Z", "max_forks_repo_forks_event_min_datetime": "2016-12-22T17:38:09.000Z", "max_forks_repo_head_hexsha": "ebaa6fd06ec867f7e41a24cadf3adf4d141b16d5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "dilawar/eyesthatblink", "max_forks_repo_path": "external/boost-winapi/include/boost/winapi/crypt.hpp", "max_issues_count": 203, "max_issues_repo_head_hexsha": "ebaa6fd06ec867f7e41a24cadf3adf4d141b16d5", "max_issues_repo_issues_event_max_datetime": "2022-03-30T20:46:55.000Z", "max_issues_repo_issues_event_min_datetime": "2016-12-27T12:09:03.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "dilawar/eyesthatblink", "max_issues_repo_path": "external/boost-winapi/include/boost/winapi/crypt.hpp", "max_line_length": 137, "max_stars_count": 918, "max_stars_repo_head_hexsha": "ebaa6fd06ec867f7e41a24cadf3adf4d141b16d5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "dilawar/eyesthatblink", "max_stars_repo_path": "external/boost-winapi/include/boost/winapi/crypt.hpp", "max_stars_repo_stars_event_max_datetime": "2022-03-22T06:21:35.000Z", "max_stars_repo_stars_event_min_datetime": "2016-12-22T02:53:08.000Z", "num_tokens": 2125, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 7523 }
from ..data import DATA_PATH from .. import simple_cov import pytest from pyuvdata import UVData import os import numpy as np @pytest.fixture def sky_model(): uvd = UVData() uvd.read_uvh5( os.path.join( DATA_PATH, "Garray_antenna_diameter2.0_fractional_spacing1.0_nant6_nf200_df100.000kHz_f0100.000MHzcompressed_True_autosFalse_gsm.uvh5", ) ) return uvd @pytest.mark.parametrize( "use_tensorflow, horizon, offset, min_dly, ant_dly", [(True, 1.0, 20.0, 0.0, 0.0), (False, 0.8, 123.0, 200.0, 0.0), (True, 1.0, 0.0, 0.0, 2 / 0.3)], ) def test_simple_cov(use_tensorflow, horizon, offset, min_dly, sky_model, ant_dly): sky_model.select(bls=[(0, 1)]) blvecs = sky_model.uvw_array freqs = sky_model.freq_array[0] nfreqs = len(freqs) fg0, fg1 = np.meshgrid(freqs, freqs) bldly = np.max([np.linalg.norm(blvecs[0]) * horizon / 0.3 + offset, min_dly]) tcov = np.sinc(2 * bldly * (fg0 - fg1) / 1e9) if ant_dly > 0: tcov *= np.sinc(2 * (fg0 - fg1) / 1e9 * ant_dly) scov = simple_cov.simple_cov_matrix( blvecs, freqs, ant_dly=ant_dly, horizon=horizon, offset=offset, min_dly=min_dly, dtype=np.float64, use_tensorflow=use_tensorflow, ) assert np.allclose(scov, tcov)
{ "alphanum_fraction": 0.6354790419, "author": null, "avg_line_length": 29.0434782609, "converted": null, "ext": "py", "file": null, "hexsha": "4b60a15ddc160dc1b28a093ef2568d7975072c4a", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "73affeae3519febf21b54e73550e08906a314e2c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "LBJ-Wade/calamity", "max_forks_repo_path": "calamity/tests/test_simple_cov.py", "max_issues_count": 32, "max_issues_repo_head_hexsha": "73affeae3519febf21b54e73550e08906a314e2c", "max_issues_repo_issues_event_max_datetime": "2022-03-08T05:35:10.000Z", "max_issues_repo_issues_event_min_datetime": "2021-06-13T07:27:35.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "LBJ-Wade/calamity", "max_issues_repo_path": "calamity/tests/test_simple_cov.py", "max_line_length": 136, "max_stars_count": 4, "max_stars_repo_head_hexsha": "73affeae3519febf21b54e73550e08906a314e2c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "LBJ-Wade/calamity", "max_stars_repo_path": "calamity/tests/test_simple_cov.py", "max_stars_repo_stars_event_max_datetime": "2021-12-17T05:21:28.000Z", "max_stars_repo_stars_event_min_datetime": "2021-06-15T21:20:23.000Z", "num_tokens": 461, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1336 }
""" Forced DA Analysis ------------------ Top-level script to run the forced DA analysis, following the procedure described in `CarlierForcedDA2019`_. Arguments: *--Required--* - **beam** *(int)*: Beam to use. Flags: **['-b', '--beam']** Choices: ``[1, 2]`` - **energy** *(MultiClass)*: Beam energy in GeV. Flags: **['-e', '--energy']** - **kick_directory** *(MultiClass)*: Analysis kick_directory containing kick files. Flags: **['-k', '--kickdir']** - **plane** *(str)*: Plane of the kicks. Flags: **['-p', '--plane']** Choices: ``['X', 'Y']`` *--Optional--* - **emittance_outlier_limit** *(float)*: Limit, i.e. cut from mean, on emittance outliers in meter. Default: ``5e-07`` - **emittance_tfs** *(MultiClass)*: Dataframe or Path of pre-saved emittance tfs. - **emittance_type** *(str)*: Which BSRT data to use (from database). Choices: ``['fit_sigma', 'average']`` Default: ``average`` - **emittance_window_length** *(int)*: Length of the moving average window. (# data points) Default: ``100`` - **fill** *(int)*: Fill that was used. If not given, check out time_around_kicks. Flags: **['-f', '--fill']** - **fit** *(str)*: Fitting function to use (rearranges parameters to make sense). Choices: ``['exponential', 'linear']`` Default: ``exponential`` - **intensity_tfs** *(MultiClass)*: Dataframe or Path of pre-saved intensity tfs. - **intensity_time_after_kick** *(int)*: Defines the times after the kicks (in seconds) which is used for intensity averaging to calculate the losses. Default: ``[5, 30]`` - **intensity_time_before_kick** *(int)*: Defines the times before the kicks (in seconds) which is used for intensity averaging to calculate the losses. Default: ``[30, 5]`` - **normalized_emittance** *(float)*: Assumed NORMALIZED nominal emittance for the machine. 
Default: ``3.7499999999999997e-06`` - **output_directory** *(MultiClass)*: Output kick_directory, if not given subfolder in kick kick_directory Flags: **['-o', '--outdir']** - **pagestore_db** *(MultiClass)*: (Path to-) presaved timber database - **show**: Show plots. Action: ``store_true`` - **show_wirescan_emittance** *(BoolOrPathOrDataFrame)*: Flag if the emittance from wirescan should also be shown, can also be a Dataframe or Path of pre-saved emittance bws tfs. Default: ``False`` - **timber_db** *(str)*: Which timber database to use. Choices: ``['all', 'mdb', 'ldb', 'nxcals']`` Default: ``all`` - **time_around_kicks** *(int)*: If no fill is given, this defines the time (in minutes) when data before the first and after the last kick is extracted. Default: ``10`` - **plot_styles** *(str)*: Which plotting styles to use, either from omc3 styles or default mpl. Default: ``['standard']`` - **manual_style** *(DictAsString)*: Additional style rcParameters which update the set of predefined ones. Default: ``{}`` :author: jdilly .. 
_CarlierForcedDA2019: https://journals.aps.org/prab/pdf/10.1103/PhysRevAccelBeams.22.031002 """ import os from collections import defaultdict from contextlib import suppress from pathlib import Path from typing import Tuple import matplotlib as mpl import matplotlib.colors as mcolors import matplotlib.dates as mdates import matplotlib.pyplot as plt import matplotlib.transforms as mtrans import numpy as np import pandas as pd import scipy.odr import scipy.optimize import tfs from generic_parser import EntryPointParameters, entrypoint from generic_parser.entry_datatypes import ( DictAsString, FALSE_ITEMS, TRUE_ITEMS, get_instance_faker_meta, get_multi_class, ) from generic_parser.tools import DotDict from omc3.optics_measurements import toolbox from omc3.plotting.utils import annotations, colors, lines, style from omc3.tune_analysis.bbq_tools import clean_outliers_moving_average from omc3.utils import logging_tools from omc3.utils.iotools import save_config from omc3.utils.mock import cern_network_import from omc3.utils.time_tools import CERNDatetime from pandas import DataFrame, Series from pandas.plotting import register_matplotlib_converters from tfs import TfsDataFrame from tfs.tools import significant_digits pytimber = cern_network_import('pytimber') PageStore = cern_network_import('pytimber.pagestore.PageStore') from pylhc.constants.forced_da_analysis import ( BSRT_EMITTANCE_TO_METER, BWS_DIRECTIONS, BWS_EMITTANCE_TO_METER, HEADER_BSRT_OUTLIER_LIMIT, HEADER_BSRT_ROLLING_WINDOW, HEADER_ENERGY, HEADER_TIME_AFTER, HEADER_TIME_BEFORE, INITIAL_DA_FIT, INTENSITY, INTENSITY_AFTER, INTENSITY_BEFORE, INTENSITY_KEY, INTENSITY_LOSSES, KICKFILE, MAX_CURVEFIT_FEV, OUTFILE_INTENSITY, OUTLIER_LIMIT, PLOT_FILETYPES, RESULTS_DIR, ROLLING_AVERAGE_WINDOW, TIME_AFTER_KICK_S, TIME_AROUND_KICKS_MIN, TIME_BEFORE_KICK_S, YPAD, bsrt_emittance_key, bws_emittance_key, column_action, column_bws_norm_emittance, column_emittance, column_norm_emittance, err_col, header_da, header_da_error, 
header_nominal_emittance, header_norm_nominal_emittance, mean_col, outfile_emittance, outfile_emittance_bws, outfile_kick, outfile_plot, rel_col, sigma_col, ) from pylhc.constants.general import ( LHC_NOMINAL_EMITTANCE, TFS_SUFFIX, TIME_COLUMN, get_proton_beta, get_proton_gamma, ) LOG = logging_tools.get_logger(__name__) # Weird Datatypes class BoolOrPathOrDataFrame( metaclass=get_instance_faker_meta(bool, Path, str, tfs.TfsDataFrame, pd.DataFrame, type(None)) ): """ A class that behaves like a `boolean` when possible, otherwise like a `Path`, `string` or `Dataframe`. """ def __new__(cls, value): if isinstance(value, str): value = value.strip("'\"") # behavior like dict-parser if value in TRUE_ITEMS: return True elif value in FALSE_ITEMS: return False else: try: return Path(value) except TypeError: return value def _get_pathclass(*other_classes): class SomethingOrPath(metaclass=get_instance_faker_meta(Path, str, *other_classes, type(None))): """A class that behaves like a if possible `Path`, `string` or something else.""" def __new__(cls, value): if isinstance(value, str): value = value.strip("'\"") # Needs to be done for strings in config-files try: return Path(value) except TypeError: return value return SomethingOrPath PathOrDataframe = _get_pathclass(tfs.TfsDataFrame, pd.DataFrame) PathOrPagestore = _get_pathclass(PageStore) PathOrString = _get_pathclass() def get_params(): return EntryPointParameters( kick_directory=dict( flags=["-k", "--kickdir"], required=True, type=PathOrString, help="Analysis kick_directory containing kick files.", ), output_directory=dict( flags=["-o", "--outdir"], type=PathOrString, help="Output kick_directory, if not given subfolder in kick kick_directory", ), energy=dict( flags=["-e", "--energy"], required=True, type=get_multi_class(float, int), help="Beam energy in GeV.", ), fill=dict( flags=["-f", "--fill"], type=get_multi_class(int, type(None)), help="Fill that was used. 
If not given, check out time_around_kicks.", ), beam=dict( flags=["-b", "--beam"], required=True, choices=[1, 2], type=int, help="Beam to use." ), plane=dict( flags=["-p", "--plane"], choices=["X", "Y"], required=True, type=str, help=( "Plane of the kicks." # " Give 'XY' for using both planes (e.g. diagonal kicks)." # Future release ), ), time_around_kicks=dict( type=int, default=TIME_AROUND_KICKS_MIN, help=( "If no fill is given, this defines the time (in minutes) " "when data before the first and after the last kick is extracted." ), ), intensity_time_before_kick=dict( type=int, nargs=2, default=TIME_BEFORE_KICK_S, help=( "Defines the times before the kicks (in seconds) " "which is used for intensity averaging to calculate the losses." ), ), intensity_time_after_kick=dict( type=int, nargs=2, default=TIME_AFTER_KICK_S, help=( "Defines the times after the kicks (in seconds) " "which is used for intensity averaging to calculate the losses." ), ), normalized_emittance=dict( type=float, default=LHC_NOMINAL_EMITTANCE, help="Assumed NORMALIZED nominal emittance for the machine.", ), emittance_tfs=dict( type=PathOrDataframe, help="Dataframe or Path of pre-saved emittance tfs.", ), intensity_tfs=dict( type=PathOrDataframe, help="Dataframe or Path of pre-saved intensity tfs.", ), show_wirescan_emittance=dict( default=False, type=BoolOrPathOrDataFrame, help=( "Flag if the emittance from wirescan should also be shown, " "can also be a Dataframe or Path of pre-saved emittance bws tfs." ), ), timber_db=dict( type=str, default="all", choices=["all", "mdb", "ldb", "nxcals"], help="Which timber database to use.", ), pagestore_db=dict(type=PathOrPagestore, help="(Path to-) presaved timber database"), fit=dict( type=str, default="exponential", choices=["exponential", "linear"], help="Fitting function to use (rearranges parameters to make sense).", ), emittance_window_length=dict( help="Length of the moving average window. 
(# data points)", type=int, default=ROLLING_AVERAGE_WINDOW, ), emittance_outlier_limit=dict( help="Limit, i.e. cut from mean, on emittance outliers in meter.", type=float, default=OUTLIER_LIMIT, ), emittance_type=dict( type=str, default="average", choices=["fit_sigma", "average"], help="Which BSRT data to use (from database).", ), show=dict(action="store_true", help="Show plots.",), plot_styles=dict( type=str, nargs="+", default=["standard"], help="Which plotting styles to use, either from omc3 styles or default mpl.", ), manual_style=dict( type=DictAsString, default={}, help="Additional style rcParameters which update the set of predefined ones.", ), ) @entrypoint(get_params(), strict=True) def main(opt): LOG.debug("Starting Forced DA analysis.") _log_opt(opt) kick_dir, out_dir = _get_output_dir(opt.kick_directory, opt.output_directory) with suppress(PermissionError): save_config(out_dir, opt, __file__) # get data kick_df = _get_kick_df(kick_dir, opt.plane) intensity_df, emittance_df, emittance_bws_df = _get_dataframes( kick_df.index, opt.get_subdict( [ "fill", "beam", "plane", "time_around_kicks", "emittance_tfs", "intensity_tfs", "show_wirescan_emittance", "timber_db", "pagestore_db", "emittance_window_length", "emittance_outlier_limit", "emittance_type", "normalized_emittance", ] ), ) _check_all_times_in(kick_df.index, intensity_df.index[0], intensity_df.index[-1]) # add data to kicks kick_df = _add_intensity_and_losses_to_kicks( kick_df, intensity_df, opt.intensity_time_before_kick, opt.intensity_time_after_kick ) kick_df = _add_emittance_to_kicks( opt.plane, opt.energy, kick_df, emittance_df, opt.normalized_emittance ) kick_df = _do_fit(opt.plane, kick_df, opt.fit) kick_df = _convert_to_sigmas(opt.plane, kick_df) # output _write_tfs(out_dir, opt.plane, kick_df, intensity_df, emittance_df, emittance_bws_df) # plotting figs = dict() register_matplotlib_converters() # for datetime plotting style.set_style(opt.plot_styles, opt.manual_style) figs["emittance"] = 
_plot_emittances( out_dir, opt.beam, opt.plane, emittance_df, emittance_bws_df, kick_df.index ) figs["intensity"] = _plot_intensity(out_dir, opt.beam, opt.plane, kick_df, intensity_df) for fit_type in ("exponential", "linear", "norm"): figs[f"da_fit_{fit_type}"] = _plot_da_fit(out_dir, opt.beam, opt.plane, kick_df, fit_type) if opt.show: plt.show() LOG.debug("Forced DA analysis finished.") return figs # Helper --- def _log_opt(opt: DotDict): """Show options in log.""" LOG.info("Performing ForcedDA Analysis for:") if opt.fill is not None: LOG.info(f" Fill: {opt.fill}") LOG.info(f" Energy: {opt.energy} GeV") LOG.info(f" Beam: {opt.beam}") LOG.info(f" Plane: {opt.plane}") LOG.info(f" Analysis Directory: '{opt.kick_directory}'") def _write_tfs( out_dir: Path, plane: str, kick_df: DataFrame, intensity_df: DataFrame, emittance_df: DataFrame, emittance_bws_df: DataFrame, ): """Write out gathered data.""" LOG.debug("Writing tfs files.") for df in (kick_df, intensity_df, emittance_df, emittance_bws_df): if df is not None: df.insert(0, TIME_COLUMN, [CERNDatetime(dt).cern_utc_string() for dt in df.index]) try: tfs.write(out_dir / outfile_kick(plane), kick_df) tfs.write(out_dir / OUTFILE_INTENSITY, intensity_df) tfs.write(out_dir / outfile_emittance(plane), emittance_df) if emittance_bws_df is not None: tfs.write(out_dir / outfile_emittance_bws(plane), emittance_bws_df) except (FileNotFoundError, IOError): LOG.error(f"Cannot write into directory: {str(out_dir)} ") def _check_all_times_in(series: Series, start: CERNDatetime, end: CERNDatetime): """Check if all times in series are between start and end.""" if any(s for s in series if s < start or s > end): raise ValueError( "Some of the kick-times are outside of the fill times! " "Check if correct kick-file or fill number are used." 
) def _convert_time_index(list_: list, path: Path = None) -> pd.Index: """Tries to convert time index to cerntime, first from datetime, then string, then timestamp.""" for index_convert in ( _datetime_to_cerntime_index, _string_to_cerntime_index, _timestamp_to_cerntime_index, ): with suppress(TypeError): return index_convert(list_) msg = f"Unrecognized format in column '{TIME_COLUMN}'" if path: msg += f" in '{str(path)}'" raise TypeError(msg) def _string_to_cerntime_index(list_): return pd.Index((CERNDatetime.from_cern_utc_string(t) for t in list_), dtype=object) def _timestamp_to_cerntime_index(list_): return pd.Index((CERNDatetime.from_timestamp(t) for t in list_), dtype=object) def _datetime_to_cerntime_index(list_): return pd.Index((CERNDatetime(t) for t in list_), dtype=object) def _drop_duplicate_indices(df): duplicate_mask = [True] + [ df.index[idx] != df.index[idx - 1] for idx in range(1, len(df.index)) ] return df.loc[duplicate_mask, :] # TFS Data Loading ------------------------------------------------------------- def _get_dataframes( kick_times: pd.Index, opt: DotDict ) -> Tuple[TfsDataFrame, TfsDataFrame, TfsDataFrame]: """Gets the intensity and emittance dataframes from either input, files or (timber) database.""" db = _get_db(opt) if opt.fill is not None: timespan_ts = _get_fill_times(db, opt.fill) timespan_dt = _convert_time_index(timespan_ts) else: td = pd.Timedelta(minutes=opt.time_around_kicks) timespan_dt = (kick_times.min() - td, kick_times.max() + td) timespan_ts = tuple(t.timestamp() for t in timespan_dt) if opt.intensity_tfs: intensity_df = _read_tfs(opt.intensity_tfs, timespan_dt) else: intensity_df = _get_bctrf_beam_intensity_from_timber(opt.beam, db, timespan_ts) if opt.emittance_tfs: emittance_df = _read_tfs(opt.emittance_tfs, timespan_dt) else: emittance_df = _get_bsrt_bunch_emittances_from_timber( opt.beam, opt.plane, db, timespan_ts, opt.emittance_type, opt.normalized_emittance ) emittance_df = _filter_emittance_data( emittance_df, 
opt.plane, opt.emittance_window_length, opt.emittance_outlier_limit ) if opt.show_wirescan_emittance is True: emittance_bws_df = _get_bws_emittances_from_timber(opt.beam, opt.plane, db, timespan_ts) elif opt.show_wirescan_emittance: emittance_bws_df = _read_tfs(opt.show_wirescan_emittance, timespan_dt) else: emittance_bws_df = None return intensity_df, emittance_df, emittance_bws_df def _read_tfs(tfs_file_or_path, timespan): """Read previously gathered data (see :meth:`pylhc.forced_da_analysis._write_tfs`).""" try: tfs_df = tfs.read_tfs(tfs_file_or_path, index=TIME_COLUMN) except IOError: tfs_df = tfs_file_or_path # hopefully tfs_df.index = _convert_time_index(tfs_df.index) return tfs_df.loc[slice(*timespan), :] def _filter_emittance_data(df, planes, window_length, limit): """Cleans emittance data via outlier filter and moving average.""" for plane in planes: LOG.debug(f"Filtering emittance data in plane {plane}.") col_nemittance = column_norm_emittance(plane) # col_err_nemittance = err_col(col_nemittance) col_mean = mean_col(col_nemittance) col_err_mean = err_col(col_mean) mav, std, mask = clean_outliers_moving_average( df[col_nemittance], length=window_length, limit=limit ) df[col_mean] = mav df[col_err_mean] = std # if any(df[col_err_nemittance]): # df[col_err_mean] = _rolling_errors(df[col_err_nemittance], ~mask, window_length) df = df.dropna(axis="index") if len(df.index) == 0: raise IndexError("Not enough emittance data extracted. 
Try to give a fill number.") df.headers[HEADER_BSRT_ROLLING_WINDOW] = window_length df.headers[HEADER_BSRT_OUTLIER_LIMIT] = limit df = _maybe_add_sum_for_planes(df, planes, column_norm_emittance) df = _maybe_add_sum_for_planes( df, planes, lambda p: mean_col(column_norm_emittance(p)), lambda p: err_col(mean_col(column_norm_emittance(p))), ) return df # Timber Data ------------------------------------------------------------------ def _get_db(opt): """Get the database either presaved or from timber.""" db = None if opt.pagestore_db: db = opt.pagestore_db try: db_path = Path(db) except TypeError: pass else: LOG.debug(f"Loading database from file {str(db_path)}") db = PageStore(f"file:{str(db_path)}", str(db_path.with_suffix(""))) if opt.fill is not None: raise EnvironmentError("'fill' can't be used with pagestore database.") else: LOG.debug(f" Trying to load database from timber.") try: db = pytimber.LoggingDB(source=opt["timber_db"]) except AttributeError: LOG.debug(f" Loading from timber failed.") if not db: error_msg = "" if opt.fill is not None: error_msg += "'fill' is given, " if opt.emittance_tfs is None: error_msg += "'emittance_tfs' is not given, " if opt.intensity_tfs is None: error_msg += "'intensity_tfs' is not given, " if opt.show_wirescan_emittance is True: error_msg += "wirescan emittance is requested, " if len(error_msg): error_msg += ( "but there is no database given and no access to timber databases. Aborting." 
) raise EnvironmentError(error_msg) return db def _get_fill_times(db, fill): """Extract Fill times from database.""" LOG.debug(f"Getting Timespan from fill {fill}") filldata = db.getLHCFillData(fill) return filldata["startTime"], filldata["endTime"] def _get_bctrf_beam_intensity_from_timber(beam, db, timespan): LOG.debug(f"Getting beam intensity from bctfr for beam {beam}.") intensity_key = INTENSITY_KEY.format(beam=beam) LOG.debug(f" Key: {intensity_key}") x, y = db.get(intensity_key, *timespan)[intensity_key] df = tfs.TfsDataFrame( data=y, index=_timestamp_to_cerntime_index(x), columns=[INTENSITY], dtype=float ) df = _drop_duplicate_indices(df) LOG.debug(f" Returning dataframe of shape {df.shape}") return df def _get_bsrt_bunch_emittances_from_timber(beam, planes, db, timespan, key_type, nominal_emittance): dfs = {p: None for p in planes} for plane in planes: LOG.debug(f"Getting emittance from BSRT for beam {beam} and plane {plane}.") bunch_emittance_key = bsrt_emittance_key(beam, plane, key_type) LOG.debug(f" Key: {bunch_emittance_key}") col_nemittance = column_norm_emittance(plane) all_columns = [f(col_nemittance) for f in (lambda s: s, mean_col, err_col)] + [ err_col(mean_col(col_nemittance)) ] x, y = db.get(bunch_emittance_key, *timespan)[bunch_emittance_key] y_std = np.zeros_like(x) if key_type == "fit_sigma": # add all data with the same timestamp y_new = defaultdict(list) for x_elem, y_elem in zip(x, y): y_new[f"{x_elem:.3f}"] += y_elem.tolist() # get average and std per timestamp x = np.array([float(elem) for elem in y_new.keys()]) y = np.array([np.average(elem) for elem in y_new.values()]) * nominal_emittance y_std = np.array([np.std(elem) for elem in y_new.values()]) * nominal_emittance elif key_type == "average": y *= BSRT_EMITTANCE_TO_METER y_std *= BSRT_EMITTANCE_TO_METER # remove entries with zero emittance as unphysical x, y, y_std = x[y != 0], y[y != 0], y_std[y != 0] df = tfs.TfsDataFrame( index=_timestamp_to_cerntime_index(x), 
columns=all_columns, dtype=float, ) df[col_nemittance] = y df[err_col(col_nemittance)] = y_std dfs[plane] = df df = _merge_df_planes(dfs, planes) LOG.debug(f" Returning dataframe of shape {df.shape}") return df def _get_bws_emittances_from_timber(beam, planes, db, timespan): dfs = {p: None for p in planes} for plane in planes: LOG.debug(f"Getting emittance from BWS for beam {beam} and plane {plane}.") all_columns = [column_bws_norm_emittance(plane, d) for d in BWS_DIRECTIONS] df = None for direction in BWS_DIRECTIONS: emittance_key = bws_emittance_key(beam, plane, direction) LOG.debug(f" Key: {emittance_key}") column_nemittance = column_bws_norm_emittance(plane, direction) x, y = db.get(emittance_key, *timespan)[emittance_key] if df is None: df = tfs.TfsDataFrame( index=_timestamp_to_cerntime_index(x), columns=all_columns, dtype=float ) df[column_nemittance] = y * BWS_EMITTANCE_TO_METER df[column_nemittance] = df[column_nemittance].apply( np.mean ) # BWS can give multiple values df[err_col(column_nemittance)] = df[column_nemittance].apply( np.std ) # BWS can give multiple values dfs[plane] = df df = _merge_df_planes(dfs, planes) for direction in BWS_DIRECTIONS: df = _maybe_add_sum_for_planes( df, planes, lambda p: column_bws_norm_emittance(p, direction), lambda p: err_col(column_bws_norm_emittance(p, direction)), ) LOG.debug(f" Returning dataframe of shape {df.shape}") return df # Kick Data -------------------------------------------------------------------- def _get_kick_df(kick_dir, plane): def column_action_error(x): return err_col(column_action(x)) try: df = _get_new_kick_file(kick_dir, plane) except FileNotFoundError: LOG.debug("Reading of kickfile failed. 
Looking for old kickfile.") df = _get_old_kick_file(kick_dir, plane) df = _maybe_add_sum_for_planes(df, plane, column_action, column_action_error) return df[[column_action(plane), column_action_error(plane)]] def _get_old_kick_file(kick_dir, plane): """Kick files from ``Beta-Beat.src``.""" path = kick_dir / "getkickac.out" LOG.debug(f"Reading kickfile '{str(path)}'.'") df = tfs.read(path) df = df.set_index(TIME_COLUMN) df.index = _convert_time_index(df.index, path) rename_dict = {} for p in plane: # can be XY rename_dict.update( { f"2J{p}RES": column_action(p), f"2J{p}STDRES": err_col(column_action(p)), f"J{p}2": column_action(p), # pre 2017 f"J{p}STD": err_col(column_action(p)), # pre 2017 } ) df = df.rename(rename_dict, axis="columns") renamed_cols = list(set(rename_dict.values())) df.loc[:, renamed_cols] = df.loc[:, renamed_cols] * 1e-6 return df def _get_new_kick_file(kick_dir, planes): """Kick files from ``omc3``.""" dfs = {p: None for p in planes} for plane in planes: path = kick_dir / f"{KICKFILE}_{plane.lower()}{TFS_SUFFIX}" LOG.debug(f"Reading kickfile '{str(path)}'.'") df = tfs.read(path, index=TIME_COLUMN) df.index = pd.Index([CERNDatetime.from_cern_utc_string(t) for t in df.index], dtype=object) dfs[plane] = df return _merge_df_planes(dfs, planes) def _get_output_dir(kick_directory, output_directory): kick_path = Path(kick_directory) if output_directory: output_path = Path(output_directory) else: output_path = kick_path / RESULTS_DIR try: output_path.mkdir(exist_ok=True) except PermissionError: LOG.warn( f"You have no writing permission in '{str(output_path)}', " f"output data might not be created." 
) LOG.info(f"All output will be written to {str(output_path)}") return kick_path, output_path # Intensity at Kicks ----------------------------------------------------------- def _add_intensity_and_losses_to_kicks(kick_df, intensity_df, time_before, time_after): LOG.debug("Calculating intensity and losses for the kicks.") col_list = [INTENSITY_BEFORE, INTENSITY_AFTER, INTENSITY_LOSSES] new_columns = [col for col in col_list + [err_col(c) for c in col_list]] kick_df = kick_df.reindex(columns=kick_df.columns.tolist() + new_columns) kick_df = _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after) kick_df = _calculate_intensity_losses_at_kicks(kick_df) return kick_df def _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after): LOG.debug("Calculating beam intensity before and after kicks.") # input signs and order does not matter time_before = sorted(-np.abs(t) for t in time_before) time_after = sorted(np.abs(t) for t in time_after) kick_df.headers[HEADER_TIME_BEFORE] = str(time_before) kick_df.headers[HEADER_TIME_AFTER] = str(time_after) for i, time in enumerate(kick_df.index): # calculate intensity before and after kicks (with error) for column, time_delta in ((INTENSITY_BEFORE, time_before), (INTENSITY_AFTER, time_after)): t_from, t_to = ( time + pd.Timedelta(seconds=time_delta[0]), time + pd.Timedelta(seconds=time_delta[1]), ) data = intensity_df.loc[ t_from:t_to, INTENSITY ] # awesome pandas can handle time intervals! 
kick_df.loc[time, [column, err_col(column)]] = data.mean(), data.std() return kick_df def _calculate_intensity_losses_at_kicks(kick_df): LOG.debug("Calculating intensity losses.") # absolute losses kick_df[INTENSITY_LOSSES] = kick_df[INTENSITY_BEFORE] - kick_df[INTENSITY_AFTER] kick_df[err_col(INTENSITY_LOSSES)] = np.sqrt( np.square(kick_df[err_col(INTENSITY_BEFORE)]) + np.square(kick_df[err_col(INTENSITY_AFTER)]) ) # relative losses, error from error-propagation formular for losses / I_before = 1 - I_after / I_before kick_df[rel_col(INTENSITY_LOSSES)] = kick_df[INTENSITY_LOSSES] / kick_df[INTENSITY_BEFORE] kick_df[rel_col(err_col(INTENSITY_LOSSES))] = np.sqrt( np.square(kick_df[INTENSITY_AFTER] / kick_df[INTENSITY_BEFORE]) * ( np.square(kick_df[err_col(INTENSITY_AFTER)] / kick_df[INTENSITY_AFTER]) + np.square(kick_df[err_col(INTENSITY_BEFORE)] / kick_df[INTENSITY_BEFORE]) ) ) return kick_df # Emittance at Kicks ----------------------------------------------------------- def _add_emittance_to_kicks(plane, energy, kick_df, emittance_df, nominal): LOG.debug("Retrieving normalized emittance at the kicks.") kick_df.headers[HEADER_ENERGY] = energy kick_df.headers[HEADER_BSRT_ROLLING_WINDOW] = ROLLING_AVERAGE_WINDOW col_nemittance = column_norm_emittance(plane) cols_emitt = [mean_col(col_nemittance), err_col(mean_col(col_nemittance))] cols_kick = [col_nemittance, err_col(col_nemittance)] kick_df = kick_df.reindex(columns=kick_df.columns.tolist() + cols_kick) idx_emitt = [emittance_df.columns.get_loc(c) for c in cols_emitt] for time in kick_df.index: idx_kick = emittance_df.index.get_loc(time, method="nearest") kick_df.loc[time, cols_kick] = emittance_df.iloc[idx_kick, idx_emitt].values # add de-normalized emittance normalization = get_proton_gamma(energy) * get_proton_beta( energy ) # norm emittance to emittance col_emittance = column_emittance(plane) kick_df.headers[header_norm_nominal_emittance(plane)] = nominal kick_df.headers[header_nominal_emittance(plane)] = 
nominal / normalization kick_df[col_emittance] = kick_df[col_nemittance] / normalization kick_df[err_col(col_emittance)] = kick_df[err_col(col_nemittance)] / normalization return kick_df # Forced DA Fitting ------------------------------------------------------------ def fun_exp_decay(p, x): # fit and plot """sp = DA_J, x[0] = action (2J res), x[1] = emittance""" return np.exp(-(p - (0.5 * x[0])) / x[1]) def fun_exp_sigma(p, x): # only used for plotting """p = DA_sigma, x = action (J_sigma)""" return np.exp(-0.5 * (p ** 2 - x ** 2)) def fun_linear(p, x): # fit and plot """p = DA_J, x = action (2J res)""" return x * 0.5 - p def swap_fun_parameters(fun): """Parameter swapped for Curvefit.""" return lambda x, p: fun(p, x) def _do_fit(plane, kick_df, fit_type): LOG.debug("Fitting forced da to exponential. ") action, emittance, rel_losses = _get_fit_data(kick_df, plane) init_guess = [INITIAL_DA_FIT * kick_df.headers[header_nominal_emittance(plane)]] get_fit_param = {"linear": _linear_fit_parameters, "exponential": _exponential_fit_parameters}[ fit_type ] fit_fun, x, y, sx, sy = get_fit_param(action, emittance, rel_losses) # do prelim fit init_fit, _ = _fit_curve(swap_fun_parameters(fit_fun), x, y, init_guess) # do odr odr = _fit_odr(fit_fun, x, y, sx, sy, init_fit) # add DA to kick da = odr.beta[0], odr.sd_beta[0] kick_df.headers[header_da(plane)], kick_df.headers[header_da_error(plane)] = da LOG.info(f"Forced DA (wrt. J) in {plane} [m]: {da[0]} ± {da[1]}") return kick_df def _get_fit_data(kick_df, plane): """Extracts necessary data from ``kick-df``. 
Returns tri-tuple of tuples (data, std).""" col_action = column_action(plane) col_emittance = column_emittance(plane) col_losses = rel_col(INTENSITY_LOSSES) # get data action = kick_df[col_action], _no_nonzero_errors(kick_df[err_col(col_action)]) emittance = kick_df[col_emittance], _no_nonzero_errors(kick_df[err_col(col_emittance)]) rel_losses = kick_df[col_losses], _no_nonzero_errors(kick_df[err_col(col_losses)]) return action, emittance, rel_losses def _exponential_fit_parameters(action, emittance, rel_losses): """Returns exponential fit function and parameters. All inputs are tuples of (data, std).""" x = action[0], emittance[0] y = rel_losses[0] sx = [action[1], emittance[1]] sy = rel_losses[1] return fun_exp_decay, x, y, sx, sy def _linear_fit_parameters(action, emittance, rel_losses): """ Returns linear fit function and parameters. All inputs are tuples of (data, std).""" log_losses = np.log(rel_losses[0]) x = action[0] y = emittance[0] * log_losses sx = action[1] sy = np.sqrt( (log_losses * emittance[1]) ** 2 + ((emittance[0] * rel_losses[1]) / rel_losses[0]) ** 2 ) return fun_linear, x, y, sx, sy def _fit_curve(fun, x, y, init): """Initial curve fit, without errors.""" fit, cov = scipy.optimize.curve_fit(fun, x, y, p0=init, maxfev=MAX_CURVEFIT_FEV) LOG.info(f"Initial DA fit: {fit} with cov {cov}") return fit, np.sqrt(np.diag(cov)) def _fit_odr(fun, x, y, sx, sy, init): """ODR Fit (includes errors).""" # fill zero errors with the minimum error - otherwise fit will not work fit_model_sigma = scipy.odr.Model(fun) data_model_sigma = scipy.odr.RealData(x=x, y=y, sx=sx, sy=sy,) da_odr = scipy.odr.ODR(data_model_sigma, fit_model_sigma, beta0=init) # da_odr.set_job(fit_type=2) odr_output = da_odr.run() logging_tools.odr_pprint(LOG.info, odr_output) return odr_output def _no_nonzero_errors(series): """Removes all zero-erros and replaces them with minimum errors in set.""" series = series.copy() nonzero = series[series != 0] if len(nonzero) == 0: raise 
ValueError("All errors are exact zero. Can't do ODR fit.") series[series == 0] = np.abs(series[series != 0]).min() return series def _convert_to_sigmas(plane, kick_df): """Converts the DA and the Action into Sigma-Units.""" LOG.debug("Calculating action and da in sigmas.") nominal_emittance = kick_df.headers[header_nominal_emittance(plane)] emittance = kick_df[column_emittance(plane)] emittance_mean, emittance_std = emittance.mean(), emittance.std() emittance_sign, emittance_sign_std = significant_digits( emittance_mean * 1e12, emittance_std * 1e12 ) LOG.info( f"Measured Emittance {emittance_sign} ± {emittance_sign_std} pm" f" (Nominal {nominal_emittance*1e12: .2f} pm)" ) # DA (in units of J) to DA_sigma da, da_err = kick_df.headers[header_da(plane)], kick_df.headers[header_da_error(plane)] da_sigma, da_sigma_err = ( np.sqrt(2 * da / emittance_mean), da_err / np.sqrt(2 * da * emittance_mean), ) kick_df.headers[header_da(plane, unit="sigma")] = da_sigma kick_df.headers[header_da_error(plane, unit="sigma")] = da_sigma_err LOG.info(f"Forced DA {plane} in N-sigma: {da_sigma} ± {da_sigma_err}") # Action (in units of 2J) to J_sigma col_action = column_action(plane) # kick_df[sigma_col(col_action)] = np.sqrt(kick_df[col_action] / nominal_emittance) # kick_df[err_col(sigma_col(col_action))] = ( # 0.5 * kick_df[err_col(col_action)] / np.sqrt(kick_df[col_action] * nominal_emittance) # ) kick_df[sigma_col(col_action)] = np.sqrt(kick_df[col_action] / emittance) kick_df[err_col(sigma_col(col_action))] = ( 0.5 * kick_df[err_col(col_action)] / np.sqrt(kick_df[col_action] * emittance) ) return kick_df # Plotting --------------------------------------------------------------------- def _plot_intensity(directory, beam, plane, kick_df, intensity_df): """ Plots beam intensity. For losses, the absolute values are used and then normalized to the Intensity before the kicks, to get the percentage relative to that (global) value. 
""" LOG.debug("Plotting beam intensity") fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(16.80, 7.68)) x_span = (kick_df.index.max() - kick_df.index.min()).seconds * np.array( [0.03, 0.09] ) # defines x-limits # convert to % relative to before first kick idx_before = intensity_df.index.get_loc( kick_df.index.min() - pd.Timedelta(seconds=x_span[0]), method="ffill" ) idx_intensity = intensity_df.columns.get_loc(INTENSITY) # for iloc intensity_start = intensity_df.iloc[idx_before, idx_intensity] norm = intensity_start / 100.0 # plot intensity ax.plot( _date2num(intensity_df.index), intensity_df[INTENSITY] / norm, marker=".", markersize=mpl.rcParams["lines.markersize"] * 0.5, fillstyle="full", color=colors.get_mpl_color(0), label=f"Intensity", ) # plot losses per kick normalized_intensity = kick_df.loc[:, [INTENSITY_BEFORE, INTENSITY_AFTER]] / norm normalized_intensity_error = ( kick_df.loc[:, [err_col(INTENSITY_BEFORE), err_col(INTENSITY_AFTER)]] / norm ) normalized_losses = kick_df.loc[:, [INTENSITY_LOSSES, err_col(INTENSITY_LOSSES)]] / norm normalized_losses_kick = ( kick_df.loc[:, [rel_col(INTENSITY_LOSSES), err_col(rel_col(INTENSITY_LOSSES))]] * 100 ) for idx, kick in enumerate(kick_df.index): ax.errorbar( [_date2num(kick)] * 2, normalized_intensity.loc[kick, :], yerr=normalized_intensity_error.loc[kick, :], color=colors.get_mpl_color(1), marker=".", linestyle="-", label="__nolegend__" if idx > 0 else "Losses", ) ax.text( _date2num(kick), 0.5 * sum(normalized_intensity.loc[kick, :]), " -{:.1f}$\pm${:.1f} %\n".format(*normalized_losses.loc[kick, :]) + " (-{:.1f}$\pm${:.1f} %)".format(*normalized_losses_kick.loc[kick, :]), va="bottom", color=colors.get_mpl_color(1), fontdict=dict(fontsize=mpl.rcParams["font.size"] * 0.8), ) _plot_kicks_and_scale_x(ax, kick_df.index, pad=x_span) ylim = [normalized_intensity.min().min(), normalized_intensity.max().max()] ypad = 0.1 * (ylim[1] - ylim[0]) ax.set_ylim([ylim[0] - ypad, ylim[1] + ypad]) ax.set_ylabel(r"Beam Intensity 
[%]") annotations.make_top_legend(ax, ncol=3) plt.tight_layout() annotations.set_name(f"Intensity Beam {beam}, Plane {plane}", fig) annotations.set_annotation( f"Intensity at 100%: {intensity_start*1e-10:.3f}" "$\;\cdot\;10^{{10}}$ charges", ax=ax, position="left", ) _save_fig(directory, plane, fig, "intensity") return fig def _plot_emittances(directory, beam, plane, emittance_df, emittance_bws_df, kick_times): LOG.debug("Plotting normalized emittances") fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10.24, 7.68)) col_norm_emittance = column_norm_emittance(plane) bsrt_color = colors.get_mpl_color(0) bws_color = colors.get_mpl_color(1) ax.errorbar( _date2num(emittance_df.index), emittance_df[col_norm_emittance] * 1e6, # Actual BSRT measurement yerr=emittance_df[err_col(col_norm_emittance)] * 1e6, color=bsrt_color, marker="o", markeredgewidth=2, linestyle="None", label=f"From BSRT", ) ax.errorbar( _date2num(emittance_df.index), emittance_df[mean_col(col_norm_emittance)] * 1e6, yerr=emittance_df[err_col(mean_col(col_norm_emittance))] * 1e6, color=colors.change_color_brightness(bsrt_color, 0.7), marker="", label=f"Moving Average (window = {ROLLING_AVERAGE_WINDOW})", ) if emittance_bws_df is not None and len(emittance_bws_df.index): for d in BWS_DIRECTIONS: label = "__nolegend__" if d == BWS_DIRECTIONS[1] else f"From BWS" color = ( bws_color if d == BWS_DIRECTIONS[1] else colors.change_color_brightness(bws_color, 0.5) ) col_bws_nemittance = column_bws_norm_emittance(plane, d) ax.errorbar( _date2num(emittance_bws_df.index), emittance_bws_df[col_bws_nemittance] * 1e6, yerr=emittance_bws_df[err_col(col_bws_nemittance)] * 1e6, linestyle="None", marker="o", color=color, label=label, markersize=mpl.rcParams["lines.markersize"] * 1.5, ) _plot_kicks_and_scale_x(ax, kick_times) ax.set_ylabel(r"$\epsilon_{n}$ $[\mu m]$") annotations.make_top_legend(ax, ncol=2) plt.tight_layout() annotations.set_name(f"Emittance Beam {beam}, Plane {plane}", fig) _save_fig(directory, plane, fig, 
"emittance") return fig def _plot_da_fit(directory, beam, plane, k_df, fit_type): """ Plot the Forced Dynamic Aperture fit. (I do not like the complexity of this function. jdilly). """ LOG.debug(f"Plotting Dynamic Aperture Fit for {fit_type}") col_action = column_action(plane) col_action_sigma = sigma_col(col_action) col_emittance = column_emittance(plane) col_intensity = rel_col(INTENSITY_LOSSES) kick_df = k_df.copy() kick_df = kick_df.sort_values(by=col_action) fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10.24, 7.68)) # Plot Measurement Data intensity = kick_df[col_intensity] intensity_err = kick_df[err_col(col_intensity)] if fit_type == "linear": intensity_err = np.abs(1 / intensity) * intensity_err intensity = np.log(intensity) else: intensity *= 100 intensity_err *= 100 if fit_type == "norm": action = kick_df[col_action_sigma] action_err = kick_df[err_col(col_action_sigma)] action_x = action action_xerr = action_err else: action = kick_df[col_action] action_err = kick_df[err_col(col_action)] action_x = action * 1e6 action_xerr = action_err * 1e6 ax.errorbar( action_x, intensity, xerr=action_xerr, yerr=intensity_err, marker=".", color=colors.get_mpl_color(0), label=f"Kicks", ) # Plot Fit emittance = kick_df[col_emittance] da, da_err = kick_df.headers[header_da(plane)], kick_df.headers[header_da_error(plane)] da_mu, da_err_mu = significant_digits(da * 1e6, da_err * 1e6) da_label = f"Fit: DA$_J$= ${da_mu} \pm {da_err_mu} \mu m$" if fit_type == "linear": fit_fun = fun_linear fit_data = action multiplier = 1 / emittance # DA-J/emittance = -ln(I/Io) elif fit_type == "exponential": fit_fun = fun_exp_decay fit_data = (action, emittance) multiplier = 100 # for percentages elif fit_type == "norm": da, da_err = ( kick_df.headers[header_da(plane, unit="sigma")], kick_df.headers[header_da_error(plane, unit="sigma")], ) da_round, da_err_round = significant_digits(da, da_err) da_label = f"Fit: DA= ${da_round} \pm {da_err_round} N_{{\sigma}}$" fit_fun = fun_exp_sigma 
fit_data = action multiplier = 100 # for percentages fit_mean = fit_fun(da, fit_data) * multiplier fit_min = fit_fun(da - da_err, fit_data) * multiplier fit_max = fit_fun(da + da_err, fit_data) * multiplier color = colors.get_mpl_color(1) ax.fill_between(action_x, fit_min, fit_max, facecolor=mcolors.to_rgba(color, 0.3)) ax.plot(action_x, fit_mean, ls="--", c=color, label=da_label) # extend fit to 100% losses color_ext = "#7f7f7f" action_max = action.max() emittance_at_max = emittance[action == action_max][0] if fit_type in ["linear", "exponential"]: da_x = da * 2 * 1e6 da_string = "2DA$_J$" elif fit_type == "norm": da_x = da da_string = "DA$_\sigma$" if action_max < da: if fit_type in ["linear", "exponential"]: action_ext = np.linspace(action_max, 2 * da, 10) action_x_ext = action_ext * 1e6 if fit_type == "exponential": fit_data_ext = (action_ext, emittance_at_max) elif fit_type == "linear": fit_data_ext = action_ext multiplier = 1 / emittance_at_max else: action_ext = np.linspace(action_max, da, 10) action_x_ext = action_ext fit_data_ext = action_ext fit_ext = fit_fun(da, fit_data_ext) * multiplier ax.plot( action_x_ext, fit_ext, ls="--", color=mcolors.to_rgba(color_ext, 0.3), label="__nolegend__", ) # DA Marker ax.axvline(da_x, ls="--", color=color_ext, marker="", label="__nolegend__") trans = mtrans.blended_transform_factory(ax.transData, ax.transAxes) # x is data, y is axes ax.text( x=da_x, y=1.0, s=da_string, va="bottom", ha="center", zorder=-1, color=color_ext, transform=trans, ) # Format figure if fit_type == "norm": nominal_emittance = kick_df.headers[header_nominal_emittance(plane)] emittance_mean, emittance_std = emittance.mean(), emittance.std() emittance_sign, emittance_sign_std = significant_digits( emittance_mean * 1e12, emittance_std * 1e12 ) ax.text( x=0, y=1.00, s=( f"$\epsilon_{{mean}}$ = {emittance_sign} $\pm$ {emittance_sign_std} pm " f"($\epsilon_{{nominal}}$ = {nominal_emittance*1e12: .2f} pm)" ), transform=ax.transAxes, va="bottom", 
ha="left", ) ax.set_xlabel( f"$N_{{\sigma}} = \sqrt{{2J_{{{plane if len(plane) == 1 else ''}}}/\epsilon}}$" ) else: ax.set_xlabel(f"$2J_{{{plane if len(plane) == 1 else ''}}} \; [\mu m]$") if fit_type == "linear": ax.set_ylabel(r"ln($I/I_0$)") else: ax.set_ylabel(r"Beam Losses [%]") ax.set_ylim([0, intensity.max() * (1 + YPAD)]) ax.set_xlim([0, None]) annotations.make_top_legend(ax, ncol=3) plt.tight_layout() annotations.set_name( f"DA {'' if fit_type == 'norm' else 'J'} {fit_type} Fit {beam}, Plane {plane}", fig ) _save_fig(directory, plane, fig, f"dafit_{fit_type}") return fig def _get_fit_plot_data(da, da_err, data, fit_type): fit_fun = {"exponential": fun_exp_decay, "linear": fun_linear}[fit_type] multiplier = 100 # for percentages if fit_type == "linear": multiplier = 1 / data[1] # DA-J/emittance = -ln(I/Io) data = data[0] fit_mean = fit_fun(da, data) * multiplier fit_min = fit_fun(da - da_err, data) * multiplier fit_max = fit_fun(da + da_err, data) * multiplier return fit_mean, fit_min, fit_max # Helper --- def _plot_kicks_and_scale_x(ax, kick_times, pad=20): lines.plot_vertical_lines_fast( ax, kick_times, color="grey", linestyle="--", alpha=0.8, marker="", label="Kicks" ) first_kick, last_kick = kick_times.min(), kick_times.max() try: time_delta = [pd.Timedelta(seconds=pad[i]) for i in range(2)] except TypeError: time_delta = [pd.Timedelta(seconds=pad) for _ in range(2)] # ax.set_xlim([(first_kick - time_delta[0]).timestamp, last_kick + time_delta[1]]) # worked in the past ax.set_xlim([_date2num(first_kick - time_delta[0]), _date2num(last_kick + time_delta[1])]) ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S")) ax.set_xlabel("Time") annotations.set_annotation(f"Date: {first_kick.strftime('%Y-%m-%d')}", ax, "left") def _merge_df_planes(df_dict, planes): """In case planes == 'XY' merge the ``df_dict`` into one dataframe..""" if len(planes) == 1: return df_dict[planes] return pd.merge(*df_dict.values(), how="inner", left_index=True, 
right_index=True) def _maybe_add_sum_for_planes(df, planes, col_fun, col_err_fun=None): """In case planes == 'XY' add the two plane columns and their errors.""" if len(planes) > 1: if col_err_fun is not None: cols = lambda p: [col_fun(p), col_err_fun(p)] x_cols, y_cols = [cols(p) for p in planes] df = df.reindex(columns=df.columns.to_list() + cols(planes)) df[cols(planes)] = np.array( toolbox.df_sum_with_err( df, a_col=x_cols[0], b_col=y_cols[0], a_err_col=x_cols[1], b_err_col=y_cols[1] ) ).T else: x_col, y_col = [col_fun(p) for p in planes] df[col_fun(planes)] = toolbox.df_sum(df, a_col=x_col, b_col=y_col) return df def _date2num(times): """ Convert CERNDatetime to mpl-number (days). Converts input times to plain-datetime first as date2num causes infinite loop with CernDateTimes in **Python 3.8**. """ try: times = [cdt.datetime for cdt in times] except AttributeError: pass # probably datetime already except TypeError: try: # not iterable times = times.datetime except AttributeError: pass # probably datetime already return mdates.date2num(times) def _save_fig(directory, plane, fig, ptype): try: for ftype in PLOT_FILETYPES: path = os.path.join(directory, outfile_plot(ptype, plane, ftype)) LOG.debug(f"Saving Figure to {path}") fig.savefig(path) except IOError: LOG.error(f"Couldn't create output files for {ptype} plots.") # Script Mode ------------------------------------------------------------------ if __name__ == "__main__": main()
{ "alphanum_fraction": 0.6332804015, "author": null, "avg_line_length": 35.447533009, "converted": null, "ext": "py", "file": null, "hexsha": "e12e962e29477d438462ea394ddccc482d4b42f8", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2017-01-25T11:15:05.000Z", "max_forks_repo_forks_event_min_datetime": "2015-06-26T07:11:37.000Z", "max_forks_repo_head_hexsha": "39add449af78dff534ba58281f472578bde797de", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "pylhc/PyLHC", "max_forks_repo_path": "pylhc/forced_da_analysis.py", "max_issues_count": 52, "max_issues_repo_head_hexsha": "39add449af78dff534ba58281f472578bde797de", "max_issues_repo_issues_event_max_datetime": "2022-02-21T13:11:26.000Z", "max_issues_repo_issues_event_min_datetime": "2019-04-30T23:35:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "pylhc/PyLHC", "max_issues_repo_path": "pylhc/forced_da_analysis.py", "max_line_length": 178, "max_stars_count": 3, "max_stars_repo_head_hexsha": "39add449af78dff534ba58281f472578bde797de", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "pylhc/PyLHC", "max_stars_repo_path": "pylhc/forced_da_analysis.py", "max_stars_repo_stars_event_max_datetime": "2021-04-07T18:11:54.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-10T14:16:56.000Z", "num_tokens": 13177, "path": null, "reason": "import numpy,import scipy", "repo": null, "save_path": null, "sha": null, "size": 51009 }
import os import scipy.io import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable import torch import time import sys sys.path.insert(0, '../prroi_pool') from modules.prroi_pool import PrRoIPool2D def append_params(params, module, prefix): for child in module.children(): for k, p in child.named_parameters(): if p is None: continue if isinstance(child, nn.BatchNorm2d): name = prefix + '_bn_' + k else: name = prefix + '_' + k if name not in params: params[name] = p else: raise RuntimeError("Duplicated param name: %s" % name) class LRN(nn.Module): def __init__(self, local_size=1, alpha=0.0001, beta=0.75, ACROSS_CHANNELS=False): super(LRN, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if self.ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, x): if self.ACROSS_CHANNELS: div = x.pow(2).unsqueeze(1) div = self.average(div).squeeze(1) div = div.mul(self.alpha).add(2.0).pow(self.beta) else: div = x.pow(2) div = self.average(div) div = div.mul(self.alpha).add(2.0).pow(self.beta) x = x.div(div) return x class MDNetRgbtPixelLevelFusion(nn.Module): def __init__(self, model_path=None, K=1): super(MDNetRgbtPixelLevelFusion, self).__init__() self.K = K self.layers = nn.Sequential(OrderedDict([ ('conv1', nn.Sequential(nn.Conv2d(4, 96, kernel_size=7, stride=2), nn.ReLU(), LRN(), nn.MaxPool2d(kernel_size=3, stride=2) )), ('conv2', nn.Sequential(nn.Conv2d(96, 256, kernel_size=5, stride=2, dilation=1), nn.ReLU(), LRN(), )), ('conv3', nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=1, dilation=3), nn.ReLU(), )), ('fc4', nn.Sequential(nn.Linear(512 * 3 * 3, 512), nn.ReLU())), ('fc5', nn.Sequential(nn.Dropout(0.5), nn.Linear(512, 512), 
nn.ReLU()))])) self.branches = nn.ModuleList([nn.Sequential(nn.Dropout(0.5), nn.Linear(512, 2)) for _ in range(K)]) self.roi_pool_model = PrRoIPool2D(3, 3, 1. / 8) self.receptive_field = 75. # it is receptive fieald that a element of feat_map covers. feat_map is bottom layer of ROI_align_layer if model_path is not None: if os.path.splitext(model_path)[1] == '.pth': self.load_model(model_path) elif os.path.splitext(model_path)[1] == '.mat': self.load_mat_model(model_path) else: raise RuntimeError("Unkown model format: %s" % model_path) self.build_param_dict() def build_param_dict(self): self.params = OrderedDict() for name, module in self.layers.named_children(): append_params(self.params, module, name) for k, module in enumerate(self.branches): append_params(self.params, module, 'fc6_%d' % k) def set_learnable_params(self, layers): for k, p in self.params.items(): if any([k.startswith(l) for l in layers]): p.requires_grad = True else: p.requires_grad = False def get_learnable_params(self): params = OrderedDict() for k, p in self.params.items(): if p.requires_grad: params[k] = p return params def forward(self, x, k=0, in_layer='conv1', out_layer='fc6'): run = False for name, module in self.layers.named_children(): if name == in_layer: run = True if run: x = module(x) if name == out_layer: return x x = self.branches[k](x) if out_layer == 'fc6': return x elif out_layer == 'fc6_softmax': return F.softmax(x) def load_model(self, model_path): states = torch.load(model_path) shared_layers = states['shared_layers'] try: self.layers.load_state_dict(shared_layers) except: self.layers.load_state_dict(shared_layers, strict=False) print('Missing key(s) in state_dict, already set strict=False') print('load .pth model finish......') def load_mat_model(self, matfile): mat = scipy.io.loadmat(matfile) mat_layers = list(mat['layers'])[0] # copy conv weights for i in range(3): weight, bias = mat_layers[i * 4]['weights'].item()[0] if i == 0: self.layers[i][0].weight.data = torch.cat(( 
torch.from_numpy(np.transpose(weight, (3, 2, 0, 1)))[:, 0, :, :].view( weight.shape[3], 1, weight.shape[0], weight.shape[1]), torch.from_numpy(np.transpose(weight, (3, 2, 0, 1)))), 1) self.layers[i][0].bias.data = torch.from_numpy(bias[:, 0]) else: self.layers[i][0].weight.data = torch.from_numpy(np.transpose(weight, (3, 2, 0, 1))) self.layers[i][0].bias.data = torch.from_numpy(bias[:, 0]) print('load .mat model finish......') def trainSpatialTransform(self, image, bb): return class BinaryLoss(nn.Module): def __init__(self): super(BinaryLoss, self).__init__() def forward(self, pos_score, neg_score): pos_loss = -F.log_softmax(pos_score, dim=1)[:, 1] neg_loss = -F.log_softmax(neg_score, dim=1)[:, 0] loss = (pos_loss.sum() + neg_loss.sum()) / (pos_loss.size(0) + neg_loss.size(0)) return loss class Accuracy: def __call__(self, pos_score, neg_score): pos_correct = (pos_score[:, 1] > pos_score[:, 0]).sum().float() neg_correct = (neg_score[:, 1] < neg_score[:, 0]).sum().float() pos_acc = pos_correct / (pos_score.size(0) + 1e-8) neg_acc = neg_correct / (neg_score.size(0) + 1e-8) return pos_acc.item(), neg_acc.item() class Precision: def __call__(self, pos_score, neg_score): scores = torch.cat((pos_score[:, 1], neg_score[:, 1]), 0) topk = torch.topk(scores, pos_score.size(0))[1] prec = (topk < pos_score.size(0)).float().sum() / (pos_score.size(0) + 1e-8) return prec.item() if __name__ == "__main__": model = MDNetRgbtPixelLevelFusion('/home/pjc/MyProgram/RT-MDNet/models/imagenet-vgg-m.mat') print(model.layers[0][0].weight.shape) print(model.layers[0][0].bias.shape) print(model.layers[1][0].weight.shape) print(model.layers[1][0].bias.shape) print(model.layers[2][0].weight.shape) print(model.layers[2][0].bias.shape) x = torch.rand(1, 4, 107, 107) model.cuda() y = model(x.cuda(), out_layer='conv3') z = model.roi_pool_model(y, torch.tensor([[0, 0, 0, 10, 10], ]).float().cuda()) z = z.view(z.size(0), -1) z = model(z, in_layer='fc4', out_layer='fc6')
{ "alphanum_fraction": 0.5364779874, "author": null, "avg_line_length": 36.1363636364, "converted": null, "ext": "py", "file": null, "hexsha": "71dc0de1f9b4e56ed28314886c45b7d5949d386e", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "49e83501f81515aebca211351e315896da7afc54", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "PengJingchao/DFNet", "max_forks_repo_path": "modules/backbone/rtmdnet_RGBT_pixel_level_fusion.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "49e83501f81515aebca211351e315896da7afc54", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "PengJingchao/DFNet", "max_issues_repo_path": "modules/backbone/rtmdnet_RGBT_pixel_level_fusion.py", "max_line_length": 139, "max_stars_count": null, "max_stars_repo_head_hexsha": "49e83501f81515aebca211351e315896da7afc54", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "PengJingchao/DFNet", "max_stars_repo_path": "modules/backbone/rtmdnet_RGBT_pixel_level_fusion.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1953, "path": null, "reason": "import numpy,import scipy", "repo": null, "save_path": null, "sha": null, "size": 7950 }
import functools import pickle import random from baselines.common.vec_env import VecEnvWrapper import gym import numpy as np import os.path as osp import tensorflow as tf from rllab.envs.base import Env, EnvSpec import rllab.misc.logger as rl_logger from sandbox.rocky.tf.envs.base import TfEnv, to_tf_space from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline from rllab.envs.gym_env import convert_gym_space from rllab.core.serializable import Serializable from rllab.spaces import Box from sandbox.rocky.tf.distributions.diagonal_gaussian import DiagonalGaussian from sandbox.rocky.tf.policies.base import StochasticPolicy from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy from airl.algos.irl_trpo import IRLTRPO from airl.models.airl_state import AIRL as AIRLStateOnly from airl.models.imitation_learning import AIRLStateAction from airl.utils.log_utils import rllab_logdir from pirl.agents.sample import SampleVecMonitor from pirl.utils import sanitize_env_name class VecInfo(VecEnvWrapper): def reset(self): return self.venv.reset() def step_wait(self): obs, rewards, dones, env_infos = self.venv.step_wait() #SOMEDAY: handle env_infos with different keys #The problem is bench.Monitor adds an episode key only when an episode #ends. stack_tensor_dict_list assumes constant keys, so this breaks #when some but not all envirnoments are done. #env_infos is only used for some debugging code, so just removing this. 
#env_infos = tensor_utils.stack_tensor_dict_list(env_infos) return obs, rewards, dones, {} def terminate(self): # Normally we'd close environments, but pirl.experiments handles this pass class VecGymEnv(Env): def __init__(self, venv): self.venv = venv self._observation_space = convert_gym_space(venv.observation_space) self._action_space = convert_gym_space(venv.action_space) @property def observation_space(self): return self._observation_space @property def action_space(self): return self._action_space def terminate(self): # Normally we'd close environments, but pirl.experiments handles this. pass @property def vectorized(self): return True def vec_env_executor(self, n_envs, max_path_length): # SOMEDAY: make these parameters have an effect? # We're powerless as the environments have already been created. # But I'm not too bothered by this, as we can tweak them elsewhere. return VecInfo(self.venv) class GaussianPolicy(StochasticPolicy, Serializable): def __init__(self, env_spec, name=None, mean=0.0, log_std=1.0): with tf.variable_scope(name): assert isinstance(env_spec.action_space, Box) Serializable.quick_init(self, locals()) self.action_dim = env_spec.action_space.flat_dim self._dist = DiagonalGaussian(self.action_dim) self.mean = mean * np.ones(self.action_dim) self.log_std = log_std * np.ones(self.action_dim) self.mean_tf = tf.constant(self.mean, dtype=tf.float32) self.log_std_tf = tf.constant(self.log_std, dtype=tf.float32) self.dummy_var = tf.get_variable(name='dummy', shape=self.action_dim) super(GaussianPolicy, self).__init__(env_spec=env_spec) @property def vectorized(self): return True def get_action(self, observation): rnd = np.random.normal(size=(self.action_dim, )) action = self.mean + np.exp(self.log_std) * rnd info = dict(mean=self.mean, log_std=self.log_std) return action, info def get_actions(self, observations): n = len(observations) shape = (n, self.action_dim) mean = np.broadcast_to(self.mean, shape) log_std = np.broadcast_to(self.log_std, shape) 
rnd = np.random.normal(size=shape) action = mean + np.exp(log_std) * rnd info = dict(mean=mean, log_std=log_std) return action, info @property def distribution(self): return self._dist def dist_info_sym(self, obs_var, state_info_vars): return dict(mean=self.mean_tf, log_std=self.log_std_tf) def dist_info(self, obs, state_infos): return dict(mean=self.mean, log_std=self.log_std) def get_params_internal(self, **tags): # Fake it as RLLab gets confused if we have no variables return [self.dummy_var] def _convert_trajectories(trajs): '''Convert trajectories from format used in PIRL to that expected in AIRL. Args: - trajs: trajectories in AIRL format. That is, a list of 2-tuples (obs, actions), where obs and actions are equal-length lists containing observations and actions. Returns: trajectories in AIRL format. A list of dictionaries, containing keys 'observations' and 'actions', with values that are equal-length numpy arrays.''' return [{'observations': np.array(obs), 'actions': np.array(actions)} for obs, actions in trajs] def irl(venv, trajectories, discount, seed, log_dir, *, tf_cfg, model_cfg=None, policy_cfg=None, training_cfg={}): envs = VecGymEnv(venv) envs = TfEnv(envs) experts = _convert_trajectories(trajectories) train_graph = tf.Graph() with train_graph.as_default(): tf.set_random_seed(seed) if model_cfg is None: model_cfg = {'model': AIRLStateOnly, 'state_only': True, 'max_itrs': 10} model_kwargs = dict(model_cfg) model_cls = model_kwargs.pop('model') irl_model = model_cls(env_spec=envs.spec, expert_trajs=experts, **model_kwargs) if policy_cfg is None: policy_cfg = {'policy': GaussianMLPPolicy, 'hidden_sizes': (32, 32)} else: policy_cfg = dict(policy_cfg) policy_fn = policy_cfg.pop('policy') policy = policy_fn(name='policy', env_spec=envs.spec, **policy_cfg) training_kwargs = { 'n_itr': 1000, 'batch_size': 10000, 'max_path_length': 500, 'irl_model_wt': 1.0, 'entropy_weight': 0.1, # paths substantially increase storage requirements 'store_paths': False, } 
training_kwargs.update(training_cfg) algo = IRLTRPO( env=envs, policy=policy, irl_model=irl_model, discount=discount, sampler_args=dict(n_envs=venv.num_envs), zero_environment_reward=True, baseline=LinearFeatureBaseline(env_spec=envs.spec), **training_kwargs ) with rllab_logdir(algo=algo, dirname=log_dir): with tf.Session(config=tf_cfg): algo.train() reward_params = irl_model.get_params() # Side-effect: forces policy to cache all parameters. # This ensures they are saved/restored during pickling. policy.get_params() # Must pickle policy rather than returning it directly, # since parameters in policy will not survive across tf sessions. policy_pkl = pickle.dumps(policy) reward = model_cfg, reward_params return reward, policy_pkl def metalearn(venvs, trajectories, discount, seed, log_dir, *, tf_cfg, outer_itr=1000, lr=1e-2, model_cfg=None, policy_cfg=None, training_cfg={}, policy_per_task=False): envs = {k: TfEnv(VecGymEnv(v)) for k, v in venvs.items()} env_spec = list(envs.values())[0].spec num_envs = list(venvs.values())[0].num_envs tasks = list(envs.keys()) experts = {k: _convert_trajectories(v) for k, v in trajectories.items()} train_graph = tf.Graph() with train_graph.as_default(): tf.set_random_seed(seed) if model_cfg is None: model_cfg = {'model': AIRLStateOnly, 'state_only': True, 'max_itrs': 10} model_kwargs = dict(model_cfg) model_cls = model_kwargs.pop('model') irl_model = model_cls(env_spec=env_spec, **model_kwargs) if policy_cfg is None: policy_cfg = {'policy': GaussianMLPPolicy, 'hidden_sizes': (32, 32)} else: policy_cfg = dict(policy_cfg) policy_fn = policy_cfg.pop('policy') policy = policy_fn(name='policy', env_spec=env_spec, **policy_cfg) pol_params = {} training_kwargs = { 'n_itr': 10, 'batch_size': 10000, 'max_path_length': 500, 'irl_model_wt': 1.0, 'entropy_weight': 0.1, # paths substantially increase storage requirements 'store_paths': False, } training_kwargs.update(training_cfg) algos = {k: IRLTRPO(env=env, policy=policy, irl_model=irl_model, 
discount=discount, sampler_args=dict(n_envs=num_envs), zero_environment_reward=True, baseline=LinearFeatureBaseline(env_spec=env_spec), **training_kwargs) for k, env in envs.items()} with tf.Session(config=tf_cfg) as sess: sess.run(tf.global_variables_initializer()) meta_reward_params = irl_model.get_params() for i in range(outer_itr): task = random.choice(tasks) pol_task = task if policy_per_task else None itr_logdir = osp.join(log_dir, '{}_{}'.format(i, sanitize_env_name(task))) with rllab_logdir(algo=algos[task], dirname=itr_logdir): with rl_logger.prefix('outer itr {} | task {}'.format(i, task)): irl_model.set_demos(experts[task]) # TODO: rather than specifying these as initializers, # might be more efficient to have AIRL not overwrite # these variables each call to train()? algos[task].init_irl_params = meta_reward_params algos[task].init_pol_params = pol_params.get(pol_task) algos[task].train() # Meta-update reward # {meta,task}_reward_params are lists of NumPy arrays task_reward_params = irl_model.get_params() assert len(task_reward_params) == len(meta_reward_params) for i in range(len(task_reward_params)): meta, task = meta_reward_params[i], task_reward_params[i] # Reptile update: meta <- meta + lr * (task - meta) #TODO: use Adam optimizer? 
meta_reward_params[i] = (1 - lr) * meta + lr * task # Store policy update (joint if not policy_per_task) pol_params[pol_task] = policy.get_param_values() reward = model_kwargs, meta_reward_params return reward def finetune(metainit, venv, trajectories, discount, seed, log_dir, *, tf_cfg, pol_itr=100, irl_itr=100, model_cfg=None, policy_cfg=None, training_cfg={}): envs = VecGymEnv(venv) envs = TfEnv(envs) experts = _convert_trajectories(trajectories) train_graph = tf.Graph() with train_graph.as_default(): tf.set_random_seed(seed) if model_cfg is None: model_cfg = {'model': AIRLStateOnly, 'state_only': True, 'max_itrs': 10} model_kwargs = dict(model_cfg) model_cls = model_kwargs.pop('model') irl_model = model_cls(env_spec=envs.spec, expert_trajs=experts, **model_kwargs) if policy_cfg is None: policy_cfg = {'policy': GaussianMLPPolicy, 'hidden_sizes': (32, 32)} else: policy_cfg = dict(policy_cfg) policy_fn = policy_cfg.pop('policy') policy = policy_fn(name='policy', env_spec=envs.spec, **policy_cfg) training_kwargs = { 'batch_size': 10000, 'max_path_length': 500, 'irl_model_wt': 1.0, 'entropy_weight': 0.1, # paths substantially increase storage requirements 'store_paths': False, } training_kwargs.update(training_cfg) _kwargs, reward_params = metainit algo = IRLTRPO( env=envs, policy=policy, irl_model=irl_model, discount=discount, sampler_args=dict(n_envs=venv.num_envs), zero_environment_reward=True, baseline=LinearFeatureBaseline(env_spec=envs.spec), init_irl_params=reward_params, train_irl=False, n_itr=pol_itr, **training_kwargs ) with tf.Session(config=tf_cfg): # First round: just optimize the policy, do not update IRL model with rllab_logdir(algo=algo, dirname=osp.join(log_dir, 'pol')): with rl_logger.prefix('finetune policy |'): algo.train() pol_params = policy.get_param_values() # Second round: we have a good policy (generator), update IRL with rllab_logdir(algo=algo, dirname=osp.join(log_dir, 'all')): with rl_logger.prefix('finetune all |'): algo.train_irl = 
True algo.init_pol_params = pol_params algo.n_itr = irl_itr algo.train() reward_params = irl_model.get_params() # Side-effect: forces policy to cache all parameters. # This ensures they are saved/restored during pickling. policy.get_params() # Must pickle policy rather than returning it directly, # since parameters in policy will not survive across tf sessions. policy_pkl = pickle.dumps(policy) reward = model_cfg, reward_params return reward, policy_pkl def sample(venv, policy_pkl, num_episodes, seed, tf_cfg): venv = SampleVecMonitor(venv) infer_graph = tf.Graph() with infer_graph.as_default(): tf.set_random_seed(seed) with tf.Session(config=tf_cfg): policy = pickle.loads(policy_pkl) completed = 0 obs = venv.reset() while completed < num_episodes: a, _info = policy.get_actions(obs) obs, _r, dones, _info = venv.step(a) completed += np.sum(dones) return venv.trajectories def _setup_model(env, new_reward, tf_cfg): env_spec = EnvSpec( observation_space=to_tf_space(convert_gym_space(env.observation_space)), action_space=to_tf_space(convert_gym_space(env.action_space))) model_cfg, reward_params = new_reward infer_graph = tf.Graph() with infer_graph.as_default(): model_kwargs = dict(model_cfg) model_cls = model_kwargs.pop('model') irl_model = model_cls(env_spec=env_spec, expert_trajs=None, **model_kwargs) if model_cls == AIRLStateOnly: reward_var = irl_model.reward elif model_cls == AIRLStateAction: reward_var = irl_model.energy else: assert False, "Unsupported model type" sess = tf.Session(config=tf_cfg) with sess.as_default(): irl_model.set_params(reward_params) return sess, irl_model, reward_var class AIRLRewardWrapper(gym.Wrapper): """Wrapper for a Env, using a reward network.""" def __init__(self, env, new_reward, tf_cfg): self.sess, self.irl_model, self.reward_var = _setup_model(env, new_reward, tf_cfg) super().__init__(env) def step(self, action): obs, old_reward, done, info = self.env.step(action) feed_dict = {self.irl_model.act_t: np.array([action]), 
self.irl_model.obs_t: np.array([obs])} new_reward = self.sess.run(self.reward_var, feed_dict=feed_dict) return obs, new_reward[0][0], done, info def reset(self, **kwargs): return self.env.reset(**kwargs) def close(self): self.sess.close() self.env.close() class AIRLVecRewardWrapper(VecEnvWrapper): """Wrapper for a VecEnv, using a reward network.""" def __init__(self, venv, new_reward, tf_cfg): self.sess, self.irl_model, self.reward_var = _setup_model(venv, new_reward, tf_cfg) super().__init__(venv) def step_async(self, actions): self.last_actions = actions self.venv.step_async(actions) def step_wait(self): obs, _old_rewards, dones, info = self.venv.step_wait() feed_dict = {self.irl_model.act_t: np.array(self.last_actions), self.irl_model.obs_t: np.array(obs)} new_reward = self.sess.run(self.reward_var, feed_dict=feed_dict) return obs, new_reward.flat, dones, info def reset(self): return self.venv.reset() def close(self): self.sess.close() self.venv.close() def airl_reward_wrapper(env, new_reward, tf_cfg): cls = AIRLVecRewardWrapper if hasattr(env, 'num_envs') else AIRLRewardWrapper return cls(env, new_reward, tf_cfg)
{ "alphanum_fraction": 0.6176670827, "author": null, "avg_line_length": 38.0691144708, "converted": null, "ext": "py", "file": null, "hexsha": "447354c5e894cea53e3b76fd46177f39196e18f2", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-04-01T09:39:04.000Z", "max_forks_repo_forks_event_min_datetime": "2019-04-20T01:09:41.000Z", "max_forks_repo_head_hexsha": "c0881829adb750a9e43e90ce632851eed3e3a5e5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "HumanCompatibleAI/population-irl", "max_forks_repo_path": "pirl/irl/airl.py", "max_issues_count": 9, "max_issues_repo_head_hexsha": "c0881829adb750a9e43e90ce632851eed3e3a5e5", "max_issues_repo_issues_event_max_datetime": "2022-01-17T02:39:35.000Z", "max_issues_repo_issues_event_min_datetime": "2018-04-22T22:05:22.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "HumanCompatibleAI/population-irl", "max_issues_repo_path": "pirl/irl/airl.py", "max_line_length": 111, "max_stars_count": 18, "max_stars_repo_head_hexsha": "c0881829adb750a9e43e90ce632851eed3e3a5e5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "HumanCompatibleAI/population-irl", "max_stars_repo_path": "pirl/irl/airl.py", "max_stars_repo_stars_event_max_datetime": "2022-02-25T11:45:31.000Z", "max_stars_repo_stars_event_min_datetime": "2018-07-26T05:36:24.000Z", "num_tokens": 3954, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 17626 }
[STATEMENT] lemma scene_union_foldr_remove_element: assumes "set xs \<subseteq> set Vars" shows "a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs) [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: set xs \<subseteq> set Vars goal (1 subgoal): 1. a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs) [PROOF STEP] proof (induct xs) [PROOF STATE] proof (state) goal (2 subgoals): 1. set [] \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S [] = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a []) 2. \<And>aa xs. \<lbrakk>set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); set (aa # xs) \<subseteq> set Vars\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S (aa # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (aa # xs)) [PROOF STEP] case Nil [PROOF STATE] proof (state) this: set [] \<subseteq> set Vars goal (2 subgoals): 1. set [] \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S [] = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a []) 2. \<And>aa xs. \<lbrakk>set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); set (aa # xs) \<subseteq> set Vars\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S (aa # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (aa # xs)) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: set [] \<subseteq> set Vars [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: set [] \<subseteq> set Vars goal (1 subgoal): 1. 
a \<squnion>\<^sub>S \<Squnion>\<^sub>S [] = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a []) [PROOF STEP] by simp [PROOF STATE] proof (state) this: a \<squnion>\<^sub>S \<Squnion>\<^sub>S [] = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a []) goal (1 subgoal): 1. \<And>aa xs. \<lbrakk>set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); set (aa # xs) \<subseteq> set Vars\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S (aa # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (aa # xs)) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>aa xs. \<lbrakk>set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); set (aa # xs) \<subseteq> set Vars\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S (aa # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (aa # xs)) [PROOF STEP] case (Cons a xs) [PROOF STATE] proof (state) this: set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs) set (a # xs) \<subseteq> set Vars goal (1 subgoal): 1. \<And>aa xs. 
\<lbrakk>set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); set (aa # xs) \<subseteq> set Vars\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S (aa # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (aa # xs)) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs) set (a # xs) \<subseteq> set Vars [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: set xs \<subseteq> set Vars \<Longrightarrow> a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs) set (a # xs) \<subseteq> set Vars goal (1 subgoal): 1. a \<squnion>\<^sub>S \<Squnion>\<^sub>S (a # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (a # xs)) [PROOF STEP] apply auto [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); a \<in> set Vars; set xs \<subseteq> set Vars; a = a\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S (a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs)) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs) 2. \<lbrakk>a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); a \<in> set Vars; set xs \<subseteq> set Vars; a \<noteq> a\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S (a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs) = a \<squnion>\<^sub>S (a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs)) [PROOF STEP] apply (metis order_trans scene_space.Vars_scene_space scene_space_foldr scene_space_union_assoc scene_union_idem set_Vars_scene_space) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
\<lbrakk>a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs); a \<in> set Vars; set xs \<subseteq> set Vars; a \<noteq> a\<rbrakk> \<Longrightarrow> a \<squnion>\<^sub>S (a \<squnion>\<^sub>S \<Squnion>\<^sub>S xs) = a \<squnion>\<^sub>S (a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a xs)) [PROOF STEP] apply (smt (verit, best) Diff_subset dual_order.trans removeAll_id scene_space_foldr scene_space_union_assoc scene_union_commute set_Vars_scene_space set_removeAll subset_iff) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done [PROOF STATE] proof (state) this: a \<squnion>\<^sub>S \<Squnion>\<^sub>S (a # xs) = a \<squnion>\<^sub>S \<Squnion>\<^sub>S (removeAll a (a # xs)) goal: No subgoals! [PROOF STEP] qed
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Optics_Scene_Spaces", "hexsha": null, "include": null, "lang": null, "length": 15, "llama_tokens": 2475, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import torch as tc import torch.nn as nn import torch.nn.functional as F from transformers import BertModel , BertTokenizer import pdb import math def loss_1(pred , anss , ents , no_rel , class_weight , pad_ix = -100): ''' 直接平均,按类别加权 and unweighted avg ''' import numpy as np bs , ne , _ , d = pred.size() if no_rel < 0: no_rel = pad_ix #ignore index num = 0 rel_map2 = np.zeros((bs, ne, ne))+no_rel _ = [[rel_map2.itemset((i,u,v),t) for u,v,t in b] for i,b in enumerate(anss)] rel_map2 = tc.LongTensor(rel_map2).to(pred.device) for _b in range(bs): tmp = rel_map2[_b] * 0 + pad_ix t_tmp = rel_map2[_b][:len(ents[_b]) , :len(ents[_b])] #t_tmp = tc.tril(rel_map2[_b][:len(ents[_b]) , :len(ents[_b])] , diagonal = -1) #t_tmp = t_tmp - tc.triu( tc.ones(t_tmp.size() , device = t_tmp.device) ) * 100 tmp[:len(ents[_b]) , :len(ents[_b])] = t_tmp rel_map2[_b] = tmp num += len(ents[_b]) * len(ents[_b]) #assert num == (rel_map2!=-100).long().sum() #----- style 1 ----- loss_f = F.cross_entropy( pred.view(-1, pred.size(-1)), rel_map2.view(-1), weight=tc.FloatTensor(class_weight).to(pred), ignore_index=pad_ix , reduction = "sum") loss_f = loss_f / num assert float(loss_f) == float(loss_f) return loss_f def loss_2(pred , anss , ents , no_rel , class_weight , pad_ix = -100): ''' 直接平均,按类别加权 and weighted avg ''' import numpy as np bs , ne , _ , d = pred.size() if no_rel < 0: no_rel = pad_ix #ignore index num = 0 rel_map2 = np.zeros((bs, ne, ne))+no_rel _ = [[rel_map2.itemset((i,u,v),t) for u,v,t in b] for i,b in enumerate(anss)] rel_map2 = tc.LongTensor(rel_map2).to(pred.device) for _b in range(bs): tmp = rel_map2[_b] * 0 + pad_ix t_tmp = rel_map2[_b][:len(ents[_b]) , :len(ents[_b])] #t_tmp = tc.tril(rel_map2[_b][:len(ents[_b]) , :len(ents[_b])] , diagonal = -1) #t_tmp = t_tmp - tc.triu( tc.ones(t_tmp.size() , device = t_tmp.device) ) * 100 tmp[:len(ents[_b]) , :len(ents[_b])] = t_tmp rel_map2[_b] = tmp num += len(ents[_b]) * len(ents[_b]) loss_f = F.cross_entropy( pred.view(-1, 
pred.size(-1)), rel_map2.view(-1), weight=tc.FloatTensor(class_weight).to(pred), ignore_index=pad_ix) assert float(loss_f) == float(loss_f) return loss_f def get_loss_func(name): return { "loss_1" : loss_1 , "loss_2" : loss_2 , }[name]
{ "alphanum_fraction": 0.6378865979, "author": null, "avg_line_length": 26.7586206897, "converted": null, "ext": "py", "file": null, "hexsha": "efa88fa24b2ca0d4e8b1227f03037e819e86c2d3", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-08-10T01:04:16.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-18T16:47:31.000Z", "max_forks_repo_head_hexsha": "a099e98f3708a39debeed4dc522ff57c4f6b960d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "FFTYYY/RoR_relation_extraction", "max_forks_repo_path": "loss/losses.py", "max_issues_count": 7, "max_issues_repo_head_hexsha": "a099e98f3708a39debeed4dc522ff57c4f6b960d", "max_issues_repo_issues_event_max_datetime": "2021-08-04T08:39:10.000Z", "max_issues_repo_issues_event_min_datetime": "2020-06-21T08:32:26.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "FFTYYY/RoR_relation_extraction", "max_issues_repo_path": "loss/losses.py", "max_line_length": 88, "max_stars_count": 25, "max_stars_repo_head_hexsha": "a099e98f3708a39debeed4dc522ff57c4f6b960d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "FFTYYY/RoR_relation_extraction", "max_stars_repo_path": "loss/losses.py", "max_stars_repo_stars_event_max_datetime": "2021-12-22T10:47:18.000Z", "max_stars_repo_stars_event_min_datetime": "2020-06-09T01:25:14.000Z", "num_tokens": 845, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2328 }
import numpy as np
import itertools

from .displacements import Displacements
from .kvectors import Kvectors


class Lattice():
    """Class to generate the lattice."""

    # NOTE: NumPy >= 1.24 removed the np.float/np.int/np.bool aliases; the
    # builtin types are used throughout instead.
    __vecsLattice = np.array([], dtype=float)
    __vecsBasis = np.array([], dtype=float)
    __idxBasis = np.array([])
    __idxSub = np.array([])
    __vecsReciprocal = np.array([], dtype=float)
    __posBrillouinZone = np.array([], dtype=float)
    __posBrillouinPath = np.array([], dtype=float)
    __specialPoints = { }
    __tol = 1e-16

    def __init__(self):
        # Use a per-instance dict: addSpecialPoint mutates this dict, so a
        # class-level dict would be shared (and polluted) across instances.
        self.__specialPoints = {}
        self.initialize()

    def initialize(self):
        pass

    @property
    def vecsReciprocal(self):
        return self.__vecsReciprocal

    @property
    def vecsBasis(self):
        pass #TODO

    @vecsBasis.setter
    def vecsBasis(self, value):
        pass #TODO

    def getSpecialPoints(self, reciprocalBasis = False):
        """Return the list of userdefined and automatically generated special
        points that can be used to describe a path through the Brillouin zone
        (e.g. 'G' stands for the automatically generated gamma point)."""

        userdefinedSpecialPoints = self.__specialPoints.copy()
        automaticSpecialPoints = { }

        # === standardize the lattice vectors ===
        if self.getDimensionality() >= 1:
            vec1 = self.__vecsReciprocal[0]
            if np.vdot(vec1,[1,0]) < 0: vec1 *= -1

        if self.getDimensionality() >= 2:
            vec2 = self.__vecsReciprocal[1]
            if np.vdot(vec1,vec2) < 0: vec2 *= -1
            if np.arctan2(vec1[1],vec1[0]) < np.arctan2(vec2[1],vec2[0]):
                vec1, vec2 = vec2, vec1

        # === calculate special points ===
        # --- special points for 0D and higher dimensions ---
        automaticSpecialPoints['G'] = [0, 0]

        # --- special points for 1D and higher dimensions ---
        if self.getDimensionality() >= 1:
            automaticSpecialPoints['X'] = vec1/2
            automaticSpecialPoints['-X'] = -automaticSpecialPoints['X']

        # --- special points for 2D ---
        if self.getDimensionality() >= 2:
            automaticSpecialPoints['Y'] = vec2/2
            automaticSpecialPoints['-Y'] = -automaticSpecialPoints['Y']
            automaticSpecialPoints['Z'] = (vec2-vec1)/2
            automaticSpecialPoints['-Z'] = -automaticSpecialPoints['Z']
            automaticSpecialPoints['A'] = self._calcCircumcenter(2*automaticSpecialPoints['X'],2*automaticSpecialPoints['Y'])
            automaticSpecialPoints['-A'] = -automaticSpecialPoints['A']
            automaticSpecialPoints['B'] = self._calcCircumcenter(2*automaticSpecialPoints['Y'],2*automaticSpecialPoints['Z'])
            automaticSpecialPoints['-B'] = -automaticSpecialPoints['B']
            automaticSpecialPoints['C'] = self._calcCircumcenter(2*automaticSpecialPoints['Z'],2*automaticSpecialPoints['-X'])
            automaticSpecialPoints['-C'] = -automaticSpecialPoints['C']

        # === explicit lattice vector dependency? ===
        if self.getDimensionality() != 0:
            if self.getDimensionality() == 1:
                normal = self.__vecsReciprocal[0].copy()[::-1]
                normal[1] *= -1
                trafo = np.array([self.__vecsReciprocal[0],normal]).T
            if self.getDimensionality() == 2:
                trafo = np.array([self.__vecsReciprocal[0],self.__vecsReciprocal[1]]).T

            # get rid of the explicit lattice vector dependency
            if reciprocalBasis:
                trafo = np.linalg.inv(trafo)
                for k in iter(automaticSpecialPoints.keys()):
                    automaticSpecialPoints[k] = np.dot(trafo,automaticSpecialPoints[k])

            # introduce the explicit lattice vector dependency
            if not reciprocalBasis:
                for k in iter(userdefinedSpecialPoints.keys()):
                    userdefinedSpecialPoints[k] = np.dot(trafo,userdefinedSpecialPoints[k])

        # user-defined points override automatic ones with the same label
        for k in iter(userdefinedSpecialPoints.keys()):
            automaticSpecialPoints[k] = userdefinedSpecialPoints[k]

        return automaticSpecialPoints

    def addSpecialPoint(self,label,pos):
        """Add a special point."""
        self.__specialPoints[label] = pos

    def addLatticevector(self,vector):
        """Add a lattice vector and calculate the reciprocal vectors."""

        # === add lattice vector ===
        # validation of vector shape
        vector = np.array(vector)
        if vector.shape != (2,):
            raise Exception("Lattice vectors have to be 2D vectors.")

        # append vector to the array of lattice vectors
        if self.__vecsLattice.shape[0] == 0:
            self.__vecsLattice = np.array([vector], dtype=float)
        else:
            self.__vecsLattice = np.append(self.__vecsLattice,[vector], axis=0)

        # validation of lattice vector number
        if self.__vecsLattice.shape[0] > 2:
            raise Exception("There must be at most 2 lattice vectors.")

        self.__vecsReciprocal = self.getReciprocalVectors()

    def addBasisvector(self,vector):
        """Add a basis vector."""

        # === add basis vector ===
        # validation of vector shape
        vector = np.array(vector)
        if vector.shape != (2,):
            raise Exception("Basis vectors have to be 2D vectors.")

        # append vector to the array of basis vectors
        if self.__vecsBasis.shape[0] == 0:
            self.__vecsBasis = np.array([vector], dtype=float)
            self.__idxBasis = np.array([0])
            self.__idxSub = np.array([0])
        else:
            self.__vecsBasis = np.append(self.__vecsBasis,[vector], axis=0)
            self.__idxBasis = np.append(self.__idxBasis,self.__idxBasis[-1]+1)
            self.__idxSub = np.append(self.__idxSub,self.__idxSub[-1]+1)

    def _calcCircumcenter(self,vectorB, vectorC):
        """See http://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates."""
        D = 2*(vectorC[1]*vectorB[0]-vectorB[1]*vectorC[0])
        x = (vectorC[1]*np.vdot(vectorB,vectorB)-vectorB[1]*np.vdot(vectorC,vectorC))/D
        y = (vectorB[0]*np.vdot(vectorC,vectorC)-vectorC[0]*np.vdot(vectorB,vectorB))/D
        return np.array([x,y])

    def getKvectorsZone(self, resolution, dilation = True):
        """Calculate a matrix that contains all the kvectors of the Brillouin zone.

        kvectors = getKvectorsZone(resolution, dilation = True)
        kvectors[idxX, idxY, idxCoordinate]"""

        if self.__vecsReciprocal.shape[0] == 0:
            raise Exception("The 0D Brillouin zone is just a point. Use kvecs=None in System.solve instead.")

        elif self.__vecsReciprocal.shape[0] == 1:
            # === 1D Brillouin zone ===
            pos = self.__vecsReciprocal[0]/2
            positions = np.transpose([np.linspace(-pos[0],pos[0],resolution,endpoint=False),
                np.linspace(-pos[1],pos[1],resolution,endpoint=False)])
            # dilate by one step on each side; the extra points are masked
            step = positions[1]-positions[0]
            positions = np.array([positions[0]-step]+positions.tolist()+[positions[-1]+step])
            positionsMask = np.ones(positions.shape[:-1],dtype=bool)
            positionsMask[1:-1] = False
            positions = Kvectors(positions, mask = positionsMask)

        else:
            # === 2D Brillouin zone ===
            # reciprocal positions (contains the boundaries of the desired BZ)
            matTrafo = np.array([self.__vecsReciprocal[0], self.__vecsReciprocal[1]]).T
            reciprocalpositions = np.empty((3*3,2))
            for n,[x,y] in enumerate(itertools.product([0,-1,1],[0,-1,1])):
                reciprocalpositions[n] = np.dot(matTrafo, [x,y])

            # calculate "radius" of the BZ (the resulting BZ will be too large; the areas
            # which are not relevant for the desired BZ will be masked)
            radius = np.max(np.sqrt(np.sum(self.__vecsReciprocal**2,axis=-1)))

            # generate a matrix [IdxX, IdxY, Coord] that stores the positions inside the too large BZ
            positions=np.mgrid[-radius:radius:2j*resolution, -radius:radius:2j*resolution,].transpose(1,2,0)

            # calculate the distances of the matrix points from the reciprocal positions of the desired BZ
            distances = np.tile(positions, (reciprocalpositions.shape[0],1,1,1))
            distances -= reciprocalpositions[:,None,None,:]
            distances = np.sqrt(np.sum(distances**2,axis=-1))

            # --- mask all points that are not close to the central position ---
            positionsMask = np.argmin(distances,axis=0) > 0

            # slice the matrices (crop renamed from "slice", which shadowed the builtin)
            si, se = np.where(~positionsMask)
            crop = np.s_[si.min()-1:si.max() + 2, se.min()-1:se.max() + 2] # TODO why not "si.min():si.max() + 1, se.min():se.max() + 1"?
            positions = Kvectors(positions[crop], mask = positionsMask[crop])

        return positions

    def getKvectorsBox(self, resolution):
        """Calculate a rectangular box of kvectors that covers the Brillouin zone."""

        if self.__vecsReciprocal.shape[0] == 0:
            # === 0D Brillouin box ===
            positions = Kvectors([[[0,0]]])

        elif self.__vecsReciprocal.shape[0] == 1:
            # === 1D Brillouin box ===
            l1 = np.linalg.norm(self.__vecsReciprocal[0])
            x,step = np.linspace(0, l1, resolution,endpoint=False,retstep=True)
            x = np.array([x[0]-step]+x.tolist()+[x[-1]+step])
            y = np.zeros_like(x)
            positions=np.transpose([x,y],(1,0))
            # rotate the box so that it is aligned with the reciprocal vector
            a = -np.arctan2(self.__vecsReciprocal[0,1],self.__vecsReciprocal[0,0])
            matRotate = np.array([[np.cos(a),np.sin(a)],[-np.sin(a),np.cos(a)]]).T
            positions = np.dot(positions,matRotate)
            positionsMask = np.ones(positions.shape[:-1],dtype=bool)
            positionsMask[1:-1] = False
            positions = Kvectors(positions, mask = positionsMask)

        else:
            # === 2D Brillouin box ===
            l1 = np.linalg.norm(self.__vecsReciprocal[0])
            l2 = np.linalg.norm(self.__vecsReciprocal[1])
            angle = np.abs(np.arccos(np.dot(self.__vecsReciprocal[0],self.__vecsReciprocal[1])/(l1*l2)))
            l2*=np.sin(angle)
            x,step = np.linspace(0, l1, resolution,endpoint=False,retstep=True)
            x = np.array([x[0]-step]+x.tolist()+[x[-1]+step])
            y,step = np.linspace(0, l2, resolution,endpoint=False,retstep=True)
            y = np.array([y[0]-step]+y.tolist()+[y[-1]+step])
            positions=np.transpose(np.meshgrid(x, y),(2,1,0))
            a = -np.arctan2(self.__vecsReciprocal[0,1],self.__vecsReciprocal[0,0])
            matRotate = np.array([[np.cos(a),np.sin(a)],[-np.sin(a),np.cos(a)]]).T
            positions = np.dot(positions,matRotate)
            positionsMask = np.ones(positions.shape[:-1],dtype=bool)
            positionsMask[1:-1,1:-1] = False
            positions = Kvectors(positions, mask = positionsMask)

        return positions

    def getKvectorsRhomboid(self, resolution):
        """Calculate a rhomboid of kvectors spanned by the reciprocal vectors."""

        if self.__vecsReciprocal.shape[0] == 0:
            # 0d BZ
            positions = Kvectors([[[0,0]]])
        elif self.__vecsReciprocal.shape[0] == 1:
            # 1d BZ
            positions = self.__vecsReciprocal[0][None,:]*np.linspace(0, 1, resolution,endpoint=False)[:,None]
            positions = Kvectors(positions)
        else:
            # 2d BZ
            positions1 = self.__vecsReciprocal[0][None,:]*np.linspace(0, 1, resolution,endpoint=False)[:,None]
            positions2 = self.__vecsReciprocal[1][None,:]*np.linspace(0, 1, resolution,endpoint=False)[:,None]
            positions = positions2[:,None,:]+positions1[None,:,:]
            positions = Kvectors(positions)

        return positions

    def getKvectorsPath(self, resolution, pointlabels=None, points=None):
        """Calculate an array that contains the kvectors of a path through the Brillouin zone

        kvectors, length = getKvectorsPath(resolution, pointlabels=["G","X"])
        kvectors[idxPosition, idxCoordinate]"""

        if pointlabels is not None:
            specialPoints = self.getSpecialPoints()
            points = np.array([specialPoints[p] for p in pointlabels])
        elif points is not None:
            points = np.array(points)
        else:
            points = np.array(["G","G"])

        numPoints = points.shape[0]

        # path through the points
        stepsize = np.sum(np.sqrt(np.sum(np.diff(points,axis=0)**2,axis=-1)))/resolution

        positions = [None]*(numPoints-1)
        for n in range(1,numPoints):
            start = points[n-1]
            end = points[n]

            if stepsize == 0:
                steps = 1
            else:
                steps = max(int(np.round(np.linalg.norm(end-start)/stepsize)),1)

            newpos = np.transpose([np.linspace(start[0],end[0],steps,endpoint=False),
                np.linspace(start[1],end[1],steps,endpoint=False)])

            if n == 1: # first round: prepend one extra (masked) point
                step = newpos[1]-newpos[0]
                positions[n-1] = np.array([newpos[0]-step]+newpos.tolist())
            elif n == numPoints-1: # last round: append one extra (masked) point
                step = newpos[1]-newpos[0]
                positions[n-1] = np.array(newpos.tolist()+[newpos[-1]+step])
            else:
                positions[n-1] = newpos

        positions = np.vstack(positions)

        # save the labels and positions of special points
        pos = positions.copy()
        specialpoints_idx = []
        for p in points:
            idx = np.nanargmin(np.sum((pos-p)**2,axis=-1))
            specialpoints_idx.append(idx)
            # blank the found position so the same index is not matched twice
            pos[:,0][idx] = np.nan
            pos[:,1][idx] = np.nan
        specialpoints_labels = pointlabels

        # mask
        positionsMask = np.ones(positions.shape[:-1],dtype=bool)
        positionsMask[1:-1] = False

        return Kvectors(positions, positionsMask, specialpoints_idx, specialpoints_labels)

    def getPositions(self, cutoff):
        """Generate all positions from the lattice vectors using [0,0] as the basis vector.

        positions = getPositions(cutoff)
        positions[idxPosition, idxCoordinate]"""

        # value that is added to the cutoff to be on the safe side
        numSubs = self.numSublattices()
        pos = np.tile(self.__vecsBasis, (numSubs,1,1))
        pos -= pos.transpose(1,0,2)
        safetyregion = np.max(np.sqrt(np.sum(pos**2,axis=-1)))

        # array that will contain all positions
        positions = []

        # --- first shift (do it only if two lattice vectors exist) ---
        shiftidx1 = 0
        boolLoop1 = True
        while boolLoop1:
            if self.__vecsLattice.shape[0] >= 2:
                shiftedpos1 = shiftidx1*self.__vecsLattice[1]

                # substract the other lattice vector to be as central as possible inside the cutoff region
                shiftedpos1 -= np.round(np.vdot(shiftedpos1,self.__vecsLattice[0])/
                    np.linalg.norm(self.__vecsLattice[0])**2 )*self.__vecsLattice[0]

                if shiftidx1 < 0: shiftidx1 -= 1
                else: shiftidx1 += 1

                # change looping direction / break if the shift is larger than a cutoff
                if np.linalg.norm(shiftedpos1) > cutoff+safetyregion:
                    if shiftidx1 > 0:
                        shiftidx1 = -1
                        continue
                    else:
                        break
            else:
                shiftedpos1 = np.array([0,0])
                boolLoop1 = False

            # --- second shift (do it only if at least one lattice vector exists) ---
            shiftidx0 = 0
            boolLoop0 = True
            while boolLoop0:
                if self.__vecsLattice.shape[0] >= 1:
                    shiftedpos0 = shiftidx0*self.__vecsLattice[0]

                    # add together all shifts
                    shiftedpos = shiftedpos1+shiftedpos0

                    if shiftidx0 < 0: shiftidx0 -= 1
                    else: shiftidx0 += 1

                    # change looping direction / break if the sum of shifts is larger than a cutoff
                    if np.linalg.norm(shiftedpos) > cutoff+safetyregion:
                        if shiftidx0 > 0:
                            shiftidx0 = -1
                            continue
                        else:
                            break
                else:
                    shiftedpos = np.array([0,0])
                    boolLoop0 = False

                # append the sum of shifts to the array of positions
                positions.append(shiftedpos)

        return np.array(positions,dtype=float)

    def getGeometry(self, cutoff):
        """Generate all positions from the lattice vectors using all the basis vectors.

        geometry = getGeometry(cutoff)
        geometry[idxSublattice, idxPosition, idxCoordinate]"""

        numSubs = self.numSublattices()

        # === creation of all positions ===
        positions = self.getPositions(cutoff)
        positionsAll = np.tile(positions, (numSubs,1,1)) + self.__vecsBasis[:,None]
        return positionsAll

    def getVecsLattice(self):
        """Get array of lattice vectors"""
        return self.__vecsLattice

    def getVecsBasis(self):
        """Get array of basis vectors"""
        return self.__vecsBasis

    def getIdxsBasis(self):
        """Get array of basis indices"""
        return self.__idxBasis

    def getIdxsSub(self):
        """Get array of sub lattice indices"""
        return self.__idxSub

    def getNumLattice(self):
        """Get length of array of lattice vectors"""
        return len(self.__vecsLattice)

    def getNumBasis(self):
        """Get length of array of basis vectors"""
        return len(self.__vecsBasis)

    def makeFiniteCircle(self, cutoff, center=[0,0]):
        """Generate a finite circular lattice.

        makeFiniteCircle(radius, center=[x,Y])"""

        numSubs = self.numSublattices()
        positions = self.getPositions(cutoff)
        positionsAll = (np.tile(positions, (numSubs,1,1)) + self.__vecsBasis[:,None]).reshape(-1,2)

        # save which sublattice corresponds to which position
        self.__idxSub = (np.zeros((numSubs,len(positions)),dtype=int) + self.__idxSub[:,None]).reshape(-1)

        # masking
        positionsAllAbs = np.sqrt(np.sum((positionsAll-center)**2,axis=-1))
        positionsAllMask = (positionsAllAbs > cutoff)
        positionsAll = positionsAll[~positionsAllMask]
        self.__idxSub = self.__idxSub[~positionsAllMask]

        # save the finite system as basisvectors
        self.__vecsLattice = np.array([])
        self.__vecsReciprocal = np.array([])
        self.__vecsBasis = positionsAll
        self.__idxBasis = np.arange(len(self.__vecsBasis))

    def makeFiniteRectangle(self, cutoffX, cutoffY, center=[0,0]):
        """Generate a finite rectangular lattice.

        makeFiniteRectangle(2*width, 2*height, center=[x,y])"""

        numSubs = self.numSublattices()
        positions = self.getPositions(np.sqrt(cutoffX**2+cutoffY**2))
        positionsAll = (np.tile(positions, (numSubs,1,1)) + self.__vecsBasis[:,None]).reshape(-1,2)

        # save which sublattice corresponds to which position
        self.__idxSub = (np.zeros((numSubs,len(positions)),dtype=int) + self.__idxSub[:,None]).reshape(-1)

        # masking
        positionsAllMask = (np.abs(positionsAll[:,0]-center[0]) > cutoffX) | \
            (np.abs(positionsAll[:,1]-center[1]) > cutoffY)
        positionsAll = positionsAll[~positionsAllMask]
        self.__idxSub = self.__idxSub[~positionsAllMask]

        # save the finite system as basisvectors
        self.__vecsLattice = np.array([])
        self.__vecsReciprocal = np.array([])
        self.__vecsBasis = positionsAll
        self.__idxBasis = np.arange(len(self.__vecsBasis))

    def makeFiniteAlongdirection(self, idxVecLattice, repetitions):
        """Make the basis finite in the direction of a lattice vector.

        makeFiniteAlongdirection(idxVecLattice, repetitions)"""

        numLatticevectors = self.__vecsLattice.shape[0]
        r = np.ones(numLatticevectors)
        r[idxVecLattice] = repetitions
        f = np.zeros(numLatticevectors, dtype=bool)
        f[idxVecLattice] = True
        self.enlargeBasis(r,f)

    def clipFiniteRectangle(self, cutoffX = np.inf, cutoffY = np.inf, center=[0,0]):
        """Clip basis in shape of a rectangle.

        clipFiniteRectangle(2*width, 2*height, center=[x,y])"""

        # masking
        basisMask = (np.abs(self.__vecsBasis[:,0]-center[0]) > cutoffX) | \
            (np.abs(self.__vecsBasis[:,1]-center[1]) > cutoffY)
        self.__vecsBasis = self.__vecsBasis[~basisMask]
        self.__idxSub = self.__idxSub[~basisMask]
        self.__idxBasis = np.arange(len(self.__vecsBasis))

    def enlargeBasis(self, repetitions, makefinite=False):
        """Enlarge the basis (and make it finite if desired) in the direction of the lattice vectors.

        enlargeBasis(repetitions, makefinite)"""

        numLatticevectors = self.__vecsLattice.shape[0]
        if type(repetitions) is int:
            repetitions = np.ones(numLatticevectors)*repetitions
        if type(makefinite) is bool:
            makefinite = np.ones(numLatticevectors,dtype=bool)*makefinite

        # save new basis vectors
        for idxVecLattice, rep in enumerate(repetitions):
            numSubs = self.__vecsBasis.shape[0]
            positions = np.arange(rep)[:,None]*self.__vecsLattice[idxVecLattice][None,:]
            positionsAll = (np.tile(positions, (numSubs,1,1)) + self.__vecsBasis[:,None]).reshape(-1,2)
            self.__vecsBasis = positionsAll

            # save which sublattice corresponds to which position
            self.__idxSub = (np.zeros((numSubs,len(positions)),dtype=int) + self.__idxSub[:,None]).reshape(-1)

            # rescale lattice vectors
            self.__vecsLattice[idxVecLattice] *= rep

        self.__idxBasis = np.arange(len(self.__vecsBasis))

        # remove lattice vectors if desired
        boolarr = np.ones(self.__vecsLattice.shape[0],dtype=bool)
        boolarr[np.array(makefinite)] = False
        self.__vecsLattice = self.__vecsLattice[boolarr]

        # generate new reciprocal vectors
        self.__vecsReciprocal = self.getReciprocalVectors()

    def addRandomVacanciesByDensity(self, density, fixed = None):
        """Randomly remove basis vectors (useful for finite systems or large unit cells).

        The parameter `density` determines the density of vacancies. The parameter
        `fixed` specify a lattice site which must not be removed.
        """

        numLeft = int(len(self.__vecsBasis) * (1 - density))
        for n in range(10000):
            idxarray = np.arange(len(self.__vecsBasis))
            np.random.shuffle(idxarray)
            if fixed is None or fixed in self.__idxBasis[idxarray][:numLeft]:
                break
        else:
            raise Exception("Unable to remove lattice sites.")

        self.__vecsBasis = self.__vecsBasis[idxarray][:numLeft]
        self.__idxBasis = self.__idxBasis[idxarray][:numLeft]
        self.__idxSub = self.__idxSub[idxarray][:numLeft]

    def addRandomVacanciesByProbability(self, probability, fixed = None):
        """Randomly remove basis vectors (useful for finite systems or large unit cells).

        The parameter `probability` determines the removal probability per site. The
        parameter `fixed` specify a lattice site which must not be removed.
        """

        for n in range(10000):
            boolarray = np.random.rand(self.numSublattices()) > probability
            if fixed is None or fixed in self.__idxBasis[boolarray]:
                break
        else:
            raise Exception("Unable to remove lattice sites.")

        self.__vecsBasis = self.__vecsBasis[boolarray]
        self.__idxBasis = self.__idxBasis[boolarray]
        self.__idxSub = self.__idxSub[boolarray]

    def addRandomShifts(self, standarddev):
        """Randomly shift lattice sites"""

        self.__vecsBasis += np.random.normal(scale=standarddev,size=self.__vecsBasis.shape)

        # bring basis vectors back into unit cell, the random shifts might have
        # brought the basis vectors outside the cell
        for idx in range(len(self.__vecsBasis)):
            for vecLattice in self.__vecsLattice:
                # projection into the direction of the lattice vector
                proj = np.vdot(self.__vecsBasis[idx], vecLattice)/np.linalg.norm(vecLattice)

                # subtract lattice vectors
                self.__vecsBasis[idx] -= np.floor(proj/np.linalg.norm(vecLattice)) * vecLattice

    def numSublattices(self):
        """Returns the number of sublattices"""
        return self.__vecsBasis.shape[0]

    def getDimensionality(self):
        """Returns the number of lattice vectors (number of periodic directions)"""
        return self.__vecsLattice.shape[0]

    def getReciprocalVectors(self):
        """Returns the reciprocal lattice vectors (and saves them internally)."""
        dim = self.getDimensionality()
        if dim == 0:
            return np.array([])
        elif dim == 1:
            return np.array([
                2*np.pi*self.__vecsLattice[0]/np.linalg.norm(self.__vecsLattice[0])**2
                ])
        elif dim == 2:
            vecs = np.array([
                np.dot(np.array([[0,1],[-1,0]]),self.__vecsLattice[1]),
                np.dot(np.array([[0,-1],[1,0]]),self.__vecsLattice[0])
                ],dtype=float)
            vecs[0] = 2*np.pi*vecs[0]/ (np.vdot(self.__vecsLattice[0], vecs[0]))
            vecs[1] = 2*np.pi*vecs[1]/ (np.vdot(self.__vecsLattice[1], vecs[1]))
            return vecs
        else:
            raise Exception("Lattices with more than 2 lattice vectors are not supported")

    def plot(self, filename=None,show=True,cutoff=10):
        """Plot the lattice."""
        import matplotlib.pyplot as plt

        fig = plt.gcf()
        for p,b in zip(self.getGeometry(cutoff),self.__vecsBasis):
            line, = plt.plot(p[:,0],p[:,1], 'o',ms=4)
            fig.gca().add_artist(plt.Circle(b,cutoff, fill = False , ec=line.get_color(),alpha=0.5,lw=1))
            plt.plot(b[0],b[1], 'kx',ms=7,mew=1)

        # plt.axes() without arguments is deprecated/removed; gca() is equivalent here
        plt.gca().set_aspect('equal')
        plt.xlim(-1.5*cutoff,1.5*cutoff)
        plt.ylim(-1.5*cutoff,1.5*cutoff)
        if filename is not None:
            plt.savefig(filename)
        if show:
            plt.show()

    def getDisplacements(self, cutoff):
        """Create a Displacements object that contains all vectors from the central position
        of a sublattice to all positions of another one."""

        # positions generated from the lattice vectors
        positions = self.getPositions(cutoff)
        sorter = np.argsort(np.sum(positions**2,axis=-1))
        positions = positions[sorter]

        # shifts given by the basisvectors
        shifts = self.__vecsBasis

        # === numbers ===
        # maximal number of links between the central position of a sublattice and all positions of another one
        numLinks = positions.shape[0]

        # number of sublattices
        numSubs = shifts.shape[0]

        # === creation of the distance matrix ===
        # array of central positions [Sub, Coord] that will be repeated to create the matrix matDeltaR
        positionsCentral = shifts

        # array of all positions [Sub, Link, Coord] that will be repeated to create the matrix matDeltaR
        positionsAll = np.tile(positions, (numSubs,1,1)) + positionsCentral[:,None]

        # creation of the matrix matDeltaR [Sub1, Sub2, Link, Coord]
        matPositionsCentral = np.tile(positionsCentral, (numLinks,numSubs, 1,1)).transpose(2,1,0,3)
        matPositionsAll = np.tile(positionsAll, (numSubs,1,1,1))
        matDeltaR = matPositionsAll-matPositionsCentral

        # masking of the matrix matDeltaR [Sub1, Sub2, Link, Coord]
        matDeltaRAbs = np.sqrt(np.sum(matDeltaR**2,axis=-1))
        matDeltaRMask = (matDeltaRAbs > cutoff) | (matDeltaRAbs < self.__tol)

        unnecessaryLinks = np.all(matDeltaRMask,axis=(0,1))

        return Displacements(matDeltaR[:, :, ~unnecessaryLinks],
            positions[~unnecessaryLinks],
            matDeltaRMask[:, :, ~unnecessaryLinks],
            self.__idxSub)

    def __str__(self):
        return str({'vecsLattice': self.__vecsLattice, 'vecsBasis': self.__vecsBasis})
{ "alphanum_fraction": 0.6054194637, "author": null, "avg_line_length": 39.6281337047, "converted": null, "ext": "py", "file": null, "hexsha": "eaa97c9cd1b48d772d2a8e83836abe3955cebb1a", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 12, "max_forks_repo_forks_event_max_datetime": "2022-02-16T06:55:41.000Z", "max_forks_repo_forks_event_min_datetime": "2017-09-28T04:14:03.000Z", "max_forks_repo_head_hexsha": "b74b688afc2b15b20ec1a8ebcf72ba8699b6bf96", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sharkdp/bandstructure", "max_forks_repo_path": "bandstructure/lattice/lattice.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "b74b688afc2b15b20ec1a8ebcf72ba8699b6bf96", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sharkdp/bandstructure", "max_issues_repo_path": "bandstructure/lattice/lattice.py", "max_line_length": 148, "max_stars_count": 7, "max_stars_repo_head_hexsha": "b74b688afc2b15b20ec1a8ebcf72ba8699b6bf96", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sharkdp/bandstructure", "max_stars_repo_path": "bandstructure/lattice/lattice.py", "max_stars_repo_stars_event_max_datetime": "2021-06-08T07:43:16.000Z", "max_stars_repo_stars_event_min_datetime": "2016-12-23T11:19:35.000Z", "num_tokens": 7097, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 28453 }
#  Created by Martin.cz
#  Copyright (c) Martin Strohalm. All rights reserved.

import numpy

# comparison with tolerance

def equals(v1, v2, epsilon):
    """
    Returns True if difference between given values is less then tolerance.
    
    Args:
        v1: float
            Value one.
        
        v2: float
            Value two
        
        epsilon: float
            Max allowed difference.
    
    Returns:
        bool
            True if values equal, False otherwise.
    """
    
    return abs(v1-v2) <= epsilon


def between(v, min_v, max_v, epsilon):
    """
    Returns True if given value is equal or between minimum and maximum by
    specified tolerance.
    
    Args:
        v: float
            Value to check.
        
        min_v: float
            Minimum value.
        
        max_v: float
            Maximum value.
        
        epsilon: float
            Max allowed difference.
    
    Returns:
        bool
            True if value is equal or between, False otherwise.
    """
    
    return (min_v <= v <= max_v) or equals(v, min_v, epsilon) or equals(v, max_v, epsilon)


# angle calculations

def rads(angle):
    """
    Converts given angle from degrees to radians.
    
    Args:
        angle: float
            Angle in degrees.
    
    Returns:
        float
            Angle in radians.
    """
    
    return numpy.deg2rad(angle)


def degs(angle):
    """
    Converts given angle from radians to degrees.
    
    Args:
        angle: float
            Angle in radians.
    
    Returns:
        float
            Angle in degrees.
    """
    
    return numpy.rad2deg(angle)


def angle(p1, p2, p3):
    """
    Calculates angle between two lines.
    
    Args:
        p1: (float, float)
            First point as (x, y) coordinates.
        
        p2: (float, float)
            Origin point as (x, y) coordinates.
        
        p3: (float, float)
            Second point as (x, y) coordinates.
    
    Returns:
        float
            Angle in radians.
    """
    
    dx1 = p1[0] - p2[0]
    dy1 = p1[1] - p2[1]
    dx2 = p3[0] - p2[0]
    dy2 = p3[1] - p2[1]
    
    cross = dx1*dy2 - dy1*dx2
    dot = dx1*dx2 + dy1*dy2
    
    return numpy.arctan2(cross, dot)


def inclination(p1, p2):
    """
    Calculates inclination angle the line has with x-axis.
    
    Args:
        p1: (float, float)
            Origin point as (x, y) coordinates.
        
        p2: (float, float)
            Second point as (x, y) coordinates.
    
    Returns:
        float
            Angle in radians.
    """
    
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    
    return numpy.arctan2(dy, dx)


def bisector(p1, p2, p3):
    """
    Calculates bisector angle between two lines.
    
    Args:
        p1: (float, float)
            First point as (x, y) coordinates.
        
        p2: (float, float)
            Origin point as (x, y) coordinates.
        
        p3: (float, float)
            Second point as (x, y) coordinates.
    
    Returns:
        float
            Angle in radians.
    """
    
    a1 = inclination(p2, p1)
    a2 = inclination(p2, p3)
    
    return 0.5 * (a1 + a2)


def normal_angle(angle):
    """
    Normalizes given angle to be in interval of 0 (inclusive) to 2pi
    (exclusive). Note that Python's modulo returns a non-negative result
    for a positive modulus, even for negative input angles.
    
    Args:
        angle: float
            Angle in radians.
    
    Returns:
        float
            Normalized angle in radians.
    """
    
    return angle % (2*numpy.pi)


def angle_difference(start_angle, end_angle, clockwise):
    """
    Calculates difference between two given angles.
    
    Args:
        start_angle: float
            Start angle in radians.
        
        end_angle: float
            End angle in radians.
        
        clockwise: bool
            Specifies the direction of measurement.
    
    Returns:
        float
            Angle difference in radians.
    """
    
    start = normal_angle(start_angle)
    end = normal_angle(end_angle)
    
    diff = end-start if clockwise else start-end
    
    if diff < 0:
        diff = 2 * numpy.pi - abs(diff)
    
    if not clockwise:
        diff *= -1
    
    return diff


# points calculations

def distance(p1, p2):
    """
    Calculates Euclidean distance between two points.
    
    Args:
        p1: (float, float)
            Point 1 as (x, y) coordinates.
        
        p2: (float, float)
            Point 2 as (x, y) coordinates.
    
    Returns:
        float
            Distance between the points.
    """
    
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    sq = dx*dx + dy*dy
    
    return numpy.sqrt(sq) if sq > 0 else 0


def rotate(p, angle, center=(0, 0)):
    """
    Rotates given point around specified center.
    
    Args:
        p: (float, float)
            Point to rotate.
        
        angle: float
            Angle in radians.
        
        center: (float, float)
            Center of rotation.
    
    Returns:
        (float, float)
            Coordinates of the rotated point.
    """
    
    dx = p[0]-center[0]
    dy = p[1]-center[1]
    
    sin = numpy.sin(angle)
    cos = numpy.cos(angle)
    
    x = center[0] + dx * cos - dy * sin
    y = center[1] + dx * sin + dy * cos
    
    return x, y


def ray(c, angle, distance):
    """
    Calculates point coordinates with distance and angle from origin.
    
    Args:
        c: (float, float)
            Coordinates of the origin.
        
        angle: float
            Angle in radians.
        
        distance: float
            Distance from origin.
    
    Returns:
        (float, float)
            Coordinates of calculated point.
    """
    
    x = c[0] + distance * numpy.cos(angle)
    y = c[1] + distance * numpy.sin(angle)
    
    return x, y


def polygon_centroid(*points):
    """
    Calculates center point of given polygon.
    
    Args:
        points: ((float, float),)
            Collection of points as (x, y) coordinates.
    
    Returns:
        (float, float)
            Center point as (x, y) coordinates.
    """
    
    x = sum(p[0] for p in points) / len(points)
    y = sum(p[1] for p in points) / len(points)
    
    return x, y


def triangle_incircle(p1, p2, p3):
    """
    Calculates position and size of the biggest circle inscribed into
    specified triangle.
    
    Args:
        p1: (float, float)
            Point 1 as (x, y) coordinates.
        
        p2: (float, float)
            Point 2 as (x, y) coordinates.
        
        p3: (float, float)
            Point 3 as (x, y) coordinates.
    
    Returns:
        (float, float)
            Coordinates of the circle center.
        
        float
            Circle radius.
    """
    
    # calc sides
    a = distance(p1, p2)
    b = distance(p2, p3)
    c = distance(p3, p1)
    
    # calc radius (Heron's formula: area / semi-perimeter)
    p = 0.5 * sum((a, b, c))
    area = numpy.sqrt(p * (p - a) * (p - b) * (p - c))
    radius = area / p
    
    # calc center (intersection of two angle bisectors)
    bis1 = bisector(p2, p1, p3)
    bis2 = bisector(p1, p2, p3)
    c = intersect_rays(p1, bis1, p2, bis2)
    
    return c, radius


# position calculations

def is_inline(*points):
    """
    Checks whether all given points are on a single line. The points order
    is not important. Note that the check uses exact cross-product equality,
    so it is sensitive to floating-point rounding.
    
    Args:
        *points: ((float, float),)
            Collection of points to check as (x,y) coordinates.
    
    Returns:
        bool
            Returns True if all points are on a single line, False otherwise.
    """
    
    if len(points) < 3:
        return True
    
    x1, y1 = points[0]
    x2, y2 = points[1]
    
    dx1 = x1 - x2
    dy1 = y1 - y2
    
    for x2, y2 in points[2:]:
        
        dx2 = x1 - x2
        dy2 = y1 - y2
        
        if (dy1*dx2) != (dx1*dy2):
            return False
    
    return True


def is_point_in_circle(p, center, radius):
    """
    Checks whether given point is within specified circle.
    
    Args:
        p: (float, float)
            Coordinates of the point to test.
        
        center: (float, float)
            Coordinates of the circle center.
        
        radius: float
            Radius of the circle.
    
    Returns:
        bool
            Returns True if the point is inside the circle, False otherwise.
    """
    
    return distance(center, p) < radius


def is_point_in_triangle(p, p1, p2, p3):
    """
    Checks whether given point is within specified triangle.
    
    Args:
        p: (float, float)
            Coordinates of the point to test.
        
        p1: (float, float)
            Coordinates of the triangle point.
        
        p2: (float, float)
            Coordinates of the triangle point.
        
        p3: (float, float)
            Coordinates of the triangle point.
    
    Returns:
        bool
            Returns True if the point is inside the triangle, False otherwise.
    """
    
    xp, yp = p
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    
    c1 = (x2-x1)*(yp-y1)-(y2-y1)*(xp-x1)
    c2 = (x3-x2)*(yp-y2)-(y3-y2)*(xp-x2)
    c3 = (x1-x3)*(yp-y3)-(y1-y3)*(xp-x3)
    
    return (c1 < 0 and c2 < 0 and c3 < 0) or (c1 > 0 and c2 > 0 and c3 > 0)


def is_point_in_polygon(p, *polygon):
    """
    Checks whether given point is within specified polygon. This technique
    is based on the sum of all angles between given point and polygon
    vertices, which must be 2pi if point is inside. This assumption only
    works for non-overlapping polygons.
    
    Args:
        p: (float, float)
            Coordinates of the point to test.
        
        *polygon: ((float, float),)
            Collection of points defining the polygon as (x,y) coordinates.
    
    Returns:
        bool
            Returns True if the point lies inside the polygon, False otherwise.
    """
    
    total_angle = 0
    polygon = list(polygon) + [polygon[0]]
    
    p1 = polygon[0]
    for p2 in polygon[1:]:
        total_angle += abs(angle(p1, p, p2)) % numpy.pi
        p1 = p2
    
    return equals(total_angle, 2*numpy.pi, 1e-6)


def is_circle_in_circle(c1, r1, c2, r2):
    """
    Checks whether the first circle is inside the second circle.
    
    Args:
        c1: (float, float)
            Coordinates of the center of circle A.
        
        r1: float
            Radius of the circle A.
        
        c2: (float, float)
            Coordinates of the center of circle B.
        
        r2: float
            Radius of the circle B.
    
    Returns:
        bool
            Returns True if circle A lies fully inside circle B,
            False otherwise.
    """
    
    return distance(c1, c2) <= r2 - r1


# intersections calculations

def intersect_circles(c1, r1, c2, r2):
    """
    Calculates intersection points between two circles.
    
    Args:
        c1: (float, float)
            Coordinates of the center of circle A.
        
        r1: float
            Radius of the circle A.
        
        c2: (float, float)
            Coordinates of the center of circle B.
        
        r2: float
            Radius of the circle B.
    
    Returns:
        ((float, float), (float, float)) or ()
            Coordinates of the two intersection points. Returns an empty
            tuple if the circles do not intersect, one lies inside the
            other, or the circles are identical.
    """
    
    # calc distance
    dx = c2[0] - c1[0]
    dy = c2[1] - c1[1]
    dist = numpy.sqrt(dx*dx + dy*dy)
    
    # non intersecting
    if dist >= r1 + r2:
        return ()
    
    # one inside another
    if dist <= abs(r1-r2):
        return ()
    
    # same circles
    if dist == 0 and r1 == r2:
        return ()
    
    # calc intersections
    a = (r1**2 - r2**2 + dist**2) / (2*dist)
    n = r1**2 - a**2
    h = numpy.sqrt(n if n > 0 else 0)
    
    x = c1[0] + a*dx / dist
    y = c1[1] + a*dy / dist
    
    x1 = x + h*dy / dist
    y1 = y - h*dx / dist
    
    x2 = x - h*dy / dist
    y2 = y + h*dx / dist
    
    return (x1, y1), (x2, y2)


def intersect_lines(p1, p2, p3, p4):
    """
    Calculates intersection point between two lines defined as (p1, p2)
    and (p3, p4).
    
    Args:
        p1: (float, float)
            Point 1 as (x, y) coordinates.
        
        p2: (float, float)
            Point 2 as (x, y) coordinates.
        
        p3: (float, float)
            Point 3 as (x, y) coordinates.
        
        p4: (float, float)
            Point 4 as (x, y) coordinates.
    
    Returns:
        (float, float) or None
            Coordinates of the intersection point. Returns None if there is
            no intersection.
    """
    
    a_dx = p2[0] - p1[0]
    a_dy = p1[1] - p2[1]
    a_sq = p2[0]*p1[1] - p1[0]*p2[1]
    
    b_dx = p4[0] - p3[0]
    b_dy = p3[1] - p4[1]
    b_sq = p4[0]*p3[1] - p3[0]*p4[1]
    
    d = a_dy * b_dx - a_dx * b_dy
    dx = a_sq * b_dx - a_dx * b_sq
    dy = a_dy * b_sq - a_sq * b_dy
    
    if d == 0:
        return None
    
    return dx/d, dy/d


def intersect_rays(p1, a1, p2, a2):
    """
    Calculates intersection point between two lines defined as a point
    coordinates and angle.
    
    Args:
        p1: (float, float)
            First ray point as (x, y) coordinates.
        
        a1: float
            First ray angle in radians.
        
        p2: (float, float)
            Second ray point as (x, y) coordinates.
        
        a2: float
            Second ray angle in radians.
    
    Returns:
        (float, float) or None
            Coordinates of the intersection point. Returns None if there is
            no intersection.
    """
    
    l1 = (p1[0] + numpy.cos(a1), p1[1] + numpy.sin(a1))
    l2 = (p2[0] + numpy.cos(a2), p2[1] + numpy.sin(a2))
    
    return intersect_lines(p1, l1, p2, l2)


def intersect_line_ray(p1, p2, p3, a):
    """
    Calculates intersection point between a line given by two points and a
    ray given by a point and an angle.
    
    Args:
        p1: (float, float)
            Line point 1 as (x, y) coordinates.
        
        p2: (float, float)
            Line point 2 as (x, y) coordinates.
        
        p3: (float, float)
            Ray point as (x, y) coordinates.
        
        a: float
            Ray angle in radians.
    
    Returns:
        (float, float) or None
            Coordinates of the intersection point. Returns None if there is
            no intersection.
    """
    
    p4 = (p3[0] + numpy.cos(a), p3[1] + numpy.sin(a))
    
    return intersect_lines(p1, p2, p3, p4)
{ "alphanum_fraction": 0.5142696866, "author": null, "avg_line_length": 21.8058103976, "converted": null, "ext": "py", "file": null, "hexsha": "cc9973f62074bef567e5b0023d2295010a56f7c1", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2022-01-22T14:28:15.000Z", "max_forks_repo_forks_event_min_datetime": "2020-09-27T14:31:45.000Z", "max_forks_repo_head_hexsha": "a7f0c84fae0b21fe120204e798bd61cdab3a125d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "xxao/pero", "max_forks_repo_path": "pero/drawing/utils.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "a7f0c84fae0b21fe120204e798bd61cdab3a125d", "max_issues_repo_issues_event_max_datetime": "2022-01-21T16:18:48.000Z", "max_issues_repo_issues_event_min_datetime": "2021-12-29T00:46:44.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "xxao/pero", "max_issues_repo_path": "pero/drawing/utils.py", "max_line_length": 90, "max_stars_count": 13, "max_stars_repo_head_hexsha": "a7f0c84fae0b21fe120204e798bd61cdab3a125d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "xxao/pero", "max_stars_repo_path": "pero/drawing/utils.py", "max_stars_repo_stars_event_max_datetime": "2022-03-15T06:13:43.000Z", "max_stars_repo_stars_event_min_datetime": "2019-07-15T17:51:21.000Z", "num_tokens": 3880, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 14261 }
\documentclass{article} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{parskip} \usepackage{svg} \usepackage[utf8]{inputenc} \usepackage{helvet} \renewcommand{\familydefault}{\sfdefault} \usepackage{geometry} \usepackage[document]{ragged2e} \geometry{letterpaper, portrait, top=1in, bottom=1in, left=1.5in, right=1.5in} \title{Math 301} \begin{document} \section{Set Notation} \begin{align*} \mathbb{N} &= natural\ numbers = \{0, 1, 2 \cdots\} \\ \mathbb{Z} &= integers = \{ \cdots -2, -1, 0, 1, 2 \cdots \}\\ \mathbb{Q} &= rational\ numbers\ (can\ be\ expressed\ with\ fractions\ of\ two\ integers) \\ \mathbb{R} &= real\ numbers\ (rational\ numbers\ and\ irrational\ numbers) \end{align*} A is an element of B \begin{gather*} A \in B \end{gather*} A is a subset of b \begin{gather*} A \subseteq B \end{gather*} The cardinality of A \begin{gather*} |A| \end{gather*} Complement of A \begin{gather*} A^C \\ \overline{A} \end{gather*} The intersection of A and B \begin{gather*} A \cap B \end{gather*} The union of A and B \begin{gather*} A \cup B \end{gather*} The symmetric difference of A and B \begin{gather*} A \oplus B = (A \cup B) - (A \cap B) \end{gather*} \section{Cartesian Products} The Cartesian product of A and B \begin{gather*} A \times B = {(a, b) | a \in A, b \in B} \end{gather*} \begin{gather*} |A \times B| = |A| \times |B| \end{gather*} The power set of A is the set of all subsets of A. \begin{gather*} \mathcal{P}(A) \end{gather*} \begin{gather*} |\mathcal{P}(A)| = 2^{|A|} \end{gather*} \section{The Rule of Products} Given the number of possibilities for two independent events denoted $|A|$ and $|B|$, the number of possible combinations of both events is $|A| \times |B|$. You can also say "the number of ways to do $A$ \textbf{AND} $B$ is $|A| \times |B|$." 
\section{The Law of Addition} The basic law of addition \begin{gather*} |A| = |A_1| + |A_2| + \cdots + |A_n| = \sum_{k=1}^{n} |A_k| \end{gather*} Given the number of possibilities for two mutually exclusive events denoted $|A|$ and $|B|$, the sum of possible events is $|A| + |B|$. "The number of ways to do $A$ \textbf{OR} $B$ is $|A| + |B|$." Partition of a set \begin{gather*} A_1 \cup A_2 \cup A_3 \cup \cdots = A \\ A_i \cap A_j = \emptyset\ where\ i \neq j\ \end{gather*} Law of inclusion-exclusion for two sets \begin{gather*} |A_1 \cup A_2| = |A_1| + |A_2| - |A_1 \cap A_2| \end{gather*} \begin{gather*} \end{gather*} \section{Permutation} The total number of ordered sets of $k$ elements taken from a set of $n$ elements \begin{gather*} P(n,k) = \frac{n!}{(n-k)!} \end{gather*} \section{Combination} The total number of unordered sets of $k$ elements taken from a set of $n$ elements \begin{gather*} \binom{n}{k} = \frac{n!}{(n-k)!k!} \end{gather*} Binomial theorem---Allows you to expand $(x+y)^n$. 
\begin{gather*} (x + y)^n = \sum_{k=0}^{n} \binom{n}{k}x^{n-k}y^k \end{gather*} \section{Logical operator} Logical conjunction (AND) \begin{gather*} p \land q \\ \begin{tabular}{c|c|c} p & q & $p \land q$ \\ \hline 0 & 0 & 0 \\ 0 & 1 & 0 \\ 1 & 0 & 0 \\ 1 & 1 & 1 \end{tabular} \end{gather*} Logical disjunction (OR) \begin{gather*} p \lor q \\ \begin{tabular}{c|c|c} p & q & $p \lor q$ \\ \hline 0 & 0 & 0 \\ 0 & 1 & 1 \\ 1 & 0 & 1 \\ 1 & 1 & 1 \end{tabular} \end{gather*} Logical negation \begin{gather*} \neg p \\ \begin{tabular}{c|c} p & $\neg p$ \\ \hline 0 & 1 \\ 1 & 0 \end{tabular} \end{gather*} Conditional statement (if $p$ is true then $q$ is also true) \begin{gather*} p \rightarrow q \\ \begin{tabular}{c|c|c} p & q & $p \rightarrow q$ \\ \hline 0 & 0 & 1 \\ 0 & 1 & 1 \\ 1 & 0 & 0 \\ 1 & 1 & 1 \end{tabular} \end{gather*} Converse \begin{gather*} p \leftarrow q \\ or \\ q \rightarrow p \end{gather*} Contrapositive \begin{gather*} \neg q \rightarrow \neg p \\ \begin{tabular}{c|c|c} p & q & $\neg q \rightarrow \neg p$ \\ \hline 0 & 0 & 1 \\ 0 & 1 & 1 \\ 1 & 0 & 0 \\ 1 & 1 & 1 \end{tabular} \end{gather*} Biconditional (XNOR) \begin{gather*} p \leftrightarrow q \\ \begin{tabular}{c|c|c} p & q & $p \leftrightarrow q$ \\ \hline 0 & 0 & 1 \\ 0 & 1 & 0 \\ 1 & 0 & 0 \\ 1 & 1 & 1 \end{tabular} \end{gather*} Sheffer stroke (NAND) \begin{gather*} p | q \\ \begin{tabular}{c|c|c} p & q & $p | q$ \\ \hline 0 & 0 & 1 \\ 0 & 1 & 1 \\ 1 & 0 & 1 \\ 1 & 1 & 0 \end{tabular} \end{gather*} Order of precedence of propositions \begin{enumerate} \item Parentheses \item Negation \item Conjunction \item Disjunction \item Conditional statement \item Biconditional \end{enumerate} \section{Equivalence and implication} Tautology: An expression that is true in all cases Contradiction: An expression that is false for all cases Equivalence ($r \leftrightarrow s$ is a tautology) \begin{gather*} r \Leftrightarrow s \end{gather*} Implication (r implies s) \begin{gather*} r \Rightarrow s \end{gather*} 
\section{Laws of logic} Duality principle: Each law of logic can be used to derive a second law by switching the symbols $\land$ with $\lor$, $1$ with $0$ and visa versa. Commutative laws \begin{gather*} p \lor q \Leftrightarrow q \lor p \\ p \land q \Leftrightarrow q \land p \end{gather*} Associative laws \begin{gather*} (p \lor q) \lor r \Leftrightarrow p \lor (q \lor r) \\ (p \land q) \land r \Leftrightarrow p \land (q \land r) \end{gather*} Distributive laws \begin{gather*} p \land (q \lor r) \Leftrightarrow (p \land q) \lor (p \land r) \\ p \lor (q \land r) \Leftrightarrow (p \lor q) \land (p \lor r) \\ \end{gather*} Identity laws \begin{gather*} p \lor 0 \Leftrightarrow p \\ p \land 1 \Leftrightarrow p \end{gather*} Negation laws \begin{gather*} p \land \neg p \Leftrightarrow 0 \\ p \lor \neg p \Leftrightarrow 1 \end{gather*} Idempotent laws \begin{gather*} p \lor p \Leftrightarrow p \\ p \land p \Leftrightarrow p \end{gather*} Null laws \begin{gather*} p \land 0 \Leftrightarrow 0 \\ p \lor 1 \Leftrightarrow 1 \end{gather*} Absorption laws \begin{gather*} p \land (p \lor q) \Leftrightarrow p \\ p \lor (p \land q) \Leftrightarrow p \end{gather*} DeMorgan's laws \begin{gather*} \neg (p \lor q) \Leftrightarrow (\neg p) \land (\neg q) \\ \neg (p \land q) \Leftrightarrow (\neg p) \lor (\neg q) \end{gather*} Involution law \begin{gather*} \neg (\neg p) \Leftrightarrow p \end{gather*} Detachment \begin{gather*} (p \rightarrow q) \land p \Rightarrow q \end{gather*} Indirect reasoning \begin{gather*} (p \rightarrow q) \land \neg q \Rightarrow \neg p \end{gather*} Disjunctive addition \begin{gather*} p \Rightarrow (p \lor q) \end{gather*} Conjunctive simplification \begin{gather*} (p \land q) \Rightarrow p \\ (p \land q) \Rightarrow q \end{gather*} Disjunctive simplification \begin{gather*} (p \lor q) \land \neg p \Rightarrow q \\ (p \lor q) \land \neg q \Rightarrow p \end{gather*} Chain rule \begin{gather*} (p \rightarrow q) \land (q \rightarrow r) \Rightarrow (p 
\rightarrow r) \end{gather*} Conditional equivalence \begin{gather*} p \rightarrow q \Leftrightarrow \neg p \lor q \end{gather*} Biconditional equivalences \begin{gather*} (p \leftrightarrow q) \Leftrightarrow (p \rightarrow q) \land (q \rightarrow p) \Leftrightarrow (p \land q) \lor (\neg p \land \neg q) \end{gather*} Contrapositive \begin{gather*} (p \rightarrow q) \Leftrightarrow (\neg q \rightarrow \neg p) \end{gather*} \section{Propositions over a Universe} If $p$ is a proposition over $U$, the truth set of $p$ is $T_p = \{a \in U | p(a)\ is\ true\}$. \section{Mathematical induction} Mathematical induction is a way to prove a proposition over natural numbers. \begin{enumerate} \item Prove the basis of the statement, $P(n)$ where $n = 0$. \item Assume the statement is true for $n - 1$. Prove $P(n-1) \Rightarrow P(n)$. \end{enumerate} Stronk induction \begin{enumerate} \item Prove the bases of the statement, $P(m)$, for an arbitrary $m > 0$. \item Assume the statement is true for $k$ where $m \leq k < n$. Prove $P(k) \Rightarrow n$. \end{enumerate} \section{Quantifiers} The existential quantifier states that there exists an $n$ such that $p(n)$ is true. \begin{gather*} (\exists n)(p(n)) \end{gather*} The universal quantifier states that for all $n$ in $U$, $p(n)$ is true. \begin{gather*} (\forall n)(p(n)) \end{gather*} Negation of quantified propositions \begin{gather*} \neg ((\forall n)(p(n))) \Leftrightarrow (\exists n)(\neg p(n)) \\ \neg ((\exists n)(p(n))) \Leftrightarrow (\forall n)(\neg p(n)) \end{gather*} Multiple quantifiers of the same type can be arranged in any order, but mixed quantifiers cannot be exchanged. \section{Proofs for sets} You can prove set propositions using Venn diagrams, truth tables (true indicating that $x \in A$ or with definitions. To prove that $A \subseteq B$, show that $x \in A$ and $x \in B$. \\ To prove that $A = B$, show that $A \subseteq B$ and $B \subseteq A$. 
\section{Laws of set theory} Commutative laws \begin{gather*} A \cup B = B \cup A \\ A \cap B = B \cap A \end{gather*} Associative laws \begin{gather*} A \cup (B \cup C) = (A \cup B) \cup C \\ A \cap (B \cap C) = (A \cap B) \cap C \end{gather*} Distributive laws \begin{gather*} A \cap (B \cup C) = (A \cap B) \cup (A \cap C) \\ A \cup (B \cap C) = (A \cup B) \cap (A \cup C) \end{gather*} Identity laws \begin{gather*} A \cup \emptyset = A \\ A \cap U = A \end{gather*} Complement laws \begin{gather*} A \cup \overline{A} = U \\ A \cap \overline{A} = \emptyset \end{gather*} Idempotent laws \begin{gather*} A \cup A = A \\ A \cap A = A \end{gather*} Null laws \begin{gather*} A \cup U = U \\ A \cap \emptyset = \emptyset \end{gather*} Absorption laws \begin{gather*} A \cup (A \cap B) = A \\ A \cap (A \cup B) = A \end{gather*} DeMorgan's laws \begin{gather*} \overline{A \cup B} = \overline{A} \cap \overline{B} \\ \overline{A \cap B} = \overline{A} \cup \overline{B} \end{gather*} Involution law \begin{gather*} \overline{\overline{A}} = A \end{gather*} \section{Relations} Relation: Any subset of $A \times B$ Divides: Let $a,b \in \mathbb{Z}, a \neq 0$. $a | b$ ($a$ divides $b$) if and only if there exists an integer $k$ such that $ak = b$. Relation notation: If $s$ is a relation from set $A$ into set $B$, the fact that $(x, y) \in s$ can be written $xsy$. Composition of relations: Let $r$ be a relation from set $A$ into set $B$, and let $s$ be a relation from set $B$ into set $C$. The composition of $r$ with $s$, written $rs$, is the set of pairs $(a,c) \in A \times C$, where $(a, c) \in rs$ if and only if there exists $b \in B$ such that $(a,b) \in r$ and $(b,c) \in s$. Graphs can be used to visualize relations, by having the arrows pointing from vertex $a$ to vertex $b$ if $arb$. 
\section{Properties of relations} Reflexive relation: $ara$ for all $a \in A$ Antisymmetric relation: if $arb$ and $a \neq b$, then $bra$ is true Symmetric relation: if $arb$, then $bra$ is true Transitive relation: if $arb$ and $brc$, then $arc$ A partial ordering on $A$ is a relation on set $A$ that is reflexive, antisymmetric and transitive. A equivalence relation is a relation that is reflexive, symmetric and transitive. Hasse diagram: A graph can be used to represent a partial ordered set. The reflexive property is implied in every element and loops are not drawn. The antisymmetry property is described by putting the first element go below the second. By the transitive property, edges connecting from one element to a second element and another connecting the second to a third means a third connection can be made from the first to the third. Congruence modulo \begin{gather*} a = b \pmod{n} \Leftrightarrow n | (a - b) \end{gather*} Equivalence class (Set of all elements that are equal to a given element under a relation) \begin{gather*} a \in A,\ r\text{ is an equivalence relation}\\ c(a) = \{b \in A | arb\} \end{gather*} \section{Matrices of relations} Adjacency matrix: Let $A = \{a_1, a_2,..., a_m\}$ and $B = \{b_1, b_2,..., b_n\}$. Let $r$ be a relation from $A$ into $B$. Then $r$ can be represented by the $m \times n$ matrix $R$ defined by \begin{gather*} R_ij = \begin{cases} 1 & \text{if}\ a_i r b_j \\ 0 & \text{otherwise} \end{cases} \end{gather*} Composition as matrix multiplication: If $R_1$ and $R_2$ are the adjacency matrices of $r_1$ and $r_2$ respectively, then the product $R_1 R_2$ using Boolean arithmetic represents the composition $r_1 r_2$. \section{Transitive closure} Transitive closure: Let $A$ be a set and $r$ be a relation on $A$. The transitive closure of $r$, $r^+$, is the smallest transitive relation that contains $r$ as a subset. 
If $r$ is a relation on a set $A$ and $|A| = n$, then the transitive closure of $r$ is the union of the first $n$ powers of $r$. \begin{gather*} r^+ = r \cup r^2 \cup r^3 \cup \cdots \cup r^n \end{gather*} Matrix math can be used to find $R^+$. \begin{gather*} R^+ = R + R^2 + \cdots + R^n \end{gather*} \section{Functions} A function from set $A$ into set $B$ is a relation from $A$ into $B$ such that each element of $A$ is related to exactly one element of $B$. \begin{gather*} f: A \rightarrow B \end{gather*} Set $A$ is the domain and set $B$ is the codomain. If $f(a) = b$, the image of $a$ is b. Range: The union of all images of a function's domain. It is a subset of the codomain. A function can have the domain of a Cartesian product, in which case it is denoted $C: A \times B \rightarrow C$ defined by $C(a, b)$ \section{Properties of functions} Injective function (one-to-one function): Distinct elements in domain map to distinct elements in codomain \begin{gather*} \forall a, b \in A, \\ f(a) = f(b) \Rightarrow a = b\ and \\ a \neq b \Rightarrow f(a) \neq f(b) \end{gather*} Surjective function (onto function): Its range is equal to its codomain Bijective function (one-to-one, onto function): Is both injective and surjective. Countable set: A set that has the same cardinality as a subset of natural numbers. Pigeonhole principle: Let $f$ be a function from a finite set $X$ into a finite set $Y$. If $n \geq 1$ and $|X| > n|Y|$, then there exists an element of $Y$ that is the image under $f$ of at least $n + 1$ elements of $X$. \section{Function composition} Equality of functions \begin{gather*} function\ f = function\ g \Leftrightarrow (\forall x)_A(f(x) = g(x)) \end{gather*} Two functions with different domains cannot be equal, even if the functions have the same equation. Also, two functions can be equal even if they have different equations. 
Composition of functions \begin{gather*} g(f(x)) = (g \circ f)(x) \end{gather*} Associativity of functions \begin{gather*} h \circ (g \circ f) = (h \circ g) \circ f \end{gather*} Composition of injections and surjections \begin{itemize} \item If $f: A \rightarrow B$ and $g: B \rightarrow C$ are injections, then $g \circ f: A \rightarrow C$ is an injection. \item If $f: A \rightarrow B$ and $g: B \rightarrow C$ are surjections, then $g \circ f: A \rightarrow C$ is a surjection. \end{itemize} The identity function on $A$ is a function from $A$ onto $A$, such that \\ $(\forall a)_A(i(a) = a)$. Inverse function: Let $f: A \rightarrow B$ and $g: B \rightarrow A$. If $g \circ f = i_A$ and $f \circ g = i_B$, then $g = f^{-1}$. $f^{-1}$ exists if and only if $f$ is a bijection. \section{Recursion} Telescoping form: The expanded recursive form for $f(n)$ Iteration: Starting with the basis, $f(0)$, and working your way up to $f(n)$. Recursive definition of the binomial coefficient \begin{gather*} \binom{n}{0} = 1 \\ \binom{n}{n} = 1 \\ \binom{n}{k} = \binom{n-1}{k} + \binom{n-1}{k-1}, n > k > 0 \end{gather*} Recursive polynomial expression \begin{gather*} p(0) \in \mathbb{Z} \\ p(n) = p(n-1)x + a, a \in \mathbb{Z} \end{gather*} Fibonacci sequence \begin{gather*} F_0 = 1, F_1 = 1 \\ F_k = F_{k-2} + F_{k-1}, k \geq 2 \end{gather*} Closed form expression: An expression that does not have a runaway number of operations. For example, the number of operations in $\sum_{k=1}^{n} k$ grows indefinitely with $n$, but $(n(n+1)) / 2$ has 3 operations no matter what $n$ is. Sequence/discrete function: maps natural numbers into a certain set \section{Solving recurrence relations} Homogeneous recurrence relation: $S(k) + C_1 S(k-1) + \cdots + C_n S(k-n) = 0$ Characteristic equation: $a^n + C_1 a^{n-1} + \cdots + C_{n-1} a + C_n = 0$ Solving homogeneous finite order linear relations \begin{enumerate} \item Use the characteristic equation to solve for $a$, the roots. 
\item Write the general solution of the recurrence relation and replace $a_n$. The general solution is $S(k) = b_1 a_1^k + b_2 a_2^k + \cdots + b_n a_n^k$. If $a_j$ a double root, then $b_j a_j^k$ is replaced with $(b_{j0}+b_{j1} k)a_j^k$. \item Find $b_n$ using the given initial conditions $S(n)$ and substitute those into the general solution. \end{enumerate} Solving nonhomogeneous finite order linear relations \begin{enumerate} \item Write the associated homogeneous relation by changing the right-hand side to $0$ and solve for $a$. The solution for the associated homogeneous relation is $S^{(h)}(k)$. \item Use the table to find the form of $S^{(p)}(k)$, given the form of the right-hand side. \\ \begin{tabular}{c|c} Right-hand side, $f(x)$ & $S^{(p)}(k)$ \\ $q$ & $d$ \\ $q_0 + q_q k$ & $d_0 + d_1 k$ \\ $qa^k$ & $da^k$ \end{tabular} \item Substitute $S^{(p)}(k)$ into the recurrence relation to find the unknown coefficients. \item Add the homogeneous relation and the unknown coefficients, \\ $S^{(h)}(k) + S^{(k)}$ and use the initial conditions to solve for $b$. \end{enumerate} \section{Generating function} Generating function of a sequence $S$ with terms $S_n$ \begin{gather*} G(S;z) = \sum_{n=0}^{\infty} S_n z^n = S_0+S_1 z+S_2 z^2+S_3 z^3+\cdots \end{gather*} Solving a recurrence relation using generating functions To solve $S(n) - 2S(n-1) - 3S(n-2) = 0, n \geq 2$, with $S(0) = 3$ and $S(1) = 1$ \begin{enumerate} \item Translate the recurrence relation into an equation about generating functions. \\ Let $V(n) = S(n) - 2S (n - 1) - 3S (n - 2), n \geq 2$, with $V(0) = 0$ and $V(1) = 0$. \begin{equation*} G(V;z) = 0 + 0z +\sum_{n=2}^{\infty} (S(n) - 2S (n - 1) - 3S (n - 2)) z^n= 0 \end{equation*} \item Solve for the generating function of the unknown sequence, \\ $G(S;z) = \sum_{n=0}^{\infty} S_n z^n$. 
\begin{equation*} 0 =\sum_{n=2}^{\infty} {S(n) z^n-2} \left(\sum_{n=2}^{\infty} S(n-1) z^n\right)-3\left(\sum_{n=2}^{\infty} S(n-2) z^n\right) \end{equation*} The three sums can be written as \begin{equation*} \begin{split} \sum_{n=2}^{\infty} S_n z^n &=\sum_{n=0}^{\infty} S_n z^n - S(0)-S(1)z\\ &= G(S;z)-3-z \end{split} \end{equation*} \begin{equation*} \begin{split} \sum_{n=2}^{\infty} S(n-1) z^n &=z\left(\sum_{n=2}^{\infty} S(n-1) z^{n-1}\right)\\ & =z\left(\sum_{n=1}^{\infty} S(n) z^n\right)\\ & = z\left(\sum_{n=0}^{\infty} S(n) z^n-S(0)\right)\\ &= z(G(S;z)-3) \end{split} \end{equation*} \begin{equation*} \begin{split} \sum_{n=2}^{\infty} S(n-2) z^n & = z^2\left(\sum_{n=2}^{\infty} S(n-2) z^{n-2}\right)\\ & =z^2G(S;z) \end{split} \end{equation*} Therefore \begin{equation*} \begin{split} &(G(S;z)-3-z)-2z(G(S;z)-3)-3z^2G(S;z)=0\\ &G(S;z)=\frac{3-5z}{1-2z-3z^2} \end{split} \end{equation*} \item Determine the sequence whose generating function is the one from Step 2. Note that $S(n) = ba^n, G(S;z) = \frac{b}{1-az}$ \\ Apply partial fractions decomposition to get \begin{equation*} G(S;z)= \frac{1}{1-3z}+ \frac{2}{1+z} \end{equation*} \begin{equation*} S(n)=3^n + 2(-1)^n \end{equation*} \end{enumerate} \section{Strategies for proofs} \begin{itemize} \item Draw or write a few examples and notice patterns \item Find the extreme cases, such as an empty set and a set with the maximum cardinality \item Consider the contrapositive \item If the statement is one of the following, it's probably solved through induction: it has a "$n \in \mathbb{N}$" variable, it has a lot of numbers, and/or you can start with a basis case and build upon that with inductive reasoning \item If it's not solved using induction, try to approach a direct proof and then a proof by contradiction and decide which is the simplest one \item For complex proofs that branch out into multiple scenarios, break it down by explaining the various cases \item Read back the proof to make sure a classmate would 
understand it and that all variables are defined \end{itemize} \section{Graphs} Graph: $G = (V, E)$, where $V =$ nonempty set of vertices, and $E \subseteq V \times V$ Incident = connecting ("$X$ is incident to vertices $a$ and $b$") Adjacent vertices = Vertex pair connected by an edge The edges in an undirected graph have no direction, while the edges in a directed graph have direction. Multigraph: A graph that is permitted to have two or more edges connecting the same vertices. Complete undirected graph: Each vertices are connected to one another, denoted $K_n$. $K_n$ has ${n \choose 2}$ edges. Path length: The number of edges in an edge list. \begin{center} \resizebox{\textwidth}{!}{ \begin{tabular}{ c | c | c | c | c } & Open/Closed & A sequence of edges & Distinct edges & Distinct vertices \\ \hline Walk & Open or closed & \checkmark & & \\ Trail & Open or closed & \checkmark & & \\ Circuit & Closed & \checkmark & \checkmark & \\ Path & Open or closed & \checkmark & \checkmark & \checkmark\\ Cycle & Closed & \checkmark & \checkmark & \checkmark \end{tabular} } \end{center} Degree \begin{itemize} \item The degree is the number of edges connected to the vertex. \item The outdegree is the number of edges that initiate at a vertex and indegree is the number of edges that terminate at a vertex. \item The sum of degrees of a graph is $2|E|$ \end{itemize} Regular graph: A graph where each vertex has the same number of degrees. A $n$-regular graph is one where each vertex is $n$ degrees. Subgraph: A subgraph is a graph $G$ formed from a subset of the vertices and edges of $G$. All edges of the subgraph must connect a vertex pair. Induced subgraphs contain a subset of vertices from the original graph, and all edges connecting pairs of vertices in that subset. \begin{figure}[htb] \caption{A graph and an induced subgraph} \centering \includegraphics[width=0.6\textwidth]{InducedSubgraph_900.png} \end{figure} Spanning subgraphs contain all vertices of the original graph. 
\begin{figure}[htb] \caption{A graph and a spanning subgraph} \centering \includegraphics[width=0.6\textwidth]{fig-subgraphs.png} \end{figure} Isomorphic graph \begin{itemize} \item A graph $(V', E')$ is isomorphic to $(V, E)$ if there exists a bijection $f: V \rightarrow V'$ such that $(v_i, v_j) \in E$ iff $(f(v_i), f(v_j)) \in E'$ \item Basically, two isomorphic graphs have the same structure but may have different labels for the vertices \item Check if two graphs are isomorphic by mapping corresponding vertices of $V$ to those of $E$ \end{itemize} Tournament graph: A directed graph that has zero loops and there is only one edge between any two vertices. Complete/round-robin tournament graph: A tournament graph where all pairs of distinct vertices are connected by one edge. Single-elimination tournament graph: A tournament graph where: one vertex called the champion has no edge terminating at it, every other vertex is the terminal vertex of exactly one edge, and there is a path from the champion vertex to every other vertex. \begin{figure}[htb] \caption{A single-elimination tournament graph} \centering \includegraphics[width=0.4\textwidth]{fig-mlb-1983-9-1.png} \end{figure} Bipartite graph: A graph is bipartite if its vertices can be divided into two disjoint and independent sets. Visually, there exists a pair of sets of where a line between the pair cuts all edges. \begin{figure}[htb] \caption{A bipartite graph and its two vertex sets} \centering \includesvg[width=0.4\textwidth]{Simple-bipartite-graph.svg} \end{figure} Degree sequence: A non-increasing sequence of its vertex degrees. \begin{figure}[htb!] \caption{The degree sequence of this graph is $(4, 3, 2, 2, 1)$} \centering \includegraphics[width=0.5\textwidth]{fig-degrees-example-9-1.png} \end{figure} Graphic sequence: A degree sequence is graphic if there exists an undirected graph that has the same degree sequence. As an example, $(3, 3, 1)$ is not a graphic sequence. 
The complement of a graph $G = (V, E)$ is $\bar{G} = (V, K - E)$. Eulerian path \begin{itemize} \item A trail that visits every edge in a graph exactly once \item A graph has an Eulerian path iff it has exactly 0 or 2 vertices with odd degrees \item Eulerian circuit: A closed Eulerian path \item A graph has an Eulerian circuit iff the degrees of all vertices are even \end{itemize} Hamilton path \begin{itemize} \item A path that visits each vertex exactly once \item Hamilton cycle/Hamiltonian graph: A Hamilton path that is a cycle \end{itemize} Distance \begin{itemize} \item Distance: Number of edges in a shortest path connecting a pair of vertices \item Eccentricity: Greatest distance between a given vertex and any other vertex \item Center: The vertex with the minimum eccentricity \item Radius: Minimum eccentricity of any vertex \item Diameter: Maximum eccentricity of any vertex \end{itemize} \section{Trees} Tree \begin{itemize} \item An undirected graph that is connected and has no cycles \item A disconnected graph that has no cycles is a forest \end{itemize} Dijkstra's algorithm: An algorithm for finding the shortest path(s) between all or a pair of vertices in a graph. Image credits: Al Doerr, Ken Levasseur - Applied Discrete Structures \end{document}
{ "alphanum_fraction": 0.6513133555, "author": null, "avg_line_length": 31.5034965035, "converted": null, "ext": "tex", "file": null, "hexsha": "16c02f5d22325d2c13452c444b937edd909dc2cc", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ad0b206cb2d2abd57fb00d7827cdb0a4a78083c9", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "MakotoE/math301-notes", "max_forks_repo_path": "math-301.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ad0b206cb2d2abd57fb00d7827cdb0a4a78083c9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "MakotoE/math301-notes", "max_issues_repo_path": "math-301.tex", "max_line_length": 428, "max_stars_count": null, "max_stars_repo_head_hexsha": "ad0b206cb2d2abd57fb00d7827cdb0a4a78083c9", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "MakotoE/math301-notes", "max_stars_repo_path": "math-301.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 8856, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 27030 }
#include"Character.h" #include "Kobieta.h" #include "Julek.h" #include <boost/test/unit_test.hpp> BOOST_AUTO_TEST_SUITE(CharacterTest) Kobieta K(32,32,5); Julek J(56,78,8); BOOST_AUTO_TEST_CASE(KobietaInitializingLivesChecking) { BOOST_CHECK_EQUAL(K.getLives(),5); } BOOST_AUTO_TEST_CASE(KobietaInitializingStartPosition) { BOOST_CHECK_EQUAL(K.getDX(),32); BOOST_CHECK_EQUAL(K.getDY(),32); } BOOST_AUTO_TEST_CASE(JulekInitilizingStartPositions) { BOOST_CHECK_EQUAL(J.getDX(),56); BOOST_CHECK_EQUAL(J.getDY(),78); } BOOST_AUTO_TEST_CASE(JulekNumberOfLivesAtStart) { BOOST_CHECK_EQUAL(J.getLives(),3); } BOOST_AUTO_TEST_SUITE_END()
{ "alphanum_fraction": 0.7128851541, "author": null, "avg_line_length": 21.6363636364, "converted": null, "ext": "cpp", "file": null, "hexsha": "b88553c881680ca591224f688ec13fcb11bd15cf", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "929f5dbdfa0aa3883e375d6d518195c944353ab5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kiimchi/DEADlines", "max_forks_repo_path": "test/CharacterTests.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "929f5dbdfa0aa3883e375d6d518195c944353ab5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kiimchi/DEADlines", "max_issues_repo_path": "test/CharacterTests.cpp", "max_line_length": 57, "max_stars_count": null, "max_stars_repo_head_hexsha": "929f5dbdfa0aa3883e375d6d518195c944353ab5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kiimchi/DEADlines", "max_stars_repo_path": "test/CharacterTests.cpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 183, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 714 }
#! /usr/bin/env python """ Phase_function class definition """ from __future__ import division, print_function __author__ = 'Julien Milli' __all__ = [] import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import interp1d class Phase_function(object): """ This class represents the scattering phase function (spf). It contains the attribute phase_function_calc that implements either a single Henyey Greenstein phase function, a double Heyney Greenstein, or any custom function (data interpolated from an input list of phi, spf(phi)). """ def __init__(self, spf_dico={'name': 'HG', 'g': 0., 'polar': False}): """ Constructor of the Phase_function class. It checks whether the spf_dico contains a correct name and sets the attribute phase_function_calc Parameters ---------- spf_dico : dictionnary Parameters describing the scattering phase function to be implemented. By default, an isotropic phase function is implemented. It should at least contain the key "name" chosen between 'HG' (single Henyey Greenstein), 'DoubleHG' (double Heyney Greenstein) or 'interpolated' (custom function). The parameter "polar" enables to switch on the polarisation (if set to True, the default is False). In this case it assumes a Rayleigh polarised fraction (1-(cos phi)^2) / (1+(cos phi)^2). 
""" if not isinstance(spf_dico,dict): msg = 'The parameters describing the phase function must be a ' \ 'Python dictionnary' raise TypeError(msg) if 'name' not in spf_dico.keys(): msg = 'The dictionnary describing the phase function must contain' \ ' the key "name"' raise TypeError(msg) self.type = spf_dico['name'] if 'polar' not in spf_dico.keys(): self.polar = False elif not isinstance(spf_dico['polar'], bool): msg = 'The dictionnary describing the polarisation must be a ' \ 'boolean' raise TypeError(msg) else: self.polar = spf_dico['polar'] if self.type == 'HG': self.phase_function_calc = HenyeyGreenstein_SPF(spf_dico) elif self.type == 'DoubleHG': self.phase_function_calc = DoubleHenyeyGreenstein_SPF(spf_dico) elif self.type == 'interpolated': self.phase_function_calc = Interpolated_SPF(spf_dico) else: msg = 'Type of phase function not understood: {0:s}' raise TypeError(msg.format(self.type)) def compute_phase_function_from_cosphi(self, cos_phi): """ Compute the phase function at (a) specific scattering scattering angle(s) phi. The argument is not phi but cos(phi) for optimization reasons. Parameters ---------- cos_phi : float or array cosine of the scattering angle(s) at which the scattering function must be calculated. """ phf = self.phase_function_calc.compute_phase_function_from_cosphi(cos_phi) if self.polar: return (1-cos_phi**2)/(1+cos_phi**2) * phf else: return phf def print_info(self): """ Prints information on the type and parameters of the scattering phase function. 
""" print('----------------------------') print('Phase function parameters') print('----------------------------') print('Type of phase function: {0:s}'.format(self.type)) print('Linear polarisation: {0!r}'.format(self.polar)) self.phase_function_calc.print_info() def plot_phase_function(self): """ Plots the scattering phase function """ phi = np.arange(0, 180, 1) phase_func = self.compute_phase_function_from_cosphi(np.cos(np.deg2rad(phi))) if self.polar: phase_func = (1-np.cos(np.deg2rad(phi))**2) / \ (1+np.cos(np.deg2rad(phi))**2) * phase_func plt.close(0) plt.figure(0) plt.plot(phi, phase_func) plt.xlabel('Scattering phase angle in degrees') plt.ylabel('Scattering phase function') plt.grid() plt.xlim(0, 180) plt.show() class HenyeyGreenstein_SPF(object): """ Implementation of a scattering phase function with a single Henyey Greenstein function. """ def __init__(self, spf_dico={'g':0.}): """ Constructor of a Heyney Greenstein phase function. Parameters ---------- spf_dico : dictionnary containing the key "g" (float) g is the Heyney Greenstein coefficient and should be between -1 (backward scattering) and 1 (forward scattering). 
""" # it must contain the key "g" if 'g' not in spf_dico.keys(): raise TypeError('The dictionnary describing a Heyney Greenstein ' 'phase function must contain the key "g"') # the value of "g" must be a float or a list of floats elif not isinstance(spf_dico['g'], (float, int)): raise TypeError('The key "g" of a Heyney Greenstein phase function' ' dictionnary must be a float or an integer') self.set_phase_function(spf_dico['g']) def set_phase_function(self, g): """ Set the value of g """ if g >= 1: print('Warning the Henyey Greenstein parameter is greater than or ' 'equal to 1') print('The value was changed from {0:6.2f} to 0.99'.format(g)) g = 0.99 elif g <= -1: print('Warning the Henyey Greenstein parameter is smaller than or ' 'equal to -1') print('The value was changed from {0:6.2f} to -0.99'.format(g)) g = -0.99 self.g = float(g) def compute_phase_function_from_cosphi(self, cos_phi): """ Compute the phase function at (a) specific scattering scattering angle(s) phi. The argument is not phi but cos(phi) for optimization reasons. Parameters ---------- cos_phi : float or array cosine of the scattering angle(s) at which the scattering function must be calculated. """ return 1./(4*np.pi)*(1-self.g**2)/(1+self.g**2-2*self.g*cos_phi)**(3./2.) def print_info(self): """ Prints the value of the HG coefficient """ print('Heynyey Greenstein coefficient: {0:.2f}'.format(self.g)) class DoubleHenyeyGreenstein_SPF(object): """ Implementation of a scattering phase function with a double Henyey Greenstein function. 
""" def __init__(self, spf_dico={'g': [0.5,-0.3], 'weight': 0.7}): """ """ # it must contain the key "g" if 'g' not in spf_dico.keys(): raise TypeError('The dictionnary describing a Heyney Greenstein' ' phase function must contain the key "g"') # the value of "g" must be a list of floats elif not isinstance(spf_dico['g'],(list,tuple,np.ndarray)): raise TypeError('The key "g" of a Heyney Greenstein phase ' 'function dictionnary must be a list of floats') # it must contain the key "weight" if 'weight' not in spf_dico.keys(): raise TypeError('The dictionnary describing a multiple Henyey ' 'Greenstein phase function must contain the ' 'key "weight"') # the value of "weight" must be a list of floats elif not isinstance(spf_dico['weight'], (float, int)): raise TypeError('The key "weight" of a Heyney Greenstein phase ' 'function dictionnary must be a float (weight of ' 'the first HG coefficient between 0 and 1)') elif spf_dico['weight']<0 or spf_dico['weight']>1: raise ValueError('The key "weight" of a Heyney Greenstein phase' ' function dictionnary must be between 0 and 1. It' ' corresponds to the weight of the first HG ' 'coefficient') if len(spf_dico['g']) != 2: raise TypeError('The keys "weight" and "g" must contain the same' ' number of elements') self.g = spf_dico['g'] self.weight = spf_dico['weight'] def print_info(self): """ Prints the value of the HG coefficients and weights """ print('Heynyey Greenstein first component : coeff {0:.2f} , ' 'weight {1:.1f}%'.format(self.g[0], self.weight*100)) print('Heynyey Greenstein second component: coeff {0:.2f} , ' 'weight {1:.1f}%'.format(self.g[1], (1-self.weight)*100.)) def compute_singleHG_from_cosphi(self, g, cos_phi): """ Compute a single Heyney Greenstein phase function at (a) specific scattering scattering angle(s) phi. The argument is not phi but cos(phi) for optimization reasons. 
Parameters ---------- g : float Heyney Greenstein coefficient cos_phi : float or array cosine of the scattering angle(s) at which the scattering function must be calculated. """ return 1./(4*np.pi)*(1-g**2)/(1+g**2-2*g*cos_phi)**(3./2.) def compute_phase_function_from_cosphi(self,cos_phi): """ Compute the phase function at (a) specific scattering scattering angle(s) phi. The argument is not phi but cos(phi) for optimization reasons. Parameters ---------- cos_phi : float or array cosine of the scattering angle(s) at which the scattering function must be calculated. """ return self.weight * self.compute_singleHG_from_cosphi(self.g[0], cos_phi) + \ (1-self.weight) * self.compute_singleHG_from_cosphi(self.g[1], cos_phi) class Interpolated_SPF(object): """ Custom implementation of a scattering phase function by providing a list of scattering phase angles and corresponding values of the phase function. """ def __init__(self, spf_dico={'phi':np.array([ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162]), 'spf':np.array([3.580, 0.703, 0.141, 0.0489, 0.0233, 0.0136, 0.0091, 0.0069, 0.0056,0.005])}): """ Constructor of the Interpolated_SPF class. 
It checks whether the spf_dico contains the keys 'phi' and 'spf' Parameters ---------- spf_dico : dict dictionnary containing the keys "phi" (list of scattering angles) and "spf" (list of corresponding scattering phase function values) """ for key in ['phi','spf']: if key not in spf_dico.keys(): raise TypeError('The dictionnary describing a ' '"interpolated" phase function must contain ' 'the key "{0:s}"'.format(key)) elif isinstance(spf_dico[key],(list,tuple,np.ndarray)): raise TypeError('The key "{0:s}" of a "interpolated" phase' ' function dictionnary must be a list, array' ' or tuple'.format(key)) if len(spf_dico['phi']) != len(spf_dico['spf']): raise TypeError('The keys "phi" and "spf" must contain the same' ' number of elements') self.interpolate_phase_function(spf_dico) def print_info(self): """ Prints the information of the spf """ phi = np.linspace(0, 180, 19) spf = self.compute_phase_function_from_cosphi(np.cos(np.deg2rad(phi))) print('Scattering angle: ', phi) print('Interpolated scattering phase function: ', spf) def interpolate_phase_function(self, spf_dico): """ Creates the function that returns the scattering phase function based on the scattering angle by interpolating the values given in the dictionnary. Parameters ---------- spf_dico : dict dictionnary containing the keys "phi" (list of scattering angles) and "spf" (list of corresponding scattering phase function values) """ self.interpolation_function = interp1d(spf_dico['phi'], spf_dico['spf'], kind='cubic', bounds_error=False, fill_value=np.nan) def compute_phase_function_from_cosphi(self, cos_phi): """ Compute the phase function at (a) specific scattering scattering angle(s) phi. The argument is not phi but cos(phi) for optimization reasons. Parameters ---------- cos_phi : float or array cosine of the scattering angle(s) at which the scattering function must be calculated. """ return self.interpolation_function(cos_phi)
{ "alphanum_fraction": 0.561549101, "author": null, "avg_line_length": 41.0059701493, "converted": null, "ext": "py", "file": null, "hexsha": "d9f99a5168ebd697c80e27e81f7d99ca54bba56a", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "349875f51358948589ccfb4d94808472e7fc90cb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "FaustineAstro/VIP", "max_forks_repo_path": "vip_hci/metrics/phase_function.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "349875f51358948589ccfb4d94808472e7fc90cb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "FaustineAstro/VIP", "max_issues_repo_path": "vip_hci/metrics/phase_function.py", "max_line_length": 90, "max_stars_count": null, "max_stars_repo_head_hexsha": "349875f51358948589ccfb4d94808472e7fc90cb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "FaustineAstro/VIP", "max_stars_repo_path": "vip_hci/metrics/phase_function.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3090, "path": null, "reason": "import numpy,from scipy", "repo": null, "save_path": null, "sha": null, "size": 13737 }
[STATEMENT] lemma empty_fv_exists_fun: "fv t = {} \<Longrightarrow> \<exists>f X. t = Fun f X" [PROOF STATE] proof (prove) goal (1 subgoal): 1. fv t = {} \<Longrightarrow> \<exists>f X. t = Fun f X [PROOF STEP] by (cases t) auto
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Stateful_Protocol_Composition_and_Typing_Messages", "hexsha": null, "include": null, "lang": null, "length": 1, "llama_tokens": 94, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import pathlib import torch import numpy as np import gvision_utils import img_utils class PretrainedModel(): def __init__(self, modelname): model_pt = model_class_dict[modelname](pretrained=True) #model.eval() self.model = nn.DataParallel(model_pt.cuda()) self.model.eval() self.mu = torch.Tensor([0.485, 0.456, 0.406]).float().view(1, 3, 1, 1).cuda() self.sigma = torch.Tensor([0.229, 0.224, 0.225]).float().view(1, 3, 1, 1).cuda() def predict(self, x): out = (x - self.mu) / self.sigma return self.model(out) def forward(self, x): out = (x - self.mu) / self.sigma return self.model(out) def __call__(self, x): return self.predict(x) class GVisionModel: """Returns 2 simulated logits corresponding to correct class and incorrect class, such that attack has signal for optimization""" def __init__(self, correct_labelset=['cat'], exp_name="saved_img", save_location="output", loss_margin=1): self.labelset = correct_labelset self.save_location = save_location self.exp_name = exp_name self.counter = 0 self.loss_margin = loss_margin self.best_loss = np.inf pathlib.Path(save_location).mkdir(parents=True, exist_ok=True) with open(f"{save_location}/labels.txt", "w") as f: f.write(", ".join(correct_labelset)) def __call__(self, x): return self.predict(x) def predict(self, x): x = x.cpu().detach().numpy() if x.shape[0] != 1: raise AssertionError("Batch size must be 1") x = x[0] results = gvision_utils.gvision_classify_numpy(x) print(str(results)) self.counter += 1 logits = self._compute_logits(results) current_loss = self.loss(y=None, logits=logits) if current_loss < self.best_loss: self.best_loss = current_loss img = img_utils.convert_to_pillow(x) img = img_utils.write_text_to_img(img, f"Iter: {self.counter}\n{results}") img.save(f"{self.save_location}/{self.exp_name}_{self.counter}.png") return torch.tensor(logits) def _compute_logits(self, results): """Confidence score of the correct and 2nd best class""" matching_results = results.match(self.labelset) other_results = 
results.match(self.labelset, inverse=True) print('-----------------') print("Matching results:") print(matching_results) print("\nNot-matching results:") print(other_results) print('-----------------') print(f"Top label: {matching_results.top_label} - {matching_results.top_score}") print(f"2nd best: {other_results.top_label} - {other_results.top_score}\n\n") return np.array([[matching_results.top_score + self.loss_margin, other_results.top_score]]) def loss(self, y, logits, targeted=False, loss_type='margin_loss'): """ Implements the margin loss (difference between the correct and 2nd best class). """ if logits.shape[0] != 1: raise AssertionError("Batch size must be 1") if targeted: raise AssertionError("Targeted attack should be done by modifying the logit computation procedure") if loss_type != 'margin_loss': raise AssertionError("Gvision model only supports margin loss") correct, second_best = logits[0] return np.array([correct - second_best])
{ "alphanum_fraction": 0.6284820921, "author": null, "avg_line_length": 33.5047619048, "converted": null, "ext": "py", "file": null, "hexsha": "34899bd8875a66fd881bc6443c43c2a74b2aa7c3", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9a6bad08745db7c569557e0844f7c33dc2e7a492", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kubic71/sparse-rs", "max_forks_repo_path": "gvision_model.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "9a6bad08745db7c569557e0844f7c33dc2e7a492", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kubic71/sparse-rs", "max_issues_repo_path": "gvision_model.py", "max_line_length": 133, "max_stars_count": null, "max_stars_repo_head_hexsha": "9a6bad08745db7c569557e0844f7c33dc2e7a492", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kubic71/sparse-rs", "max_stars_repo_path": "gvision_model.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 815, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3518 }
__precompile__() module DataProcessingHierarchyTools using ProgressMeter using Glob using MAT using StrTables, LaTeX_Entities using StableHashes import StableHashes.shash import Base:filter,show, convert include("types.jl") const def = LaTeX_Entities.default function git_annex() cmd = nothing try cmd = readchomp(`which git-annex`) catch ee end return cmd end const levels = ["subjects", "subject", "day", "session", "array", "channel", "cell"] const level_patterns = [r"[0-9A-Za-z]*", r"[0-9]{8}", r"session[0-9]{2}", r"array[0-9]{2}", r"channel[0-9]{3}", r"cell[0-9]{2}"] const level_patterns_s = ["*", "*", "[0-9]*", "session[0-9]*", "array[0-9]*", "channel[0-9]*", "cell[0-9]*"] #TODO: Find a way to (automatically) track dependency betweeen types. We could also just do this manually, i.e. have a depends_on function that lists types that a given type depends (directly) on. E.g. depends_on(Raster) = PSTH. If a type does not depend on any other types, just return an empty list depends_on(::Type{T}) where T <: DPHData = DataType[] """ Returns a list of files on which `args` depends. """ dependencies(args::T) where T <: DPHDataArgs = String[] function check_args(X1::T, X2::T) where T <: DPHDataArgs matches = true for f in fieldnames(T) x1 = getfield(X1,f) x2 = getfield(X2,f) if typeof(x1) <: AbstractVector if length(x1) != length(x2) matches = false break end for (_x1,_x2) in zip(x1,x2) if _x1 != _x2 matches = false break end end if !matches break end else if !(x1 ≈ x2) matches = false break end end end matches end function check_args(X::T, args...) 
where T <: DPHDataArgs matches = true for (a0,a1) in zip(fieldnames(T), args) x = getfield(X, a0) if typeof(x) <: AbstractVector if length(x) != length(a1) matches = false break end for (x1,x2) in zip(x,a1) if x1 != x2 matches = false break end end if !matches break end else if !(x ≈ a1) matches = false break end end end matches end function get_numbers(ss::String) filter(isdigit,ss) end shortnames = Dict("subjects" => x->"", "subject" => x->x[1:1], "day" => x->x, "session" => x->"s$(get_numbers(x))", "array" => x->"a$(get_numbers(x))", "channel" => x->"g$(get_numbers(x))", "cell" => x->"c$(get_numbers(x))") function get_shortname(ss::String) this_level = level(ss) this_idx = findfirst(l->this_level==l, levels) _r, _p = splitdir(ss) qs = String[] while !isempty(_p) && this_idx > 1 # Stop once we have reached the bottom level push!(qs, shortnames[level(_p)](_p)) _r, _p = splitdir(_r) this_idx -= 1 # Keep track of where we are in the hierarchy end join(reverse(qs),"") end """ Returns the full name of the current level """ function get_fullname(ss=pwd()) this_level = level(ss) this_idx = findfirst(l->this_level==l, levels) pp = [get_level_name(this_level, ss)] while this_idx > 2 this_idx -= 1 this_level = levels[this_idx] push!(pp, get_level_name(this_level, ss)) end joinpath(reverse(pp)...) 
end level() = level(pwd()) level(::Type{DPHData}) = error("Not implemented") filename(::Type{DPHData}) = error("Not implemented") datatype(::Type{DPHDataArgs}) = error("Not implemented") datatype(X::T) where T <: DPHDataArgs = datatype(T) version(X::DPHDataArgs) = "UNKNOWN" function filename(args::T) where T <: DPHDataArgs fname = filename(datatype(T)) h = string(shash(args),base=16) bn, ext = splitext(fname) fname = join([bn, "_", h, ext]) fname end function filename(args::Vector{T}) where T <: DPHDataArgs fname = filename(datatype(T)) h = string(shash(args),base=16) bn, ext = splitext(fname) fname = join([bn, "_", h, ext]) fname end matname(::Type{DPHData}) = error("Not implemented") function load(args::T) where T <: DPHDataArgs fname = filename(args) load(datatype(T), fname) end """ Returns `true` if the data described by `args` has already been computed """ function computed(args::T) where T <: DPHDataArgs fname = filename(args) return isfile(fname) || islink(fname) end function load(args::Vector{T}) where T <: DPHDataArgs fname = filename(args) if isfile(fname) return load(Vector{datatype(T)}, fname) end error("No data exist with the specified arguments") end function plot_data(::Type{T},fig, args::T2, plotargs::T3) where T <: DPHData where T2 <: DPHDataArgs where T3 <: DPHPlotArgs error("Not implemented") end export DPHData, level, filename, plot_data, datatype, BootstrappedDataArgs """ Get the level of the directory represented by `cwd`. 
""" function level(cwd::String) numbers = map(x->first(string(x)), 0:9) dd = last(splitdir(cwd)) ss = string(rstrip(dd, numbers)) if isempty(ss) # only numbers; assume this is a date ss = "day" elseif dd == ss #no numbers, this is the subject name ss = "subject" end return ss end """ Get the path of `dir` up `target_level` """ function get_level_path(target_level::String, dir=pwd()) parts = splitpath(dir) new_parts = String[] target_idx = findfirst(levels .== target_level) for p in parts this_idx = findfirst(levels .== level(p)) if this_idx <= target_idx push!(new_parts, p) end end return joinpath(new_parts...) end """ Get the name of the requested level """ function get_level_name(target_level::String, dir=pwd()) this_level = level(dir) this_idx = findfirst(l->this_level==l, levels) target_idx = findfirst(l->target_level==l, levels) i = this_idx cwd = dir pp = "" while i >= target_idx cwd, pp = splitdir(cwd) i -= 1 end pp end """ Get all directories corresponding to `target_level` under the current hierarchy """ function get_level_dirs(target_level::String, dir=pwd()) dirs = cd(dir) do this_level = level() this_idx = findfirst(l->this_level==l, levels) target_idx = findfirst(l->target_level==l, levels) if target_idx == this_idx dirs = ["."] elseif target_idx < this_idx rel_path = process_level(target_level, dir) dirs = glob(joinpath(rel_path, "..", level_patterns_s[target_idx])) else dirs = glob(joinpath(level_patterns_s[this_idx+1:target_idx]...)) end end if dir != pwd() dirs = [joinpath(dir, d) for d in dirs] end dirs end """ Get all unique level directories contained in `dirs`. """ function get_level_dirs(level::String, dirs::Vector{String}) cwd = pwd() level_dirs = String[] for c in dirs cd(c) do cd(process_level(level)) _dir = pwd() _dir = strip(replace(_dir, cwd => ""), '/') if !(_dir in level_dirs) push!(level_dirs, _dir) end end end level_dirs end """ Returns the relative path to an object of type `T`, using `dir` as the starting point. 
""" function process_level(::Type{T}, dir=pwd();kvs...) where T <: DPHData target_level = level(T) process_level(target_level, dir;kvs...) end """ Returns the path relative to `dir` of the level `target_level`. """ function process_level(target_level::String, dir=pwd();kvs...) # get the current level this_level = level(dir) this_idx = findfirst(l->this_level==l, levels) target_idx = findfirst(l->target_level==l, levels) for lidx in [this_idx, target_idx] if lidx == nothing || !(0 < lidx <= length(levels)) throw(ArgumentError("Unknown level")) end end pl = ["."] append!(pl, [".." for i in 1:(this_idx - target_idx)]) dirstring = joinpath(pl...) end """ Returns the path of the current directory relative to `level_dir` """ function get_relative_path(level_dir::String,dir=pwd()) this_level = level(dir) this_idx = findfirst(l->this_level==l, levels) target_idx = findfirst(l->level_dir==l, levels) if target_idx+1 <= this_idx parts = String[] for ii in target_idx+1:this_idx push!(parts, get_level_name(levels[ii],dir)) end elseif target_idx == this_idx parts = ["."] else error("Target level must be below the current level") end joinpath(parts...) end """ Process each directory in `dirs`, creating an object of type `T`, and returning a concatenation of those objects. """ function process_dirs(::Type{T}, dirs::Vector{String}, args...;kvs...) where T <: DPHData pp = cd(dirs[1]) do T(args...;kvs...) end @showprogress 1 "Processing dirs..." for d in dirs[2:end] _pp = cd(d) do T(args...;kvs...) end pp = hcat(pp, _pp) end return pp end """ Visit each directory in `dirs`, instantiating type `T` with argumments `args` and keyword arguments `kvs`. Note that this function is similar to `process_dirs`, except unlike that function, `visit_dirs` does not return any results. """ function visit_dirs(::Type{T}, dirs::Vector{String}, args...;kvs...) where T <: DPHData skipped_dirs = String[] @showprogress 1 "Processing dirs..." for d in dirs cd(d) do try T(args...;kvs...) 
catch push!(skipped_dirs, d) end end end skipped_dirs end function visit_dirs(func::Function, dirs::Vector{String}, args...;kvs...) skipped_dirs = String[] @showprogress 1 "Processing dirs..." for d in dirs cd(d) do # try func(args...;kvs...) # catch # push!(skipped_dirs, d) # end end end skipped_dirs end """ Process each directory in `dirs` by running the function `func`. """ function process_dirs(func::Function, dirs::Vector{String}, args...;kvs...) Q = Vector{Any}(undef, length(dirs)) @showprogress 1 "Processing dirs..." for (i,d) in enumerate(dirs) Q[i] = cd(d) do func(args...;kvs...) end end Q end """ Load an object of type `T` from the current directory, using additional constructor arguments `args`. """ function load(::Type{T}, args...;kvs...) where T <: DPHData dir = process_level(T) qq = cd(dir) do if isfile(filename(T)) qq = T() else qq = zero(T) end qq end qq end """ Convert some unicde symbols to their latex equivalent before saving """ function sanitise(ss::String) oo = String[] for (i,c) in enumerate(ss) sc = string(c) m = matches(def, sc) if !isempty(m) push!(oo, m[1]) else push!(oo, sc) end end join(oo, "") end function desanitise(ss::String) oo = String[] ss_split = split(ss, "_") for _ss in ss_split _nss = filter(!isdigit, _ss) ll = lookupname(def, _nss) if isempty(ll) ll = _nss end lln = replace(_ss, _nss => ll) push!(oo, lln) end join(oo, "_") end function save(X::T, fname=filename(X.args);overwrite=false) where T <: DPHData if isfile(fname) && !overwrite error("File $fname already exists") end Q = convert(Dict{String,Any}, X) MAT.matwrite(fname,Q) end function save(X::Vector{T}) where T <: DPHData fname = filename([x.args for x in X]) Q = Dict{String, Dict{String,Any}}() for (i,x) in enumerate(X) Q["idx$(i)"] = convert(Dict{String,Any}, x) end MAT.matwrite(fname,Q) end function Base.convert(::Type{Dict{String,Any}}, X::T) where T <: Union{DPHData, DPHDataArgs} Q = Dict{String,Any}() for f in fieldnames(T) v = getfield(X, f) fs = string(f) fs = 
sanitise(fs) if typeof(v) <: AbstractVector Q[fs] = collect(v) elseif typeof(v) <: DPHDataArgs Q[fs] = convert(Dict{String,Any}, v) elseif typeof(v) <: Symbol Q[fs] = string(v) else Q[fs] = v end end Q end function load(::Type{T}, fname=filename(T)) where T <: DPHData if git_annex() != nothing is_annex = false try run(pipeline(`$(git_annex()) status`, stdout=devnull, stderr=devnull)) is_annex = true catch end if is_annex run(`$(git_annex()) get $fname`) end end if isfile(fname) Q = MAT.matread(fname) else error("No data exist with the specified arguments") end convert(T, Q) end function load(::Type{Vector{T}}, fname=filename{T}) where T <: DPHData Q = MAT.matread(fname) X = Vector{T}(undef, length(Q)) for (k,v) in Q ii = parse(Int64, replace(k,"idx" => "")) X[ii] = convert(T,v) end X end function Base.convert(::Type{T}, Q::Dict{String, Any}) where T <: Union{DPHData, DPHDataArgs} a_args = Any[] for f in fieldnames(T) tt = fieldtype(T,f) fs = string(f) fs = sanitise(fs) if tt <: Symbol vv = Symbol(Q[fs]) elseif tt <: DPHDataArgs #handle arguments to other types here vv = convert(tt, Q[fs]) elseif tt <: AbstractVector && !(typeof(Q[fs]) <: AbstractVector) vv = eltype(tt)[Q[fs];] elseif tt <: AbstractMatrix && !(typeof(Q[fs]) <: AbstractMatrix) vv = fill(Q[fs], 1,1) else vv = Q[fs] end push!(a_args, vv) end T(a_args...) end """ Return those directories among `dirs` where `func`,using arguments `args`, returns true for an object whose arguments are compatible with `typeargs`. """ function Base.filter(func::Function, typeargs::T2, dirs::Vector{String}, args...;verbose=0) where T2 <: DPHDataArgs outdirs = String[] for d in dirs cd(d) do fname = filename(typeargs) if isfile(fname) aa = false try X = load(typeargs) aa = func(X,args...) 
catch if verbose > 0 rethrow() end end if aa push!(outdirs, d) end end end end outdirs end function Base.show(io::IO, X::T) where T <: DPHDataArgs compact = get(io, :compact, false) print("$T with fields:\n") for f in fieldnames(T) v = getfield(X, f) if fieldtype(T, f) <: String print(io, "\t$f = \"$v\"\n") elseif fieldtype(T, f) <: Symbol print(io, "\t$f = :$v\n") else print(io, "\t$f = $v\n") end end end function findargs(::Type{T}, cwd=pwd();kvs...) where T <: DPHData fname = filename(T) fname = replace(fname, ".mat" => "*.mat") arg_type = fieldtype(T, :args) targs = filter(k->k[1] in fieldnames(arg_type), kvs) args = cd(cwd) do files = glob(fname) args = arg_type[] for (ii,ff) in enumerate(files) X = load(T, ff) found = true for (k,v) in targs if getfield(X.args,k) != v found = false break end end if found push!(args, X.args) end end args end args end function reset!(args::DPHDataArgs) reset!(filename(args)) end """ Unlocks the file pointed to be `fname` if it is under git annex control, so that it can be overwritten. """ function reset!(fname::String) if islink(fname) if git_annex != nothing run(`$(git_annex()) get $fname`) run(`$(git_annex()) unlock $fname`) end end end end # module
{ "alphanum_fraction": 0.5686418394, "author": null, "avg_line_length": 26.9851485149, "converted": null, "ext": "jl", "file": null, "hexsha": "f7a27164a0a9b1a7c6c0194dd38421d282d5b10d", "include": null, "lang": "Julia", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8553b5217e5c3348f7b0776797b47efcd7d2e84e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "grero/DataProcessingHierarchyTools.jl", "max_forks_repo_path": "src/DataProcessingHierarchyTools.jl", "max_issues_count": 15, "max_issues_repo_head_hexsha": "8553b5217e5c3348f7b0776797b47efcd7d2e84e", "max_issues_repo_issues_event_max_datetime": "2020-06-22T00:40:51.000Z", "max_issues_repo_issues_event_min_datetime": "2018-07-24T03:50:25.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "grero/DataProcessingHierarchyTools.jl", "max_issues_repo_path": "src/DataProcessingHierarchyTools.jl", "max_line_length": 300, "max_stars_count": null, "max_stars_repo_head_hexsha": "8553b5217e5c3348f7b0776797b47efcd7d2e84e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "grero/DataProcessingHierarchyTools.jl", "max_stars_repo_path": "src/DataProcessingHierarchyTools.jl", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4446, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 16353 }
(* Default settings (from HsToCoq.Coq.Preamble) *) Generalizable All Variables. Unset Implicit Arguments. Set Maximal Implicit Insertion. Unset Strict Implicit. Unset Printing Implicit Defensive. Require Coq.Program.Tactics. Require Coq.Program.Wf. (* Preamble *) Require String BitTerminationProofs. Import String.StringSyntax. (* Converted imports: *) Require Coq.Init.Peano. Require Data.Bits. Require Data.Foldable. Require Data.Maybe. Require Data.Tuple. Require GHC.Base. Require GHC.Err. Require GHC.Num. Require HsToCoq.Err. Require HsToCoq.Wf. Require IntWord. Import Data.Bits.Notations. Import GHC.Base.Notations. Import GHC.Num.Notations. (* Converted type declarations: *) Definition Prefix := IntWord.Int%type. Definition Nat := IntWord.Word%type. Definition Mask := IntWord.Int%type. Definition Key := IntWord.Int%type. Definition BitMap := IntWord.Word%type. Inductive IntSet : Type := | Bin : Prefix -> Mask -> IntSet -> IntSet -> IntSet | Tip : Prefix -> BitMap -> IntSet | Nil : IntSet. Inductive Stack : Type := | Push : Prefix -> IntSet -> Stack -> Stack | Nada : Stack. Instance Default__IntSet : HsToCoq.Err.Default IntSet := HsToCoq.Err.Build_Default _ Nil. Instance Default__Stack : HsToCoq.Err.Default Stack := HsToCoq.Err.Build_Default _ Nada. (* Midamble *) (** Additional definitions for termination proof *) Fixpoint size_nat (t : IntSet) : nat := match t with | Bin _ _ l r => S (size_nat l + size_nat r)%nat | Tip _ bm => 0 | Nil => 0 end. Require Omega. Ltac termination_by_omega := Coq.Program.Tactics.program_simpl; simpl;Omega.omega. (* Converted value declarations: *) Definition intFromNat := IntWord.intFromWord. Definition natFromInt := IntWord.wordFromInt. Definition branchMask : Prefix -> Prefix -> Mask := fun p1 p2 => intFromNat (IntWord.highestBitMask (Data.Bits.xor (natFromInt p1) (natFromInt p2))). Definition maskW : Nat -> Nat -> Prefix := fun i m => intFromNat (i Data.Bits..&.(**) (Data.Bits.xor (Data.Bits.complement (m GHC.Num.- #1)) m)). 
Definition mask : IntWord.Int -> Mask -> Prefix := fun i m => maskW (natFromInt i) (natFromInt m). Definition zero : IntWord.Int -> Mask -> bool := fun i m => ((natFromInt i) Data.Bits..&.(**) (natFromInt m)) GHC.Base.== #0. Definition link : Prefix -> IntSet -> Prefix -> IntSet -> IntSet := fun p1 t1 p2 t2 => let m := branchMask p1 p2 in let p := mask p1 m in if zero p1 m : bool then Bin p m t1 t2 else Bin p m t2 t1. Definition nomatch : IntWord.Int -> Prefix -> Mask -> bool := fun i p m => (mask i m) GHC.Base./= p. Fixpoint insertBM (arg_0__ : Prefix) (arg_1__ : BitMap) (arg_2__ : IntSet) : IntSet := match arg_0__, arg_1__, arg_2__ with | kx, bm, (Bin p m l r as t) => if nomatch kx p m : bool then link kx (Tip kx bm) p t else if zero kx m : bool then Bin p m (insertBM kx bm l) r else Bin p m l (insertBM kx bm r) | kx, bm, (Tip kx' bm' as t) => if kx' GHC.Base.== kx : bool then Tip kx' (bm Data.Bits..|.(**) bm') else link kx (Tip kx bm) kx' t | kx, bm, Nil => Tip kx bm end. Definition shorter : Mask -> Mask -> bool := fun m1 m2 => (natFromInt m1) GHC.Base.> (natFromInt m2). 
Program Fixpoint union (arg_0__ arg_1__ : IntSet) {measure (size_nat arg_0__ + size_nat arg_1__)} : IntSet := match arg_0__, arg_1__ with | (Bin p1 m1 l1 r1 as t1), (Bin p2 m2 l2 r2 as t2) => let union2 := if Bool.Sumbool.sumbool_of_bool (nomatch p1 p2 m2) then link p1 t1 p2 t2 else if Bool.Sumbool.sumbool_of_bool (zero p1 m2) then Bin p2 m2 (union t1 l2) r2 else Bin p2 m2 l2 (union t1 r2) in let union1 := if Bool.Sumbool.sumbool_of_bool (nomatch p2 p1 m1) then link p1 t1 p2 t2 else if Bool.Sumbool.sumbool_of_bool (zero p2 m1) then Bin p1 m1 (union l1 t2) r1 else Bin p1 m1 l1 (union r1 t2) in if Bool.Sumbool.sumbool_of_bool (shorter m1 m2) then union1 else if Bool.Sumbool.sumbool_of_bool (shorter m2 m1) then union2 else if Bool.Sumbool.sumbool_of_bool (p1 GHC.Base.== p2) then Bin p1 m1 (union l1 l2) (union r1 r2) else link p1 t1 p2 t2 | (Bin _ _ _ _ as t), Tip kx bm => insertBM kx bm t | (Bin _ _ _ _ as t), Nil => t | Tip kx bm, t => insertBM kx bm t | Nil, t => t end. Solve Obligations with (termination_by_omega). Local Definition Semigroup__IntSet_op_zlzlzgzg__ : IntSet -> IntSet -> IntSet := union. Program Instance Semigroup__IntSet : GHC.Base.Semigroup IntSet := fun _ k__ => k__ {| GHC.Base.op_zlzlzgzg____ := Semigroup__IntSet_op_zlzlzgzg__ |}. Local Definition Monoid__IntSet_mappend : IntSet -> IntSet -> IntSet := _GHC.Base.<<>>_. Definition empty : IntSet := Nil. Definition unions {f : Type -> Type} `{Data.Foldable.Foldable f} : f IntSet -> IntSet := fun xs => Data.Foldable.foldl' union empty xs. Local Definition Monoid__IntSet_mconcat : list IntSet -> IntSet := unions. Local Definition Monoid__IntSet_mempty : IntSet := empty. Program Instance Monoid__IntSet : GHC.Base.Monoid IntSet := fun _ k__ => k__ {| GHC.Base.mappend__ := Monoid__IntSet_mappend ; GHC.Base.mconcat__ := Monoid__IntSet_mconcat ; GHC.Base.mempty__ := Monoid__IntSet_mempty |}. 
(* Skipping all instances of class `Data.Data.Data', including `Data.IntSet.InternalWord.Data__IntSet' *) (* Skipping all instances of class `GHC.Exts.IsList', including `Data.IntSet.InternalWord.IsList__IntSet' *) Fixpoint equal (arg_0__ arg_1__ : IntSet) : bool := match arg_0__, arg_1__ with | Bin p1 m1 l1 r1, Bin p2 m2 l2 r2 => andb (m1 GHC.Base.== m2) (andb (p1 GHC.Base.== p2) (andb (equal l1 l2) (equal r1 r2))) | Tip kx1 bm1, Tip kx2 bm2 => andb (kx1 GHC.Base.== kx2) (bm1 GHC.Base.== bm2) | Nil, Nil => true | _, _ => false end. Local Definition Eq___IntSet_op_zeze__ : IntSet -> IntSet -> bool := fun t1 t2 => equal t1 t2. Fixpoint nequal (arg_0__ arg_1__ : IntSet) : bool := match arg_0__, arg_1__ with | Bin p1 m1 l1 r1, Bin p2 m2 l2 r2 => orb (m1 GHC.Base./= m2) (orb (p1 GHC.Base./= p2) (orb (nequal l1 l2) (nequal r1 r2))) | Tip kx1 bm1, Tip kx2 bm2 => orb (kx1 GHC.Base./= kx2) (bm1 GHC.Base./= bm2) | Nil, Nil => false | _, _ => true end. Local Definition Eq___IntSet_op_zsze__ : IntSet -> IntSet -> bool := fun t1 t2 => nequal t1 t2. Program Instance Eq___IntSet : GHC.Base.Eq_ IntSet := fun _ k__ => k__ {| GHC.Base.op_zeze____ := Eq___IntSet_op_zeze__ ; GHC.Base.op_zsze____ := Eq___IntSet_op_zsze__ |}. Definition indexOfTheOnlyBit := IntWord.indexOfTheOnlyBit. Definition lowestBitMask : Nat -> Nat := fun x => x Data.Bits..&.(**) GHC.Num.negate x. 
Definition revNat : Nat -> Nat := fun x1 => let 'x2 := ((IntWord.shiftRWord x1 #1) Data.Bits..&.(**) #6148914691236517205) Data.Bits..|.(**) (IntWord.shiftLWord (x1 Data.Bits..&.(**) #6148914691236517205) #1) in let 'x3 := ((IntWord.shiftRWord x2 #2) Data.Bits..&.(**) #3689348814741910323) Data.Bits..|.(**) (IntWord.shiftLWord (x2 Data.Bits..&.(**) #3689348814741910323) #2) in let 'x4 := ((IntWord.shiftRWord x3 #4) Data.Bits..&.(**) #1085102592571150095) Data.Bits..|.(**) (IntWord.shiftLWord (x3 Data.Bits..&.(**) #1085102592571150095) #4) in let 'x5 := ((IntWord.shiftRWord x4 #8) Data.Bits..&.(**) #71777214294589695) Data.Bits..|.(**) (IntWord.shiftLWord (x4 Data.Bits..&.(**) #71777214294589695) #8) in let 'x6 := ((IntWord.shiftRWord x5 #16) Data.Bits..&.(**) #281470681808895) Data.Bits..|.(**) (IntWord.shiftLWord (x5 Data.Bits..&.(**) #281470681808895) #16) in (IntWord.shiftRWord x6 #32) Data.Bits..|.(**) (IntWord.shiftLWord x6 #32). Program Definition foldrBits {a} : IntWord.Int -> (IntWord.Int -> a -> a) -> a -> Nat -> a := fun prefix f z bitmap => let go := HsToCoq.Wf.wfFix2 Coq.Init.Peano.lt (fun arg_0__ arg_1__ => IntWord.wordTonat arg_0__) _ (fun arg_0__ arg_1__ go => match arg_0__, arg_1__ with | num_2__, acc => if Bool.Sumbool.sumbool_of_bool (num_2__ GHC.Base.== #0) then acc else match arg_0__, arg_1__ with | bm, acc => let bitmask := lowestBitMask bm in let bi := indexOfTheOnlyBit bitmask in go (Data.Bits.xor bm bitmask) ((f ((prefix GHC.Num.+ (#64 GHC.Num.- #1)) GHC.Num.- bi)) acc) end end) in go (revNat bitmap) z. Admit Obligations. Definition foldr {b : Type} : (Key -> b -> b) -> b -> IntSet -> b := fun f z => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | z', Nil => z' | z', Tip kx bm => foldrBits kx f z' bm | z', Bin _ _ l r => go (go z' r) l end in fun t => match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then go (go z l) r else go (go z r) l | _ => go z t end. Definition toAscList : IntSet -> list Key := foldr cons nil. 
Local Definition Ord__IntSet_compare : IntSet -> IntSet -> comparison := fun s1 s2 => GHC.Base.compare (toAscList s1) (toAscList s2). Local Definition Ord__IntSet_op_zl__ : IntSet -> IntSet -> bool := fun x y => Ord__IntSet_compare x y GHC.Base.== Lt. Local Definition Ord__IntSet_op_zlze__ : IntSet -> IntSet -> bool := fun x y => Ord__IntSet_compare x y GHC.Base./= Gt. Local Definition Ord__IntSet_op_zg__ : IntSet -> IntSet -> bool := fun x y => Ord__IntSet_compare x y GHC.Base.== Gt. Local Definition Ord__IntSet_op_zgze__ : IntSet -> IntSet -> bool := fun x y => Ord__IntSet_compare x y GHC.Base./= Lt. Local Definition Ord__IntSet_max : IntSet -> IntSet -> IntSet := fun x y => if Ord__IntSet_op_zlze__ x y : bool then y else x. Local Definition Ord__IntSet_min : IntSet -> IntSet -> IntSet := fun x y => if Ord__IntSet_op_zlze__ x y : bool then x else y. Program Instance Ord__IntSet : GHC.Base.Ord IntSet := fun _ k__ => k__ {| GHC.Base.op_zl____ := Ord__IntSet_op_zl__ ; GHC.Base.op_zlze____ := Ord__IntSet_op_zlze__ ; GHC.Base.op_zg____ := Ord__IntSet_op_zg__ ; GHC.Base.op_zgze____ := Ord__IntSet_op_zgze__ ; GHC.Base.compare__ := Ord__IntSet_compare ; GHC.Base.max__ := Ord__IntSet_max ; GHC.Base.min__ := Ord__IntSet_min |}. (* Skipping all instances of class `GHC.Show.Show', including `Data.IntSet.InternalWord.Show__IntSet' *) (* Skipping all instances of class `GHC.Read.Read', including `Data.IntSet.InternalWord.Read__IntSet' *) (* Skipping all instances of class `Control.DeepSeq.NFData', including `Data.IntSet.InternalWord.NFData__IntSet' *) Definition bin : Prefix -> Mask -> IntSet -> IntSet -> IntSet := fun arg_0__ arg_1__ arg_2__ arg_3__ => match arg_0__, arg_1__, arg_2__, arg_3__ with | _, _, l, Nil => l | _, _, Nil, r => r | p, m, l, r => Bin p m l r end. 
Definition tip : Prefix -> BitMap -> IntSet := fun arg_0__ arg_1__ => match arg_0__, arg_1__ with | _, num_2__ => if num_2__ GHC.Base.== #0 : bool then Nil else match arg_0__, arg_1__ with | kx, bm => Tip kx bm end end. Fixpoint deleteBM (arg_0__ : Prefix) (arg_1__ : BitMap) (arg_2__ : IntSet) : IntSet := match arg_0__, arg_1__, arg_2__ with | kx, bm, (Bin p m l r as t) => if nomatch kx p m : bool then t else if zero kx m : bool then bin p m (deleteBM kx bm l) r else bin p m l (deleteBM kx bm r) | kx, bm, (Tip kx' bm' as t) => if kx' GHC.Base.== kx : bool then tip kx (bm' Data.Bits..&.(**) Data.Bits.complement bm) else t | _, _, Nil => Nil end. Program Fixpoint difference (arg_0__ arg_1__ : IntSet) {measure (size_nat arg_0__ + size_nat arg_1__)} : IntSet := match arg_0__, arg_1__ with | (Bin p1 m1 l1 r1 as t1), (Bin p2 m2 l2 r2 as t2) => let difference2 := if Bool.Sumbool.sumbool_of_bool (nomatch p1 p2 m2) then t1 else if Bool.Sumbool.sumbool_of_bool (zero p1 m2) then difference t1 l2 else difference t1 r2 in let difference1 := if Bool.Sumbool.sumbool_of_bool (nomatch p2 p1 m1) then t1 else if Bool.Sumbool.sumbool_of_bool (zero p2 m1) then bin p1 m1 (difference l1 t2) r1 else bin p1 m1 l1 (difference r1 t2) in if Bool.Sumbool.sumbool_of_bool (shorter m1 m2) then difference1 else if Bool.Sumbool.sumbool_of_bool (shorter m2 m1) then difference2 else if Bool.Sumbool.sumbool_of_bool (p1 GHC.Base.== p2) then bin p1 m1 (difference l1 l2) (difference r1 r2) else t1 | (Bin _ _ _ _ as t), Tip kx bm => deleteBM kx bm t | (Bin _ _ _ _ as t), Nil => t | (Tip kx bm as t1), t2 => let fix differenceTip arg_12__ := match arg_12__ with | Bin p2 m2 l2 r2 => if Bool.Sumbool.sumbool_of_bool (nomatch kx p2 m2) then t1 else if Bool.Sumbool.sumbool_of_bool (zero kx m2) then differenceTip l2 else differenceTip r2 | Tip kx2 bm2 => if Bool.Sumbool.sumbool_of_bool (kx GHC.Base.== kx2) then tip kx (bm Data.Bits..&.(**) Data.Bits.complement bm2) else t1 | Nil => t1 end in differenceTip t2 | 
Nil, _ => Nil end. Solve Obligations with (termination_by_omega). Definition op_zrzr__ : IntSet -> IntSet -> IntSet := fun m1 m2 => difference m1 m2. Notation "'_\\_'" := (op_zrzr__). Infix "\\" := (_\\_) (at level 99). (* Skipping definition `Data.IntSet.InternalWord.fromListConstr' *) (* Skipping definition `Data.IntSet.InternalWord.intSetDataType' *) Definition null : IntSet -> bool := fun arg_0__ => match arg_0__ with | Nil => true | _ => false end. Definition size : IntSet -> IntWord.Int := let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | acc, Bin _ _ l r => go (go acc l) r | acc, Tip _ bm => acc GHC.Num.+ IntWord.bitcount #0 bm | acc, Nil => acc end in go #0. Definition bitmapOfSuffix : IntWord.Int -> BitMap := fun s => IntWord.shiftLWord #1 s. Definition suffixBitMask : IntWord.Int := #63. Definition suffixOf : IntWord.Int -> IntWord.Int := fun x => x Data.Bits..&.(**) suffixBitMask. Definition bitmapOf : IntWord.Int -> BitMap := fun x => bitmapOfSuffix (suffixOf x). Definition prefixBitMask : IntWord.Int := Data.Bits.complement suffixBitMask. Definition prefixOf : IntWord.Int -> Prefix := fun x => x Data.Bits..&.(**) prefixBitMask. Definition member : Key -> IntSet -> bool := fun x => let fix go arg_0__ := match arg_0__ with | Bin p m l r => if nomatch x p m : bool then false else if zero x m : bool then go l else go r | Tip y bm => andb (prefixOf x GHC.Base.== y) ((bitmapOf x Data.Bits..&.(**) bm) GHC.Base./= #0) | Nil => false end in go. Definition notMember : Key -> IntSet -> bool := fun k => negb GHC.Base.∘ member k. Definition highestBitSet : Nat -> IntWord.Int := fun x => indexOfTheOnlyBit (IntWord.highestBitMask x). Fixpoint unsafeFindMax (arg_0__ : IntSet) : option Key := match arg_0__ with | Nil => None | Tip kx bm => Some (kx GHC.Num.+ highestBitSet bm) | Bin _ _ _ r => unsafeFindMax r end. 
Definition lookupLT : Key -> IntSet -> option Key := fun x t => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | def, Bin p m l r => if nomatch x p m : bool then if x GHC.Base.< p : bool then unsafeFindMax def else unsafeFindMax r else if zero x m : bool then go def l else go l r | def, Tip kx bm => let maskLT := (bitmapOf x GHC.Num.- #1) Data.Bits..&.(**) bm in if prefixOf x GHC.Base.> kx : bool then Some (kx GHC.Num.+ highestBitSet bm) else if andb (prefixOf x GHC.Base.== kx) (maskLT GHC.Base./= #0) : bool then Some (kx GHC.Num.+ highestBitSet maskLT) else unsafeFindMax def | def, Nil => unsafeFindMax def end in let j_12__ := go Nil t in match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then if x GHC.Base.>= #0 : bool then go r l else go Nil r else j_12__ | _ => j_12__ end. Definition lowestBitSet : Nat -> IntWord.Int := fun x => indexOfTheOnlyBit (lowestBitMask x). Fixpoint unsafeFindMin (arg_0__ : IntSet) : option Key := match arg_0__ with | Nil => None | Tip kx bm => Some (kx GHC.Num.+ lowestBitSet bm) | Bin _ _ l _ => unsafeFindMin l end. Definition lookupGT : Key -> IntSet -> option Key := fun x t => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | def, Bin p m l r => if nomatch x p m : bool then if x GHC.Base.< p : bool then unsafeFindMin l else unsafeFindMin def else if zero x m : bool then go r l else go def r | def, Tip kx bm => let maskGT := (GHC.Num.negate (IntWord.shiftLWord (bitmapOf x) #1)) Data.Bits..&.(**) bm in if prefixOf x GHC.Base.< kx : bool then Some (kx GHC.Num.+ lowestBitSet bm) else if andb (prefixOf x GHC.Base.== kx) (maskGT GHC.Base./= #0) : bool then Some (kx GHC.Num.+ lowestBitSet maskGT) else unsafeFindMin def | def, Nil => unsafeFindMin def end in let j_12__ := go Nil t in match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then if x GHC.Base.>= #0 : bool then go Nil l else go l r else j_12__ | _ => j_12__ end. 
Definition lookupLE : Key -> IntSet -> option Key := fun x t => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | def, Bin p m l r => if nomatch x p m : bool then if x GHC.Base.< p : bool then unsafeFindMax def else unsafeFindMax r else if zero x m : bool then go def l else go l r | def, Tip kx bm => let maskLE := ((IntWord.shiftLWord (bitmapOf x) #1) GHC.Num.- #1) Data.Bits..&.(**) bm in if prefixOf x GHC.Base.> kx : bool then Some (kx GHC.Num.+ highestBitSet bm) else if andb (prefixOf x GHC.Base.== kx) (maskLE GHC.Base./= #0) : bool then Some (kx GHC.Num.+ highestBitSet maskLE) else unsafeFindMax def | def, Nil => unsafeFindMax def end in let j_12__ := go Nil t in match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then if x GHC.Base.>= #0 : bool then go r l else go Nil r else j_12__ | _ => j_12__ end. Definition lookupGE : Key -> IntSet -> option Key := fun x t => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | def, Bin p m l r => if nomatch x p m : bool then if x GHC.Base.< p : bool then unsafeFindMin l else unsafeFindMin def else if zero x m : bool then go r l else go def r | def, Tip kx bm => let maskGE := (GHC.Num.negate (bitmapOf x)) Data.Bits..&.(**) bm in if prefixOf x GHC.Base.< kx : bool then Some (kx GHC.Num.+ lowestBitSet bm) else if andb (prefixOf x GHC.Base.== kx) (maskGE GHC.Base./= #0) : bool then Some (kx GHC.Num.+ lowestBitSet maskGE) else unsafeFindMin def | def, Nil => unsafeFindMin def end in let j_12__ := go Nil t in match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then if x GHC.Base.>= #0 : bool then go Nil l else go l r else j_12__ | _ => j_12__ end. Definition singleton : Key -> IntSet := fun x => Tip (prefixOf x) (bitmapOf x). Definition insert : Key -> IntSet -> IntSet := fun x => insertBM (prefixOf x) (bitmapOf x). Definition delete : Key -> IntSet -> IntSet := fun x => deleteBM (prefixOf x) (bitmapOf x). 
Program Fixpoint intersection (arg_0__ arg_1__ : IntSet) {measure (size_nat arg_0__ + size_nat arg_1__)} : IntSet := match arg_0__, arg_1__ with | (Bin p1 m1 l1 r1 as t1), (Bin p2 m2 l2 r2 as t2) => let intersection2 := if Bool.Sumbool.sumbool_of_bool (nomatch p1 p2 m2) then Nil else if Bool.Sumbool.sumbool_of_bool (zero p1 m2) then intersection t1 l2 else intersection t1 r2 in let intersection1 := if Bool.Sumbool.sumbool_of_bool (nomatch p2 p1 m1) then Nil else if Bool.Sumbool.sumbool_of_bool (zero p2 m1) then intersection l1 t2 else intersection r1 t2 in if Bool.Sumbool.sumbool_of_bool (shorter m1 m2) then intersection1 else if Bool.Sumbool.sumbool_of_bool (shorter m2 m1) then intersection2 else if Bool.Sumbool.sumbool_of_bool (p1 GHC.Base.== p2) then bin p1 m1 (intersection l1 l2) (intersection r1 r2) else Nil | (Bin _ _ _ _ as t1), Tip kx2 bm2 => let fix intersectBM arg_11__ := match arg_11__ with | Bin p1 m1 l1 r1 => if Bool.Sumbool.sumbool_of_bool (nomatch kx2 p1 m1) then Nil else if Bool.Sumbool.sumbool_of_bool (zero kx2 m1) then intersectBM l1 else intersectBM r1 | Tip kx1 bm1 => if Bool.Sumbool.sumbool_of_bool (kx1 GHC.Base.== kx2) then tip kx1 (bm1 Data.Bits..&.(**) bm2) else Nil | Nil => Nil end in intersectBM t1 | Bin _ _ _ _, Nil => Nil | Tip kx1 bm1, t2 => let fix intersectBM arg_18__ := match arg_18__ with | Bin p2 m2 l2 r2 => if Bool.Sumbool.sumbool_of_bool (nomatch kx1 p2 m2) then Nil else if Bool.Sumbool.sumbool_of_bool (zero kx1 m2) then intersectBM l2 else intersectBM r2 | Tip kx2 bm2 => if Bool.Sumbool.sumbool_of_bool (kx1 GHC.Base.== kx2) then tip kx1 (bm1 Data.Bits..&.(**) bm2) else Nil | Nil => Nil end in intersectBM t2 | Nil, _ => Nil end. Solve Obligations with (termination_by_omega). 
Fixpoint subsetCmp (arg_0__ arg_1__ : IntSet) : comparison := match arg_0__, arg_1__ with | (Bin p1 m1 l1 r1 as t1), Bin p2 m2 l2 r2 => let subsetCmpEq := match pair (subsetCmp l1 l2) (subsetCmp r1 r2) with | pair Gt _ => Gt | pair _ Gt => Gt | pair Eq Eq => Eq | _ => Lt end in let subsetCmpLt := if nomatch p1 p2 m2 : bool then Gt else if zero p1 m2 : bool then subsetCmp t1 l2 else subsetCmp t1 r2 in if shorter m1 m2 : bool then Gt else if shorter m2 m1 : bool then match subsetCmpLt with | Gt => Gt | _ => Lt end else if p1 GHC.Base.== p2 : bool then subsetCmpEq else Gt | _, _ => match arg_0__, arg_1__ with | Bin _ _ _ _, _ => Gt | Tip kx1 bm1, Tip kx2 bm2 => if kx1 GHC.Base./= kx2 : bool then Gt else if bm1 GHC.Base.== bm2 : bool then Eq else if (bm1 Data.Bits..&.(**) Data.Bits.complement bm2) GHC.Base.== #0 : bool then Lt else Gt | (Tip kx _ as t1), Bin p m l r => if nomatch kx p m : bool then Gt else if zero kx m : bool then match subsetCmp t1 l with | Gt => Gt | _ => Lt end else match subsetCmp t1 r with | Gt => Gt | _ => Lt end | Tip _ _, Nil => Gt | Nil, Nil => Eq | Nil, _ => Lt end end. Definition isProperSubsetOf : IntSet -> IntSet -> bool := fun t1 t2 => match subsetCmp t1 t2 with | Lt => true | _ => false end. Definition match_ : IntWord.Int -> Prefix -> Mask -> bool := fun i p m => (mask i m) GHC.Base.== p. 
Fixpoint isSubsetOf (arg_0__ arg_1__ : IntSet) : bool := match arg_0__, arg_1__ with | (Bin p1 m1 l1 r1 as t1), Bin p2 m2 l2 r2 => if shorter m1 m2 : bool then false else if shorter m2 m1 : bool then andb (match_ p1 p2 m2) (if zero p1 m2 : bool then isSubsetOf t1 l2 else isSubsetOf t1 r2) else andb (p1 GHC.Base.== p2) (andb (isSubsetOf l1 l2) (isSubsetOf r1 r2)) | _, _ => match arg_0__, arg_1__ with | Bin _ _ _ _, _ => false | Tip kx1 bm1, Tip kx2 bm2 => andb (kx1 GHC.Base.== kx2) ((bm1 Data.Bits..&.(**) Data.Bits.complement bm2) GHC.Base.== #0) | (Tip kx _ as t1), Bin p m l r => if nomatch kx p m : bool then false else if zero kx m : bool then isSubsetOf t1 l else isSubsetOf t1 r | Tip _ _, Nil => false | Nil, _ => true end end. Program Fixpoint disjoint (arg_0__ arg_1__ : IntSet) {measure (size_nat arg_0__ + size_nat arg_1__)} : bool := match arg_0__, arg_1__ with | (Bin p1 m1 l1 r1 as t1), (Bin p2 m2 l2 r2 as t2) => let disjoint2 := if Bool.Sumbool.sumbool_of_bool (nomatch p1 p2 m2) then true else if Bool.Sumbool.sumbool_of_bool (zero p1 m2) then disjoint t1 l2 else disjoint t1 r2 in let disjoint1 := if Bool.Sumbool.sumbool_of_bool (nomatch p2 p1 m1) then true else if Bool.Sumbool.sumbool_of_bool (zero p2 m1) then disjoint l1 t2 else disjoint r1 t2 in if Bool.Sumbool.sumbool_of_bool (shorter m1 m2) then disjoint1 else if Bool.Sumbool.sumbool_of_bool (shorter m2 m1) then disjoint2 else if Bool.Sumbool.sumbool_of_bool (p1 GHC.Base.== p2) then andb (disjoint l1 l2) (disjoint r1 r2) else true | (Bin _ _ _ _ as t1), Tip kx2 bm2 => let fix disjointBM arg_11__ := match arg_11__ with | Bin p1 m1 l1 r1 => if Bool.Sumbool.sumbool_of_bool (nomatch kx2 p1 m1) then true else if Bool.Sumbool.sumbool_of_bool (zero kx2 m1) then disjointBM l1 else disjointBM r1 | Tip kx1 bm1 => if Bool.Sumbool.sumbool_of_bool (kx1 GHC.Base.== kx2) then (bm1 Data.Bits..&.(**) bm2) GHC.Base.== #0 else true | Nil => true end in disjointBM t1 | Bin _ _ _ _, Nil => true | Tip kx1 bm1, t2 => let fix 
disjointBM arg_18__ := match arg_18__ with | Bin p2 m2 l2 r2 => if Bool.Sumbool.sumbool_of_bool (nomatch kx1 p2 m2) then true else if Bool.Sumbool.sumbool_of_bool (zero kx1 m2) then disjointBM l2 else disjointBM r2 | Tip kx2 bm2 => if Bool.Sumbool.sumbool_of_bool (kx1 GHC.Base.== kx2) then (bm1 Data.Bits..&.(**) bm2) GHC.Base.== #0 else true | Nil => true end in disjointBM t2 | Nil, _ => true end. Solve Obligations with (termination_by_omega). Program Definition foldl'Bits {a} : IntWord.Int -> (a -> IntWord.Int -> a) -> a -> Nat -> a := fun prefix f z bitmap => let go := HsToCoq.Wf.wfFix2 Coq.Init.Peano.lt (fun arg_0__ arg_1__ => IntWord.wordTonat arg_0__) _ (fun arg_0__ arg_1__ go => match arg_0__, arg_1__ with | num_2__, acc => if Bool.Sumbool.sumbool_of_bool (num_2__ GHC.Base.== #0) then acc else match arg_0__, arg_1__ with | bm, acc => let bitmask := lowestBitMask bm in let bi := indexOfTheOnlyBit bitmask in go (Data.Bits.xor bm bitmask) (f acc (prefix GHC.Num.+ bi)) end end) in go bitmap z. Admit Obligations. Fixpoint filter (predicate : Key -> bool) (t : IntSet) : IntSet := let bitPred := fun kx bm bi => if predicate (kx GHC.Num.+ bi) : bool then bm Data.Bits..|.(**) bitmapOfSuffix bi else bm in match t with | Bin p m l r => bin p m (filter predicate l) (filter predicate r) | Tip kx bm => tip kx (foldl'Bits #0 (bitPred kx) #0 bm) | Nil => Nil end. Definition partition : (Key -> bool) -> IntSet -> (IntSet * IntSet)%type := fun predicate0 t0 => let fix go predicate t := let bitPred := fun kx bm bi => if predicate (kx GHC.Num.+ bi) : bool then bm Data.Bits..|.(**) bitmapOfSuffix bi else bm in match t with | Bin p m l r => let 'pair r1 r2 := go predicate r in let 'pair l1 l2 := go predicate l in pair (bin p m l1 r1) (bin p m l2 r2) | Tip kx bm => let bm1 := foldl'Bits #0 (bitPred kx) #0 bm in pair (tip kx bm1) (tip kx (Data.Bits.xor bm bm1)) | Nil => (pair Nil Nil) end in id (go predicate0 t0). 
Definition split : Key -> IntSet -> (IntSet * IntSet)%type := fun x t => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | x', (Bin p m l r as t') => if match_ x' p m : bool then if zero x' m : bool then let 'pair lt gt := go x' l in pair lt (union gt r) else let 'pair lt gt := go x' r in pair (union lt l) gt else if x' GHC.Base.< p : bool then (pair Nil t') else (pair t' Nil) | x', (Tip kx' bm as t') => let lowerBitmap := bitmapOf x' GHC.Num.- #1 in let higherBitmap := Data.Bits.complement (lowerBitmap GHC.Num.+ bitmapOf x') in if kx' GHC.Base.> x' : bool then (pair Nil t') else if kx' GHC.Base.< prefixOf x' : bool then (pair t' Nil) else pair (tip kx' (bm Data.Bits..&.(**) lowerBitmap)) (tip kx' (bm Data.Bits..&.(**) higherBitmap)) | _, Nil => (pair Nil Nil) end in let j_21__ := let 'pair lt gt := go x t in pair lt gt in match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then if x GHC.Base.>= #0 : bool then let 'pair lt gt := go x l in let lt' := union lt r in pair lt' gt else let 'pair lt gt := go x r in let gt' := union gt l in pair lt gt' else j_21__ | _ => j_21__ end. 
Definition splitMember : Key -> IntSet -> (IntSet * bool * IntSet)%type := fun x t => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | x', (Bin p m l r as t') => if match_ x' p m : bool then if zero x' m : bool then let 'pair (pair lt fnd) gt := go x' l in pair (pair lt fnd) (union gt r) else let 'pair (pair lt fnd) gt := go x' r in pair (pair (union lt l) fnd) gt else if x' GHC.Base.< p : bool then pair (pair Nil false) t' else pair (pair t' false) Nil | x', (Tip kx' bm as t') => let bitmapOfx' := bitmapOf x' in let lowerBitmap := bitmapOfx' GHC.Num.- #1 in let higherBitmap := Data.Bits.complement (lowerBitmap GHC.Num.+ bitmapOfx') in if kx' GHC.Base.> x' : bool then pair (pair Nil false) t' else if kx' GHC.Base.< prefixOf x' : bool then pair (pair t' false) Nil else let gt := tip kx' (bm Data.Bits..&.(**) higherBitmap) in let found := (bm Data.Bits..&.(**) bitmapOfx') GHC.Base./= #0 in let lt := tip kx' (bm Data.Bits..&.(**) lowerBitmap) in pair (pair lt found) gt | _, Nil => pair (pair Nil false) Nil end in let j_22__ := go x t in match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then if x GHC.Base.>= #0 : bool then let 'pair (pair lt fnd) gt := go x l in let lt' := union lt r in pair (pair lt' fnd) gt else let 'pair (pair lt fnd) gt := go x r in let gt' := union gt l in pair (pair lt fnd) gt' else j_22__ | _ => j_22__ end. Definition maxView : IntSet -> option (Key * IntSet)%type := fun t => let fix go arg_0__ := match arg_0__ with | Bin p m l r => let 'pair result r' := go r in pair result (bin p m l r') | Tip kx bm => let 'bi := highestBitSet bm in pair (kx GHC.Num.+ bi) (tip kx (bm Data.Bits..&.(**) Data.Bits.complement (bitmapOfSuffix bi))) | Nil => GHC.Err.error (GHC.Base.hs_string__ "maxView Nil") end in let j_12__ := Some (go t) in match t with | Nil => None | Bin p m l r => if m GHC.Base.< #0 : bool then let 'pair result l' := go l in Some (pair result (bin p m l' r)) else j_12__ | _ => j_12__ end. 
Definition minView : IntSet -> option (Key * IntSet)%type := fun t => let fix go arg_0__ := match arg_0__ with | Bin p m l r => let 'pair result l' := go l in pair result (bin p m l' r) | Tip kx bm => let 'bi := lowestBitSet bm in pair (kx GHC.Num.+ bi) (tip kx (bm Data.Bits..&.(**) Data.Bits.complement (bitmapOfSuffix bi))) | Nil => GHC.Err.error (GHC.Base.hs_string__ "minView Nil") end in let j_12__ := Some (go t) in match t with | Nil => None | Bin p m l r => if m GHC.Base.< #0 : bool then let 'pair result r' := go r in Some (pair result (bin p m l r')) else j_12__ | _ => j_12__ end. (* Skipping definition `Data.IntSet.InternalWord.deleteFindMin' *) (* Skipping definition `Data.IntSet.InternalWord.deleteFindMax' *) (* Skipping definition `Data.IntSet.InternalWord.findMin' *) (* Skipping definition `Data.IntSet.InternalWord.findMax' *) Definition deleteMin : IntSet -> IntSet := Data.Maybe.maybe Nil Data.Tuple.snd GHC.Base.∘ minView. Definition deleteMax : IntSet -> IntSet := Data.Maybe.maybe Nil Data.Tuple.snd GHC.Base.∘ maxView. Definition fromList : list Key -> IntSet := fun xs => let ins := fun t x => insert x t in Data.Foldable.foldl' ins empty xs. Definition toList : IntSet -> list Key := toAscList. Definition map : (Key -> Key) -> IntSet -> IntSet := fun f => fromList GHC.Base.∘ (GHC.Base.map f GHC.Base.∘ toList). Definition fold {b : Type} : (Key -> b -> b) -> b -> IntSet -> b := foldr. 
Program Definition foldr'Bits {a} : IntWord.Int -> (IntWord.Int -> a -> a) -> a -> Nat -> a := fun prefix f z bitmap => let go := HsToCoq.Wf.wfFix2 Coq.Init.Peano.lt (fun arg_0__ arg_1__ => IntWord.wordTonat arg_0__) _ (fun arg_0__ arg_1__ go => match arg_0__, arg_1__ with | num_2__, acc => if Bool.Sumbool.sumbool_of_bool (num_2__ GHC.Base.== #0) then acc else match arg_0__, arg_1__ with | bm, acc => let bitmask := lowestBitMask bm in let bi := indexOfTheOnlyBit bitmask in go (Data.Bits.xor bm bitmask) ((f ((prefix GHC.Num.+ (#64 GHC.Num.- #1)) GHC.Num.- bi)) acc) end end) in go (revNat bitmap) z. Admit Obligations. Definition foldr' {b : Type} : (Key -> b -> b) -> b -> IntSet -> b := fun f z => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | z', Nil => z' | z', Tip kx bm => foldr'Bits kx f z' bm | z', Bin _ _ l r => go (go z' r) l end in fun t => match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then go (go z l) r else go (go z r) l | _ => go z t end. Program Definition foldlBits {a} : IntWord.Int -> (a -> IntWord.Int -> a) -> a -> Nat -> a := fun prefix f z bitmap => let go := HsToCoq.Wf.wfFix2 Coq.Init.Peano.lt (fun arg_0__ arg_1__ => IntWord.wordTonat arg_0__) _ (fun arg_0__ arg_1__ go => match arg_0__, arg_1__ with | num_2__, acc => if Bool.Sumbool.sumbool_of_bool (num_2__ GHC.Base.== #0) then acc else match arg_0__, arg_1__ with | bm, acc => let bitmask := lowestBitMask bm in let bi := indexOfTheOnlyBit bitmask in go (Data.Bits.xor bm bitmask) (f acc (prefix GHC.Num.+ bi)) end end) in go bitmap z. Admit Obligations. Definition foldl {a : Type} : (a -> Key -> a) -> a -> IntSet -> a := fun f z => let fix go arg_0__ arg_1__ := match arg_0__, arg_1__ with | z', Nil => z' | z', Tip kx bm => foldlBits kx f z' bm | z', Bin _ _ l r => go (go z' l) r end in fun t => match t with | Bin _ m l r => if m GHC.Base.< #0 : bool then go (go z r) l else go (go z l) r | _ => go z t end. 
(* Machine-generated by hs-to-coq; reviewer comments only. *)

(* foldl': strict left fold over the whole set; structure mirrors foldl but
   delegates bit-level work to foldl'Bits. *)
Definition foldl' {a : Type} : (a -> Key -> a) -> a -> IntSet -> a :=
  fun f z =>
    let fix go arg_0__ arg_1__ :=
      match arg_0__, arg_1__ with
      | z', Nil => z'
      | z', Tip kx bm => foldl'Bits kx f z' bm
      | z', Bin _ _ l r => go (go z' l) r
      end in
    fun t =>
      match t with
      | Bin _ m l r =>
          if m GHC.Base.< #0 : bool then go (go z r) l else go (go z l) r
      | _ => go z t
      end.

(* elems: ascending list of members (alias of toAscList). *)
Definition elems : IntSet -> list Key := toAscList.

(* toDescList: descending list, obtained by left-folding flip cons. *)
Definition toDescList : IntSet -> list Key := foldl (GHC.Base.flip cons) nil.

(* foldrFB / foldlFB: fusion-builder aliases kept for GHC rewrite rules. *)
Definition foldrFB {b} : (Key -> b -> b) -> b -> IntSet -> b := foldr.

Definition foldlFB {a} : (a -> Key -> a) -> a -> IntSet -> a := foldl.

(* Skipping definition `Data.IntSet.InternalWord.fromAscList' *)

(* Skipping definition `Data.IntSet.InternalWord.fromDistinctAscList' *)

(* Skipping definition `Data.IntSet.InternalWord.showTree' *)

(* Skipping definition `Data.IntSet.InternalWord.showTreeWith' *)

(* Skipping definition `Data.IntSet.InternalWord.showsTree' *)

(* Skipping definition `Data.IntSet.InternalWord.showsTreeHang' *)

(* Skipping definition `Data.IntSet.InternalWord.showBin' *)

(* Skipping definition `Data.IntSet.InternalWord.showWide' *)

(* Skipping definition `Data.IntSet.InternalWord.showsBars' *)

(* Skipping definition `Data.IntSet.InternalWord.showsBitMap' *)

(* Skipping definition `Data.IntSet.InternalWord.showBitMap' *)

(* Skipping definition `Data.IntSet.InternalWord.node' *)

(* Skipping definition `Data.IntSet.InternalWord.withBar' *)

(* Skipping definition `Data.IntSet.InternalWord.withEmpty' *)

(* splitRoot: decompose the root into its (at most two) subtrees, ordered by
   key; the negative-mask case again swaps left and right. *)
Definition splitRoot : IntSet -> list IntSet :=
  fun arg_0__ =>
    match arg_0__ with
    | Nil => nil
    | (Tip _ _ as x) => cons x nil
    | Bin _ m l r =>
        if m GHC.Base.< #0 : bool
        then cons r (cons l nil)
        else cons l (cons r nil)
    end.

(* Notations for the set-difference operator (Haskell's \\). *)
Module Notations.
Notation "'_Data.IntSet.InternalWord.\\_'" := (op_zrzr__).
Infix "Data.IntSet.InternalWord.\\" := (_\\_) (at level 99).
End Notations.
(* External variables: Bool.Sumbool.sumbool_of_bool Eq Gt Lt None Some Type andb bool comparison cons false id list negb nil op_zp__ op_zt__ option orb pair size_nat true Coq.Init.Peano.lt Data.Bits.complement Data.Bits.op_zizazi__ Data.Bits.op_zizbzi__ Data.Bits.xor Data.Foldable.Foldable Data.Foldable.foldl' Data.Maybe.maybe Data.Tuple.snd GHC.Base.Eq_ GHC.Base.Monoid GHC.Base.Ord GHC.Base.Semigroup GHC.Base.compare GHC.Base.compare__ GHC.Base.flip GHC.Base.map GHC.Base.mappend__ GHC.Base.max__ GHC.Base.mconcat__ GHC.Base.mempty__ GHC.Base.min__ GHC.Base.op_z2218U__ GHC.Base.op_zeze__ GHC.Base.op_zeze____ GHC.Base.op_zg__ GHC.Base.op_zg____ GHC.Base.op_zgze__ GHC.Base.op_zgze____ GHC.Base.op_zl__ GHC.Base.op_zl____ GHC.Base.op_zlze____ GHC.Base.op_zlzlzgzg__ GHC.Base.op_zlzlzgzg____ GHC.Base.op_zsze__ GHC.Base.op_zsze____ GHC.Err.error GHC.Num.fromInteger GHC.Num.negate GHC.Num.op_zm__ GHC.Num.op_zp__ HsToCoq.Err.Build_Default HsToCoq.Err.Default HsToCoq.Wf.wfFix2 IntWord.Int IntWord.Word IntWord.bitcount IntWord.highestBitMask IntWord.indexOfTheOnlyBit IntWord.intFromWord IntWord.shiftLWord IntWord.shiftRWord IntWord.wordFromInt IntWord.wordTonat *)
{ "alphanum_fraction": null, "author": "plclub", "avg_line_length": null, "converted": null, "ext": null, "file": null, "hexsha": null, "include": null, "lang": null, "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": "github-repos/coq/plclub-hs-to-coq/hs-to-coq-e6401f6f054a2c1ff5e63a17ab8af2bcd5861c9c/examples/containers/lib/Data/IntSet/InternalWord.v", "reason": null, "repo": "hs-to-coq", "save_path": "github-repos/coq/plclub-hs-to-coq", "sha": "e6401f6f054a2c1ff5e63a17ab8af2bcd5861c9c", "size": null }
[STATEMENT] lemma monad_alt_optionT' [locale_witness]: "monad_alt return (bind :: ('a option, 'm) bind) alt \<Longrightarrow> monad_alt return (bind :: ('a, ('a, 'm) optionT) bind) alt" [PROOF STATE] proof (prove) goal (1 subgoal): 1. monad_alt Monad_Overloading.return Monad_Overloading.bind alt \<Longrightarrow> monad_alt Monad_Overloading.return Monad_Overloading.bind alt [PROOF STEP] unfolding return_optionT_def bind_optionT_def alt_optionT_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. monad_alt Monad_Overloading.return Monad_Overloading.bind alt \<Longrightarrow> monad_alt (return_option Monad_Overloading.return) (bind_option Monad_Overloading.return Monad_Overloading.bind) (alt_option alt) [PROOF STEP] by(rule monad_alt_optionT)
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Monomorphic_Monad_Monad_Overloading", "hexsha": null, "include": null, "lang": null, "length": 2, "llama_tokens": 273, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import gym
import numpy as np

import dso.task.control # Registers custom and third-party environments
from dso.program import Program, from_str_tokens
from dso.library import Library
from dso.functions import create_tokens
import dso.task.control.utils as U


REWARD_SEED_SHIFT = int(1e6) # Reserve the first million seeds for evaluation

# Pre-computed values for reward scale: [r_min, r_max] per environment,
# used to map average episodic reward into [0, 1].
REWARD_SCALE = {
    "CustomCartPoleContinuous-v0" : [0.0,1000.0],
    "MountainCarContinuous-v0" : [0.0,93.95],
    "Pendulum-v0" : [-1300.0,-147.56],
    "InvertedDoublePendulumBulletEnv-v0" : [0.0,9357.77],
    "InvertedPendulumSwingupBulletEnv-v0" : [0.0,891.34],
    "LunarLanderContinuous-v2" : [0.0,272.65],
    "HopperBulletEnv-v0" : [0.0,2741.86],
    "ReacherBulletEnv-v0" : [-5.0, 19.05],
    "BipedalWalker-v2" : [-60.0, 312.0]
}


def make_control_task(function_set, env, action_spec, algorithm=None,
                      anchor=None, n_episodes_train=5, n_episodes_test=1000,
                      success_score=None, protected=False, env_kwargs=None,
                      fix_seeds=False, episode_seed_shift=0, reward_scale=True):
    """
    Factory function for episodic reward function of a reinforcement learning
    environment with continuous actions. This includes closures for the
    environment, an anchor model, and fixed symbolic actions.

    Parameters
    ----------
    function_set : list
        List of allowable functions.

    env : str
        Name of Gym environment, e.g. "Pendulum-v0" or "my_module:MyEnv-v0".

    action_spec : list
        List of action specifications: None, "anchor", or a list of tokens.

    algorithm : str or None
        Name of algorithm corresponding to anchor path, or None to use default
        anchor for given environment.

    anchor : str or None
        Path to anchor model, or None to use default anchor for given
        environment.

    n_episodes_train : int
        Number of episodes to run during training.

    n_episodes_test : int
        Number of episodes to run during testing.

    success_score : float or None
        Episodic reward threshold used by evaluate() to compute the success
        rate.  NOTE(review): if left as None while evaluate() is called, the
        comparison `r_episodes >= success_score` is ill-defined — confirm
        callers always supply it.

    protected : bool
        Whether or not to use protected operators.

    env_kwargs : dict
        Dictionary of environment kwargs passed to gym.make().

    fix_seeds : bool
        If True, environment uses the first n_episodes_train seeds for reward
        and the next n_episodes_test seeds for evaluation. This makes the task
        deterministic.

    episode_seed_shift : int
        Training episode seeds start at episode_seed_shift * 100 +
        REWARD_SEED_SHIFT. This has no effect if fix_seeds == False.

    reward_scale : list or bool
        If list: list of [r_min, r_max] used to scale rewards. If True, use
        default values in REWARD_SCALE. If False, don't scale rewards.

    Returns
    -------

    See dso.task.task.make_task().
    """

    # `env` is rebound below from name to environment instance; keep the name.
    env_name = env
    if env_kwargs is None:
        env_kwargs = {}

    # Create the environment
    env = gym.make(env_name, **env_kwargs)

    # Determine reward scaling
    if isinstance(reward_scale, list):
        assert len(reward_scale) == 2, "Reward scale should be length 2: min, max."
        r_min, r_max = reward_scale
    elif reward_scale:
        if env_name in REWARD_SCALE:
            r_min, r_max = REWARD_SCALE[env_name]
        else:
            raise RuntimeError("{} has no default values for reward_scale. Use reward_scale=False or specify reward_scale=[r_min, r_max].".format(env_name))
    else:
        r_min = r_max = None

    # HACK: Wrap pybullet envs in TimeFeatureWrapper
    # TBD: Load the Zoo hyperparameters, including wrapper features, not just the model.
    # Note Zoo is not implemented as a package, which might make this tedious
    if "Bullet" in env_name:
        env = U.TimeFeatureWrapper(env)

    # Set the library (need to do this now in case there are symbolic actions)
    stochastic = not fix_seeds
    n_input_var = env.observation_space.shape[0]
    tokens = create_tokens(n_input_var, function_set, protected)
    library = Library(tokens)
    Program.library = library

    # Configuration assertions
    assert len(env.observation_space.shape) == 1, "Only support vector observation spaces."
    assert isinstance(env.action_space, gym.spaces.Box), "Only supports continuous action spaces."
    n_actions = env.action_space.shape[0]
    assert n_actions == len(action_spec), "Received specifications for {} action dimensions; expected {}.".format(len(action_spec), n_actions)
    assert len([v for v in action_spec if v is None]) <= 1, "No more than 1 action_spec element can be None."
    assert int(algorithm is None) + int(anchor is None) in [0, 2], "Either none or both of (algorithm, anchor) must be None."

    # Load the anchor model (if applicable)
    if "anchor" in action_spec:
        # Load custom anchor, if provided, otherwise load default
        if algorithm is not None and anchor is not None:
            U.load_model(algorithm, anchor)
        else:
            U.load_default_model(env_name)
        model = U.model
    else:
        model = None

    # Generate symbolic policies and determine action dimension
    symbolic_actions = {}
    action_dim = None
    for i, spec in enumerate(action_spec):

        # Action taken from anchor policy
        if spec == "anchor":
            continue

        # Action dimnension being learned
        if spec is None:
            action_dim = i

        # Pre-specified symbolic policy
        elif isinstance(spec, list) or isinstance(spec, str):
            str_tokens = spec
            p = from_str_tokens(str_tokens, optimize=False, skip_cache=True)
            symbolic_actions[i] = p

        else:
            assert False, "Action specifications must be None, a str/list of tokens, or 'anchor'."

    def get_action(p, obs):
        """Helper function to get an action from Program p according to obs,
        since Program.execute() requires 2D arrays but we only want 1D."""

        action = p.execute(np.array([obs]))[0]
        return action

    def run_episodes(p, n_episodes, evaluate):
        """Runs n_episodes episodes and returns each episodic reward."""

        # Run the episodes and return the average episodic reward
        r_episodes = np.zeros(n_episodes, dtype=np.float64) # Episodic rewards for each episode
        for i in range(n_episodes):

            # During evaluation, always use the same seeds
            if evaluate:
                env.seed(i)
            elif fix_seeds:
                env.seed(i + (episode_seed_shift * 100) + REWARD_SEED_SHIFT)
            obs = env.reset()
            done = False
            while not done:

                # Compute anchor actions
                if model is not None:
                    action, _ = model.predict(obs)
                else:
                    action = np.zeros(env.action_space.shape, dtype=np.float32)

                # Replace fixed symbolic actions
                for j, fixed_p in symbolic_actions.items():
                    action[j] = get_action(fixed_p, obs)

                # Replace symbolic action with current program
                if action_dim is not None:
                    action[action_dim] = get_action(p, obs)

                # Replace NaNs and clip infinites
                action[np.isnan(action)] = 0.0 # Replace NaNs with zero
                action = np.clip(action, env.action_space.low, env.action_space.high)

                obs, r, done, _ = env.step(action)
                r_episodes[i] += r

        return r_episodes

    def reward(p):
        """Training reward: mean episodic reward, optionally scaled to [0, 1]."""

        # Run the episodes
        r_episodes = run_episodes(p, n_episodes_train, evaluate=False)

        # Return the mean
        r_avg = np.mean(r_episodes)

        # Scale rewards to [0, 1]
        if r_min is not None:
            r_avg = (r_avg - r_min) / (r_max - r_min)

        return r_avg

    def evaluate(p):
        """Evaluation statistics over n_episodes_test fixed-seed episodes."""

        # Run the episodes
        r_episodes = run_episodes(p, n_episodes_test, evaluate=True)

        # Compute eval statistics
        r_avg_test = np.mean(r_episodes)
        success_rate = np.mean(r_episodes >= success_score)
        success = success_rate == 1.0

        info = {
            "r_avg_test" : r_avg_test,
            "success_rate" : success_rate,
            "success" : success
        }
        return info

    # Define name for task, based on environment and learned action dimension
    name = env_name
    if action_dim is not None:
        name += "_a{}".format(action_dim)

    extra_info = {
        "symbolic_actions" : symbolic_actions
    }

    task = dso.task.Task(reward_function=reward,
                         evaluate=evaluate,
                         library=library,
                         stochastic=stochastic,
                         task_type='control',
                         name=name,
                         extra_info=extra_info)

    return task
{ "alphanum_fraction": 0.6373375883, "author": null, "avg_line_length": 33.6168582375, "converted": null, "ext": "py", "file": null, "hexsha": "2445d9b4e43f0ca35d924e067b2e8c34f1e13842", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 18, "max_forks_repo_forks_event_max_datetime": "2021-06-10T13:27:51.000Z", "max_forks_repo_forks_event_min_datetime": "2020-05-01T11:45:38.000Z", "max_forks_repo_head_hexsha": "9dc2086f5d219fdfab5aaae2485e11b693da4d4a", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "ryok/deep-symbolic-optimization", "max_forks_repo_path": "dso/dso/task/control/control.py", "max_issues_count": 9, "max_issues_repo_head_hexsha": "9dc2086f5d219fdfab5aaae2485e11b693da4d4a", "max_issues_repo_issues_event_max_datetime": "2021-06-04T18:16:29.000Z", "max_issues_repo_issues_event_min_datetime": "2020-02-20T04:00:50.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "ryok/deep-symbolic-optimization", "max_issues_repo_path": "dso/dso/task/control/control.py", "max_line_length": 156, "max_stars_count": 65, "max_stars_repo_head_hexsha": "9dc2086f5d219fdfab5aaae2485e11b693da4d4a", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "ryok/deep-symbolic-optimization", "max_stars_repo_path": "dso/dso/task/control/control.py", "max_stars_repo_stars_event_max_datetime": "2021-05-28T11:30:18.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-27T06:20:04.000Z", "num_tokens": 2028, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 8774 }
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import Counter
import os
from argparse import Namespace

# Hyperparameters and file locations for the word-level LSTM generator.
flags = Namespace(
    train_file='dane_disco.txt',
    seq_size=3,
    batch_size=120,
    embedding_size=84,
    lstm_size=384,
    gradients_norm=1,
    predict_top_k=1,
    checkpoint_path='checkpoint',
)

# =============================================================================
# Processing Data for initial words in chain, generated through LSTM
# =============================================================================
def generate_initial_words(train_file):
    """Pick a random capitalized seed word from the corpus in `train_file`.

    A word qualifies when it is title-cased, does not start with one of the
    punctuation/digit characters below, and does not end with '.', '”' or ')'.
    """
    # BUG FIX: the backslash entry was written as '\' which escaped the closing
    # quote and made the whole file a SyntaxError; it must be the escaped '\\'.
    list_char = ['(', ')', ',', '.', '„', '/', '\\', '1', '2', '3', '4', '5', '?', '!']
    with open(train_file, 'r') as f:
        text = f.read()
    text = text.split()
    # Remove duplicates (set) and sort for determinism of the candidate pool.
    text = sorted(list(set(text)))
    new_text = []
    for word in text:
        if word.istitle() and word[0] not in list_char and word[-1] not in ['.', '”', ')']:
            new_text.append(word)
    del text
    return np.random.choice(new_text)

# =============================================================================
# Read and processing Data
# =============================================================================
def get_data_from_file(train_file, batch_size, seq_size):
    """Tokenize the corpus and build (input, target) integer batches.

    Returns (int_to_vocab, vocab_to_int, n_vocab, in_text, out_text), where
    in_text/out_text are arrays of shape (batch_size, num_batches * seq_size)
    and out_text is in_text shifted left by one token (wrapping at the end).
    """
    with open(train_file, 'r') as f:
        text = f.read()
    text = text.split()

    word_counts = Counter(text)
    # Most frequent words get the smallest integer ids.
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = {k: w for k, w in enumerate(sorted_vocab)}
    vocab_to_int = {w: k for k, w in int_to_vocab.items()}
    n_vocab = len(int_to_vocab)

    print('Vocabulary size', n_vocab)

    int_text = [vocab_to_int[w] for w in text]
    num_batches = int(len(int_text) / (seq_size * batch_size))
    in_text = int_text[:num_batches * batch_size * seq_size]
    out_text = np.zeros_like(in_text)
    # Targets are the inputs shifted by one; the last target wraps to token 0.
    out_text[:-1] = in_text[1:]
    out_text[-1] = in_text[0]
    in_text = np.reshape(in_text, (batch_size, -1))
    out_text = np.reshape(out_text, (batch_size, -1))
    return int_to_vocab, vocab_to_int, n_vocab, in_text, out_text


def get_batches(in_text, out_text, batch_size, seq_size):
    """Yield successive (input, target) column slices of width `seq_size`."""
    num_batches = np.prod(in_text.shape) // (seq_size * batch_size)
    for i in range(0, num_batches * seq_size, seq_size):
        yield in_text[:, i:i+seq_size], out_text[:, i:i+seq_size]

# =============================================================================
# Model
# =============================================================================
class RNNModule(nn.Module):
    """Embedding -> single-layer LSTM -> linear projection to vocabulary."""

    def __init__(self, n_vocab, seq_size, embedding_size, lstm_size):
        super(RNNModule, self).__init__()
        self.seq_size = seq_size
        self.lstm_size = lstm_size
        self.embedding = nn.Embedding(n_vocab, embedding_size)
        self.lstm = nn.LSTM(embedding_size,
                            lstm_size,
                            batch_first=True)
        self.dense = nn.Linear(lstm_size, n_vocab)

    def forward(self, x, prev_state):
        # x: (batch, seq) integer token ids; returns per-token vocab logits.
        embed = self.embedding(x)
        output, state = self.lstm(embed, prev_state)
        logits = self.dense(output)
        return logits, state

    def zero_state(self, batch_size):
        # (h0, c0) for a single-layer LSTM: shape (1, batch, lstm_size).
        return (torch.zeros(1, batch_size, self.lstm_size),
                torch.zeros(1, batch_size, self.lstm_size))

# =============================================================================
# Loss
# =============================================================================
def get_loss_and_train_op(net, lr=0.001):
    """Return the cross-entropy criterion and an Adam optimizer for `net`."""
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    return criterion, optimizer

# =============================================================================
# Main
# =============================================================================
def main():
    """Train the LSTM on `flags.train_file`, periodically sampling text."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    int_to_vocab, vocab_to_int, n_vocab, in_text, out_text = get_data_from_file(
        flags.train_file, flags.batch_size, flags.seq_size)

    net = RNNModule(n_vocab, flags.seq_size,
                    flags.embedding_size, flags.lstm_size)
    net = net.to(device)

    criterion, optimizer = get_loss_and_train_op(net, 0.01)

    iteration = 0

    for e in range(50):
        batches = get_batches(in_text, out_text, flags.batch_size, flags.seq_size)
        state_h, state_c = net.zero_state(flags.batch_size)

        # Transfer data to GPU
        state_h = state_h.to(device)
        state_c = state_c.to(device)
        for x, y in batches:
            iteration += 1

            # Tell it we are in training mode
            net.train()

            # Reset all gradients
            optimizer.zero_grad()

            # Transfer data to GPU
            x = torch.tensor(x).to(device)
            y = torch.tensor(y).to(device)

            logits, (state_h, state_c) = net(x, (state_h, state_c))
            loss = criterion(logits.transpose(1, 2), y)

            # Detach so gradients do not flow across batch boundaries.
            state_h = state_h.detach()
            state_c = state_c.detach()

            loss_value = loss.item()

            # Perform back-propagation
            loss.backward()

            # Update the network's parameters
            _ = torch.nn.utils.clip_grad_norm_(
                net.parameters(), flags.gradients_norm)

            optimizer.step()

            if iteration % 100 == 0:
                # BUG FIX: the denominator said 200 while the loop runs 50 epochs.
                print('Epoch: {}/{}'.format(e, 50),
                      'Iteration: {}'.format(iteration),
                      'Loss: {}'.format(loss_value))

            if iteration % 150 == 0:
                predict(device, net, generate_initial_words(flags.train_file),
                        n_vocab, vocab_to_int, int_to_vocab, top_k=5)
                torch.save(net, 'checkpoint_pt/model-{}'.format(iteration))

# =============================================================================
# Predict
# =============================================================================
def predict(device, net, word, n_vocab, vocab_to_int, int_to_vocab, top_k):
    """Sample ~100 words from `net` seeded with `word` and print them.

    BUG FIX: the original iterated `for w in words:` while appending to
    `words` inside the loop (and indexed `vocab_to_int[word]` instead of `w`),
    which grows the list on every pass and never terminates.  The loop is
    replaced by a single warm-up step followed by a fixed-length sampling loop,
    mirroring the structure of `generate_text`.
    """
    net.eval()

    state_h, state_c = net.zero_state(1)
    state_h = state_h.to(device)
    state_c = state_c.to(device)

    words = [word]

    # Warm up the hidden state with the seed word.
    ix = torch.tensor([[vocab_to_int[word]]]).to(device)
    output, (state_h, state_c) = net(ix, (state_h, state_c))
    _, top_ix = torch.topk(output[0], k=top_k)
    choices = top_ix.tolist()
    choice = np.random.choice(choices[0])
    words.append(int_to_vocab[choice])

    for _ in range(100):
        ix = torch.tensor([[choice]]).to(device)
        output, (state_h, state_c) = net(ix, (state_h, state_c))
        _, top_ix = torch.topk(output[0], k=top_k)
        choices = top_ix.tolist()
        choice = np.random.choice(choices[0])
        words.append(int_to_vocab[choice])

    print(' '.join(words))
# =============================================================================
# Run Program
# =============================================================================
if __name__ == '__main__':
    main()

# =============================================================================
# Generate Songs
# =============================================================================
def generate_text(device, net, word, len_generate, n_vocab, vocab_to_int, int_to_vocab, top_k):
    """Sample `len_generate` words from `net`, seeded with `word`.

    Feeds the seed once to warm the hidden state, then repeatedly samples a
    word from the top-k logits and feeds it back.  Returns the space-joined
    text (seed included).
    """
    net.eval()

    state_h, state_c = net.zero_state(1)
    state_h = state_h.to(device)
    state_c = state_c.to(device)

    words = []
    words.append(word)

    ix = torch.tensor([[vocab_to_int[word]]]).to(device)
    output, (state_h, state_c) = net(ix, (state_h, state_c))
    _, top_ix = torch.topk(output[0], k=top_k)
    choices = top_ix.tolist()
    choice = np.random.choice(choices[0])
    words.append(int_to_vocab[choice])

    for _ in range(len_generate - 1):
        ix = torch.tensor([[choice]]).to(device)
        output, (state_h, state_c) = net(ix, (state_h, state_c))
        _, top_ix = torch.topk(output[0], k=top_k)
        choices = top_ix.tolist()
        choice = np.random.choice(choices[0])
        words.append(int_to_vocab[choice])

    return ' '.join(words)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
int_to_vocab, vocab_to_int, n_vocab, in_text, out_text = get_data_from_file(
    flags.train_file, flags.batch_size, flags.seq_size)
net = torch.load('checkpoint_pt/model-4200')

len_generate = 125
num_song_text_generate = 4
name_file = 'Generate_Text2.txt'


def GenSong(num_song):
    """Write `num_song` generated songs to `name_file`, one per blank-line
    separated block.

    BUG FIX: the original ignored its `num_song` argument and always looped
    over the global `num_song_text_generate`; the parameter is now honored
    (callers passing `num_song_text_generate` see identical behavior).
    """
    with open(name_file, 'w') as f:
        for num in range(num_song):
            sentence = generate_text(device, net,
                                     generate_initial_words(flags.train_file),
                                     len_generate, n_vocab, vocab_to_int,
                                     int_to_vocab, top_k=1)
            f.write("%s\n\n" % sentence)


GenSong(num_song_text_generate)
{ "alphanum_fraction": 0.5347743308, "author": null, "avg_line_length": 37.9957446809, "converted": null, "ext": "py", "file": null, "hexsha": "1006d6b801cbf64e8668487910e65f5b1517e75c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "dfa5fd1a79705d19d6cc0d1163a7bccd30c5387b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Apogeum12/LSTMGenerator", "max_forks_repo_path": "main.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "dfa5fd1a79705d19d6cc0d1163a7bccd30c5387b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Apogeum12/LSTMGenerator", "max_issues_repo_path": "main.py", "max_line_length": 151, "max_stars_count": null, "max_stars_repo_head_hexsha": "dfa5fd1a79705d19d6cc0d1163a7bccd30c5387b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Apogeum12/LSTMGenerator", "max_stars_repo_path": "main.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1952, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 8929 }
/********************************************************************** * Copyright (c) 2008-2014, Alliance for Sustainable Energy. * All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********************************************************************/ #ifndef UTILITIES_IDD_IDDFILE_IMPL_HPP #define UTILITIES_IDD_IDDFILE_IMPL_HPP #include <utilities/UtilitiesAPI.hpp> #include <utilities/idd/IddObject.hpp> #include <utilities/core/Logger.hpp> #include <string> #include <ostream> #include <vector> #include <boost/algorithm/string.hpp> namespace openstudio{ namespace detail{ /// Implementation of IddFile class UTILITIES_API IddFile_Impl { public: /** @name Constructors */ //@{ /// default constructor IddFile_Impl(); //@} /** @name Getters */ //@{ /// get version std::string version() const; /// get header std::string header() const; /// get all objects std::vector<IddObject> objects() const; /// get all groups in file (e.g. "" and "Simulation Parameters") std::vector<std::string> groups() const; /// get all objects in group (e.g. "Simulation Parameters") std::vector<IddObject> getObjectsInGroup(const std::string& group) const; /** Get the IddObjects that match objectRegex. 
*/ std::vector<IddObject> getObjects(const boost::regex& objectRegex) const; /** If possible, returns the version IddObject for this IddFile. */ boost::optional<IddObject> versionObject() const; /// get object by name boost::optional<IddObject> getObject(const std::string& objectName) const; /// get object by type boost::optional<IddObject> getObject(IddObjectType objectType) const; /// required objects, occur at least once std::vector<IddObject> requiredObjects() const; /// unique objects, occur at most one time std::vector<IddObject> uniqueObjects() const; //@} /** @name Setters */ //@{ /// set version void setVersion(const std::string& version); /// set header void setHeader(const std::string& header); /// add an object void addObject(const IddObject& object); //@} /** @name Serialization */ //@{ /// parse text from input stream to construct an IddFile_Impl static boost::shared_ptr<IddFile_Impl> load(std::istream& is); /// print std::ostream& print(std::ostream& os) const; //@} private: /// Parse file text to populate this IddFile. void parse(std::istream& is); /// Version string required to be at top of any IddFile. std::string m_version; /// The first comment block in an IddFile is its header. std::string m_header; /// The vector of IddObjects that constitute this IddFile. std::vector<IddObject> m_objects; /// Cache the Version IddObject mutable boost::optional<IddObject> m_versionObject; /// Configure logging. REGISTER_LOGGER("utilities.idd.IddFile"); }; } // detail } // openstudio #endif // UTILITIES_IDD_IDDFILE_IMPL_HPP
{ "alphanum_fraction": 0.6366915743, "author": null, "avg_line_length": 29.1804511278, "converted": null, "ext": "hpp", "file": null, "hexsha": "4f97eebc40515552ee42816c3b0e9faf7b584e0d", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "878f94bebf6f025445d1373e8b2304ececac16d8", "max_forks_repo_licenses": [ "blessing" ], "max_forks_repo_name": "ORNL-BTRIC/OpenStudio", "max_forks_repo_path": "openstudiocore/src/utilities/idd/IddFile_Impl.hpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "878f94bebf6f025445d1373e8b2304ececac16d8", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "blessing" ], "max_issues_repo_name": "ORNL-BTRIC/OpenStudio", "max_issues_repo_path": "openstudiocore/src/utilities/idd/IddFile_Impl.hpp", "max_line_length": 82, "max_stars_count": null, "max_stars_repo_head_hexsha": "878f94bebf6f025445d1373e8b2304ececac16d8", "max_stars_repo_licenses": [ "blessing" ], "max_stars_repo_name": "ORNL-BTRIC/OpenStudio", "max_stars_repo_path": "openstudiocore/src/utilities/idd/IddFile_Impl.hpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 893, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 3881 }
'''
All the functions linked to the prediction behaviour of the robot
'''
import algebra as alg
from math import sqrt,cos,sin,acos,pi,atan2
import numpy as np

# Prediction horizon used when extrapolating the robot's position.
deltaT = 0.4 #s

def predictionNextPosition(linearSpeed,position):
    '''
    Return the predicted next position of the robot
    Considering its current position and speed
    '''
    # position + linearSpeed * deltaT (numpy handles scalar or vector speed)
    distance = np.dot(linearSpeed,deltaT)
    return np.add(position,distance)

def neareastPoint(begin,end,point):
    '''
    Return the neareast point of the robot on a certain subPath (segment)
    using the orthogonal projection
    '''
    # Imported lazily to avoid a circular import with pathFollowingAlgorithm.
    from pathFollowingAlgorithm import radius
    v = np.subtract(end,begin)
    normV = np.linalg.norm(v)
    #Compute the nearest point coordinate thanks to orthogonal projection
    # NOTE(review): divides by normV — undefined when begin == end; the
    # projection is also not clamped to the segment, so (xn, yn) can lie
    # beyond begin or end.  TODO confirm callers guarantee distinct endpoints.
    distance = ((point[0]-begin[0])*v[0] + (point[1]-begin[1])*v[1])/normV
    xn = begin[0] + (distance/normV)*v[0]
    yn = begin[1] + (distance/normV)*v[1]
    distanceToPath = np.linalg.norm(np.subtract(point,[xn,yn]))
    #if the robot is too far from the path
    if abs(distanceToPath) > radius :
        return[xn,yn] # return its nearest point coordinate
    else :
        return point #return its coordinate

def smallestDistance(begin,end,point):
    '''
    Return the smallest distance between the robot and the subpath
    as well as the coordinate of the nearest point
    '''
    v = alg.euclideanVector(begin,end)
    normV = np.linalg.norm(v)
    #Compute the nearest point coordinate thanks to orthogonal projection
    # Same unclamped orthogonal projection as neareastPoint, but always
    # returns [distance, nearest_point] instead of applying the radius test.
    distance = ((point[0]-begin[0])*v[0] + (point[1]-begin[1])*v[1])/normV
    xn = begin[0] + (distance/normV)*v[0]
    yn = begin[1] + (distance/normV)*v[1]
    n = [xn,yn]
    dx = xn - point[0]
    dy = yn - point[1]
    return [sqrt(dx**2+dy**2),n]

'''Compute the position of the target using the neareast point on a subtrajectory
and its euclidean vector'''
def positionTarget(neareastPoint,euclideanvector):
    # Place the target deltaD ahead of the nearest point, along the path.
    from pathFollowingAlgorithm import deltaD
    euclideanvector = alg.unitVector(euclideanvector)
    vDistance = np.dot(euclideanvector,deltaD)
    return np.add(neareastPoint,vDistance)

'''Return the vector between the robot and its target'''
def orientationTarget(pRobot,pTarget):
    return alg.unitVector(alg.euclideanVector(pRobot,pTarget))

'''Return the closest subPath from the robot'''
def closestPath(lastSubPath,pathPositions,point):
    [number,closest] = [-1,1000] #path number, distance to robot
    #Every path after the one currently considered and except the last one
    if lastSubPath < len(pathPositions)-2:
        for i in range(lastSubPath,(len(pathPositions)-1)):
            [d,n] = smallestDistance(pathPositions[i],pathPositions[i+1],point)
            # NOTE(review): t is computed but never used in this branch.
            t = positionTarget(n,alg.euclideanVector(pathPositions[i],pathPositions[i+1]))
            #The robot should follow a close path as much as possible
            begin = pathPositions[lastSubPath]
            end = pathPositions[lastSubPath+1]
            if i == lastSubPath and alg.is_close_to_path(n,end,begin):
                if d < closest+1 :
                    closest = d
                    number = i
            '''else : #If the closest paths are not ideal, let's consider the other paths
                if d < closest+1 and alg.is_close_to_path(n,pathPositions[i],pathPositions[i+1]):
                    closest = d
                    number = i'''
        if number != -1 and closest != 1000: #if the robot found a patht to follw
            [d,n] = smallestDistance(pathPositions[number],pathPositions[number+1],point)
            return number
        else : # we move to the next path
            return lastSubPath+1
    else : #we stay on the last path
        return lastSubPath

if __name__ == "__main__":
    # Minimal smoke run of the prediction pipeline on a toy segment.
    begin = [0,0]
    end = [10,10]
    p = [5,5]
    linearSpeed = 50
    p = predictionNextPosition(linearSpeed,p)
    n = neareastPoint(begin,end,p)
    d = smallestDistance(begin,end,p)
    t = positionTarget(n,np.subtract(end,begin))
    o = orientationTarget(p,t)
{ "alphanum_fraction": 0.6502826247, "author": null, "avg_line_length": 38.0280373832, "converted": null, "ext": "py", "file": null, "hexsha": "7005e066dab9d5308d5a6191bcc6107d3eca0b66", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "7687023d2bb3ea486f3c54d24d8c575cbae2b250", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kboubakri/Cozmo_FollowPath", "max_forks_repo_path": "prediction.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "7687023d2bb3ea486f3c54d24d8c575cbae2b250", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kboubakri/Cozmo_FollowPath", "max_issues_repo_path": "prediction.py", "max_line_length": 98, "max_stars_count": 1, "max_stars_repo_head_hexsha": "7687023d2bb3ea486f3c54d24d8c575cbae2b250", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kboubakri/Cozmo_FollowPath", "max_stars_repo_path": "prediction.py", "max_stars_repo_stars_event_max_datetime": "2019-08-25T21:55:18.000Z", "max_stars_repo_stars_event_min_datetime": "2019-08-25T21:55:18.000Z", "num_tokens": 1039, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4069 }
import tensorflow as tf from tensorflow.contrib.layers import xavier_initializer from tensorflow.examples.tutorials.mnist import input_data import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import os import imageio initializer = xavier_initializer() # 为生成器生成随机噪声 Z = tf.placeholder(tf.float32, shape=[None, 100], name='Z') Z_dim = 100 # 生成器参数设置 G_W1 = tf.Variable(initializer([100, 128]), name='G_W1') G_b1 = tf.Variable(tf.zeros(shape=[128]), name='G_b1') G_W2 = tf.Variable(initializer([128, 784]), name='G_W2') G_b2 = tf.Variable(tf.zeros(shape=[784]), name='G_b2') theta_G = [G_W1, G_W2, G_b1, G_b2] # 生成器网络 def generator(z): G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1) G_log_prob = tf.matmul(G_h1, G_W2) + G_b2 G_prob = tf.nn.sigmoid(G_log_prob) return G_prob # 为判别器准备的MNIST图像输入设置 X = tf.placeholder(tf.float32, shape=[None, 784], name='X') # 判别器参数设置 D_W1 = tf.Variable(initializer(shape=[784, 128]), name='D_W1') D_b1 = tf.Variable(tf.zeros(shape=[128]), name='D_b1') D_W2 = tf.Variable(initializer(shape=[128, 1]), name='D_W2') D_b2 = tf.Variable(tf.zeros(shape=[1]), name="D_W2") theta_D = [D_W1, D_W2, D_b1, D_b2] # 判别器网络 def discriminator(x): D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1) D_logit = tf.matmul(D_h1, D_W2) + D_b2 D_prob = tf.nn.sigmoid(D_logit) return D_prob, D_logit G_sample = generator(Z) D_real, D_logit_real = discriminator(X) D_fake, D_logit_fake = discriminator(G_sample) # GAN原始论文中的损失函数 D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. 
- D_fake)) G_loss = -tf.reduce_mean(tf.log(D_fake)) # 仅更新D(X)的参数, var_list=theta_D D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D) # 仅更新G(X)的参数, var_list=theta_G G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G) def sample_Z(m, n): return np.random.uniform(-1., 1., size=[m, n]) def plot(samples): fig = plt.figure(figsize=(4, 4)) gs = gridspec.GridSpec(4, 4) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(samples): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.reshape(28, 28), cmap='Greys_r') return fig mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) mb_size = 100 if not os.path.exists('output/'): os.makedirs('output/') i = 0 for it in range(60000): if it % 10000 == 0: samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)}) fig = plot(samples) plt.savefig('output/{}.png'.format(str(i).zfill(3)), bbox_inches='tight') i += 1 plt.close(fig) X_mb, _ = mnist.train.next_batch(mb_size) _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)}) _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)}) if it % 1000 == 0: print('Iter: {}'.format(it)) print('D loss: {:.4}'.format(D_loss_curr)) print('G_loss: {:.4}'.format(G_loss_curr)) print() images = [] for file_name in os.listdir('output'): if file_name.endswith('.png'): file_path = os.path.join('output', file_name) images.append(imageio.imread(file_path)) imageio.mimsave(os.path.join("output", 'samples.gif'), images, fps=1)
{ "alphanum_fraction": 0.6897679952, "author": null, "avg_line_length": 26.896, "converted": null, "ext": "py", "file": null, "hexsha": "b75bb37f1676249ed027d88d0665886761ca6d54", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f0099d55f1a19328b09ab2b9b5968b804c2d5f07", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "BaranovArtyom/aiexamples", "max_forks_repo_path": "tensorflow/gan/gan_mnist.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "f0099d55f1a19328b09ab2b9b5968b804c2d5f07", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "BaranovArtyom/aiexamples", "max_issues_repo_path": "tensorflow/gan/gan_mnist.py", "max_line_length": 97, "max_stars_count": null, "max_stars_repo_head_hexsha": "f0099d55f1a19328b09ab2b9b5968b804c2d5f07", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "BaranovArtyom/aiexamples", "max_stars_repo_path": "tensorflow/gan/gan_mnist.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1094, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3362 }
import unittest import numpy as np from sklearn.metrics import pairwise_kernels from skactiveml.pool._quire import ( _del_i_inv, _L_aa_inv, _one_versus_rest_transform, Quire, ) from skactiveml.utils import MISSING_LABEL, is_labeled, is_unlabeled class TestQuire(unittest.TestCase): def setUp(self): self.random_state = 1 self.candidates = np.array([1, 3]) self.X_cand = np.array([[8, 1], [9, 1], [5, 1]]) self.X = np.array([[1, 2], [5, 8], [8, 4], [5, 4]]) self.y_true = np.array([0, 0, 1, 1]) self.y = np.array([0, MISSING_LABEL, 1, MISSING_LABEL]) self.classes = np.array([0, 1]) self.kwargs = dict( candidates=self.candidates, X=self.X, y=self.y, ) def test_init_param_classes(self): qs = Quire(self.classes) self.assertTrue(hasattr(qs, "classes")) def test_init_param_lmbda(self): for lmbda in [-1, 0, "string"]: qs = Quire(self.classes, lmbda=lmbda) self.assertRaises((ValueError, TypeError), qs.query, **self.kwargs) def test_init_param_metric_dict(self): for metric_dict in ["String", 42, {"string": None}]: qs = Quire(self.classes, metric_dict=metric_dict) self.assertRaises(TypeError, qs.query, **self.kwargs) def test_init_param_metric(self): qs = Quire(self.classes, metric="Test") self.assertRaises(ValueError, qs.query, **self.kwargs) qs = Quire(self.classes, metric=42) self.assertRaises(ValueError, qs.query, **self.kwargs) qs = Quire(self.classes, metric="precomputed") K = np.zeros((len(self.y), len(self.y) - 1)) self.assertRaises(ValueError, qs.query, y=self.y, X=K) def test_query(self): # Test metric="precomputed" qs = Quire(self.classes, metric="precomputed") K = pairwise_kernels(self.X, self.X, metric="rbf") _, utils = qs.query(K, self.y, return_utilities=True) qs = Quire(self.classes, metric="rbf") _, expected_utils = qs.query(**self.kwargs, return_utilities=True) np.testing.assert_array_equal(expected_utils, utils) # Test with zero labels. qs.query(X=self.X, y=np.full(shape=len(self.X), fill_value=np.nan)) # Test Scenario. 
qs = Quire(self.classes, metric="precomputed") K = np.zeros_like(K) _, utils = qs.query(K, self.y, return_utilities=True) is_lbld = is_labeled(self.y) y_labeled = self.y[is_lbld].reshape(-1, 1) * 2 - 1 expected_utils = np.full_like(utils, -1 - y_labeled.T.dot(y_labeled)) np.testing.assert_array_equal( expected_utils[:, ~is_lbld], utils[:, ~is_lbld] ) qs = Quire(self.classes) _, utils = qs.query(**self.kwargs, return_utilities=True) def test__del_i_inv(self): A = np.random.random((3, 3)) A = A + A.T A_inv = np.linalg.inv(A) for i in range(len(A)): B = np.delete(np.delete(A, i, axis=0), i, axis=1) B_inv = np.linalg.inv(B) np.testing.assert_allclose(B_inv, _del_i_inv(A_inv, i)) def test__L_aa_inv(self): lmbda = 1 X = np.append(self.X, self.X_cand, axis=0) y = np.append(self.y_true, np.full(len(self.X_cand), MISSING_LABEL)) is_lbld = is_labeled(y=y, missing_label=MISSING_LABEL) is_unlbld = is_unlabeled(y=y, missing_label=MISSING_LABEL) K = pairwise_kernels(X, X, metric="rbf") # compute L and L_aa L = np.linalg.inv(K + lmbda * np.eye(len(X))) L_aa = L[is_unlbld][:, is_unlbld] L_aa_inv = np.linalg.inv(L_aa) np.testing.assert_allclose( L_aa_inv, _L_aa_inv(K, lmbda, is_unlbld, is_lbld) ) def test__one_versus_rest_transform(self): y = np.array([0, 1, 2, 1, 2, 0]) y_ovr = np.array( [[1, 0, 0, 0, 0, 1], [0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 1, 0]] ).T classes = np.unique(y) np.testing.assert_array_equal( y_ovr, _one_versus_rest_transform(y, classes, l_rest=0) )
{ "alphanum_fraction": 0.5952380952, "author": null, "avg_line_length": 37.125, "converted": null, "ext": "py", "file": null, "hexsha": "fd7d331b7aab98290968dc1a676e4a5dadb50cb3", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "04d7107272ef0438070808475599131d8726f547", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "LukasLuehrs/scikit-activeml", "max_forks_repo_path": "skactiveml/pool/tests/test_quire.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "04d7107272ef0438070808475599131d8726f547", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "LukasLuehrs/scikit-activeml", "max_issues_repo_path": "skactiveml/pool/tests/test_quire.py", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "04d7107272ef0438070808475599131d8726f547", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "LukasLuehrs/scikit-activeml", "max_stars_repo_path": "skactiveml/pool/tests/test_quire.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1168, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 4158 }
import numpy as np import spikeextractors as se class OutputRecordingExtractor(se.RecordingExtractor): def __init__(self, *, base_recording, block_size): super().__init__() self._base_recording = base_recording self._block_size = block_size self.copy_channel_properties(recording=self._base_recording) self._blocks = [] def add_block(self, traces: np.ndarray): """Add a block of output traces Parameters ---------- traces : np.ndarray The block of output traces """ if traces.shape[1] == self._block_size: self._blocks.append(traces) else: if traces.shape[1] >= self._block_size * 2: raise Exception( 'Unexpected error adding block to OutputRecordingExtractor.') # pragma: no cover if len(self._blocks) * self._block_size + traces.shape[1] != self.get_num_frames(): raise Exception(f'Problem adding final block. Unexpected size: {traces.shape[1]}. Block size is {self._block_size}. Number of frames is {self.get_num_frames()}.') # pragma: no cover if traces.shape[1] > self._block_size: self._blocks.append(traces[:, :self._block_size]) self._blocks.append(traces[:, self._block_size:]) else: self._blocks.append(traces) def get_channel_ids(self): """Return the channel ids """ return self._base_recording.get_channel_ids() def get_num_frames(self): """Return number of frames in the recording """ return self._base_recording.get_num_frames() def get_sampling_frequency(self): """Return sampling frequency """ return self._base_recording.get_sampling_frequency() def get_traces(self, channel_ids=None, start_frame=None, end_frame=None, return_scaled=False): if start_frame is None: start_frame = 0 if end_frame is None: end_frame = self.get_num_frames() if channel_ids is None: channel_ids = self.get_channel_ids() channel_indices = [] aa = self.get_channel_ids() for ch in channel_ids: channel_indices.append(np.nonzero(np.array(aa) == ch)[0][0]) ib1 = int(start_frame / self._block_size) ib2 = int((end_frame-1) / self._block_size) assert ib2 < len(self._blocks), f'Block {ib2} not found in 
OutputRecordingExtractor (num blocks is {len(self._blocks)})' trace_blocks = [] if ib1 == ib2: trace_blocks.append( self._blocks[ib1][channel_indices][:, start_frame - ib1 * self._block_size:end_frame - ib1 * self._block_size] ) else: trace_blocks.append( self._blocks[ib1][channel_indices][:, start_frame - ib1 * self._block_size:] ) for ii in range(ib1 + 1, ib2): trace_blocks.append( self._blocks[ii][channel_indices, :] ) trace_blocks.append( self._blocks[ib2][channel_indices][:, :end_frame - ib2 * self._block_size] ) return np.concatenate(trace_blocks, axis=1) @staticmethod def write_recording(recording, save_path): raise Exception('write_recording not implemented for this recording extractor')
{ "alphanum_fraction": 0.5899688297, "author": null, "avg_line_length": 38.7802197802, "converted": null, "ext": "py", "file": null, "hexsha": "6ca809dee9e660c2feb96510a260bd5b6dbd3d29", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e4109c8f123174d9cefe25065ad78e49a4ddb894", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "magland/ephys_nlm", "max_forks_repo_path": "ephys_nlm/_outputrecordingextractor.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "e4109c8f123174d9cefe25065ad78e49a4ddb894", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "magland/ephys_nlm", "max_issues_repo_path": "ephys_nlm/_outputrecordingextractor.py", "max_line_length": 197, "max_stars_count": 4, "max_stars_repo_head_hexsha": "e4109c8f123174d9cefe25065ad78e49a4ddb894", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "magland/ephys_nlm", "max_stars_repo_path": "ephys_nlm/_outputrecordingextractor.py", "max_stars_repo_stars_event_max_datetime": "2020-11-18T22:20:55.000Z", "max_stars_repo_stars_event_min_datetime": "2020-02-11T00:43:10.000Z", "num_tokens": 748, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 3529 }
[STATEMENT] lemma has_white_path_to_refl[iff]: "(x has_white_path_to x) s" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (x has_white_path_to x) s [PROOF STEP] unfolding has_white_path_to_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<lambda>x y. (x points_to y) s \<and> white y s)\<^sup>*\<^sup>* x x [PROOF STEP] by simp
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "ConcurrentGC_Global_Invariants_Lemmas", "hexsha": null, "include": null, "lang": null, "length": 2, "llama_tokens": 158, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import torch import numpy as np from sklearn.mixture import GaussianMixture from torch import nn import os from torch.utils.data import DataLoader from sklearn.cluster import KMeans from torch.autograd import Variable from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score from python_research.io import save_to_csv from time import time class DCEC(nn.Module): def __init__(self, input_dims: np.ndarray, n_clusters: int, kernel_shape: np.ndarray, last_out_channels: int = 32, latent_vector_size: int = 25, update_interval: int = 140, device: str='cpu', artifacts_path: str='DCEC'): super(DCEC, self).__init__() self.latent_vector_size = latent_vector_size self.n_clusters = n_clusters self.last_out_channels = last_out_channels encoder_shape, out_features = self._calculate_shapes(input_dims, kernel_shape, 2, self.last_out_channels) self.final_encoder_shape = tuple(np.hstack([self.last_out_channels, encoder_shape])) self.encoder = nn.Sequential( nn.Conv3d(in_channels=1, out_channels=32, kernel_size=kernel_shape), nn.ReLU(), nn.Dropout(), nn.Conv3d(in_channels=32, out_channels=self.last_out_channels, kernel_size=kernel_shape), nn.ReLU(), Flatten(), nn.Linear(in_features=out_features, out_features=self.latent_vector_size) ) self.linear = nn.Sequential( nn.Linear(in_features=self.latent_vector_size, out_features=out_features), nn.ReLU() ) self.decoder = nn.Sequential( nn.ConvTranspose3d(in_channels=self.last_out_channels, out_channels=32, kernel_size=kernel_shape), nn.ReLU(), nn.Dropout(), nn.ConvTranspose3d(in_channels=32, out_channels=1, kernel_size=kernel_shape) ) self.clustering_layer = ClusteringLayer(n_clusters=self.n_clusters, input_dim=self.latent_vector_size) self.log_softmax = nn.LogSoftmax() self.update_interval = update_interval self.n_clusters = n_clusters self.artifacts_path = artifacts_path self.mse_loss = nn.MSELoss() self.kld_loss = nn.KLDivLoss(reduction='batchmean') self.device = device self.metrics = {'MSE': [], 'KLD': [], 'NMI': [0], 
'ARS': [0]} self._best_autoencoder_loss = np.inf self._best_nmi = -np.inf def _calculate_shapes(self, input_dims: np.ndarray, kernel_shapes: np.ndarray, kernels_count: int, channels_count: int): final_encoder_shape = input_dims - ((kernels_count * kernel_shapes) - kernels_count) out_features = np.prod(final_encoder_shape) * channels_count return final_encoder_shape, out_features def predict_clusters(self, data_loader): predicted_clusters = torch.zeros((len(data_loader) * data_loader.batch_size, self.n_clusters), device=self.device) last_insert = 0 with torch.no_grad(): for batch_x in data_loader: batch_x = Variable(batch_x, requires_grad=False).float() predicted_clusters[last_insert: last_insert + data_loader.batch_size, :] = self.clustering_layer(self.encoder(batch_x)) last_insert += data_loader.batch_size return predicted_clusters @staticmethod def calculate_target_distribution(q): weight = torch.pow(q, 2) / torch.sum(q, dim=0) return torch.t(torch.t(weight) / torch.sum(weight, dim=1)) def get_target_distribution(self, data_loader: DataLoader): return self.calculate_target_distribution( self.predict_clusters(data_loader)) def encode_features(self, data_loader: DataLoader): encoded_features = torch.zeros((len(data_loader) * data_loader.batch_size, self.latent_vector_size), device=self.device) last_insert = 0 with torch.no_grad(): for batch_x in data_loader: batch_x = Variable(batch_x, requires_grad=False).float() encoded_features[last_insert: last_insert + data_loader.batch_size, :] = self.encoder(batch_x) last_insert += data_loader.batch_size return encoded_features def initialize(self, data_loader): encoded_features = self.encode_features(data_loader) kmeans = KMeans(n_clusters=self.n_clusters).fit(encoded_features.cpu().detach()) cluster_centers = kmeans.cluster_centers_.astype(np.float32) cluster_centers = torch.from_numpy(cluster_centers).to(device=self.device) self.clustering_layer.set_weights(cluster_centers) def train_with_clustering(self, data_loader, 
optimizer, iterations: int, gamma: float): self.initialize(data_loader) last_batch = 0 true_labels = (data_loader.data.labels .cpu() .detach() .numpy() .transpose() .reshape(-1)) for iteration in range(iterations): if iteration % self.update_interval == 0: last_batch = 0 data_loader.sort() predicted_labels = self.cluster_with_model(data_loader) self.plot_high_res(predicted_labels, data_loader.cube_2d_shape(), iteration, 'model') self.metrics['NMI'].append(self.calculate_nmi(true_labels, predicted_labels)) self.metrics['ARS'].append(self.calculate_ars(true_labels, predicted_labels)) self._log_metrics_to_file() self._print_losses(iteration) self._save_model(iteration) data_loader.shuffle() target_distribution = self.get_target_distribution(data_loader) iter(data_loader) optimizer.zero_grad() try: batch_x = next(data_loader) batch_x = Variable(batch_x).float() except StopIteration: iter(data_loader) last_batch = 0 continue encoder_output = self.encoder(batch_x) clustering_layer_output = self.log_softmax(self.clustering_layer(encoder_output)) div_loss = self.kld_loss(clustering_layer_output, target_distribution[last_batch: last_batch + data_loader.batch_size]) * gamma linear_output = self.linear(encoder_output) linear_output = torch.reshape(linear_output, ((data_loader.batch_size, ) + self.final_encoder_shape)) decoder_output = self.decoder(linear_output) mse_loss = self.mse_loss(batch_x, decoder_output) self.metrics['MSE'].append(mse_loss.item()) self.metrics['KLD'].append(div_loss.item()) div_loss.backward(retain_graph=True) mse_loss.backward() optimizer.step() last_batch += data_loader.batch_size def train_autoencoder(self, data_loader, optimizer, epochs, epsilon): true_labels = (data_loader.data.labels .cpu() .detach() .numpy() .transpose() .reshape(-1)) last_mse = 0 for epoch in range(epochs): data_loader.shuffle() for batch_x in data_loader: batch_x = Variable(batch_x).float() optimizer.zero_grad() encoded = self.encoder(batch_x) linear_output = 
self.linear(encoded) reshaped = torch.reshape(linear_output, ((data_loader.batch_size, ) + self.final_encoder_shape)) decoder_output = self.decoder(reshaped) mse_loss = self.mse_loss(batch_x, decoder_output) self.metrics['MSE'].append(mse_loss.item()) mse_loss.backward() optimizer.step() data_loader.sort() self.save_if_best(np.average(self.metrics['MSE'])) # predicted_labels = self.cluster_with_kmeans(data_loader) predicted_labels = self.cluster_with_gaussian(data_loader) self.plot_high_res(predicted_labels, data_loader.cube_2d_shape(), epoch, 'kmeans') self.metrics['NMI'].append(self.calculate_nmi(true_labels, predicted_labels)) self.metrics['ARS'].append(self.calculate_ars(true_labels, predicted_labels)) self._log_metrics_to_file() if epoch > 1: if np.abs(last_mse - np.average(self.metrics['MSE'])) < epsilon: break last_mse = np.average(self.metrics['MSE']) self._print_losses(epoch) @staticmethod def calculate_nmi(labels_true, labels_predicted): to_delete = np.where(labels_true == 0)[0] labels_predicted = np.delete(labels_predicted, to_delete) labels_true = np.delete(labels_true, to_delete).astype(np.int32) return normalized_mutual_info_score(labels_true, labels_predicted) @staticmethod def calculate_ars(labels_true, labels_predicted): to_delete = np.where(labels_true == 0)[0] labels_predicted = np.delete(labels_predicted, to_delete) labels_true = np.delete(labels_true, to_delete) return adjusted_rand_score(labels_true, labels_predicted) def _print_losses(self, iteration): print('Iter: {}, MSE -> {} KLD -> {}, NMI -> {}, ARS -> {}' .format(iteration, np.average(self.metrics['MSE']), np.average(self.metrics['KLD']) if len(self.metrics['KLD']) != 0 else 0, self.metrics['NMI'][-1], self.metrics['ARS'][-1])) self.metrics['KLD'] = [] self.metrics['MSE'] = [] def _log_metrics_to_file(self): path = os.path.join(self.artifacts_path, 'metrics.csv') save_to_csv(path, [np.average(self.metrics['MSE']), np.average(self.metrics['KLD']), self.metrics['NMI'][-1], 
self.metrics['ARS'][-1]]) def save_if_best(self, loss): if loss < self._best_autoencoder_loss: self._save_model() self._best_autoencoder_loss = loss def train_model(self, data_loader, optimizer, epochs: int=200, iterations: int=10000, gamma: float=0.1, epsilon=0.00001): print("Pretraining autoencoder:") training_start = time() self.train_autoencoder(data_loader, optimizer, epochs, epsilon) print("Pretraining finished, training with clustering") self.load_state_dict(torch.load(os.path.join(self.artifacts_path, "best_autoencoder_model.pt"))) self.train_with_clustering(data_loader, optimizer, iterations, gamma) training_time = time() - training_start save_to_csv(os.path.join(self.artifacts_path, "time.csv"), [training_time]) print("Done!") def cluster_with_kmeans(self, data_loader): encoded_features = self.encode_features(data_loader) return KMeans(n_clusters=self.n_clusters).fit_predict( encoded_features.cpu().detach()) def cluster_with_gaussian(self, data_loader): encoded_features = self.encode_features(data_loader) return GaussianMixture(n_components=self.n_clusters).fit_predict( encoded_features.cpu().detach()) def cluster_with_model(self, data_loader): clusters = self.predict_clusters(data_loader) return np.argmax(clusters.cpu().detach().numpy(), axis=1) def _save_model(self, epoch: int = None): os.makedirs(self.artifacts_path, exist_ok=True) if epoch is not None: path = os.path.join(self.artifacts_path, "model_epoch_{}.pt".format(epoch)) else: path = os.path.join(self.artifacts_path, "best_autoencoder_model.pt") torch.save(self.state_dict(), path) def forward(self, *input): pass class Flatten(nn.Module): def forward(self, input_data): return input_data.view(input_data.size(0), -1) class ClusteringLayer(nn.Module): def __init__(self, n_clusters: int, input_dim: int): super(ClusteringLayer, self).__init__() self.weights = nn.Parameter(torch.Tensor(n_clusters, input_dim)) nn.init.xavier_normal_(self.weights) def set_weights(self, weights): self.weights = 
nn.Parameter(weights).float() def forward(self, input_data): q = 1.0 / (1.0 + (torch.sum(torch.pow(input_data.unsqueeze(dim=1) - self.weights, 2), dim=2))) q = torch.t(torch.t(q) / torch.sum(q, dim=1)) return q
{ "alphanum_fraction": 0.5823313413, "author": null, "avg_line_length": 46.4515050167, "converted": null, "ext": "py", "file": null, "hexsha": "9603d860a3c81fde962e79d06e39a8bfaecd16b4", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 11, "max_forks_repo_forks_event_max_datetime": "2022-03-12T03:50:50.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-24T12:42:59.000Z", "max_forks_repo_head_hexsha": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ESA-PhiLab/hypernet", "max_forks_repo_path": "beetles/scripts/3D_CAE.py", "max_issues_count": 5, "max_issues_repo_head_hexsha": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "max_issues_repo_issues_event_max_datetime": "2022-03-24T09:32:01.000Z", "max_issues_repo_issues_event_min_datetime": "2018-09-11T14:52:35.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ESA-PhiLab/hypernet", "max_issues_repo_path": "beetles/scripts/3D_CAE.py", "max_line_length": 109, "max_stars_count": 34, "max_stars_repo_head_hexsha": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ESA-PhiLab/hypernet", "max_stars_repo_path": "beetles/scripts/3D_CAE.py", "max_stars_repo_stars_event_max_datetime": "2022-01-31T17:44:51.000Z", "max_stars_repo_stars_event_min_datetime": "2018-11-14T09:38:00.000Z", "num_tokens": 2647, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 13889 }
//======================================================================= // Copyright 2001 University of Notre Dame. // Copyright 2006 Trustees of Indiana University // Authors: Jeremy G. Siek and Douglas Gregor <dgregor@cs.indiana.edu> // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) //======================================================================= #ifndef BOOST_ADJACENCY_MATRIX_HPP #define BOOST_ADJACENCY_MATRIX_HPP #include <boost/config.hpp> #include <vector> #include <memory> #include <boost/assert.hpp> #include <boost/limits.hpp> #include <boost/iterator.hpp> #include <boost/graph/graph_traits.hpp> #include <boost/graph/graph_mutability_traits.hpp> #include <boost/graph/graph_selectors.hpp> #include <boost/mpl/if.hpp> #include <boost/mpl/bool.hpp> #include <boost/graph/adjacency_iterator.hpp> #include <boost/graph/detail/edge.hpp> #include <boost/iterator/iterator_adaptor.hpp> #include <boost/iterator/filter_iterator.hpp> #include <boost/range/irange.hpp> #include <boost/graph/properties.hpp> #include <boost/tuple/tuple.hpp> #include <boost/static_assert.hpp> #include <boost/type_traits.hpp> #include <boost/property_map/property_map.hpp> #include <boost/property_map/transform_value_property_map.hpp> #include <boost/property_map/function_property_map.hpp> namespace boost { namespace detail { template <class Directed, class Vertex> class matrix_edge_desc_impl : public edge_desc_impl<Directed,Vertex> { typedef edge_desc_impl<Directed,Vertex> Base; public: matrix_edge_desc_impl() { } matrix_edge_desc_impl(bool exists, Vertex s, Vertex d, const void* ep = 0) : Base(s, d, ep), m_exists(exists) { } bool exists() const { return m_exists; } private: bool m_exists; }; struct does_edge_exist { template <class Edge> bool operator()(const Edge& e) const { return e.exists(); } }; // Note to self... 
The int for get_edge_exists and set_edge exist helps // make these calls unambiguous. template <typename EdgeProperty> bool get_edge_exists(const std::pair<bool, EdgeProperty>& stored_edge, int) { return stored_edge.first; } template <typename EdgeProperty> void set_edge_exists( std::pair<bool, EdgeProperty>& stored_edge, bool flag, int ) { stored_edge.first = flag; } template <typename EdgeProxy> bool get_edge_exists(const EdgeProxy& edge_proxy, ...) { return edge_proxy; } template <typename EdgeProxy> EdgeProxy& set_edge_exists(EdgeProxy& edge_proxy, bool flag, ...) { edge_proxy = flag; return edge_proxy; // just to avoid never used warning } // NOTE: These functions collide with the get_property function for // accessing bundled graph properties. Be excplicit when using them. template <typename EdgeProperty> const EdgeProperty& get_edge_property(const std::pair<bool, EdgeProperty>& stored_edge) { return stored_edge.second; } template <typename EdgeProperty> EdgeProperty& get_edge_property(std::pair<bool, EdgeProperty>& stored_edge) { return stored_edge.second; } template <typename StoredEdgeProperty, typename EdgeProperty> inline void set_edge_property(std::pair<bool, StoredEdgeProperty>& stored_edge, const EdgeProperty& ep, int) { stored_edge.second = ep; } inline const no_property& get_edge_property(const char&) { static no_property s_prop; return s_prop; } inline no_property& get_edge_property(char&) { static no_property s_prop; return s_prop; } template <typename EdgeProxy, typename EdgeProperty> inline void set_edge_property(EdgeProxy, const EdgeProperty&, ...) 
{} //======================================================================= // Directed Out Edge Iterator template < typename VertexDescriptor, typename MatrixIter , typename VerticesSizeType, typename EdgeDescriptor > struct dir_adj_matrix_out_edge_iter : iterator_adaptor< dir_adj_matrix_out_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > { typedef iterator_adaptor< dir_adj_matrix_out_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > super_t; dir_adj_matrix_out_edge_iter() { } dir_adj_matrix_out_edge_iter( const MatrixIter& i , const VertexDescriptor& src , const VerticesSizeType& n ) : super_t(i), m_src(src), m_targ(0), m_n(n) { } void increment() { ++this->base_reference(); ++m_targ; } inline EdgeDescriptor dereference() const { return EdgeDescriptor(get_edge_exists(*this->base(), 0), m_src, m_targ, &get_edge_property(*this->base())); } VertexDescriptor m_src, m_targ; VerticesSizeType m_n; }; //======================================================================= // Directed In Edge Iterator template < typename VertexDescriptor, typename MatrixIter , typename VerticesSizeType, typename EdgeDescriptor > struct dir_adj_matrix_in_edge_iter : iterator_adaptor< dir_adj_matrix_in_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > { typedef iterator_adaptor< dir_adj_matrix_in_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > super_t; dir_adj_matrix_in_edge_iter() { } dir_adj_matrix_in_edge_iter( const MatrixIter& i , const MatrixIter& last , const VertexDescriptor& tgt , const VerticesSizeType& n ) : super_t(i), m_last(last), m_src(0), m_targ(tgt), m_n(n) { 
} void increment() { if (VerticesSizeType(m_last - this->base_reference()) >= m_n) { this->base_reference() += m_n; ++m_src; } else { this->base_reference() = m_last; } } inline EdgeDescriptor dereference() const { return EdgeDescriptor(get_edge_exists(*this->base(), 0), m_src, m_targ, &get_edge_property(*this->base())); } MatrixIter m_last; VertexDescriptor m_src, m_targ; VerticesSizeType m_n; }; //======================================================================= // Undirected Out Edge Iterator template < typename VertexDescriptor, typename MatrixIter , typename VerticesSizeType, typename EdgeDescriptor > struct undir_adj_matrix_out_edge_iter : iterator_adaptor< undir_adj_matrix_out_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > { typedef iterator_adaptor< undir_adj_matrix_out_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > super_t; undir_adj_matrix_out_edge_iter() { } undir_adj_matrix_out_edge_iter( const MatrixIter& i , const VertexDescriptor& src , const VerticesSizeType& n ) : super_t(i), m_src(src), m_inc(src), m_targ(0), m_n(n) {} void increment() { if (m_targ < m_src) // first half { ++this->base_reference(); } else if (m_targ < m_n - 1) { // second half ++m_inc; this->base_reference() += m_inc; } else { // past-the-end this->base_reference() += m_n - m_src; } ++m_targ; } inline EdgeDescriptor dereference() const { return EdgeDescriptor(get_edge_exists(*this->base(), 0), m_src, m_targ, &get_edge_property(*this->base())); } VertexDescriptor m_src, m_inc, m_targ; VerticesSizeType m_n; }; //======================================================================= // Undirected In Edge Iterator template < typename VertexDescriptor, typename MatrixIter , typename VerticesSizeType, typename EdgeDescriptor > struct undir_adj_matrix_in_edge_iter : 
iterator_adaptor< undir_adj_matrix_in_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > { typedef iterator_adaptor< undir_adj_matrix_in_edge_iter<VertexDescriptor, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > super_t; undir_adj_matrix_in_edge_iter() { } undir_adj_matrix_in_edge_iter( const MatrixIter& i , const VertexDescriptor& src , const VerticesSizeType& n ) : super_t(i), m_src(src), m_inc(src), m_targ(0), m_n(n) {} void increment() { if (m_targ < m_src) // first half { ++this->base_reference(); } else if (m_targ < m_n - 1) { // second half ++m_inc; this->base_reference() += m_inc; } else { // past-the-end this->base_reference() += m_n - m_src; } ++m_targ; } inline EdgeDescriptor dereference() const { return EdgeDescriptor(get_edge_exists(*this->base(), 0), m_targ, m_src, &get_edge_property(*this->base())); } VertexDescriptor m_src, m_inc, m_targ; VerticesSizeType m_n; }; //======================================================================= // Edge Iterator template <typename Directed, typename MatrixIter, typename VerticesSizeType, typename EdgeDescriptor> struct adj_matrix_edge_iter : iterator_adaptor< adj_matrix_edge_iter<Directed, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > { typedef iterator_adaptor< adj_matrix_edge_iter<Directed, MatrixIter, VerticesSizeType, EdgeDescriptor> , MatrixIter , EdgeDescriptor , use_default , EdgeDescriptor , std::ptrdiff_t > super_t; adj_matrix_edge_iter() { } adj_matrix_edge_iter(const MatrixIter& i, const MatrixIter& start, const VerticesSizeType& n) : super_t(i), m_start(start), m_src(0), m_targ(0), m_n(n) { } void increment() { increment_dispatch(this->base_reference(), Directed()); } void increment_dispatch(MatrixIter& i, directedS) { ++i; if (m_targ == 
m_n - 1) { m_targ = 0; ++m_src; } else { ++m_targ; } } void increment_dispatch(MatrixIter& i, undirectedS) { ++i; if (m_targ == m_src) { m_targ = 0; ++m_src; } else { ++m_targ; } } inline EdgeDescriptor dereference() const { return EdgeDescriptor(get_edge_exists(*this->base(), 0), m_src, m_targ, &get_edge_property(*this->base())); } MatrixIter m_start; VerticesSizeType m_src, m_targ, m_n; }; } // namespace detail //========================================================================= // Adjacency Matrix Traits template <typename Directed = directedS> class adjacency_matrix_traits { typedef typename Directed::is_directed_t is_directed; public: // The bidirectionalS tag is not allowed with the adjacency_matrix // graph type. Instead, use directedS, which also provides the // functionality required for a Bidirectional Graph (in_edges, // in_degree, etc.). #if !defined(_MSC_VER) || _MSC_VER > 1300 BOOST_STATIC_ASSERT(type_traits::ice_not<(is_same<Directed, bidirectionalS>::value)>::value); #endif typedef typename mpl::if_<is_directed, bidirectional_tag, undirected_tag>::type directed_category; typedef disallow_parallel_edge_tag edge_parallel_category; typedef std::size_t vertex_descriptor; typedef detail::matrix_edge_desc_impl<directed_category, vertex_descriptor> edge_descriptor; }; struct adjacency_matrix_class_tag { }; struct adj_matrix_traversal_tag : public virtual adjacency_matrix_tag, public virtual vertex_list_graph_tag, public virtual incidence_graph_tag, public virtual adjacency_graph_tag, public virtual edge_list_graph_tag { }; //========================================================================= // Adjacency Matrix Class template <typename Directed = directedS, typename VertexProperty = no_property, typename EdgeProperty = no_property, typename GraphProperty = no_property, typename Allocator = std::allocator<bool> > class adjacency_matrix { typedef adjacency_matrix self; typedef adjacency_matrix_traits<Directed> Traits; public: #if 
!defined(BOOST_MSVC) || BOOST_MSVC > 1300 // The bidirectionalS tag is not allowed with the adjacency_matrix // graph type. Instead, use directedS, which also provides the // functionality required for a Bidirectional Graph (in_edges, // in_degree, etc.). BOOST_STATIC_ASSERT(!(is_same<Directed, bidirectionalS>::value)); #endif typedef GraphProperty graph_property_type; typedef typename lookup_one_property<GraphProperty, graph_bundle_t>::type graph_bundled; typedef VertexProperty vertex_property_type; typedef typename lookup_one_property<VertexProperty, vertex_bundle_t>::type vertex_bundled; typedef EdgeProperty edge_property_type; typedef typename lookup_one_property<EdgeProperty, edge_bundle_t>::type edge_bundled; public: // should be private typedef typename mpl::if_<typename has_property<edge_property_type>::type, std::pair<bool, edge_property_type>, char>::type StoredEdge; #if (defined(BOOST_MSVC) && BOOST_MSVC <= 1300) || defined(BOOST_NO_STD_ALLOCATOR) typedef std::vector<StoredEdge> Matrix; #else // This causes internal compiler error for MSVC typedef typename Allocator::template rebind<StoredEdge>::other Alloc; typedef std::vector<StoredEdge, Alloc> Matrix; #endif typedef typename Matrix::iterator MatrixIter; typedef typename Matrix::size_type size_type; public: // Graph concept required types typedef typename Traits::vertex_descriptor vertex_descriptor; typedef typename Traits::edge_descriptor edge_descriptor; typedef typename Traits::directed_category directed_category; typedef typename Traits::edge_parallel_category edge_parallel_category; typedef adj_matrix_traversal_tag traversal_category; static vertex_descriptor null_vertex() { return (std::numeric_limits<vertex_descriptor>::max)(); } //private: if friends worked, these would be private typedef detail::dir_adj_matrix_out_edge_iter< vertex_descriptor, MatrixIter, size_type, edge_descriptor > DirOutEdgeIter; typedef detail::undir_adj_matrix_out_edge_iter< vertex_descriptor, MatrixIter, size_type, 
edge_descriptor > UnDirOutEdgeIter; typedef typename mpl::if_< typename Directed::is_directed_t, DirOutEdgeIter, UnDirOutEdgeIter >::type unfiltered_out_edge_iter; typedef detail::dir_adj_matrix_in_edge_iter< vertex_descriptor, MatrixIter, size_type, edge_descriptor > DirInEdgeIter; typedef detail::undir_adj_matrix_in_edge_iter< vertex_descriptor, MatrixIter, size_type, edge_descriptor > UnDirInEdgeIter; typedef typename mpl::if_< typename Directed::is_directed_t, DirInEdgeIter, UnDirInEdgeIter >::type unfiltered_in_edge_iter; typedef detail::adj_matrix_edge_iter< Directed, MatrixIter, size_type, edge_descriptor > unfiltered_edge_iter; public: // IncidenceGraph concept required types typedef filter_iterator<detail::does_edge_exist, unfiltered_out_edge_iter> out_edge_iterator; typedef size_type degree_size_type; // BidirectionalGraph required types typedef filter_iterator<detail::does_edge_exist, unfiltered_in_edge_iter> in_edge_iterator; // AdjacencyGraph required types typedef typename adjacency_iterator_generator<self, vertex_descriptor, out_edge_iterator>::type adjacency_iterator; // VertexListGraph required types typedef size_type vertices_size_type; typedef integer_range<vertex_descriptor> VertexList; typedef typename VertexList::iterator vertex_iterator; // EdgeListGraph required types typedef size_type edges_size_type; typedef filter_iterator< detail::does_edge_exist, unfiltered_edge_iter > edge_iterator; // PropertyGraph required types typedef adjacency_matrix_class_tag graph_tag; // Constructor required by MutableGraph adjacency_matrix(vertices_size_type n_vertices, const GraphProperty& p = GraphProperty()) : m_matrix(Directed::is_directed ? 
(n_vertices * n_vertices) : (n_vertices * (n_vertices + 1) / 2)), m_vertex_set(0, n_vertices), m_vertex_properties(n_vertices), m_num_edges(0), m_property(p) { } template <typename EdgeIterator> adjacency_matrix(EdgeIterator first, EdgeIterator last, vertices_size_type n_vertices, const GraphProperty& p = GraphProperty()) : m_matrix(Directed::is_directed ? (n_vertices * n_vertices) : (n_vertices * (n_vertices + 1) / 2)), m_vertex_set(0, n_vertices), m_vertex_properties(n_vertices), m_num_edges(0), m_property(p) { for (; first != last; ++first) { add_edge(first->first, first->second, *this); } } template <typename EdgeIterator, typename EdgePropertyIterator> adjacency_matrix(EdgeIterator first, EdgeIterator last, EdgePropertyIterator ep_iter, vertices_size_type n_vertices, const GraphProperty& p = GraphProperty()) : m_matrix(Directed::is_directed ? (n_vertices * n_vertices) : (n_vertices * (n_vertices + 1) / 2)), m_vertex_set(0, n_vertices), m_vertex_properties(n_vertices), m_num_edges(0), m_property(p) { for (; first != last; ++first, ++ep_iter) { add_edge(first->first, first->second, *ep_iter, *this); } } #ifndef BOOST_GRAPH_NO_BUNDLED_PROPERTIES // Directly access a vertex or edge bundle vertex_bundled& operator[](vertex_descriptor v) { return get(vertex_bundle, *this, v); } const vertex_bundled& operator[](vertex_descriptor v) const { return get(vertex_bundle, *this, v); } edge_bundled& operator[](edge_descriptor e) { return get(edge_bundle, *this, e); } const edge_bundled& operator[](edge_descriptor e) const { return get(edge_bundle, *this, e); } graph_bundled& operator[](graph_bundle_t) { return get_property(*this); } const graph_bundled& operator[](graph_bundle_t) const { return get_property(*this); } #endif //private: if friends worked, these would be private typename Matrix::const_reference get_edge(vertex_descriptor u, vertex_descriptor v) const { if (Directed::is_directed) return m_matrix[u * m_vertex_set.size() + v]; else { if (v > u) std::swap(u, v); 
return m_matrix[u * (u + 1)/2 + v]; } } typename Matrix::reference get_edge(vertex_descriptor u, vertex_descriptor v) { if (Directed::is_directed) return m_matrix[u * m_vertex_set.size() + v]; else { if (v > u) std::swap(u, v); return m_matrix[u * (u + 1)/2 + v]; } } Matrix m_matrix; VertexList m_vertex_set; std::vector<vertex_property_type> m_vertex_properties; size_type m_num_edges; graph_property_type m_property; }; //========================================================================= // Functions required by the AdjacencyMatrix concept template <typename D, typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<D,VP,EP,GP,A>::edge_descriptor, bool> edge(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor v, const adjacency_matrix<D,VP,EP,GP,A>& g) { bool exists = detail::get_edge_exists(g.get_edge(u,v), 0); typename adjacency_matrix<D,VP,EP,GP,A>::edge_descriptor e(exists, u, v, &detail::get_edge_property(g.get_edge(u,v))); return std::make_pair(e, exists); } //========================================================================= // Functions required by the IncidenceGraph concept // O(1) template <typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<directedS,VP,EP,GP,A>::out_edge_iterator, typename adjacency_matrix<directedS,VP,EP,GP,A>::out_edge_iterator> out_edges (typename adjacency_matrix<directedS,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<directedS,VP,EP,GP,A>& g_) { typedef adjacency_matrix<directedS,VP,EP,GP,A> Graph; Graph& g = const_cast<Graph&>(g_); typename Graph::vertices_size_type offset = u * g.m_vertex_set.size(); typename Graph::MatrixIter f = g.m_matrix.begin() + offset; typename Graph::MatrixIter l = f + g.m_vertex_set.size(); typename Graph::unfiltered_out_edge_iter first(f, u, g.m_vertex_set.size()) , last(l, u, g.m_vertex_set.size()); detail::does_edge_exist pred; typedef typename 
Graph::out_edge_iterator out_edge_iterator; return std::make_pair(out_edge_iterator(pred, first, last), out_edge_iterator(pred, last, last)); } // O(1) template <typename VP, typename EP, typename GP, typename A> std::pair< typename adjacency_matrix<undirectedS,VP,EP,GP,A>::out_edge_iterator, typename adjacency_matrix<undirectedS,VP,EP,GP,A>::out_edge_iterator> out_edges (typename adjacency_matrix<undirectedS,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<undirectedS,VP,EP,GP,A>& g_) { typedef adjacency_matrix<undirectedS,VP,EP,GP,A> Graph; Graph& g = const_cast<Graph&>(g_); typename Graph::vertices_size_type offset = u * (u + 1) / 2; typename Graph::MatrixIter f = g.m_matrix.begin() + offset; typename Graph::MatrixIter l = g.m_matrix.end(); typename Graph::unfiltered_out_edge_iter first(f, u, g.m_vertex_set.size()) , last(l, u, g.m_vertex_set.size()); detail::does_edge_exist pred; typedef typename Graph::out_edge_iterator out_edge_iterator; return std::make_pair(out_edge_iterator(pred, first, last), out_edge_iterator(pred, last, last)); } // O(N) template <typename D, typename VP, typename EP, typename GP, typename A> typename adjacency_matrix<D,VP,EP,GP,A>::degree_size_type out_degree(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<D,VP,EP,GP,A>& g) { typename adjacency_matrix<D,VP,EP,GP,A>::degree_size_type n = 0; typename adjacency_matrix<D,VP,EP,GP,A>::out_edge_iterator f, l; for (boost::tie(f, l) = out_edges(u, g); f != l; ++f) ++n; return n; } // O(1) template <typename D, typename VP, typename EP, typename GP, typename A, typename Dir, typename Vertex> typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor source(const detail::matrix_edge_desc_impl<Dir,Vertex>& e, const adjacency_matrix<D,VP,EP,GP,A>&) { return e.m_source; } // O(1) template <typename D, typename VP, typename EP, typename GP, typename A, typename Dir, typename Vertex> typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor target(const 
detail::matrix_edge_desc_impl<Dir,Vertex>& e, const adjacency_matrix<D,VP,EP,GP,A>&) { return e.m_target; } //========================================================================= // Functions required by the BidirectionalGraph concept // O(1) template <typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<directedS,VP,EP,GP,A>::in_edge_iterator, typename adjacency_matrix<directedS,VP,EP,GP,A>::in_edge_iterator> in_edges (typename adjacency_matrix<directedS,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<directedS,VP,EP,GP,A>& g_) { typedef adjacency_matrix<directedS,VP,EP,GP,A> Graph; Graph& g = const_cast<Graph&>(g_); typename Graph::MatrixIter f = g.m_matrix.begin() + u; typename Graph::MatrixIter l = g.m_matrix.end(); typename Graph::unfiltered_in_edge_iter first(f, l, u, g.m_vertex_set.size()) , last(l, l, u, g.m_vertex_set.size()); detail::does_edge_exist pred; typedef typename Graph::in_edge_iterator in_edge_iterator; return std::make_pair(in_edge_iterator(pred, first, last), in_edge_iterator(pred, last, last)); } // O(1) template <typename VP, typename EP, typename GP, typename A> std::pair< typename adjacency_matrix<undirectedS,VP,EP,GP,A>::in_edge_iterator, typename adjacency_matrix<undirectedS,VP,EP,GP,A>::in_edge_iterator> in_edges (typename adjacency_matrix<undirectedS,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<undirectedS,VP,EP,GP,A>& g_) { typedef adjacency_matrix<undirectedS,VP,EP,GP,A> Graph; Graph& g = const_cast<Graph&>(g_); typename Graph::vertices_size_type offset = u * (u + 1) / 2; typename Graph::MatrixIter f = g.m_matrix.begin() + offset; typename Graph::MatrixIter l = g.m_matrix.end(); typename Graph::unfiltered_in_edge_iter first(f, u, g.m_vertex_set.size()) , last(l, u, g.m_vertex_set.size()); detail::does_edge_exist pred; typedef typename Graph::in_edge_iterator in_edge_iterator; return std::make_pair(in_edge_iterator(pred, first, last), in_edge_iterator(pred, last, last)); } // 
O(N) template <typename D, typename VP, typename EP, typename GP, typename A> typename adjacency_matrix<D,VP,EP,GP,A>::degree_size_type in_degree(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<D,VP,EP,GP,A>& g) { typename adjacency_matrix<D,VP,EP,GP,A>::degree_size_type n = 0; typename adjacency_matrix<D,VP,EP,GP,A>::in_edge_iterator f, l; for (boost::tie(f, l) = in_edges(u, g); f != l; ++f) ++n; return n; } //========================================================================= // Functions required by the AdjacencyGraph concept template <typename D, typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<D,VP,EP,GP,A>::adjacency_iterator, typename adjacency_matrix<D,VP,EP,GP,A>::adjacency_iterator> adjacent_vertices (typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, const adjacency_matrix<D,VP,EP,GP,A>& g_) { typedef adjacency_matrix<D,VP,EP,GP,A> Graph; const Graph& cg = static_cast<const Graph&>(g_); Graph& g = const_cast<Graph&>(cg); typedef typename Graph::adjacency_iterator adjacency_iterator; typename Graph::out_edge_iterator first, last; boost::tie(first, last) = out_edges(u, g); return std::make_pair(adjacency_iterator(first, &g), adjacency_iterator(last, &g)); } //========================================================================= // Functions required by the VertexListGraph concept template <typename D, typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<D,VP,EP,GP,A>::vertex_iterator, typename adjacency_matrix<D,VP,EP,GP,A>::vertex_iterator> vertices(const adjacency_matrix<D,VP,EP,GP,A>& g_) { typedef adjacency_matrix<D,VP,EP,GP,A> Graph; Graph& g = const_cast<Graph&>(g_); return std::make_pair(g.m_vertex_set.begin(), g.m_vertex_set.end()); } template <typename D, typename VP, typename EP, typename GP, typename A> typename adjacency_matrix<D,VP,EP,GP,A>::vertices_size_type num_vertices(const adjacency_matrix<D,VP,EP,GP,A>& g) { 
return g.m_vertex_set.size(); } //========================================================================= // Functions required by the EdgeListGraph concept template <typename D, typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<D,VP,EP,GP,A>::edge_iterator, typename adjacency_matrix<D,VP,EP,GP,A>::edge_iterator> edges(const adjacency_matrix<D,VP,EP,GP,A>& g_) { typedef adjacency_matrix<D,VP,EP,GP,A> Graph; Graph& g = const_cast<Graph&>(g_); typename Graph::unfiltered_edge_iter first(g.m_matrix.begin(), g.m_matrix.begin(), g.m_vertex_set.size()), last(g.m_matrix.end(), g.m_matrix.begin(), g.m_vertex_set.size()); detail::does_edge_exist pred; typedef typename Graph::edge_iterator edge_iterator; return std::make_pair(edge_iterator(pred, first, last), edge_iterator(pred, last, last)); } // O(1) template <typename D, typename VP, typename EP, typename GP, typename A> typename adjacency_matrix<D,VP,EP,GP,A>::edges_size_type num_edges(const adjacency_matrix<D,VP,EP,GP,A>& g) { return g.m_num_edges; } //========================================================================= // Functions required by the MutableGraph concept // O(1) template <typename D, typename VP, typename EP, typename GP, typename A, typename EP2> std::pair<typename adjacency_matrix<D,VP,EP,GP,A>::edge_descriptor, bool> add_edge(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor v, const EP2& ep, adjacency_matrix<D,VP,EP,GP,A>& g) { typedef typename adjacency_matrix<D,VP,EP,GP,A>::edge_descriptor edge_descriptor; if (detail::get_edge_exists(g.get_edge(u,v), 0) == false) { ++(g.m_num_edges); detail::set_edge_property(g.get_edge(u,v), EP(ep), 0); detail::set_edge_exists(g.get_edge(u,v), true, 0); return std::make_pair (edge_descriptor(true, u, v, &detail::get_edge_property(g.get_edge(u,v))), true); } else return std::make_pair (edge_descriptor(true, u, v, 
&detail::get_edge_property(g.get_edge(u,v))), false); } // O(1) template <typename D, typename VP, typename EP, typename GP, typename A> std::pair<typename adjacency_matrix<D,VP,EP,GP,A>::edge_descriptor, bool> add_edge(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor v, adjacency_matrix<D,VP,EP,GP,A>& g) { EP ep; return add_edge(u, v, ep, g); } // O(1) template <typename D, typename VP, typename EP, typename GP, typename A> void remove_edge(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor u, typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor v, adjacency_matrix<D,VP,EP,GP,A>& g) { // Don'remove the edge unless it already exists. if(detail::get_edge_exists(g.get_edge(u,v), 0)) { --(g.m_num_edges); detail::set_edge_exists(g.get_edge(u,v), false, 0); } } // O(1) template <typename D, typename VP, typename EP, typename GP, typename A> void remove_edge(typename adjacency_matrix<D,VP,EP,GP,A>::edge_descriptor e, adjacency_matrix<D,VP,EP,GP,A>& g) { remove_edge(source(e, g), target(e, g), g); } template <typename D, typename VP, typename EP, typename GP, typename A> inline typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor add_vertex(adjacency_matrix<D,VP,EP,GP,A>& g) { // UNDER CONSTRUCTION BOOST_ASSERT(false); return *vertices(g).first; } template <typename D, typename VP, typename EP, typename GP, typename A, typename VP2> inline typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor add_vertex(const VP2& /*vp*/, adjacency_matrix<D,VP,EP,GP,A>& g) { // UNDER CONSTRUCTION BOOST_ASSERT(false); return *vertices(g).first; } template <typename D, typename VP, typename EP, typename GP, typename A> inline void remove_vertex(typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor /*u*/, adjacency_matrix<D,VP,EP,GP,A>& /*g*/) { // UNDER CONSTRUCTION BOOST_ASSERT(false); } // O(V) template <typename VP, typename EP, typename GP, typename A> void clear_vertex (typename 
// NOTE(review): this chunk begins mid-file; the template header and the
// name of the first clear_vertex overload (directedS case) appear before
// this chunk and are not visible here — only its parameter list and body.
adjacency_matrix<directedS,VP,EP,GP,A>::vertex_descriptor u,
 adjacency_matrix<directedS,VP,EP,GP,A>& g)
{
  // Directed case: remove all out-edges of u, then all in-edges of u,
  // by scanning every vertex twice.
  typename adjacency_matrix<directedS,VP,EP,GP,A>::vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi)
    remove_edge(u, *vi, g);
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi)
    remove_edge(*vi, u, g);
}

// O(V)
// Undirected case: one pass suffices, since removing the edge (u, *vi)
// removes the single undirected edge between the pair.
template <typename VP, typename EP, typename GP, typename A>
void clear_vertex
  (typename adjacency_matrix<undirectedS,VP,EP,GP,A>::vertex_descriptor u,
   adjacency_matrix<undirectedS,VP,EP,GP,A>& g)
{
  typename adjacency_matrix<undirectedS,VP,EP,GP,A>::vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi)
    remove_edge(u, *vi, g);
}

//=========================================================================
// Functions required by the PropertyGraph concept

// Dispatch helper selected on the property kind (vertex vs. edge) via the
// Kind parameter.  Each specialization supplies the property-map typedefs
// and the static get_* accessors used by the get/put overloads below.
template <typename D, typename VP, typename EP, typename GP, typename A,
          typename Prop, typename Kind>
struct adj_mat_pm_helper;

// Vertex-property specialization: the property bundles live in the
// contiguous g.m_vertex_properties vector, indexed by vertex descriptor.
template <typename D, typename VP, typename EP, typename GP, typename A,
          typename Prop>
struct adj_mat_pm_helper<D, VP, EP, GP, A, Prop, vertex_property_tag> {
  typedef typename graph_traits<adjacency_matrix<D, VP, EP, GP, A> >::vertex_descriptor arg_type;
  // Vertex descriptors serve directly as vector indices, so the identity
  // map is a sufficient index map.
  typedef typed_identity_property_map<arg_type> vi_map_type;
  typedef iterator_property_map<typename std::vector<VP>::iterator, vi_map_type> all_map_type;
  typedef iterator_property_map<typename std::vector<VP>::const_iterator, vi_map_type> all_map_const_type;
  // Project the single property requested by Prop out of the full bundle VP.
  typedef transform_value_property_map<
    detail::lookup_one_property_f<VP, Prop>, all_map_type> type;
  typedef transform_value_property_map<
    detail::lookup_one_property_f<const VP, Prop>, all_map_const_type> const_type;
  typedef typename property_traits<type>::reference single_nonconst_type;
  typedef typename property_traits<const_type>::reference single_const_type;

  // Whole-graph property map, mutable graph.
  static type get_nonconst(adjacency_matrix<D, VP, EP, GP, A>& g, Prop prop) {
    return type(prop,
                all_map_type(g.m_vertex_properties.begin(), vi_map_type()));
  }

  // Whole-graph property map, const graph.
  static const_type get_const(const adjacency_matrix<D, VP, EP, GP, A>& g, Prop prop) {
    return const_type(prop,
                      all_map_const_type(g.m_vertex_properties.begin(), vi_map_type()));
  }

  // Single-vertex access, mutable reference.
  static single_nonconst_type get_nonconst_one(adjacency_matrix<D, VP, EP, GP, A>& g, Prop prop, arg_type v) {
    return lookup_one_property<VP, Prop>::lookup(g.m_vertex_properties[v], prop);
  }

  // Single-vertex access, const reference.
  static single_const_type get_const_one(const adjacency_matrix<D, VP, EP, GP, A>& g, Prop prop, arg_type v) {
    return lookup_one_property<const VP, Prop>::lookup(g.m_vertex_properties[v], prop);
  }
};

// Edge-property specialization: the property bundle is reached through the
// opaque pointer stored in the edge descriptor (e.get_property()).
template <typename D, typename VP, typename EP, typename GP, typename A,
          typename Tag>
struct adj_mat_pm_helper<D, VP, EP, GP, A, Tag, edge_property_tag> {
  typedef typename graph_traits<adjacency_matrix<D, VP, EP, GP, A> >::edge_descriptor edge_descriptor;

  // Function object mapping an edge descriptor to a reference to the Tag
  // member of its property bundle; IsConst selects const access.
  template <typename IsConst>
  struct lookup_property_from_edge {
    Tag tag;
    lookup_property_from_edge(Tag tag): tag(tag) {}
    typedef typename boost::mpl::if_<IsConst, const EP, EP>::type ep_type_nonref;
    typedef ep_type_nonref& ep_type;
    typedef typename lookup_one_property<ep_type_nonref, Tag>::type& result_type;
    result_type operator()(edge_descriptor e) const {
      return lookup_one_property<ep_type_nonref, Tag>::lookup(
        *static_cast<ep_type_nonref*>(e.get_property()), tag);
    }
  };

  typedef function_property_map<
    lookup_property_from_edge<boost::mpl::false_>,
    typename graph_traits<adjacency_matrix<D, VP, EP, GP, A> >::edge_descriptor> type;
  typedef function_property_map<
    lookup_property_from_edge<boost::mpl::true_>,
    typename graph_traits<adjacency_matrix<D, VP, EP, GP, A> >::edge_descriptor> const_type;
  typedef edge_descriptor arg_type;
  typedef typename lookup_property_from_edge<boost::mpl::false_>::result_type single_nonconst_type;
  typedef typename lookup_property_from_edge<boost::mpl::true_>::result_type single_const_type;

  // Whole-graph maps are stateless apart from the tag; the graph argument
  // is unused because all data is reachable from the edge descriptor.
  static type get_nonconst(adjacency_matrix<D, VP, EP, GP, A>& g, Tag tag) {
    return type(tag);
  }

  static const_type get_const(const adjacency_matrix<D, VP, EP, GP, A>& g, Tag tag) {
    return const_type(tag);
  }

  // Single-edge access, mutable reference.
  static single_nonconst_type get_nonconst_one(adjacency_matrix<D, VP, EP, GP, A>& g, Tag tag, edge_descriptor e) {
    return lookup_one_property<EP, Tag>::lookup(*static_cast<EP*>(e.get_property()), tag);
  }

  // Single-edge access, const reference.
  static single_const_type get_const_one(const adjacency_matrix<D, VP, EP, GP, A>& g, Tag tag, edge_descriptor e) {
    return lookup_one_property<const EP, Tag>::lookup(*static_cast<const EP*>(e.get_property()), tag);
  }
};

// property_map specialization: forwards to the helper above, choosing the
// vertex or edge branch from the property kind deduced for Tag.
template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
struct property_map<adjacency_matrix<D,VP,EP,GP,A>, Tag> :
  adj_mat_pm_helper<D, VP, EP, GP, A, Tag,
                    typename detail::property_kind_from_graph<adjacency_matrix<D, VP, EP, GP, A>, Tag>::type> {};

// get(tag, g): whole-graph property map, mutable graph.
template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::type
get(Tag tag, adjacency_matrix<D, VP, EP, GP, A>& g) {
  return property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::get_nonconst(g, tag);
}

// get(tag, g): whole-graph property map, const graph.
template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::const_type
get(Tag tag, const adjacency_matrix<D, VP, EP, GP, A>& g) {
  return property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::get_const(g, tag);
}

// get(tag, g, a): single-element access, mutable graph.
template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::single_nonconst_type
get(Tag tag, adjacency_matrix<D, VP, EP, GP, A>& g,
    typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::arg_type a) {
  return property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::get_nonconst_one(g, tag, a);
}

// get(tag, g, a): single-element access, const graph.
template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::single_const_type
get(Tag tag, const adjacency_matrix<D, VP, EP, GP, A>& g,
    typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::arg_type a) {
  return property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::get_const_one(g, tag, a);
}

// put(tag, g, a, val): assigns through the single-element accessor.
template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
void put(Tag tag, adjacency_matrix<D, VP, EP, GP, A>& g,
         typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::arg_type a,
         typename property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::single_const_type val) {
  property_map<adjacency_matrix<D, VP, EP, GP, A>, Tag>::get_nonconst_one(g, tag, a) = val;
}

// O(1)
// Graph-level (as opposed to per-vertex/per-edge) property accessors,
// all delegating to get_property_value on g.m_property.
template <typename D, typename VP, typename EP, typename GP, typename A,
          typename Tag, typename Value>
inline void
set_property(adjacency_matrix<D,VP,EP,GP,A>& g, Tag tag, const Value& value) {
  get_property_value(g.m_property, tag) = value;
}

template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
inline
typename graph_property<adjacency_matrix<D,VP,EP,GP,A>, Tag>::type&
get_property(adjacency_matrix<D,VP,EP,GP,A>& g, Tag tag) {
  return get_property_value(g.m_property, tag);
}

template <typename D, typename VP, typename EP, typename GP, typename A, typename Tag>
inline
const typename graph_property<adjacency_matrix<D,VP,EP,GP,A>, Tag>::type&
get_property(const adjacency_matrix<D,VP,EP,GP,A>& g, Tag tag) {
  return get_property_value(g.m_property, tag);
}

//=========================================================================
// Vertex Property Map

// vertex_index is the identity map: descriptors already are 0-based indices.
template <typename D, typename VP, typename EP, typename GP, typename A>
struct property_map<adjacency_matrix<D, VP, EP, GP, A>, vertex_index_t> {
  typedef typename adjacency_matrix<D, VP, EP, GP, A>::vertex_descriptor Vertex;
  typedef typed_identity_property_map<Vertex> type;
  typedef type const_type;
};

template <typename D, typename VP, typename EP, typename GP, typename A>
typename property_map<adjacency_matrix<D, VP, EP, GP, A>, vertex_index_t>::const_type
get(vertex_index_t, adjacency_matrix<D, VP, EP, GP, A>&) {
  return typename property_map<adjacency_matrix<D, VP, EP, GP, A>, vertex_index_t>::const_type();
}

// The index of a single vertex is the vertex descriptor itself.
template <typename D, typename VP, typename EP, typename GP, typename A>
typename adjacency_matrix<D, VP, EP, GP, A>::vertex_descriptor
get(vertex_index_t, adjacency_matrix<D, VP, EP, GP, A>&,
    typename adjacency_matrix<D, VP, EP, GP, A>::vertex_descriptor v) {
  return v;
}

template <typename D, typename VP, typename EP, typename GP, typename A>
typename property_map<adjacency_matrix<D, VP, EP, GP, A>, vertex_index_t>::const_type
get(vertex_index_t, const adjacency_matrix<D, VP, EP, GP, A>&) {
  return typename property_map<adjacency_matrix<D, VP, EP, GP, A>, vertex_index_t>::const_type();
}

template <typename D, typename VP, typename EP, typename GP, typename A>
typename adjacency_matrix<D, VP, EP, GP, A>::vertex_descriptor
get(vertex_index_t, const adjacency_matrix<D, VP, EP, GP, A>&,
    typename adjacency_matrix<D, VP, EP, GP, A>::vertex_descriptor v) {
  return v;
}

//=========================================================================
// Other Functions

// The n-th vertex is simply descriptor n.
template <typename D, typename VP, typename EP, typename GP, typename A>
typename adjacency_matrix<D,VP,EP,GP,A>::vertex_descriptor
vertex(typename adjacency_matrix<D,VP,EP,GP,A>::vertices_size_type n,
       const adjacency_matrix<D,VP,EP,GP,A>&) {
  return n;
}

template <typename D, typename VP, typename EP, typename GP, typename A>
struct graph_mutability_traits<adjacency_matrix<D, VP, EP, GP, A> > {
  typedef mutable_edge_property_graph_tag category;
};

} // namespace boost

#endif // BOOST_ADJACENCY_MATRIX_HPP
{ "alphanum_fraction": 0.6190547381, "author": null, "avg_line_length": 37.7300564061, "converted": null, "ext": "hpp", "file": null, "hexsha": "aadf90407b8b22e172cbefc8b8d692a687b25c7f", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 139, "max_forks_repo_forks_event_max_datetime": "2022-01-31T15:21:16.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-15T20:09:31.000Z", "max_forks_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "isuhao/klib", "max_forks_repo_path": "master/core/third/boost/graph/adjacency_matrix.hpp", "max_issues_count": 197, "max_issues_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_issues_repo_issues_event_max_datetime": "2019-05-31T17:57:51.000Z", "max_issues_repo_issues_event_min_datetime": "2017-07-06T16:53:59.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "isuhao/klib", "max_issues_repo_path": "master/core/third/boost/graph/adjacency_matrix.hpp", "max_line_length": 138, "max_stars_count": 198, "max_stars_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "importlib/klib", "max_stars_repo_path": "master/core/third/boost/graph/adjacency_matrix.hpp", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:46:46.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-13T05:47:18.000Z", "num_tokens": 10804, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 46823 }
"""Manual smoke-tests for the SimpleMemoryTestingEnv gym environments.

Each test constructs one environment variant, drives it with a fixed
sequence of discrete actions, and shows intermediate observations with
matplotlib so a human can visually inspect the behaviour.

Fix applied: the ``gym.make(f"...")`` calls used f-string prefixes on
literals containing no placeholders; the pointless ``f`` prefixes were
removed (the resulting strings are byte-identical).
"""
import gym
import numpy as np
import random
# Imported for its side effect: registers the SimpleMemoryTestingEnv-* ids
# with gym (the module name is not referenced directly below).
import simple_memory_testing_env
import matplotlib.pyplot as plt

# Discrete action codes accepted by env.step().
rotate_right = 1
rotate_left = 0
forward = 2


def test_env():
    """Drive the base env and display observations at several waypoints."""
    env = gym.make("SimpleMemoryTestingEnv-v0")
    obs = env.reset()
    #env.render()
    obs = env.step(forward)
    obs = env.step(forward)
    obs = env.step(forward)
    # env.step returns a tuple; obs[0] is assumed to be the image
    # observation (it is what gets rendered below).
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(rotate_left)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()


def test_2colors_env():
    """Drive the 2-colour variant with a seeded reset and show observations."""
    env = gym.make("SimpleMemoryTestingEnv-2Colors-v0")
    obs = env.reset(seed=1)
    env.render()
    #env.reset(seed=2)
    #env.render()
    #env.reset(seed=3)
    #env.render()
    # reset() returns the observation directly, so no [0] indexing here.
    plt.imshow(obs)
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(rotate_left)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()


def test_easy_env():
    """Drive the easy variant and display observations along the way."""
    env = gym.make("SimpleMemoryTestingEnv-Easy-v0")
    obs = env.reset()
    #env.render()
    obs = env.step(forward)
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(rotate_left)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()


def test_easy_2colors_env():
    """Drive the easy 2-colour variant with a seeded re-reset."""
    env = gym.make("SimpleMemoryTestingEnv-Easy-2Colors-v0")
    obs = env.reset()
    #env.render()
    # NOTE: obs shown below comes from the first (unseeded) reset; the
    # seeded reset's return value is discarded, matching original behaviour.
    env.reset(seed=1)
    #import ipdb; ipdb.set_trace()
    #env.reset(seed=2)
    #env.render()
    plt.imshow(obs)
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(rotate_left)
    plt.imshow(obs[0])
    plt.show()
    obs = env.step(forward)
    obs = env.step(forward)
    plt.imshow(obs[0])
    plt.show()


if __name__ == "__main__":
    seed = 1
    np.random.seed(seed)
    random.seed(seed)
    #test_env()
    test_2colors_env()
    #test_easy_env()
    #test_easy_2colors_env()
{ "alphanum_fraction": 0.584627964, "author": null, "avg_line_length": 16.527027027, "converted": null, "ext": "py", "file": null, "hexsha": "5386a4f595c20a63b86a0832b636d2617225ee84", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d282f2e510906d4c8f225047f4937f3bf96e7784", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Near32/SimpleMemoryTestingEnv", "max_forks_repo_path": "tests/env/test_simple_memory_testing_env.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "d282f2e510906d4c8f225047f4937f3bf96e7784", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Near32/SimpleMemoryTestingEnv", "max_issues_repo_path": "tests/env/test_simple_memory_testing_env.py", "max_line_length": 61, "max_stars_count": null, "max_stars_repo_head_hexsha": "d282f2e510906d4c8f225047f4937f3bf96e7784", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Near32/SimpleMemoryTestingEnv", "max_stars_repo_path": "tests/env/test_simple_memory_testing_env.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 684, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2446 }
(* NOTE(review): machine-produced Isabelle proof-state trace — the
   [STATEMENT]/[PROOF STATE]/[PROOF STEP] markers interleave the proof
   source with the prover's goal output, so this is a log, not a theory
   file.  The lemma shows no node n can be dynamically standard-control
   dependent on (_Exit_): such a dependence yields a path whose first
   edge a is valid and whose target is postdominated by (_Exit_),
   contradicting Exit_no_postdominator. *)
[STATEMENT] lemma Exit_not_dyn_standard_control_dependent: assumes control:"n controls\<^sub>s (_Exit_) via as" shows "False"
[PROOF STATE] proof (prove) goal (1 subgoal): 1. False
[PROOF STEP] proof -
[PROOF STATE] proof (state) goal (1 subgoal): 1. False
[PROOF STEP] from control
[PROOF STATE] proof (chain) picking this: n controls\<^sub>s (_Exit_) via as
[PROOF STEP] obtain a as' where path:"n -as\<rightarrow>* (_Exit_)" and as:"as = a#as'" and pd:"(_Exit_) postdominates (targetnode a)"
[PROOF STATE] proof (prove) using this: n controls\<^sub>s (_Exit_) via as goal (1 subgoal): 1. (\<And>a as'. \<lbrakk>n -as\<rightarrow>* (_Exit_); as = a # as'; (_Exit_) postdominates targetnode a\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP] by(auto simp:dyn_standard_control_dependence_def)
[PROOF STATE] proof (state) this: n -as\<rightarrow>* (_Exit_) as = a # as' (_Exit_) postdominates targetnode a goal (1 subgoal): 1. False
[PROOF STEP] from path as
[PROOF STATE] proof (chain) picking this: n -as\<rightarrow>* (_Exit_) as = a # as'
[PROOF STEP] have "n -[]@a#as'\<rightarrow>* (_Exit_)"
[PROOF STATE] proof (prove) using this: n -as\<rightarrow>* (_Exit_) as = a # as' goal (1 subgoal): 1. n -[] @ a # as'\<rightarrow>* (_Exit_)
[PROOF STEP] by simp
[PROOF STATE] proof (state) this: n -[] @ a # as'\<rightarrow>* (_Exit_) goal (1 subgoal): 1. False
[PROOF STEP] hence "valid_edge a"
[PROOF STATE] proof (prove) using this: n -[] @ a # as'\<rightarrow>* (_Exit_) goal (1 subgoal): 1. valid_edge a
[PROOF STEP] by(fastforce dest:path_split)
[PROOF STATE] proof (state) this: valid_edge a goal (1 subgoal): 1. False
[PROOF STEP] with pd
[PROOF STATE] proof (chain) picking this: (_Exit_) postdominates targetnode a valid_edge a
[PROOF STEP] show False
[PROOF STATE] proof (prove) using this: (_Exit_) postdominates targetnode a valid_edge a goal (1 subgoal): 1. False
[PROOF STEP] by -(rule Exit_no_postdominator,auto)
[PROOF STATE] proof (state) this: False goal: No subgoals!
[PROOF STEP] qed
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Slicing_Basic_DynStandardControlDependence", "hexsha": null, "include": null, "lang": null, "length": 13, "llama_tokens": 838, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
/*
  *****************************************************
  THIS IS AN AUTOMATICALLY GENERATED FILE. DO NOT EDIT.
  *****************************************************
  Generated by:
    gltbx.generate_defines_bpl
 */

#include <boost/python/def.hpp>
#include <boost/python/scope.hpp>
#include <gltbx/include_opengl.h>

namespace gltbx { namespace gl { namespace boost_python {

// Exports one batch of OpenGL GL_* preprocessor constants as attributes of
// the given Boost.Python scope (chunk 06 of the generated series).  Each
// constant is wrapped in #if defined(...) so names that are absent from
// this platform's GL headers are silently skipped.
void wrap_defines_06(boost::python::scope scope)
{
#if defined(GL_RGB10_A2)
  scope.attr("GL_RGB10_A2") = GL_RGB10_A2;
#endif
#if defined(GL_RGB12)
  scope.attr("GL_RGB12") = GL_RGB12;
#endif
#if defined(GL_RGB16)
  scope.attr("GL_RGB16") = GL_RGB16;
#endif
#if defined(GL_RGB4)
  scope.attr("GL_RGB4") = GL_RGB4;
#endif
#if defined(GL_RGB5)
  scope.attr("GL_RGB5") = GL_RGB5;
#endif
#if defined(GL_RGB5_A1)
  scope.attr("GL_RGB5_A1") = GL_RGB5_A1;
#endif
#if defined(GL_RGB8)
  scope.attr("GL_RGB8") = GL_RGB8;
#endif
#if defined(GL_RGBA)
  scope.attr("GL_RGBA") = GL_RGBA;
#endif
#if defined(GL_RGBA12)
  scope.attr("GL_RGBA12") = GL_RGBA12;
#endif
#if defined(GL_RGBA16)
  scope.attr("GL_RGBA16") = GL_RGBA16;
#endif
#if defined(GL_RGBA2)
  scope.attr("GL_RGBA2") = GL_RGBA2;
#endif
#if defined(GL_RGBA4)
  scope.attr("GL_RGBA4") = GL_RGBA4;
#endif
#if defined(GL_RGBA8)
  scope.attr("GL_RGBA8") = GL_RGBA8;
#endif
#if defined(GL_RGBA_MODE)
  scope.attr("GL_RGBA_MODE") = GL_RGBA_MODE;
#endif
#if defined(GL_RIGHT)
  scope.attr("GL_RIGHT") = GL_RIGHT;
#endif
#if defined(GL_S)
  scope.attr("GL_S") = GL_S;
#endif
#if defined(GL_SCISSOR_BIT)
  scope.attr("GL_SCISSOR_BIT") = GL_SCISSOR_BIT;
#endif
#if defined(GL_SCISSOR_BOX)
  scope.attr("GL_SCISSOR_BOX") = GL_SCISSOR_BOX;
#endif
#if defined(GL_SCISSOR_TEST)
  scope.attr("GL_SCISSOR_TEST") = GL_SCISSOR_TEST;
#endif
#if defined(GL_SELECT)
  scope.attr("GL_SELECT") = GL_SELECT;
#endif
#if defined(GL_SELECTION_BUFFER_POINTER)
  scope.attr("GL_SELECTION_BUFFER_POINTER") = GL_SELECTION_BUFFER_POINTER;
#endif
#if defined(GL_SET)
  scope.attr("GL_SET") = GL_SET;
#endif
#if defined(GL_SHADE_MODEL)
  scope.attr("GL_SHADE_MODEL") = GL_SHADE_MODEL;
#endif
#if defined(GL_SHININESS)
  scope.attr("GL_SHININESS") = GL_SHININESS;
#endif
#if defined(GL_SHORT)
  scope.attr("GL_SHORT") = GL_SHORT;
#endif
#if defined(GL_SMOOTH)
  scope.attr("GL_SMOOTH") = GL_SMOOTH;
#endif
#if defined(GL_SPECULAR)
  scope.attr("GL_SPECULAR") = GL_SPECULAR;
#endif
#if defined(GL_SPHERE_MAP)
  scope.attr("GL_SPHERE_MAP") = GL_SPHERE_MAP;
#endif
#if defined(GL_SPOT_CUTOFF)
  scope.attr("GL_SPOT_CUTOFF") = GL_SPOT_CUTOFF;
#endif
#if defined(GL_SPOT_DIRECTION)
  scope.attr("GL_SPOT_DIRECTION") = GL_SPOT_DIRECTION;
#endif
#if defined(GL_SPOT_EXPONENT)
  scope.attr("GL_SPOT_EXPONENT") = GL_SPOT_EXPONENT;
#endif
#if defined(GL_SRC_ALPHA)
  scope.attr("GL_SRC_ALPHA") = GL_SRC_ALPHA;
#endif
#if defined(GL_SRC_ALPHA_SATURATE)
  scope.attr("GL_SRC_ALPHA_SATURATE") = GL_SRC_ALPHA_SATURATE;
#endif
#if defined(GL_SRC_COLOR)
  scope.attr("GL_SRC_COLOR") = GL_SRC_COLOR;
#endif
#if defined(GL_STACK_OVERFLOW)
  scope.attr("GL_STACK_OVERFLOW") = GL_STACK_OVERFLOW;
#endif
#if defined(GL_STACK_UNDERFLOW)
  scope.attr("GL_STACK_UNDERFLOW") = GL_STACK_UNDERFLOW;
#endif
#if defined(GL_STENCIL)
  scope.attr("GL_STENCIL") = GL_STENCIL;
#endif
#if defined(GL_STENCIL_BITS)
  scope.attr("GL_STENCIL_BITS") = GL_STENCIL_BITS;
#endif
#if defined(GL_STENCIL_BUFFER_BIT)
  scope.attr("GL_STENCIL_BUFFER_BIT") = GL_STENCIL_BUFFER_BIT;
#endif
#if defined(GL_STENCIL_CLEAR_VALUE)
  scope.attr("GL_STENCIL_CLEAR_VALUE") = GL_STENCIL_CLEAR_VALUE;
#endif
#if defined(GL_STENCIL_FAIL)
  scope.attr("GL_STENCIL_FAIL") = GL_STENCIL_FAIL;
#endif
#if defined(GL_STENCIL_FUNC)
  scope.attr("GL_STENCIL_FUNC") = GL_STENCIL_FUNC;
#endif
#if defined(GL_STENCIL_INDEX)
  scope.attr("GL_STENCIL_INDEX") = GL_STENCIL_INDEX;
#endif
#if defined(GL_STENCIL_PASS_DEPTH_FAIL)
  scope.attr("GL_STENCIL_PASS_DEPTH_FAIL") = GL_STENCIL_PASS_DEPTH_FAIL;
#endif
#if defined(GL_STENCIL_PASS_DEPTH_PASS)
  scope.attr("GL_STENCIL_PASS_DEPTH_PASS") = GL_STENCIL_PASS_DEPTH_PASS;
#endif
#if defined(GL_STENCIL_REF)
  scope.attr("GL_STENCIL_REF") = GL_STENCIL_REF;
#endif
#if defined(GL_STENCIL_TEST)
  scope.attr("GL_STENCIL_TEST") = GL_STENCIL_TEST;
#endif
#if defined(GL_STENCIL_VALUE_MASK)
  scope.attr("GL_STENCIL_VALUE_MASK") = GL_STENCIL_VALUE_MASK;
#endif
#if defined(GL_STENCIL_WRITEMASK)
  scope.attr("GL_STENCIL_WRITEMASK") = GL_STENCIL_WRITEMASK;
#endif
#if defined(GL_STEREO)
  scope.attr("GL_STEREO") = GL_STEREO;
#endif
#if defined(GL_SUBPIXEL_BITS)
  scope.attr("GL_SUBPIXEL_BITS") = GL_SUBPIXEL_BITS;
#endif
#if defined(GL_T)
  scope.attr("GL_T") = GL_T;
#endif
#if defined(GL_T2F_C3F_V3F)
  scope.attr("GL_T2F_C3F_V3F") = GL_T2F_C3F_V3F;
#endif
#if defined(GL_T2F_C4F_N3F_V3F)
  scope.attr("GL_T2F_C4F_N3F_V3F") = GL_T2F_C4F_N3F_V3F;
#endif
#if defined(GL_T2F_C4UB_V3F)
  scope.attr("GL_T2F_C4UB_V3F") = GL_T2F_C4UB_V3F;
#endif
#if defined(GL_T2F_N3F_V3F)
  scope.attr("GL_T2F_N3F_V3F") = GL_T2F_N3F_V3F;
#endif
#if defined(GL_T2F_V3F)
  scope.attr("GL_T2F_V3F") = GL_T2F_V3F;
#endif
#if defined(GL_T4F_C4F_N3F_V4F)
  scope.attr("GL_T4F_C4F_N3F_V4F") = GL_T4F_C4F_N3F_V4F;
#endif
#if defined(GL_T4F_V4F)
  scope.attr("GL_T4F_V4F") = GL_T4F_V4F;
#endif
#if defined(GL_TEXTURE)
  scope.attr("GL_TEXTURE") = GL_TEXTURE;
#endif
#if defined(GL_TEXTURE_1D)
  scope.attr("GL_TEXTURE_1D") = GL_TEXTURE_1D;
#endif
#if defined(GL_TEXTURE_1D_BINDING)
  scope.attr("GL_TEXTURE_1D_BINDING") = GL_TEXTURE_1D_BINDING;
#endif
#if defined(GL_TEXTURE_2D)
  scope.attr("GL_TEXTURE_2D") = GL_TEXTURE_2D;
#endif
#if defined(GL_TEXTURE_2D_BINDING)
  scope.attr("GL_TEXTURE_2D_BINDING") = GL_TEXTURE_2D_BINDING;
#endif
#if defined(GL_TEXTURE_ALPHA_SIZE)
  scope.attr("GL_TEXTURE_ALPHA_SIZE") = GL_TEXTURE_ALPHA_SIZE;
#endif
#if defined(GL_TEXTURE_BIT)
  scope.attr("GL_TEXTURE_BIT") = GL_TEXTURE_BIT;
#endif
#if defined(GL_AUX0)
  scope.attr("GL_AUX0") = GL_AUX0;
#endif
#if defined(GL_AUX1)
  scope.attr("GL_AUX1") = GL_AUX1;
#endif
#if defined(GL_AUX2)
  scope.attr("GL_AUX2") = GL_AUX2;
#endif
#if defined(GL_AUX3)
  scope.attr("GL_AUX3") = GL_AUX3;
#endif
#if defined(GL_LIGHT0)
  scope.attr("GL_LIGHT0") = GL_LIGHT0;
#endif
#if defined(GL_LIGHT1)
  scope.attr("GL_LIGHT1") = GL_LIGHT1;
#endif
#if defined(GL_LIGHT2)
  scope.attr("GL_LIGHT2") = GL_LIGHT2;
#endif
#if defined(GL_LIGHT3)
  scope.attr("GL_LIGHT3") = GL_LIGHT3;
#endif
#if defined(GL_LIGHT4)
  scope.attr("GL_LIGHT4") = GL_LIGHT4;
#endif
#if defined(GL_LIGHT5)
  scope.attr("GL_LIGHT5") = GL_LIGHT5;
#endif
#if defined(GL_LIGHT6)
  scope.attr("GL_LIGHT6") = GL_LIGHT6;
#endif
#if defined(GL_LIGHT7)
  scope.attr("GL_LIGHT7") = GL_LIGHT7;
#endif
#if defined(GL_TEXTURE0)
  scope.attr("GL_TEXTURE0") = GL_TEXTURE0;
#endif
#if defined(GL_TEXTURE1)
  scope.attr("GL_TEXTURE1") = GL_TEXTURE1;
#endif
#if defined(GL_TEXTURE2)
  scope.attr("GL_TEXTURE2") = GL_TEXTURE2;
#endif
#if defined(GL_TEXTURE3)
  scope.attr("GL_TEXTURE3") = GL_TEXTURE3;
#endif
#if defined(GL_TEXTURE4)
  scope.attr("GL_TEXTURE4") = GL_TEXTURE4;
#endif
#if defined(GL_TEXTURE5)
  scope.attr("GL_TEXTURE5") = GL_TEXTURE5;
#endif
#if defined(GL_TEXTURE6)
  scope.attr("GL_TEXTURE6") = GL_TEXTURE6;
#endif
#if defined(GL_TEXTURE7)
  scope.attr("GL_TEXTURE7") = GL_TEXTURE7;
#endif
#if defined(GL_TEXTURE8)
  scope.attr("GL_TEXTURE8") = GL_TEXTURE8;
#endif
#if defined(GL_TEXTURE9)
  scope.attr("GL_TEXTURE9") = GL_TEXTURE9;
#endif
#if defined(GL_TEXTURE10)
  scope.attr("GL_TEXTURE10") = GL_TEXTURE10;
#endif
#if defined(GL_TEXTURE11)
  scope.attr("GL_TEXTURE11") = GL_TEXTURE11;
#endif
#if defined(GL_TEXTURE12)
  scope.attr("GL_TEXTURE12") = GL_TEXTURE12;
#endif
#if defined(GL_TEXTURE13)
  scope.attr("GL_TEXTURE13") = GL_TEXTURE13;
#endif
#if defined(GL_TEXTURE14)
  scope.attr("GL_TEXTURE14") = GL_TEXTURE14;
#endif
#if defined(GL_TEXTURE15)
  scope.attr("GL_TEXTURE15") = GL_TEXTURE15;
#endif
#if defined(GL_TEXTURE16)
  scope.attr("GL_TEXTURE16") = GL_TEXTURE16;
#endif
#if defined(GL_TEXTURE17)
  scope.attr("GL_TEXTURE17") = GL_TEXTURE17;
#endif
#if defined(GL_TEXTURE18)
  scope.attr("GL_TEXTURE18") = GL_TEXTURE18;
#endif
#if defined(GL_TEXTURE19)
  scope.attr("GL_TEXTURE19") = GL_TEXTURE19;
#endif
#if defined(GL_TEXTURE20)
  scope.attr("GL_TEXTURE20") = GL_TEXTURE20;
#endif
#if defined(GL_TEXTURE21)
  scope.attr("GL_TEXTURE21") = GL_TEXTURE21;
#endif
#if defined(GL_TEXTURE22)
  scope.attr("GL_TEXTURE22") = GL_TEXTURE22;
#endif
#if defined(GL_TEXTURE23)
  scope.attr("GL_TEXTURE23") = GL_TEXTURE23;
#endif
#if defined(GL_TEXTURE24)
  scope.attr("GL_TEXTURE24") = GL_TEXTURE24;
#endif
#if defined(GL_TEXTURE25)
  scope.attr("GL_TEXTURE25") = GL_TEXTURE25;
#endif
#if defined(GL_TEXTURE26)
  scope.attr("GL_TEXTURE26") = GL_TEXTURE26;
#endif
#if defined(GL_TEXTURE27)
  scope.attr("GL_TEXTURE27") = GL_TEXTURE27;
#endif
#if defined(GL_TEXTURE28)
  scope.attr("GL_TEXTURE28") = GL_TEXTURE28;
#endif
#if defined(GL_TEXTURE29)
  scope.attr("GL_TEXTURE29") = GL_TEXTURE29;
#endif
#if defined(GL_TEXTURE30)
  scope.attr("GL_TEXTURE30") = GL_TEXTURE30;
#endif
#if defined(GL_TEXTURE31)
  scope.attr("GL_TEXTURE31") = GL_TEXTURE31;
#endif
}

}}} // gltbx::gl::boost_python
{ "alphanum_fraction": 0.7311210381, "author": null, "avg_line_length": 26.7863247863, "converted": null, "ext": "cpp", "file": null, "hexsha": "0fb841a6a18ff91aee202f5c33ae80bc9066fd1f", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-02-04T15:39:06.000Z", "max_forks_repo_forks_event_min_datetime": "2020-02-04T15:39:06.000Z", "max_forks_repo_head_hexsha": "77d66c719b5746f37af51ad593e2941ed6fbba17", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "jorgediazjr/dials-dev20191018", "max_forks_repo_path": "build/gltbx/gl_defines_06_bpl.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "77d66c719b5746f37af51ad593e2941ed6fbba17", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "jorgediazjr/dials-dev20191018", "max_issues_repo_path": "build/gltbx/gl_defines_06_bpl.cpp", "max_line_length": 76, "max_stars_count": null, "max_stars_repo_head_hexsha": "77d66c719b5746f37af51ad593e2941ed6fbba17", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "jorgediazjr/dials-dev20191018", "max_stars_repo_path": "build/gltbx/gl_defines_06_bpl.cpp", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2688, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 9402 }
# NOTE(review): this chunk's original indentation was destroyed by the dump;
# block structure below was reconstructed from the syntax.  Comments marked
# NOTE(review) flag reconstruction assumptions to be confirmed.
import pickle
import os
import torch
import time
import numpy as np
from torch import nn
import matplotlib.pyplot as plt
from regretnet import ibp
from regretnet.mipcertify.mip_solver import MIPNetwork
from regretnet.regretnet import RegretNet, calc_agent_util, optimize_misreports, tiled_misreport_util
from regretnet.mipcertify.model import clip_relu_convert, clip_relu_remove, simplify_network, sigmoid_linear_convert
from datetime import datetime

# Fixed seeds for reproducibility of the presampled valuation points below.
RANDOM_SEED = 4321
torch.manual_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)


def curr_timestamp():
    """Return the current local time as a filesystem-safe string."""
    return datetime.strftime(datetime.now(), format="%Y-%m-%d_%H-%M-%S")


# Single timestamp shared by all output files of this run.
EXP_TIME = curr_timestamp()


def plot_misreports(experiment_dicts, player_ind):
    """Scatter truthful bids (blue, sized by regret) vs. found misreports (red),
    with an arrow from each truthful point to its misreport.

    Only entries that contain a "regret" key (i.e. where a better bid was
    found) are plotted.  Assumes 2-item bids: indices 0/1 are used as x/y.
    """
    for d in experiment_dicts:
        if "regret" in d:
            truthful = d["truthful_input"][player_ind,:].numpy()
            misreport = d["better_bid"][player_ind,:].numpy()
            difference = misreport - truthful
            plt.plot([truthful[0]], [truthful[1]], marker='o', markersize=30*d["regret"], color='blue')
            plt.plot([misreport[0]], [misreport[1]], marker='o', markersize=3, color='red')
            plt.arrow(truthful[0], truthful[1], difference[0], difference[1], head_width=0.01)
    # NOTE(review): indentation reconstructed — show() assumed to run once
    # after all points are drawn; confirm against the original file.
    plt.show()


def convert_input_dom(truthful_input, player_index):
    """
    Converts a truthful input into a set of input bounds for player i
    :param truthful_input: the current set of truthful bids
    :param player_index: the player whose input can vary
    :return: a tensor of upper and lower bounds in the format which MIPNetwork
    can accept.
    """
    # All other players' bids are pinned (lb == ub == truthful value);
    # player_index's bids may range over the full [0, 1] box.
    input_lbs = truthful_input.clone()
    input_ubs = truthful_input.clone()
    input_lbs[player_index, :] = 0.0
    input_ubs[player_index, :] = 1.0
    return torch.stack((input_lbs.flatten(), input_ubs.flatten())).T


def experiment_on_input(
    model,
    truthful_input,
    player_ind,
    inner_product,
    regret_tolerance=None,
    n_samples=100,
    misreport_lr=0.01,
    misreport_iter=1000,
):
    """Certify one truthful bid profile: solve the MIP for player_ind's best
    misreport, then cross-check against random sampling and PGD
    (optimize_misreports).

    :param model: a RegretNet instance (provides nn_model, payment_head,
        allocation_head, n_agents, n_items).
    :param truthful_input: bid tensor, assumed shape (n_agents, n_items)
        from the reshape/indexing below.
    :param player_ind: index of the player allowed to deviate.
    :param inner_product: True when the payment head uses the
        frac_sigmoid_linear (fractional payment) formulation.
    :param regret_tolerance: recorded in the result dict only; not used here.
    :param n_samples: number of random misreports for the sanity check.
    :param misreport_lr/misreport_iter: PGD hyperparameters.
    :return: dict of inputs, solver outputs, timings, and comparison values.
    """
    truthful_allocs, truthful_payments = model(truthful_input)
    both_util = calc_agent_util(truthful_input, truthful_allocs, truthful_payments).flatten()
    truthful_util = both_util[player_ind].item()
    input_dom = convert_input_dom(truthful_input, player_ind)
    # the stuff below may be necessary for clip relu
    # converted_payment = clip_relu_convert(model.payment_head)
    # converted_alloc = clip_relu_remove(model.allocation_head)
    if inner_product:
        print('inner product')
        # Fractional-payment formulation: fold the sigmoid-linear payment
        # head into a linear network the MIP solver can encode.
        payment_head = simplify_network(sigmoid_linear_convert(model.payment_head))
        alloc_head = model.allocation_head
        mipnet = MIPNetwork(
            model.nn_model, payment_head, alloc_head, model.n_agents, model.n_items, fractional_payment=True
        )
    else:
        # Clipped-ReLU payment formulation.
        payment_head = clip_relu_convert(model.payment_head)
        alloc_head = model.allocation_head
        mipnet = MIPNetwork(
            model.nn_model, payment_head, alloc_head, model.n_agents, model.n_items, fractional_payment=False
        )
    start_setup = time.time()
    mipnet.setup_model(
        input_dom, truthful_input, truthful_util, use_obj_function=True, player_ind=player_ind
    )
    end_setup = time.time()
    start_solve = time.time()
    point_was_found, result_tuple, num_states = mipnet.solve(input_dom, truthful_util)
    end_solve = time.time()
    # we should return a dict containing:
    # truthful point
    # truthful util
    # better point if it was found
    # better util if it was found
    # regret tolerance
    results_dict = {}
    results_dict["truthful_util"] = truthful_util
    results_dict["truthful_input"] = truthful_input
    results_dict["truthful_allocs"] = truthful_allocs.cpu().detach()
    results_dict["truthful_payments"] = truthful_payments.cpu().detach()
    results_dict["regret_tolerance"] = regret_tolerance
    results_dict["num_states"] = num_states
    results_dict["setup_time"] = end_setup - start_setup
    results_dict["solve_time"] = end_solve - start_solve
    results_dict["agent"] = player_ind
    # NOTE(review): `if True` looks like it once guarded on point_was_found
    # (computed above and otherwise unused) — confirm; currently the branch
    # always runs.
    if True:
        # Read the solver's optimal input assignment back out of the first
        # layer of gurobi variables and reshape it into a bid matrix.
        var_results = []
        for v in mipnet.gurobi_vars[0]:
            var_results.append(v.x)
        better_bid = torch.tensor(var_results).reshape(model.n_agents, model.n_items)
        results_dict["better_bid"] = better_bid
        if inner_product:
            results_dict["gurobi_better_frac_payments"] = [
                v.x for v in mipnet.payment_gurobi_vars[-1]
            ]
            results_dict["gurobi_final_payment"] = mipnet.final_player_payment.getValue()
        else:
            results_dict["gurobi_better_payments"] = [
                v.x for v in mipnet.payment_gurobi_vars[-1]
            ]
            results_dict["gurobi_final_payment"] = results_dict["gurobi_better_payments"][player_ind]
        results_dict["gurobi_better_allocs"] = [
            v.x for v in np.array(mipnet.allocation_gurobi_vars[-1]).flatten()
        ]
        results_dict["better_util_gurobi"] = mipnet.final_util_expr.getValue()
        # Re-run the torch model on the solver's bid; utility is evaluated
        # under the TRUE valuations (truthful_input), as regret requires.
        better_allocs, better_payments = model(better_bid)
        better_util = (calc_agent_util(truthful_input, better_allocs, better_payments).flatten())[player_ind].item()
        results_dict["better_allocs"] = better_allocs.cpu().detach()
        results_dict["better_payments"] = better_payments.cpu().detach().flatten()
        results_dict["better_payment_player"] = results_dict["better_payments"][player_ind].item()
        results_dict["better_util"] = better_util
        results_dict["regret"] = better_util - truthful_util
    # randomly sample some points on the original network, to test that none of them beat
    # optimized util.
    max_random_util = 0.0
    for i in range(n_samples):
        # create new point altering only player i's bid
        new_point = truthful_input.clone()
        new_point[player_ind, :] = torch.rand_like(truthful_input[player_ind, :])
        new_alloc, new_payments = model(new_point)
        new_util = (calc_agent_util(truthful_input, new_alloc, new_payments).flatten())[player_ind].item()
        if new_util > max_random_util:
            max_random_util = new_util
    results_dict["max_random_util"] = max_random_util
    results_dict["misreport_lr"] = misreport_lr
    results_dict["misreport_iter"] = misreport_iter
    # PGD baseline: gradient-based misreport optimization on the same point.
    batch_truthful_input = truthful_input.unsqueeze(0)  # add batch dim
    misreport_batch = batch_truthful_input.clone().detach()
    optimize_misreports(
        model,
        batch_truthful_input,
        misreport_batch,
        misreport_iter=misreport_iter,
        lr=misreport_lr,
    )
    print(misreport_batch)
    misreport_util = (tiled_misreport_util(
        misreport_batch, batch_truthful_input, model
    ).flatten())[player_ind]
    results_dict["misreport_util"] = misreport_util.flatten()[0].item()
    # Negative values here mean PGD found a worse misreport than the MIP.
    print(results_dict["misreport_util"] - better_util)
    return results_dict


def experiment_and_save(model_name, presampled_points, agent=0):
    """Load model/<model_name>.pt, rebuild the RegretNet, and run
    experiment_on_input on every presampled valuation matching the model's
    (n_agents, n_items) shape.  Returns the list of per-point result dicts.
    """
    path = f"model/{model_name}.pt"
    checkpoint = torch.load(path, map_location=torch.device('cpu'))
    curr_activation = checkpoint['arch']['p_activation']
    # frac_sigmoid_linear payment heads use the fractional-payment MIP path.
    inner_product = curr_activation == "frac_sigmoid_linear"
    if not inner_product:
        # add on clip relu otherwise (mutates the loaded arch dict in place)
        checkpoint['arch']['p_activation'] = 'full_relu_clipped'
    model1 = RegretNet(**checkpoint['arch']).to('cpu')
    model1.load_state_dict(checkpoint['state_dict'])
    experiment_results = []
    random_points = presampled_points[(model1.n_agents, model1.n_items)]
    num_samples = len(random_points)
    for sample in range(num_samples):
        truthful_valuations = random_points[sample]
        print(sample)
        print(truthful_valuations)
        experiment_result = experiment_on_input(model1, truthful_valuations, agent, inner_product)
        experiment_results.append(experiment_result)
    return experiment_results


def process_and_plot_results(experiment_results, dir_name, model_name, exp_time):
    """Write histograms (solve time, regret, PGD-vs-MIP difference), pickle
    the raw results, and record max torch-vs-gurobi discrepancies to a text
    file, all under dir_name.
    """
    os.makedirs(dir_name, exist_ok=True)
    plt.hist([r["solve_time"] for r in experiment_results])
    plt.title("solve time")
    plt.savefig(f"{dir_name}/solve_time_{model_name}_{exp_time}.eps")
    plt.savefig(f"{dir_name}/solve_time_{model_name}_{exp_time}.png")
    plt.close()
    plt.hist([r.get("regret", 0.0) for r in experiment_results])
    plt.title("regret")
    plt.savefig(f"{dir_name}/regret_{model_name}_{exp_time}.eps")
    plt.savefig(f"{dir_name}/regret_{model_name}_{exp_time}.png")
    plt.close()
    plt.hist([r.get("misreport_util", 0.0) - r.get("better_util", 0.0) for r in experiment_results])
    plt.title("PGD vs. MIP regret difference")
    plt.savefig(f"{dir_name}/util_difference_{model_name}_{exp_time}.eps")
    plt.savefig(f"{dir_name}/util_difference_{model_name}_{exp_time}.png")
    plt.close()
    # Compare gurobi's internal values against the torch re-evaluation to
    # check the MIP encoding is faithful to the network.
    all_util_diffs = []
    all_alloc_max_diffs = []
    all_pay_diffs = []
    for exp_dict in experiment_results:
        gurobi_allocs = np.array(exp_dict["gurobi_better_allocs"])
        gurobi_util = exp_dict["better_util_gurobi"]
        gurobi_pay = exp_dict["gurobi_final_payment"]
        diff = (exp_dict["better_allocs"].flatten().cpu().numpy() - gurobi_allocs).max()
        all_alloc_max_diffs.append(diff)
        all_util_diffs.append(gurobi_util - exp_dict["better_util"])
        all_pay_diffs.append(gurobi_pay - exp_dict["better_payment_player"])
    with open(f"{dir_name}/{model_name}_{exp_time}.pickle", "wb") as fast_results:
        pickle.dump(experiment_results, fast_results)
    with open(f"{dir_name}/{model_name}_{exp_time}_maxdiffs.txt", "w") as maxdiff_file:
        print('max overall util diff', np.max(np.abs(all_util_diffs)), file=maxdiff_file)
        print('max overall alloc diff', np.max(np.abs(all_alloc_max_diffs)), file=maxdiff_file)
        print('max overall payment diff', np.max(np.abs(all_pay_diffs)), file=maxdiff_file)


# Models to certify in this run; earlier selections kept for reference.
model_names = ['1x2_sparsemax_linearpmt_distill_fast', '2x2_sparsemax_linearpmt_distill_fast', '2x2_sparsemax_in_linsigpmt_scratch_fast', '1x2_sparsemax_in_linsigpmt_scratch_fast']
# model_names = ['1x2_sparsemax_in_linsigpmt_scratch', '2x2_sparsemax_in_linsigpmt_scratch', '1x2_sparsemax_in_linsigpmt_scratch_fast', '2x2_sparsemax_in_linsigpmt_scratch_fast']
# model_names = ['1x2_sparsemax_linearpmt_distill_fast', '2x2_sparsemax_linearpmt_distill_fast']
# model_names = ['2x2_sparsemax_linearpmt_distill_fast']
# model_names = ['2x2_sparsemax_linearpmt_distill_fast', '2x2_sparsemax_in_linsigpmt_scratch_fast','2x2_sparsemax_in_linsigpmt_scratch']
# model_names = ['2x2_sparsemax_in_linsigpmt_scratch']
# model_names = ['2x3_sparsemax_in_linsigpmt_scratch_fast', '2x3_sparsemax_linearpmt_distill_fast','3x2_sparsemax_in_linsigpmt_scratch_fast', '3x2_sparsemax_linearpmt_distill_fast','3x3_sparsemax_in_linsigpmt_scratch_fast', '3x3_sparsemax_linearpmt_distill_fast']
# model_names = ['2x3_sparsemax_linearpmt_distill_fast', '3x2_sparsemax_linearpmt_distill_fast', '3x3_sparsemax_linearpmt_distill_fast']
# model_names = ['1x2_sparsemax_linearpmt_distill_fast', '1x2_sparsemax_linearpmt_distill_fast_4l','1x2_sparsemax_linearpmt_distill_fast_5l']
results_dict = {}
regrets_dict = {}
output_prefix = 'clipping_scaling/'  # don't forget to include trailing slash here
num_samples = 1000
agent = 0
# One shared pool of random valuation points per (n_agents, n_items) shape,
# so every model of a given shape is certified on identical inputs.
presampled_points = {}
presampled_points[(1,2)] = [torch.rand(1, 2) for _ in range(num_samples)]
presampled_points[(2,2)] = [torch.rand(2, 2) for _ in range(num_samples)]
presampled_points[(3,2)] = [torch.rand(3, 2) for _ in range(num_samples)]
presampled_points[(2,3)] = [torch.rand(2, 3) for _ in range(num_samples)]
presampled_points[(3,3)] = [torch.rand(3, 3) for _ in range(num_samples)]

for model_name in model_names:
    dir_name = f"{output_prefix}{model_name}_{EXP_TIME}"
    experiment_results = experiment_and_save(model_name, presampled_points, agent=agent)
    regrets = [r.get("regret", 0.0) for r in experiment_results]
    results_dict[model_name] = experiment_results
    regrets_dict[model_name] = regrets
    process_and_plot_results(experiment_results, dir_name, model_name, EXP_TIME)
{ "alphanum_fraction": 0.7173061224, "author": null, "avg_line_length": 45.3703703704, "converted": null, "ext": "py", "file": null, "hexsha": "88574781cb5cda87453d7ccf566b1cd556e28f98", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-10-06T15:35:41.000Z", "max_forks_repo_forks_event_min_datetime": "2021-10-06T15:35:41.000Z", "max_forks_repo_head_hexsha": "c0e80fdbdbb10557071f677ac88236105084eee2", "max_forks_repo_licenses": [ "BSD-Source-Code" ], "max_forks_repo_name": "currymj/certified-regretnet", "max_forks_repo_path": "regretnet/mipcertify/experiments/updated_experiment.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "c0e80fdbdbb10557071f677ac88236105084eee2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-Source-Code" ], "max_issues_repo_name": "currymj/certified-regretnet", "max_issues_repo_path": "regretnet/mipcertify/experiments/updated_experiment.py", "max_line_length": 263, "max_stars_count": null, "max_stars_repo_head_hexsha": "c0e80fdbdbb10557071f677ac88236105084eee2", "max_stars_repo_licenses": [ "BSD-Source-Code" ], "max_stars_repo_name": "currymj/certified-regretnet", "max_stars_repo_path": "regretnet/mipcertify/experiments/updated_experiment.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3070, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 12250 }
import scipy.io as sio import numpy as np from skimage import data, io import cv2 M = sio.loadmat('M.mat') M = M['M'] # cv2.imshow('M', M) # cv2.waitKey(0) # # A = cv2.imread('kodim11.JPG', 3) # print type(A) # cv2.imshow('Aasd', A) # cv2.waitKey(0) Dict = sio.loadmat('Dict.mat') Dict = Dict['Dict'] # normalized = np.zeros((256,256), dtype=float) # min = np.minimum(Dict) # max = np.maximum(Dict) # for i in range(256): # for j in range(256): # normalized[i][j] = (Dict[i][j] - min) / (max - min) cv2.imshow("Dict", Dict) cv2.waitKey(0) # def im2col(mtx, block_size): # mtx_shape = mtx.shape # print mtx_shape # sx = mtx_shape[0] - block_size[0] + 1 # sy = mtx_shape[1] - block_size[1] + 1 # # result = np.empty((block_size[0] * block_size[1], sx * sy)) # # for i in range(sy): # for j in range(sx): # result[:, i * sx + j] = mtx[j:j + block_size[0], i:i + block_size[1]].ravel(order='F') # return result # # X = im2col(M, (16, 16)) # print type(X), np.shape(X)
{ "alphanum_fraction": 0.5842259007, "author": null, "avg_line_length": 24.4523809524, "converted": null, "ext": "py", "file": null, "hexsha": "100b202f62481dcd275fb0a153e03db913a81709", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-04-06T12:18:26.000Z", "max_forks_repo_forks_event_min_datetime": "2019-12-25T08:15:40.000Z", "max_forks_repo_head_hexsha": "e4b6608b0d76fb1ee1e216650173599ee3ca60ba", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "bigmms/mixed-norm-power-constrained-sparse-representation", "max_forks_repo_path": "Data/Test_load_mat.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "e4b6608b0d76fb1ee1e216650173599ee3ca60ba", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "bigmms/mixed-norm-power-constrained-sparse-representation", "max_issues_repo_path": "Data/Test_load_mat.py", "max_line_length": 100, "max_stars_count": null, "max_stars_repo_head_hexsha": "e4b6608b0d76fb1ee1e216650173599ee3ca60ba", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "bigmms/mixed-norm-power-constrained-sparse-representation", "max_stars_repo_path": "Data/Test_load_mat.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 361, "path": null, "reason": "import numpy,import scipy", "repo": null, "save_path": null, "sha": null, "size": 1027 }
"""Reads the text data stored as sparse matrix.""" import numpy as np import scipy.sparse as sp from sklearn.model_selection import train_test_split import pandas as pd def removeFirstColumn(data): new_data = [] for i in range(len(data)): new_data.append(data[i][1:]) new_data = np.array(new_data) return new_data def LoadData(inputfile): print("read") print("loaded") data = np.load(inputfile, mmap_mode='r') print("loaded") print(type(data)) print(data.shape) return data def read_text_file_check_zeros(file_path, zeros_file_path): reviews = [] count = 0 index = 0 zero_indices = [] print(file_path) zeros_handle = open(zeros_file_path, 'w') with open(file_path, 'r') as fp: for line in fp: if count == 0: count += 1 continue result = line.split('\t') result = result[1:len(result) - 1] try: result = [float(i) for i in result] except ValueError: print(result) # print(len(result)) ''' nzeros = 0 for val in result: if val == 0.0: nzeros+=1 #b = sum(result) if nzeros>=len(result)-3:#b == float(0): zeros_handle.write(str(index)+"\n") zero_indices.append(index) else: reviews.append(result) ''' reviews.append(result) # if count ==150000: # break if count % 1000 == 0: print(count) count += 1 index += 1 print("Consumed " + str(count) + " lines") zeros_handle.close() return reviews, zero_indices def read_text_file_no_check(file_path, zero_indices,heading=1,column=1): reviews = [] count = 0 index = 0 with open(file_path, 'r') as fp: for line in fp: if heading==1 and count == 0: count += 1 continue result = line.split('\t') if column==1: result = result[1:len(result) - 1] else: result = result[0:len(result) - 1] # print(count) if index not in zero_indices: result = [float(i) for i in result] reviews.append(result) # if count ==150000: # break if count % 1000 == 0: print(count) count += 1 index += 1 print("Consumed " + str(count) + " lines") return reviews def WriteNumPyAll(file_path, new_name, zeros_file_path, check_zeros, zero_indices,normalize,heading=1,column=1): print("Reading " + 
file_path) if check_zeros == 0: data = read_text_file_no_check(file_path, zero_indices,heading,column) else: data, zero_indices = read_text_file_check_zeros(file_path, zeros_file_path) print("Data size ",len(data),len(data[0])) if normalize == 1: print("Normalizing Data") data = pd.DataFrame.from_records(data) data.fillna(0, inplace=True) data = (data - data.min()) / (data.max() - data.min()) data.fillna(0, inplace=True) data = data.values print("Size after normalization",data.shape) else: data = np.array(data) print(data.shape) print("Writing " + new_name) np.save(new_name, data) return zero_indices if __name__ == '__main__': folder = 'sem_eval_16_bert_run' train_text_file = './data_text_embedding/' + folder + '/city_training_output_train.txt' test_text_file = './data_text_embedding/' + folder + '/sem_eval16_testing_output.txt' train_sent_file = './data_text_embedding/' + folder + '/rev_sentiment_features_new_train.txt' test_sent_file = './data_text_embedding/' + folder + '/rev_sentiment_features_new_test.txt' sent_train_out = './data_text_embedding/' + folder + '/sentiment_features_train_new' sent_test_out = './data_text_embedding/' + folder + '/sentiment_features_test_new' text_train_out = './data_text_embedding/' + folder + '/text_features_train_new' text_test_out = './data_text_embedding/' + folder + '/text_features_test_new' zeros_file_path = './data_text_embedding/' + folder + '/sent_train_zeros.txt' #zero_indices = WriteNumPyAll(train_sent_file, sent_train_out, zeros_file_path, 1, [],1) zero_indices=[] #WriteNumPyAll(train_text_file, text_train_out, "", 0, zero_indices,1,0,0) zeros_file_path = './data_text_embedding/spam_chicago_run/sent_test_zeros.txt' #zero_indices = WriteNumPyAll(test_sent_file, sent_test_out, zeros_file_path, 1, [],1) WriteNumPyAll(test_text_file, text_test_out, "", 0, zero_indices,1,0,0)
{ "alphanum_fraction": 0.5903465347, "author": null, "avg_line_length": 33.6666666667, "converted": null, "ext": "py", "file": null, "hexsha": "c24014cb944377060ce705b220410f6c20955b1f", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-01-04T10:02:38.000Z", "max_forks_repo_forks_event_min_datetime": "2021-01-04T10:02:38.000Z", "max_forks_repo_head_hexsha": "75679b63050189236f4f045db7bed558a666da90", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "yassienshaalan/DTOPS", "max_forks_repo_path": "JASM/preprocessing/ConvertTextFileToNumpy.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "75679b63050189236f4f045db7bed558a666da90", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "yassienshaalan/DTOPS", "max_issues_repo_path": "JASM/preprocessing/ConvertTextFileToNumpy.py", "max_line_length": 112, "max_stars_count": 3, "max_stars_repo_head_hexsha": "75679b63050189236f4f045db7bed558a666da90", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "yassienshaalan/DTOPS", "max_stars_repo_path": "JASM/preprocessing/ConvertTextFileToNumpy.py", "max_stars_repo_stars_event_max_datetime": "2022-02-27T06:59:05.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-21T01:22:40.000Z", "num_tokens": 1165, "path": null, "reason": "import numpy,import scipy", "repo": null, "save_path": null, "sha": null, "size": 4848 }
[STATEMENT] lemma satisifies_atom_restrict_to_Cons: "v \<Turnstile>\<^sub>a\<^sub>s restrict_to I (set as) \<Longrightarrow> (i \<in> I \<Longrightarrow> v \<Turnstile>\<^sub>a a) \<Longrightarrow> v \<Turnstile>\<^sub>a\<^sub>s restrict_to I (set ((i,a) # as))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>v \<Turnstile>\<^sub>a\<^sub>s (snd ` (set as \<inter> I \<times> UNIV)); i \<in> I \<Longrightarrow> v \<Turnstile>\<^sub>a a\<rbrakk> \<Longrightarrow> v \<Turnstile>\<^sub>a\<^sub>s (snd ` (set ((i, a) # as) \<inter> I \<times> UNIV)) [PROOF STEP] unfolding satisfies_atom_set_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>\<forall>a\<in>snd ` (set as \<inter> I \<times> UNIV). v \<Turnstile>\<^sub>a a; i \<in> I \<Longrightarrow> v \<Turnstile>\<^sub>a a\<rbrakk> \<Longrightarrow> \<forall>a\<in>snd ` (set ((i, a) # as) \<inter> I \<times> UNIV). v \<Turnstile>\<^sub>a a [PROOF STEP] by auto
{ "alphanum_fraction": null, "author": null, "avg_line_length": null, "converted": null, "ext": null, "file": "Simplex_Simplex", "hexsha": null, "include": null, "lang": null, "length": 2, "llama_tokens": 377, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": null, "max_forks_repo_licenses": null, "max_forks_repo_name": null, "max_forks_repo_path": null, "max_issues_count": null, "max_issues_repo_head_hexsha": null, "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": null, "max_issues_repo_name": null, "max_issues_repo_path": null, "max_line_length": null, "max_stars_count": null, "max_stars_repo_head_hexsha": null, "max_stars_repo_licenses": null, "max_stars_repo_name": null, "max_stars_repo_path": null, "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": null, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": null }
import numpy as np from sympy import symbols from . import example_smooth_reservoir_models as ESRM from .smooth_model_run import SmoothModelRun def critics(): symbs = symbols("t k_01 k_10 k_0o k_1o") t, k_01, k_10, k_0o, k_1o = symbs srm = ESRM.critics(symbs) pardict = {k_0o: 0.01, k_1o: 0.08, k_01: 0.09, k_10: 1} start_values = np.array([0.001, 0.001]) times = np.linspace(0, 100, 1000) pwc_mr = SmoothModelRun(srm, pardict, start_values, times) return pwc_mr def nonlinear_two_pool(): symbs = symbols("t k_01 k_10 k_0o k_1o") t, k_01, k_10, k_0o, k_1o = symbs srm = ESRM.nonlinear_two_pool(symbs) # now create the modelrun pardict = { k_01: 1/100, k_10: 1/100, k_0o: 1/2, k_1o: 1/2 } times = np.linspace(0, 20, 1600) # time grid forward start_values = np.array([1, 2]) pwc_mr = SmoothModelRun(srm, pardict, start_values, times) return pwc_mr def emanuel_1(): symbs = symbols( """ I_1 I_3 x_1 x_2 x_3 x_4 x_5 t F_1 F_2 F_3 F_4 F_5 F_21 F_41 F_42 F_52 F_43 F_53 F_54 """ ) (I_1, I_3, x_1, x_2, x_3, x_4, x_5, t, F_1, F_2, F_3, F_4, F_5, F_21, F_41, F_42, F_52, F_43, F_53, F_54) = symbs srm = ESRM.emanuel(symbs) # now create the modelrun pardict = { I_1: 77, I_3: 36, F_1: 2.081, F_2: 0.0686, F_3: 0.5217, F_4: 0.5926, F_5: 9.813e-3, F_21: 0.8378, F_41: 0.5676, F_42: 0.0322, F_52: 4.425e-3, F_43: 0.1739, F_53: 0.0870, F_54: 0.0370 } start_values = np.array( [37.00144, 451.89224, 69.00518, 80.2446, 1118.12122] ) times = np.arange(0, (10+(1/365)), 1/365) # time grid forward pwc_mr = SmoothModelRun(srm, pardict, start_values, times) return pwc_mr
{ "alphanum_fraction": 0.6038674033, "author": null, "avg_line_length": 30.1666666667, "converted": null, "ext": "py", "file": null, "hexsha": "73e6d49701842df30038213a510474181808b907", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4724555c33f11395ddc32738e8dfed7349ee155f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "goujou/CompartmentalSystems", "max_forks_repo_path": "src/CompartmentalSystems/example_smooth_model_runs.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "4724555c33f11395ddc32738e8dfed7349ee155f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "goujou/CompartmentalSystems", "max_issues_repo_path": "src/CompartmentalSystems/example_smooth_model_runs.py", "max_line_length": 73, "max_stars_count": null, "max_stars_repo_head_hexsha": "4724555c33f11395ddc32738e8dfed7349ee155f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "goujou/CompartmentalSystems", "max_stars_repo_path": "src/CompartmentalSystems/example_smooth_model_runs.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 800, "path": null, "reason": "import numpy,from sympy", "repo": null, "save_path": null, "sha": null, "size": 1810 }
import numpy as np class TransReplayBuffer(object): def __init__(self, size): self.size = size self.buffer = [] def get_single(self, index): return self.buffer[index] def offset(self): self.buffer.pop(0) def get_batch(self, batch_size): return self.get_truncated_episodes_batch(batch_size) def get_truncated_episodes_batch(self, batch_size): sample_range = len(self.buffer) - batch_size + 1 start_indice = np.random.choice(sample_range, 1, replace=False)[0] batch_buffer = [self.buffer[i+start_indice] for i in range(batch_size)] return batch_buffer def add_experience(self, trans): est_len = 1 + len(self.buffer) if est_len > self.size: self.offset() self.buffer.append(trans) def clear(self): self.buffer = [] class EpisodeReplayBuffer(object): def __init__(self, size): self.size = size self.buffer = [] def get_single(self, index): return self.buffer[index] def offset(self): self.buffer.pop(0) def get_batch(self, batch_size): length = len(self.buffer) indices = np.random.choice(length, batch_size, replace=False) batch_buffer = [] for i in indices: batch_buffer.extend(self.buffer[i]) return batch_buffer def add_experience(self, episode): est_len = 1 + len(self.buffer) if est_len > self.size: self.offset() self.buffer.append(episode)
{ "alphanum_fraction": 0.6208684381, "author": null, "avg_line_length": 26.1525423729, "converted": null, "ext": "py", "file": null, "hexsha": "bbaf52e95d5e623d7467fb62bb1eb337bcd217a4", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 18, "max_forks_repo_forks_event_max_datetime": "2022-03-20T18:39:05.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-12T03:10:17.000Z", "max_forks_repo_head_hexsha": "8f39b387c5b83825a5747bafc3ab89176313ab32", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eddie-atkinson/MAPDN", "max_forks_repo_path": "utilities/replay_buffer.py", "max_issues_count": 9, "max_issues_repo_head_hexsha": "8f39b387c5b83825a5747bafc3ab89176313ab32", "max_issues_repo_issues_event_max_datetime": "2022-03-17T09:11:04.000Z", "max_issues_repo_issues_event_min_datetime": "2021-11-15T09:40:08.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eddie-atkinson/MAPDN", "max_issues_repo_path": "utilities/replay_buffer.py", "max_line_length": 79, "max_stars_count": 30, "max_stars_repo_head_hexsha": "8f39b387c5b83825a5747bafc3ab89176313ab32", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eddie-atkinson/MAPDN", "max_stars_repo_path": "utilities/replay_buffer.py", "max_stars_repo_stars_event_max_datetime": "2022-03-12T09:06:53.000Z", "max_stars_repo_stars_event_min_datetime": "2021-10-31T13:23:52.000Z", "num_tokens": 341, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1543 }
\chapter{Additional link functions for neural networks}
{ "alphanum_fraction": 0.8103448276, "author": null, "avg_line_length": 14.5, "converted": null, "ext": "tex", "file": null, "hexsha": "bbd51b0e5311ba6c3119e28328713fbf511a9617", "include": null, "lang": "TeX", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/statistics/neuralNetworksLink/00-00-Chapter_name.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/statistics/neuralNetworksLink/00-00-Chapter_name.tex", "max_line_length": 55, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/statistics/neuralNetworksLink/00-00-Chapter_name.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 11, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 58 }
#!/usr/bin/env python import numpy class Point: def __init__(self, x, y): self.x = x self.y = y def euclideanDist(p1, p2): from math import sqrt return sqrt((p1.x-p2.x)**2 + (p1.y-p2.y)**2) def getMinDist(p1, precision=0.001, startX=0, endX=3): """Get x of point on (x,x^2) that has minimal distance to given Point p.""" minDist = -1 for x in numpy.arange(startX, endX, precision): p2 = Point(x, x**2) dist = euclideanDist(p1, p2) if minDist == -1 or dist < minDist: minDist = dist return minDist """for i in numpy.arange(0, 3, 0.01): minDist = getMinDist(Point(0, i)) if abs(i-minDist) < 0.005: print(i, minDist)""" print(getMinDist(Point(0,4.25), precision=0.001, startX=0, endX=3)) #print(euclideanDist(Point(0,5),Point(2, 2**2))) #print(getMinDist(5, 0.00001, 2, 3))
{ "alphanum_fraction": 0.5972540046, "author": null, "avg_line_length": 26.4848484848, "converted": null, "ext": "py", "file": null, "hexsha": "c54672db672bf9b2057afeb9fe8fffc7697bc14c", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9558d8b3c19776cb068b9753dcd3f88645dd7134", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "everbot/LaTeX-examples", "max_forks_repo_path": "documents/math-minimal-distance-to-cubic-function/calcMinDist.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "9558d8b3c19776cb068b9753dcd3f88645dd7134", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "everbot/LaTeX-examples", "max_issues_repo_path": "documents/math-minimal-distance-to-cubic-function/calcMinDist.py", "max_line_length": 79, "max_stars_count": 2, "max_stars_repo_head_hexsha": "6829f6cf9710b314a4bf0b64abdae5bcf6997fd0", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "keithmannock/LaTeX-examples", "max_stars_repo_path": "documents/math-minimal-distance-to-cubic-function/calcMinDist.py", "max_stars_repo_stars_event_max_datetime": "2017-11-24T22:16:18.000Z", "max_stars_repo_stars_event_min_datetime": "2017-11-02T10:09:12.000Z", "num_tokens": 303, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 874 }
/* +----------------------------------------------------------------------+ | HipHop for PHP | +----------------------------------------------------------------------+ | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ */ #include "hphp/runtime/vm/jit/linear-scan.h" #include "hphp/runtime/base/smart-containers.h" #include "hphp/runtime/vm/jit/ir-unit.h" #include "hphp/runtime/vm/jit/native-calls.h" #include "hphp/runtime/vm/jit/print.h" #include "hphp/runtime/vm/jit/ir.h" #include "hphp/runtime/vm/jit/trace-builder.h" #include "hphp/runtime/vm/jit/code-gen-x64.h" #include "hphp/runtime/vm/jit/state-vector.h" #include "hphp/runtime/vm/jit/check.h" #include "hphp/runtime/vm/jit/phys-reg.h" #include "hphp/runtime/vm/jit/abi-arm.h" #include "hphp/runtime/vm/jit/abi-x64.h" #include <boost/noncopyable.hpp> namespace HPHP { namespace JIT{ using namespace JIT::reg; TRACE_SET_MOD(hhir); struct LinearScan : private boost::noncopyable { explicit LinearScan(IRUnit&); RegAllocInfo allocRegs(); private: class RegState { friend class LinearScan; public: bool isReserved() const { return m_reserved; } bool isCallerSaved() const { return X64::kCallerSaved.contains(m_reg); } bool isCalleeSaved() const { return !isCallerSaved(); } bool isAllocated() const { return m_ssaTmp != nullptr; } bool isPinned() const { return m_pinned; } bool isRetAddr() const { if (!m_ssaTmp) return false; Type type = 
m_ssaTmp->type(); return type == Type::RetAddr; } PhysReg::Type type() const { return m_reg.type(); } private: SSATmp* m_ssaTmp; // non-null when allocated // Maintain the position of this register so that we can quickly // remove it from the lists. // A non-reserved reg is in either LinearScan::m_freeCallerSaved, // LinearScan::m_freeCalleeSaved, or LinearScan::m_allocatedRegs. // <m_pos> of a reserved reg is undefined. smart::list<RegState*>::iterator m_pos; PhysReg m_reg; bool m_pinned; // do not free this register if pinned // We stress test register allocation by reducing the number of // free registers. // <m_reserved> is true if the register is a reserved register // (i.e., rbx, rsp, rbp, r10, and r12) or it is marked as not free for // stress testing. bool m_reserved; }; struct SlotInfo { // the SSATmp that represents this spill location SSATmp* spillTmp; // The latest SSATmp that has the most recent reloaded spilled value // If it's NULL, we have to reload this slot before using it. 
SSATmp* latestReload; }; class PreColoringHint { public: PreColoringHint() { clear(); } bool preColorsTmp(RegState* reg) const; PhysReg getPreColoringReg(SSATmp* tmp, uint32_t index) const; void clear(); void add(SSATmp* tmp, uint32_t index, int argNum); private: // indexed by register number PhysReg::Map<std::pair<SSATmp*, uint32_t>> m_preColoredTmps; }; class StateSave { public: StateSave() {} void save(LinearScan* ls); void restore(LinearScan* ls); private: PhysReg::Map<RegState> m_regs; }; typedef smart::map<Block*, StateSave> ExitTraceMap; private: void allocRegToInstruction(InstructionList::iterator it); int allocRegToTmp(SSATmp* ssaTmp, uint32_t index); void assignRegToTmp(RegState* reg, SSATmp* ssaTmp, uint32_t index); void freeRegsAtId(uint32_t id); void spill(SSATmp* tmp); void numberInstructions(const BlockList& blocks); template<typename T> SSATmp* cns(T val) { return m_unit.cns(val); } void initFreeList(); void coalesce(); void genSpillStats(int numSpillLocs); void allocRegsOneTrace(BlockList::iterator& blockIt, ExitTraceMap& etm); void allocRegsToTrace(); uint32_t createSpillSlot(SSATmp* tmp); static SSATmp* getSpilledTmp(SSATmp* tmp); static SSATmp* getOrigTmp(SSATmp* tmp); uint32_t assignSpillLoc(); void collectInfo(BlockList::iterator it, IRTrace* trace); RegNumber getJmpPreColor(SSATmp* tmp, uint32_t regIndx, bool isReload); void computePreColoringHint(); void findFullSIMDCandidates(); IRInstruction* nextNative() const; uint32_t nextNativeId() const; void pushFreeReg(RegState* reg); RegState* popFreeReg(smart::list<RegState*>& freeList); void freeReg(RegState* reg); RegState* getFreeReg(PhysReg::Type type, bool preferCallerSaved); RegState* getReg(RegState* reg); PhysReg::Type getRegType(const SSATmp *tmp, int locIdx) const; bool crossNativeCall(const SSATmp* tmp) const; RegAllocInfo computeRegs() const; void resolveJmpCopies(); void dumpIR(const SSATmp* tmp, const char* msg) { if (HPHP::Trace::moduleEnabled(HPHP::Trace::hhir, kExtraLevel)) { 
std::ostringstream str; print(str, tmp, &m_allocInfo[tmp], &m_lifetime); HPHP::Trace::traceRelease("--- %s: %s\n", msg, str.str().c_str()); } } void dumpIR(const IRInstruction* inst, const char* msg) { if (HPHP::Trace::moduleEnabled(HPHP::Trace::hhir, kExtraLevel)) { auto regs = computeRegs(); std::ostringstream str; print(str, inst, &regs, &m_lifetime); HPHP::Trace::traceRelease("--- %s: %s\n", msg, str.str().c_str()); } } private: // Register allocation may generate Spill/Reload. IRUnit& m_unit; PhysReg::Map<RegState> m_regs; // Lists of free caller and callee-saved registers, respectively. smart::list<RegState*> m_freeCallerSaved[PhysReg::kNumTypes]; smart::list<RegState*> m_freeCalleeSaved[PhysReg::kNumTypes]; // List of assigned registers, sorted high to low by lastUseId. smart::list<RegState*> m_allocatedRegs; smart::vector<SlotInfo> m_slots; // Spill info indexed by slot id BlockList m_blocks; // all basic blocks in reverse postorder IdomVector m_idoms; // immediate dominator vector // any tmp that has been spilled has an entry in this array with // the spill-slot number, which is an index into m_slots[]. tmps that // have not spilled have -1. StateVector<SSATmp, int32_t> m_spillSlots; LifetimeInfo m_lifetime; // Internal lifetime state LinearIdVector& m_linear; // linear id for each inst UsesVector& m_uses; // use count of each tmp // the list of native instructions in the trace sorted by instruction ID; // i.e. a filtered list in the same order as visited by m_blocks. smart::list<IRInstruction*> m_natives; // stores pre-coloring hints PreColoringHint m_preColoringHint; // a map from SSATmp* to a list of Jmp instructions that have it as // a source. 
typedef smart::vector<IRInstruction*> JmpList; StateVector<SSATmp, JmpList> m_jmps; // final allocation for each SSATmp StateVector<SSATmp, PhysLoc> m_allocInfo; // SSATmps requiring 2 64-bit registers that are eligible for // allocation to a single SIMD register boost::dynamic_bitset<> m_fullSIMDCandidates; // reserved linear ids for each exit trace smart::flat_map<IRTrace*, uint32_t> m_exitIds; }; static_assert(X64::kReservedRSPSpillSpace == NumPreAllocatedSpillLocs * sizeof(void*), "kReservedRSPSpillSpace changes require updates in " "LinearScan"); // The dst of IncRef, Mov, StRef, and StRefNT has the same value // as the src. For analysis purpose, we put them in one equivalence class. // This canonicalize function returns the representative of <tmp>'s // equivalence class. The function computes the representative by // following the dst-src chain. static SSATmp* canonicalize(SSATmp* tmp) { while (true) { IRInstruction* inst = tmp->inst(); Opcode opc = inst->op(); // The dst of IncRef, Mov, StRef, and StRefNT has the same value // as the src. // We follow these instructions to canonicalize an SSATmp. 
switch (opc) { case IncRef: case Mov: case StRef: case StRefNT: tmp = inst->src(0); break; default: return tmp; } } } RegAllocInfo LinearScan::computeRegs() const { RegAllocInfo regs(m_unit); for (auto b : m_blocks) { for (auto& i : *b) { for (auto s : i.srcs()) regs[i][s] = m_allocInfo[s]; for (auto& d : i.dsts()) regs[i][d] = m_allocInfo[d]; } } return regs; } void LinearScan::StateSave::save(LinearScan* ls) { for (auto r : ls->m_regs) { m_regs[r] = ls->m_regs[r]; } } void LinearScan::StateSave::restore(LinearScan* ls) { ls->m_allocatedRegs.clear(); for (int i = 0; i < PhysReg::kNumTypes; i++) { ls->m_freeCalleeSaved[i].clear(); ls->m_freeCallerSaved[i].clear(); } for (auto i : m_regs) { ls->m_regs[i] = m_regs[i]; RegState* reg = &ls->m_regs[i]; if (reg->isReserved()) continue; if (reg->isAllocated()) { SSATmp* tmp = reg->m_ssaTmp; for (int r = 0; r < ls->m_allocInfo[tmp].numAllocated(); r++) { if (ls->m_allocInfo[tmp].reg(r) == i) { ls->assignRegToTmp(reg, tmp, r); } } } else { ls->pushFreeReg(reg); } } } LinearScan::LinearScan(IRUnit& unit) : m_unit(unit) , m_idoms(unit, nullptr) , m_spillSlots(unit, -1) , m_lifetime(unit) , m_linear(m_lifetime.linear) , m_uses(m_lifetime.uses) , m_jmps(unit, JmpList()) , m_allocInfo(unit, PhysLoc()) , m_fullSIMDCandidates(unit.numTmps()) { m_exitIds.reserve(unit.exits().size()); for (auto reg : m_regs) { m_regs[reg].m_ssaTmp = nullptr; m_regs[reg].m_reg = reg; m_regs[reg].m_pinned = false; m_regs[reg].m_reserved = false; } // Mark reserved regs. using namespace X64; m_regs[rVmSp] .m_reserved = true; m_regs[rsp] .m_reserved = true; m_regs[rVmFp] .m_reserved = true; m_regs[rAsm] .m_reserved = true; m_regs[rVmTl] .m_reserved = true; m_regs[rCgGP] .m_reserved = true; m_regs[rCgXMM0].m_reserved = true; m_regs[rCgXMM1].m_reserved = true; // Reserve extra regs for testing purpose. 
uint32_t numFreeRegs = RuntimeOption::EvalHHIRNumFreeRegs; for (auto r : m_regs) { if (!m_regs[r].m_reserved) { if (numFreeRegs == 0) { m_regs[r].m_reserved = true; } else { --numFreeRegs; } } } } PhysReg::Type LinearScan::getRegType(const SSATmp* tmp, int locIdx) const { if (!RuntimeOption::EvalHHIRAllocSIMDRegs) return PhysReg::GP; // If we're selecting a register for the type, it means this SSATmp // didn't get it's value allocated to a SIMD register, which // otherwise would store the type too. if (locIdx == 1) return PhysReg::GP; if (tmp->isA(Type::Dbl)) return PhysReg::SIMD; if (packed_tv) return PhysReg::GP; DEBUG_ONLY Type tmpType = tmp->type(); uint32_t tmpId = tmp->id(); if (tmp->inst()->op() == Reload) { // We don't have an entry for reloaded SSATmps in // m_fullSIMDCandidates, since they're inserted after this set is // computed. So we approximate this property for the reloaded // SSATmp using the original SSATmp that was spilled. In other // words, if the original SSATmp was a candidate to be allocated // to a full SIMD register, then so is the reloaded SSATmp. This // might be a bit conservative, but avoids recomputing the analysis. auto* reload = tmp->inst(); auto* spill = reload->src(0)->inst(); tmpId = spill->src(0)->id(); } if (m_fullSIMDCandidates[tmpId]) { FTRACE(6, "getRegType(SSATmp {} : {}): it's a candidate for full SIMD register\n", tmpId, tmpType.toString()); FTRACE(6, "getRegType(SSATmp {}): crossNative = {} ; # freeCalleeSaved[GP] = {}\n", tmpId, crossNativeCall(tmp), m_freeCalleeSaved[PhysReg::GP].size()); // Note that there are no callee-saved SIMD registers in the x64 // ABI. So, if tmp crosses native calls and there are 2 free GP // callee-saved registers, then allocate tmp to GP registers. 
if (RuntimeOption::EvalHHIREnableCalleeSavedOpt && crossNativeCall(tmp) && m_freeCalleeSaved[PhysReg::GP].size() >= 2) { return PhysReg::GP; } return PhysReg::SIMD; } return PhysReg::GP; } PhysReg forceAlloc(SSATmp& dst) { auto inst = dst.inst(); auto opc = inst->op(); // Note that the point of StashGeneratorSP is to save a StkPtr // somewhere other than rVmSp. (TODO(#2288359): make rbx not // special.) bool abnormalStkPtr = opc == StashGeneratorSP; if (!abnormalStkPtr && dst.isA(Type::StkPtr)) { assert(opc == DefSP || opc == ReDefSP || opc == ReDefGeneratorSP || opc == PassSP || opc == DefInlineSP || opc == Call || opc == CallArray || opc == SpillStack || opc == SpillFrame || opc == CufIterSpillFrame || opc == ExceptionBarrier || opc == RetAdjustStack || opc == InterpOne || opc == InterpOneCF || opc == GenericRetDecRefs || opc == CheckStk || opc == GuardStk || opc == AssertStk || opc == CastStk || opc == CoerceStk || opc == SideExitGuardStk || MInstrEffects::supported(opc)); return arch() == Arch::X64 ? X64::rVmSp : ARM::rVmSp; } // LdContActRec and LdAFWHActRec, loading a generator's AR, is the only time // we have a pointer to an AR that is not in rVmFp. bool abnormalFramePtr = opc == LdContActRec || opc == LdAFWHActRec; if (!abnormalFramePtr && dst.isA(Type::FramePtr)) { return arch() == Arch::X64 ? X64::rVmFp : ARM::rVmFp; } if (opc == DefMIStateBase) { assert(dst.isA(Type::PtrToCell)); return arch() == Arch::X64 ? PhysReg(reg::rsp) : PhysReg(vixl::sp); } return InvalidReg; } void LinearScan::allocRegToInstruction(InstructionList::iterator it) { IRInstruction* inst = &*it; dumpIR(inst, "allocating to instruction"); // Reload all source operands if necessary. // Mark registers as unpinned. 
for (auto r : m_regs) { m_regs[r].m_pinned = false; } smart::vector<bool> needsReloading(inst->numSrcs(), true); for (uint32_t i = 0; i < inst->numSrcs(); ++i) { SSATmp* tmp = inst->src(i); int32_t slotId = m_spillSlots[tmp]; if (slotId == -1) { needsReloading[i] = false; } else if ((tmp = m_slots[slotId].latestReload)) { needsReloading[i] = false; inst->setSrc(i, tmp); } if (!needsReloading[i]) { for (int i = 0, n = m_allocInfo[tmp].numAllocated(); i < n; ++i) { m_regs[m_allocInfo[tmp].reg(i)].m_pinned = true; } } } for (uint32_t i = 0; i < inst->numSrcs(); ++i) { if (needsReloading[i]) { SSATmp* tmp = inst->src(i); int32_t slotId = m_spillSlots[tmp]; // <tmp> is spilled, and not reloaded. // Therefore, We need to reload the value into a new SSATmp. // Insert the Reload instruction. SSATmp* spillTmp = m_slots[slotId].spillTmp; IRInstruction* reload = m_unit.gen(Reload, inst->marker(), spillTmp); inst->block()->insert(it, reload); // Create <reloadTmp> which inherits <tmp>'s slot ID and // <spillTmp>'s last use ID. // Replace <tmp> with <reloadTmp> in <inst>. SSATmp* reloadTmp = reload->dst(); m_uses[reloadTmp].lastUse = m_uses[spillTmp].lastUse; m_spillSlots[reloadTmp] = slotId; inst->setSrc(i, reloadTmp); // reloadTmp and tmp share the same type. Since it was spilled, it // must be using its entire needed-count of registers. assert(reloadTmp->type() == tmp->type()); for (int locIndex = 0; locIndex < tmp->numWords();) { locIndex += allocRegToTmp(reloadTmp, locIndex); } // Remember this reload tmp in case we can reuse it in later blocks. m_slots[slotId].latestReload = reloadTmp; dumpIR(reload, "created reload"); } } freeRegsAtId(m_linear[inst]); // Update next native. 
if (nextNative() == inst) { assert(!m_natives.empty()); m_natives.pop_front(); computePreColoringHint(); } Range<SSATmp*> dsts = inst->dsts(); if (dsts.empty()) return; for (SSATmp& dst : dsts) { for (int numAllocated = 0, n = dst.numWords(); numAllocated < n; ) { auto reg = forceAlloc(dst); if (reg != InvalidReg) { assignRegToTmp(&m_regs[reg], &dst, 0); numAllocated++; continue; } if (!RuntimeOption::EvalHHIRDeadCodeElim || m_uses[dst].lastUse != 0) { numAllocated += allocRegToTmp(&dst, numAllocated); } else { numAllocated++; } } } if (!RuntimeOption::EvalHHIRDeadCodeElim) { // if any outputs were unused, free regs now. freeRegsAtId(m_linear[inst]); } } bool LinearScan::crossNativeCall(const SSATmp* tmp) const { return m_uses[tmp].lastUse > nextNativeId(); } /* * Allocates a register to ssaTmp's index component (0 for value, 1 for type). * Returns the number of 64-bit register-space allocated. This is normally 1, * but it's 2 when both the type and value need registers and they're allocated * together to one 128-bit SIMD register. */ int LinearScan::allocRegToTmp(SSATmp* ssaTmp, uint32_t index) { bool preferCallerSaved = true; PhysReg::Type regType = getRegType(ssaTmp, index); FTRACE(6, "getRegType(SSATmp {}, {}) = {}\n", ssaTmp->id(), index, int(regType)); assert(regType == PhysReg::GP || index == 0); // no type-only in SIMD regs if (RuntimeOption::EvalHHIREnableCalleeSavedOpt) { preferCallerSaved = !crossNativeCall(ssaTmp); } RegState* reg = nullptr; if (!preferCallerSaved) { reg = getFreeReg(regType, false); if (reg->isCallerSaved()) { // If we are out of callee-saved registers, fall into the logic of // assigning a caller-saved register. pushFreeReg(reg); // getFreeReg pins the reg. Need restore it here. reg->m_pinned = false; reg = nullptr; } } if (reg == nullptr && RuntimeOption::EvalHHIREnablePreColoring) { // Pre-colors ssaTmp if it's used as an argument of next native. 
// Search for the original tmp instead of <ssaTmp> itself, because // the pre-coloring hint is not aware of reloaded tmps. SSATmp* orig = getOrigTmp(ssaTmp); RegNumber targetRegNo = m_preColoringHint.getPreColoringReg(orig, index); if (targetRegNo == reg::noreg) { targetRegNo = getJmpPreColor(orig, index, orig != ssaTmp); } if (targetRegNo == reg::noreg && ssaTmp->inst()->op() == AssertType) { targetRegNo = m_allocInfo[ssaTmp->inst()->src(0)].reg(index); } if (targetRegNo != reg::noreg) { reg = getReg(&m_regs[PhysReg(targetRegNo)]); } } if (reg == nullptr && RuntimeOption::EvalHHIREnablePreColoring && ssaTmp->inst()->isNative()) { // Pre-colors ssaTmp if it's the return value of a native. if (index == 0) { reg = getReg(&m_regs[rax]); } else if (index == 1) { reg = getReg(&m_regs[rdx]); } else { not_reached(); } } if (reg == nullptr) { // No pre-coloring for this tmp. // Pick a regular caller-saved reg. reg = getFreeReg(regType, true); } assert(reg); if (!preferCallerSaved && reg->isCallerSaved()) { // ssaTmp spans native, but we failed to find a free callee-saved reg. // We eagerly add a spill ssaTmp, and update ssaTmp's live range // to end with next native, because we know we have to spill it at // the next native. // Setting the last use ID to the next native is conservative. // Setting it to the last use before the next native would be more precise, // but that would be more expensive to compute. 
if (m_spillSlots[ssaTmp] == -1) { createSpillSlot(ssaTmp); } m_uses[ssaTmp].lastUse = nextNativeId(); } assignRegToTmp(reg, ssaTmp, index); if (m_allocInfo[ssaTmp].isFullSIMD()) { // Type and value allocated together to a single SIMD register return 2; } return 1; } void LinearScan::assignRegToTmp(RegState* reg, SSATmp* ssaTmp, uint32_t index) { reg->m_ssaTmp = ssaTmp; // mark inst as using this register if (ssaTmp->numWords() == 2 && reg->type() == PhysReg::SIMD) { assert(index == 0); m_allocInfo[ssaTmp].setRegFullSIMD(reg->m_reg); } else { m_allocInfo[ssaTmp].setReg(reg->m_reg, index); } uint32_t lastUseId = m_uses[ssaTmp].lastUse; if (reg->isReserved()) { return; } // insert into the list of assigned registers sorted by last use id auto it = m_allocatedRegs.begin(); for (; it != m_allocatedRegs.end(); ++it) { if (lastUseId > m_uses[(*it)->m_ssaTmp].lastUse) { break; } } reg->m_pos = m_allocatedRegs.insert(it, reg); } class SpillLocManager { public: explicit SpillLocManager(uint32_t startSpillLoc) : m_nextSpillLoc(startSpillLoc) { } /* * Allocates a new spill location. */ uint32_t allocSpillLoc() { return m_nextSpillLoc++; } void alignTo16Bytes() { if (PhysLoc::offset(m_nextSpillLoc) % 16 != 0) { m_nextSpillLoc++; } } uint32_t getNumSpillLocs() const { return m_nextSpillLoc; } void setNextSpillLoc(uint32_t nextSpillLoc) { m_nextSpillLoc = nextSpillLoc; } private: uint32_t m_nextSpillLoc; }; // Assign spill location numbers to Spill/Reload. uint32_t LinearScan::assignSpillLoc() { uint32_t maxSpillLoc = 0; SpillLocManager spillLocManager(0); // visit blocks in reverse postorder and instructions in forward order, // assigning a spill slot id to each Spill. We don't reuse slot id's, // but both could be reused either by visiting the dominator tree in // preorder or by analyzing lifetimes and reusing id/registers between // non-conflicting spills. 
// As an intermediate step, re-use id's for exit traces smart::map<Block*, uint32_t> exitLocMap; for (Block* block : m_blocks) { auto it = exitLocMap.find(block); if (it != exitLocMap.end()) { spillLocManager.setNextSpillLoc(it->second); } for (IRInstruction& inst : *block) { if (nextNative() == &inst) { assert(!m_natives.empty()); m_natives.pop_front(); } if (inst.op() == Spill) { SSATmp* dst = inst.dst(); SSATmp* src = inst.src(0); TRACE(3, "[counter] 1 spill a tmp that %s native\n", crossNativeCall(dst) ? "spans" : "does not span"); for (int locIndex = 0; locIndex < src->numWords(); ++locIndex) { // SSATmps with 2 regs are aligned to 16 bytes because they may be // allocated to SIMD registers, either before or after being reloaded if (src->numWords() == 2 && locIndex == 0) { spillLocManager.alignTo16Bytes(); } auto spillLoc = spillLocManager.allocSpillLoc(); m_allocInfo[dst].setSlot(locIndex, spillLoc); if (m_allocInfo[src].isFullSIMD()) { // Allocate the next, consecutive spill slot for this SSATmp too spillLoc = spillLocManager.allocSpillLoc(); m_allocInfo[dst].setSlot(1, spillLoc); break; } } } if (inst.op() == Reload) { SSATmp* src = inst.src(0); for (int locIndex = 0; locIndex < src->numWords(); ++locIndex) { TRACE(3, "[counter] reload\n"); } } } uint32_t totalSpillLocs = spillLocManager.getNumSpillLocs(); if (totalSpillLocs > maxSpillLoc) maxSpillLoc = totalSpillLocs; if (block->trace()->isMain()) { if (Block* taken = block->taken()) { if (!taken->trace()->isMain()) { exitLocMap[taken] = totalSpillLocs; } } } } return maxSpillLoc; } void LinearScan::collectInfo(BlockList::iterator it, IRTrace* trace) { m_natives.clear(); m_uses.reset(); // TODO(#2536764): serious time sink while (it != m_blocks.end()) { Block* block = *it++; bool offTrace = block->trace() != trace; if (offTrace) { if (!trace->isMain()) return; int lastId = m_exitIds[block->trace()]; for (IRInstruction& inst : *block) { for (auto* src : inst.srcs()) { if (lastId > m_uses[src].lastUse) { 
m_uses[src].lastUse = lastId; } } } } else { for (IRInstruction& inst : *block) { for (auto* src : inst.srcs()) { m_uses[src].lastUse = m_linear[inst]; } if (inst.isNative()) m_natives.push_back(&inst); } IRInstruction* jmp = &block->back(); if (jmp->op() == Jmp && jmp->numSrcs() != 0) { for (SSATmp* src : jmp->srcs()) { m_jmps[src].push_back(jmp); } } } } } void LinearScan::computePreColoringHint() { m_preColoringHint.clear(); IRInstruction* inst = nextNative(); if (inst == nullptr) { return; } Opcode opc = inst->op(); using NativeCalls::CallMap; using NativeCalls::ArgType; if (CallMap::hasInfo(opc)) { unsigned reg = 0; for (auto const& arg : CallMap::info(opc).args) { switch (arg.type) { case ArgType::SSA: m_preColoringHint.add(inst->src(arg.ival), 0, reg++); break; case ArgType::TV: case ArgType::MemberKeyS: case ArgType::MemberKeyIS: m_preColoringHint.add(inst->src(arg.ival), 0, reg++); m_preColoringHint.add(inst->src(arg.ival), 1, reg++); break; case ArgType::ExtraImm: case ArgType::Imm: break; } // Some opcodes (ex. SetM) can have more arguments than there are argument // registers. These will always spill so don't do any coloring for them. if (reg >= X64::kNumRegisterArgs) { break; } } return; } // For instructions that want to hint a continuous increasing range // of sources to a continuous increasing range of argument // registers. 
auto normalHint = [&](int count, int srcBase, int argBase) { for (int i = 0; i < count; ++i) { m_preColoringHint.add(inst->src(i + srcBase), 0, i + argBase); } }; switch (opc) { case LdFunc: m_preColoringHint.add(inst->src(0), 0, 1); break; case NativeImpl: m_preColoringHint.add(inst->src(1), 0, 0); break; case AKExists: normalHint(2, 0, 0); break; case Eq: case Neq: case Same: case NSame: { auto src1 = inst->src(0); auto src2 = inst->src(1); auto type1 = src1->type(); auto type2 = src2->type(); if ((type1.isArray() && type2.isArray()) || (type1.isString() && type2.isString()) || (type1.isString() && !src1->isConst()) || ((type1 == Type::Obj || type1 == Type::Res) && (type2 == Type::Obj || type2 == Type::Res))) { m_preColoringHint.add(src1, 0, 0); m_preColoringHint.add(src2, 0, 1); } } break; case IterInit: case WIterInit: { m_preColoringHint.add(inst->src(0), 0, 1); } break; case LdSSwitchDestFast: normalHint(1, 0, 0); break; case LdSSwitchDestSlow: normalHint(1, 0, 0); break; case LdGblAddr: normalHint(1, 0, 0); break; case LdClsPropAddr: normalHint(3, 0, 0); break; case LdCls: m_preColoringHint.add(inst->src(0), 0, 1); break; case BoxPtr: normalHint(1, 0, 0); break; default: break; } } // Given a label, dest index for that label, and register index, scan // the sources of all incoming Jmps to see if any have a register // allocated at the specified index. static RegNumber findLabelSrcReg(StateVector<SSATmp,PhysLoc>& regs, IRInstruction* label, unsigned dstIdx, uint32_t regIndex) { assert(label->op() == DefLabel); SSATmp* withReg = label->block()->findSrc(dstIdx, [&](SSATmp* src) { return regs[src].reg(regIndex) != InvalidReg && src->inst()->block()->hint() != Block::Hint::Unlikely; }); return withReg ? regs[withReg].reg(regIndex) : reg::noreg; } // This function attempts to find a pre-coloring hint from two // different sources: If tmp comes from a DefLabel, it will scan up to // the SSATmps providing values to incoming Jmps to look for a // hint. 
If tmp is consumed by a Jmp, look for other incoming Jmps // to its destination and see if any of them have already been given a // register. If all of these fail, let normal register allocation // proceed unhinted. RegNumber LinearScan::getJmpPreColor(SSATmp* tmp, uint32_t regIndex, bool isReload) { IRInstruction* srcInst = tmp->inst(); const JmpList& jmps = m_jmps[tmp]; if (isReload && (srcInst->op() == DefLabel || !jmps.empty())) { // If we're precoloring a Reload of a temp that we'd normally find // a hint for, just return the register allocated to the spilled // temp. auto reg = m_allocInfo[tmp].reg(regIndex); assert(reg != reg::noreg); return reg; } if (srcInst->op() == DefLabel) { // Figure out which dst of the label is tmp for (unsigned i = 0, n = srcInst->numDsts(); i < n; ++i) { if (srcInst->dst(i) == tmp) { auto reg = findLabelSrcReg(m_allocInfo, srcInst, i, regIndex); // It's ok for reg to be reg::noreg here if all the incoming values are // constant. return reg; } } not_reached(); } // If srcInst wasn't a label, check if tmp is used by any Jmp // instructions. If it is, trace to the Jmp's label and use the // same procedure as above. for (unsigned ji = 0, jn = jmps.size(); ji < jn; ++ji) { IRInstruction* jmp = jmps[ji]; IRInstruction* label = &jmp->taken()->front(); // Figure out which src of the Jmp is tmp for (unsigned si = 0, sn = jmp->numSrcs(); si < sn; ++si) { SSATmp* src = jmp->src(si); if (tmp == src) { // For now, a DefLabel should never have a register assigned // to it before any of its incoming Jmp instructions. always_assert(m_allocInfo[label->dst(si)].reg(regIndex) == reg::noreg); auto reg = findLabelSrcReg(m_allocInfo, label, si, regIndex); if (reg != reg::noreg) return reg; } } } return reg::noreg; } // Create the initial free list. // It must be called after computePreColoringHint, because the order of // caller-saved regs depends on pre-coloring hints. void LinearScan::initFreeList() { // reserve extra regs for testing purpose. 
for (auto r : m_regs) { if (!m_regs[r].m_reserved) { pushFreeReg(&m_regs[r]); } } } void LinearScan::coalesce() { for (auto block : m_blocks) { for (auto& inst : *block) { for (uint32_t i = 0; i < inst.numSrcs(); ++i) { auto src = inst.src(i); auto origSrc = canonicalize(src); if (origSrc != src) { // Replace every operand with its canonicalized version. inst.setSrc(i, origSrc); } } } } } // Assign ids to each instruction in linear order. void LinearScan::numberInstructions(const BlockList& blocks) { m_spillSlots.reset(); m_uses.reset(); uint32_t nextId = 1; for (auto* block : blocks) { for (auto& inst : *block) { uint32_t id = nextId++; m_linear[inst] = id; for (SSATmp* tmp : inst.srcs()) { m_uses[tmp].lastUse = id; m_uses[tmp].count++; } } if (block->taken() && block->isMain() && !block->taken()->isMain()) { // reserve a spot for the lastUseId when we're processing the main // trace, if the last use is really in an exit trace. m_exitIds[block->taken()->trace()] = nextId++; } } } void LinearScan::genSpillStats(int numSpillLocs) { if (!moduleEnabled(HPHP::Trace::statgroups, 1)) return; static bool enabled = getenv("HHVM_STATS_SPILLS"); if (!enabled) return; int numMainSpills = 0; int numExitSpills = 0; int numMainReloads = 0; int numExitReloads = 0; forEachInst( m_blocks, [&](IRInstruction* inst) { if (inst->op() == Spill) { if (inst->block()->isMain()) { numMainSpills++; } else { numExitSpills++; } } else if (inst->op() == Reload) { if (inst->block()->isMain()) { numMainReloads++; } else { numExitReloads++; } } } ); static StringData* spillStats = makeStaticString("SpillStats"); static StringData* mainSpills = makeStaticString("MainSpills"); static StringData* mainReloads = makeStaticString("MainReloads"); static StringData* exitSpills = makeStaticString("ExitSpills"); static StringData* exitReloads = makeStaticString("ExitReloads"); static StringData* spillSpace = makeStaticString("SpillSpace"); auto entry = m_unit.entry(); // entry block auto const marker = 
entry->front().marker(); auto addStat = [&](const StringData* key, int value) { entry->prepend(m_unit.gen(IncStatGrouped, marker, cns(spillStats), cns(key), cns(value))); }; addStat(mainSpills, numMainSpills); addStat(mainReloads, numMainReloads); addStat(exitSpills, numExitSpills); addStat(exitReloads, numExitReloads); addStat(spillSpace, numSpillLocs); } /* * Finds the set of SSATmps that should be considered for allocation * to a full SIMD register. These are the SSATmps that satisfy all the * following conditions: * a) it requires 2 64-bit registers * b) it's defined in a load instruction * c) all its uses are simple stores to memory * * The computed set of SSATmps is stored in m_fullSIMDCandidates. */ void LinearScan::findFullSIMDCandidates() { boost::dynamic_bitset<> notCandidates(m_unit.numTmps()); m_fullSIMDCandidates.reset(); for (auto* block : m_blocks) { for (auto& inst : *block) { for (SSATmp& tmp : inst.dsts()) { if (tmp.numWords() == 2 && inst.isLoad() && !inst.isControlFlow()) { m_fullSIMDCandidates[tmp.id()] = true; } } int idx = 0; for (SSATmp* tmp : inst.srcs()) { if (tmp->numWords() == 2 && !inst.storesCell(idx)) { notCandidates[tmp->id()] = true; } idx++; } } } m_fullSIMDCandidates -= notCandidates; } // Insert a Shuffle just before each Jmp, to copy the Jmp's src values // to the target label's assigned destination registers. 
void LinearScan::resolveJmpCopies() {
  for (auto b : m_blocks) {
    if (!b->taken()) continue;
    auto jmp = &b->back();
    auto n = jmp->numSrcs();
    if (jmp->op() == Jmp && n > 0) {
      auto srcs = jmp->srcs();
      // Destination locations are whatever registers were assigned to the
      // taken block's DefLabel dsts; the Shuffle copies each Jmp src there.
      auto dests = new (m_unit.arena()) PhysLoc[n];
      auto labelDests = jmp->taken()->front().dsts();
      for (unsigned i = 0; i < n; ++i) {
        dests[i] = m_allocInfo[labelDests[i]];
      }
      auto shuffle = m_unit.gen(Shuffle, jmp->marker(),
                                ShuffleData(dests, n, n),
                                std::make_pair(n, &srcs[0]));
      // Insert the Shuffle immediately before the Jmp.
      b->insert(b->iteratorTo(jmp), shuffle);
    }
  }
}

// Top-level driver for linear-scan allocation: coalesce copies, find SIMD
// candidates, allocate registers trace by trace, assign spill slots, and
// materialize Jmp copies.  Punts if more spill slots are needed than the
// pre-allocated reserve.
RegAllocInfo LinearScan::allocRegs() {
  // Pre: Ensure there are no existing Shuffle instructions
  assert(checkNoShuffles(m_unit));
  m_blocks = rpoSortCfg(m_unit);
  m_idoms = findDominators(m_unit, m_blocks);
  if (RuntimeOption::EvalHHIREnableCoalescing) {
    // <coalesce> doesn't need instruction numbering.
    coalesce();
  }
  if (!packed_tv) {
    findFullSIMDCandidates();
  }
  allocRegsToTrace();
  // Re-number: allocation may have inserted Spill/Reload instructions.
  numberInstructions(m_blocks);
  uint32_t numSpillLocs = assignSpillLoc();
  if (numSpillLocs > (uint32_t)NumPreAllocatedSpillLocs) {
    PUNT(LinearScan_TooManySpills);
  }
  if (m_slots.size()) genSpillStats(numSpillLocs);
  resolveJmpCopies();
  auto regs = computeRegs();
  if (dumpIREnabled()) {
    dumpTrace(kRegAllocLevel, m_unit, " after reg alloc ", &regs,
              &m_lifetime, nullptr, nullptr);
  }
  return regs;
}

// Allocate registers for one trace, starting at *blockIt.  blockIt is
// advanced past the blocks processed.  etm carries the allocator state
// saved at main-trace exits, so exit traces resume from the state at
// their branch point.
void LinearScan::allocRegsOneTrace(BlockList::iterator& blockIt,
                                   ExitTraceMap& etm) {
  auto const trace = (*blockIt)->trace();
  collectInfo(blockIt, trace);
  computePreColoringHint();

  auto v = etm.find(*blockIt);
  if (v != etm.end()) {
    // Exit trace: restore the register state saved when the main trace
    // branched here.
    assert(!trace->isMain());
    v->second.restore(this);
  } else {
    // Must be the start of the main trace: begin with all regs free.
    assert(blockIt == m_blocks.begin() && trace->isMain());
    initFreeList();
  }

  // First, visit every instruction, allocating registers as we go,
  // and inserting Reload instructions where necessary.
  bool isMain = trace->isMain();
  size_t sz = m_slots.size();  // slots created before this trace
  while (blockIt != m_blocks.end()) {
    Block* block = *blockIt;
    if (block->trace() != trace) {
      if (!isMain) {
        // Exit traces are contiguous in RPO order; we're done.
        break;
      } else {
        // Main trace: skip interleaved exit blocks, keep going.
        ++blockIt;
        continue;
      }
    }
    FTRACE(5, "Block{}: {} ({})\n",
           trace->isMain() ? "" : " (exit trace)",
           (*blockIt)->id(), (*blockIt)->postId());

    // clear remembered reloads that don't dominate this block
    for (SlotInfo& slot : m_slots) {
      if (SSATmp* reload = slot.latestReload) {
        if (!dominates(reload->inst()->block(), block, m_idoms)) {
          slot.latestReload = nullptr;
        }
      }
    }
    for (auto it = block->begin(), end = block->end(); it != end; ++it) {
      allocRegToInstruction(it);
      dumpIR(&*it, "allocated to instruction ");
    }
    if (isMain) {
      assert(block->trace()->isMain());
      if (block->taken() &&
          !block->taken()->trace()->isMain()) {
        // Snapshot allocator state so the exit trace can resume from it.
        etm[block->taken()].save(this);
      }
    }
    ++blockIt;
  }

  // Now that we have visited all instructions on this trace,
  // and inserted Reloads for SSATmps which needed to be spilled,
  // we can go back and insert the spills.
  // On the main trace, insert the spill right after the instruction
  // that generated the value (without traversing everything else).
  // On exit traces, if the instruction that generated the value
  // is on the main trace, insert the spill at the start of the trace,
  // otherwise, after the instruction that generated the value
  size_t begin = sz;
  size_t end = m_slots.size();
  while (begin < end) {
    SlotInfo& slot = m_slots[begin++];
    IRInstruction* spill = slot.spillTmp->inst();
    IRInstruction* inst = spill->src(0)->inst();
    Block* block = inst->block();
    if (!isMain && block->trace()->isMain()) {
      // We're on an exit trace, but the def is on the
      // main trace, so put it at the start of this trace
      if (spill->block()) {
        // its already been inserted in another exit trace
        assert(!spill->block()->trace()->isMain());
        spill = m_unit.cloneInstruction(spill);
      }
      trace->front()->prepend(spill);
    } else if (inst->isBlockEnd()) {
      // Can't insert after a block-ending instruction; put the spill at
      // the top of the fall-through block instead.
      block->next()->prepend(spill);
    } else {
      auto pos = block->iteratorTo(inst);
      block->insert(++pos, spill);
    }
  }
}

// Allocate registers for every trace: the main trace first, then each
// exit trace (restored from the state saved at its branch point).
void LinearScan::allocRegsToTrace() {
  ExitTraceMap etm;

  numberInstructions(m_blocks);

  if (HPHP::Trace::moduleEnabled(HPHP::Trace::hhir, 5)) {
    std::stringstream s;
    s << "RPO: ";
    for (auto& b : m_blocks) {
      s << folly::format("{}{} ",
                         b->isMain() ? "M" : "E",
                         b->id());
    }
    s << "\n";
    HPHP::Trace::traceRelease("%s\n", s.str().c_str());
  }

  // First pass: this will cover the entire main trace (exit blocks are
  // skipped inside allocRegsOneTrace when processing the main trace).
  BlockList::iterator it = m_blocks.begin();
  while (it != m_blocks.end()) {
    allocRegsOneTrace(it, etm);
  }

  // Second pass: handle each exit trace.
  for (it = m_blocks.begin(); it != m_blocks.end();) {
    if ((*it)->isMain()) {
      ++it;
      continue;
    }
    allocRegsOneTrace(it, etm);
  }
}

void LinearScan::freeRegsAtId(uint32_t id) {
  // free all registers whose lifetime ends at this id
  // Note that we free registers before we allocate a register
  // to this instruction, so we have to be careful to finish using
  // a register before over-writing it.
  for (auto it = m_allocatedRegs.begin(); it != m_allocatedRegs.end(); ) {
    // Save the successor first: erase(it) invalidates it.
    auto next = it; ++next;
    RegState* reg = *it;
    assert(reg->m_ssaTmp);
    if (m_uses[reg->m_ssaTmp].lastUse <= id) {
      m_allocatedRegs.erase(it);
      freeReg(reg);
    }
    it = next;
  }
}

// Try to get a specific register.
// Returns NULL if <reg> is not in the free list;
// otherwise, return <reg> and remove it from the free list.
LinearScan::RegState* LinearScan::getReg(RegState* reg) {
  if (reg->isReserved() || reg->isAllocated()) {
    return nullptr;
  }
  auto type = reg->type();
  auto& freeList = (reg->isCallerSaved()
                    ? m_freeCallerSaved[type]
                    : m_freeCalleeSaved[type]);
  // m_pos is reg's position in its free list, maintained by pushFreeReg.
  freeList.erase(reg->m_pos);
  // Pin it so that other operands in the same instruction will not reuse it.
  reg->m_pinned = true;
  return reg;
}

// Pop a free register of the given type, preferring caller- or
// callee-saved as requested.  If no register of the type is free, spill
// an allocated one first (or punt if none can be spilled).
LinearScan::RegState* LinearScan::getFreeReg(PhysReg::Type type,
                                             bool preferCallerSaved) {
  if (m_freeCallerSaved[type].empty() && m_freeCalleeSaved[type].empty()) {
    assert(!m_allocatedRegs.empty());
    // no free registers --> free a register from the allocatedRegs
    // Pick the first register in <m_allocatedRegs> that is:
    // 1. not used for any source operand in the current instruction, and
    // 2. not used for the return address of a function.
    auto canSpill = [&] (RegState* reg) {
      return !reg->isPinned() && !reg->isRetAddr() && reg->type() == type;
    };
    auto pos = std::find_if(m_allocatedRegs.begin(), m_allocatedRegs.end(),
                            canSpill);
    if (pos == m_allocatedRegs.end()) {
      PUNT(RegSpill);
    }
    // Spilling frees the register(s) backing this tmp.
    spill((*pos)->m_ssaTmp);
  }

  smart::list<RegState*>* preferred = nullptr;
  smart::list<RegState*>* other = nullptr;
  if (preferCallerSaved) {
    preferred = &m_freeCallerSaved[type];
    other = &m_freeCalleeSaved[type];
  } else {
    preferred = &m_freeCalleeSaved[type];
    other = &m_freeCallerSaved[type];
  }

  RegState* theFreeReg = nullptr;
  if (!preferred->empty()) {
    theFreeReg = popFreeReg(*preferred);
  } else {
    theFreeReg = popFreeReg(*other);
  }
  assert(theFreeReg);
  // Pin it so that other operands in the same instruction will not reuse it.
  theFreeReg->m_pinned = true;
  return theFreeReg;
}

// Return <reg> to its free list and detach it from its tmp.
void LinearScan::freeReg(RegState* reg) {
  pushFreeReg(reg);
  // The <tmp> shouldn't be reused any more.
  SSATmp* tmp = reg->m_ssaTmp;
  int32_t slotId = m_spillSlots[tmp];
  if (slotId != -1) {
    // Forget any cached reload of this tmp's spill slot.
    m_slots[slotId].latestReload = nullptr;
  }
  reg->m_ssaTmp = nullptr;
}

void LinearScan::pushFreeReg(RegState* reg) {
  PhysReg::Type type = reg->type();
  auto& freeList = (reg->isCallerSaved()
                    ? m_freeCallerSaved[type]
                    : m_freeCalleeSaved[type]);
  // If next native is going to use <reg>, put <reg> to the back of the
  // queue so that it's unlikely to be misused by irrelevant tmps.
  if (RuntimeOption::EvalHHIREnablePreColoring &&
      type == PhysReg::GP &&
      (reg->m_reg == PhysReg(rax) || m_preColoringHint.preColorsTmp(reg))) {
    freeList.push_back(reg);
    reg->m_pos = (--freeList.end());
  } else {
    freeList.push_front(reg);
    reg->m_pos = freeList.begin();
  }
}

// Pop the front of a free list, or nullptr if the list is empty.
LinearScan::RegState* LinearScan::popFreeReg(smart::list<RegState*>& freeList) {
  if (freeList.empty()) {
    return nullptr;
  }
  RegState* reg = freeList.front();
  freeList.pop_front();
  return reg;
}

// Spill <tmp>: release its register(s) and ensure it has a spill slot.
// The actual Spill instruction is inserted later (see allocRegsOneTrace).
void LinearScan::spill(SSATmp* tmp) {
  dumpIR(tmp, "spilling");
  // If we're spilling, we better actually have registers allocated.
  assert(m_allocInfo[tmp].numAllocated() > 0);
  assert(m_allocInfo[tmp].numWords() == tmp->numWords());

  // Free the registers used by <tmp>.
  // Need call freeReg and modify <m_allocatedRegs>.
  for (auto it = m_allocatedRegs.begin(); it != m_allocatedRegs.end(); ) {
    // Save the successor first: erase(it) invalidates it.
    auto next = it; ++next;
    RegState* reg = *it;
    if (reg->m_ssaTmp == tmp) {
      freeReg(reg);
      m_allocatedRegs.erase(it);
    }
    it = next;
  }

  if (m_spillSlots[tmp] == -1) {
    // <tmp> hasn't been spilled before.
    // We need to create a new spill slot for it.
    uint32_t slotId = createSpillSlot(tmp);
    // createSpillSlot sets the latest reloaded value of slotId to tmp.
    // Here, we need reset this value because tmp is spilled and no longer
    // synced with memory.
    m_slots[slotId].latestReload = nullptr;
  }
}

// Create a spill slot for <tmp>.
uint32_t LinearScan::createSpillSlot(SSATmp* tmp) { uint32_t slotId = m_slots.size(); m_spillSlots[tmp] = slotId; auto* spillInst = m_unit.gen(Spill, tmp->inst()->marker(), tmp); SSATmp* spillTmp = spillInst->dst(); SlotInfo si; si.spillTmp = spillTmp; si.latestReload = tmp; m_slots.push_back(si); // The spill slot inherits the last use ID of the spilled tmp. m_uses[si.spillTmp].lastUse = m_uses[tmp].lastUse; return slotId; } IRInstruction* LinearScan::nextNative() const { return m_natives.empty() ? nullptr : m_natives.front(); } uint32_t LinearScan::nextNativeId() const { IRInstruction* next = nextNative(); return next ? m_linear[next] : -1; } SSATmp* LinearScan::getSpilledTmp(SSATmp* tmp) { assert(tmp->inst()->op() == Reload); SSATmp* slot = tmp->inst()->src(0); assert(slot->inst()->op() == Spill); return slot->inst()->src(0); } // If <tmp> is a reloaded value, follow the spill-reload chain to find // its source; otherwise, return <tmp> itself. SSATmp* LinearScan::getOrigTmp(SSATmp* tmp) { if (tmp->inst()->op() == Reload) return getSpilledTmp(tmp); return tmp; } bool LinearScan::PreColoringHint::preColorsTmp(RegState* reg) const { assert(reg->m_reg.isGP()); return m_preColoredTmps[reg->m_reg].first != nullptr; } // Get the pre-coloring register of (<tmp>, <index>). // A native call has at most six arguments, so the time complexity is // not a big problem. PhysReg LinearScan::PreColoringHint::getPreColoringReg( SSATmp* tmp, uint32_t index) const { for (auto reg : m_preColoredTmps) { if (m_preColoredTmps[reg].first == tmp && m_preColoredTmps[reg].second == index) { assert(reg.isGP()); return reg; } } return InvalidReg; } void LinearScan::PreColoringHint::clear() { for (auto reg : m_preColoredTmps) { m_preColoredTmps[reg].first = nullptr; m_preColoredTmps[reg].second = 0; } } // Provide a hint that (<tmp>, <index>) is used as the <argNum>-th arg // in next native. 
void LinearScan::PreColoringHint::add(SSATmp* tmp, uint32_t index, int argNum) { assert(argNum < X64::kNumRegisterArgs); auto reg = X64::argNumToRegName[argNum]; assert(reg != InvalidReg && reg.isGP()); m_preColoredTmps[reg].first = tmp; m_preColoredTmps[reg].second = index; } ////////////////////////////////////////////////////////////////////// RegAllocInfo allocRegsForUnit(IRUnit& unit) { return LinearScan(unit).allocRegs(); } }} // HPHP::JIT
{ "alphanum_fraction": 0.6209224557, "author": null, "avg_line_length": 32.1698240866, "converted": null, "ext": "cpp", "file": null, "hexsha": "e91d36f9f727e3d1d7dd2f257a3345c42acabf4e", "include": null, "lang": "C++", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2018-12-25T04:36:39.000Z", "max_forks_repo_forks_event_min_datetime": "2018-12-25T04:36:39.000Z", "max_forks_repo_head_hexsha": "5c6896ccd1b51466233c08b7bd2c6b60bcdb4ff5", "max_forks_repo_licenses": [ "PHP-3.01", "Zend-2.0" ], "max_forks_repo_name": "amit2014/hhvm", "max_forks_repo_path": "hphp/runtime/vm/jit/linear-scan.cpp", "max_issues_count": null, "max_issues_repo_head_hexsha": "5c6896ccd1b51466233c08b7bd2c6b60bcdb4ff5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "PHP-3.01", "Zend-2.0" ], "max_issues_repo_name": "amit2014/hhvm", "max_issues_repo_path": "hphp/runtime/vm/jit/linear-scan.cpp", "max_line_length": 80, "max_stars_count": 1, "max_stars_repo_head_hexsha": "9c9b632e17ed8a725f28fe8a19d0fb52df38a5d7", "max_stars_repo_licenses": [ "PHP-3.01", "Zend-2.0" ], "max_stars_repo_name": "serphen/hiphop-php", "max_stars_repo_path": "hphp/runtime/vm/jit/linear-scan.cpp", "max_stars_repo_stars_event_max_datetime": "2020-08-06T16:16:58.000Z", "max_stars_repo_stars_event_min_datetime": "2020-08-06T16:16:58.000Z", "num_tokens": 13414, "path": null, "reason": null, "repo": null, "save_path": null, "sha": null, "size": 47547 }
# -*- coding: utf-8 -*-
import numpy as np
import os
from knn_practice.knn_1 import classify0


def img2vector(filename):
    """Flatten a 32x32 binary-image text file into a (1, 1024) vector.

    Each of the first 32 lines of ``filename`` must contain at least 32
    characters, each '0' or '1'.

    Returns:
        np.ndarray of shape (1, 1024).
    """
    return_vect = np.zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            line_str = fr.readline()
            for j in range(32):
                return_vect[0, 32 * i + j] = int(line_str[j])
    return return_vect


def _class_label(filename_str):
    """Extract the ground-truth digit from a name like '3_107.txt'.

    The file name itself encodes the label as "<digit>_<index>.<ext>".
    (Extracted helper: this parsing was duplicated in both the training
    and the test loop.)
    """
    return int(filename_str.split('.')[0].split('_')[0])


def handwriting_class_test():
    """Classify every image in testDigits/ against trainingDigits/ with
    kNN (k=3), printing per-sample results and the overall error rate.

    Reads both directories relative to the current working directory.
    """
    training_dir = os.path.join(os.getcwd(), 'trainingDigits')
    test_dir = os.path.join(os.getcwd(), 'testDigits')
    training_file_list = os.listdir(training_dir)
    test_file_list = os.listdir(test_dir)

    # Build the training matrix and its parallel label list.
    m = len(training_file_list)
    hw_labels = []
    training_mat = np.zeros((m, 1024))
    for i, filename_str in enumerate(training_file_list):
        hw_labels.append(_class_label(filename_str))
        training_mat[i] = img2vector(os.path.join(training_dir, filename_str))

    # Classify each test image and count misclassifications.
    # (error_count is now an int; it was a float used only for counting.)
    error_count = 0
    m_test = len(test_file_list)
    for filename_str in test_file_list:
        class_num_str = _class_label(filename_str)
        vector_under_test = img2vector(os.path.join(test_dir, filename_str))
        classifier_result = classify0(vector_under_test, training_mat,
                                      hw_labels, 3)
        print('分类器判断该数字为:%d, 实际的数字为:%d' % (classifier_result, class_num_str))
        if classifier_result != class_num_str:
            error_count += 1
    print('错误总数:%d' % error_count)
    print('错误率:%f' % (error_count / float(m_test)))


if __name__ == '__main__':
    handwriting_class_test()
{ "alphanum_fraction": 0.6256599789, "author": null, "avg_line_length": 32.1016949153, "converted": null, "ext": "py", "file": null, "hexsha": "dfaf9e9d24831e16fd3b86f7d4fa5338d335903f", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2df27781d116b2e263f6fc9ca6cb96eb3d71d046", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Sea-Monster/MLIA-practice", "max_forks_repo_path": "knn_practice/knn_3.py", "max_issues_count": null, "max_issues_repo_head_hexsha": "2df27781d116b2e263f6fc9ca6cb96eb3d71d046", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Sea-Monster/MLIA-practice", "max_issues_repo_path": "knn_practice/knn_3.py", "max_line_length": 84, "max_stars_count": null, "max_stars_repo_head_hexsha": "2df27781d116b2e263f6fc9ca6cb96eb3d71d046", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Sea-Monster/MLIA-practice", "max_stars_repo_path": "knn_practice/knn_3.py", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 529, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 1894 }
""" This script defines some functions used for visualize the final results. """ import numpy as np import matplotlib.pyplot as plt import torch from modules.model import Net def import_best_model(best_model, category, phi, theta): """ Argumnets: best_model: .pth file containing model parameters category: an integer 1 ~ 809 phi: an integer 20 or 30 theta: an integer 0 ~ 359 Outputs: returned image and mask by the pretrained model """ # load the best model state_dict = torch.load(best_model) model = Net() model.load_state_dict(state_dict) model.eval() with torch.no_grad(): # convert c, phi and theta into corresponding inputs for the network # construct the class vector c = np.zeros(809) c[category-1] = 1 # construct the view vector v = np.zeros(4) v[0] = np.sin(theta/180 * np.pi) v[1] = np.cos(theta/180 * np.pi) v[2] = np.sin(phi/180 * np.pi) v[3] = np.sin(phi/180 * np.pi) # construct the tranform parameter vector t = np.ones(12) # transform them into tensor and into tench.FloatTensor c = torch.from_numpy(c) v = torch.from_numpy(v) t = torch.from_numpy(t) c = c.float() v = v.float() t = t.float() # add the batch axis c = c.unsqueeze(0) v = v.unsqueeze(0) t = t.unsqueeze(0) # compute out the image and mask image, mask = model(c, v, t) # convert to the form which can be displayed by plt. image = image.squeeze().numpy().transpose(1, 2, 0) image[np.where(image<=0)] = 0 image[np.where(image>=1)] = 1 mask = mask.squeeze().numpy() return image, mask if __name__ == "__main__": best_model = "./best_model_78.pth" image, mask = import_best_model(best_model, 666, 20, 300) # create two subplots, show the image at the first # and the mask at the second one. fig = plt.figure() ax1 = fig.add_subplot(1, 2, 1) plt.imshow(image) plt.axis('off') ax2 = fig.add_subplot(1, 2, 2) plt.imshow(mask) plt.axis('off') plt.show()
{ "alphanum_fraction": 0.5678370178, "author": null, "avg_line_length": 28.1341463415, "converted": null, "ext": "py", "file": null, "hexsha": "4720629639023fa0fea3869b630730c1a790bde9", "include": true, "lang": "Python", "length": null, "llama_tokens": null, "mathlib_filename": null, "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2021-11-08T12:21:52.000Z", "max_forks_repo_forks_event_min_datetime": "2018-11-29T00:01:08.000Z", "max_forks_repo_head_hexsha": "eb68001e15a3c0d3018bca8b94e731b7cbbdd5a7", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "wll199566/Learning-to-Generate-Chairs-with-Convolutional-Neural-Networks", "max_forks_repo_path": "evaluate.py", "max_issues_count": 1, "max_issues_repo_head_hexsha": "eb68001e15a3c0d3018bca8b94e731b7cbbdd5a7", "max_issues_repo_issues_event_max_datetime": "2020-04-10T09:49:20.000Z", "max_issues_repo_issues_event_min_datetime": "2020-04-10T09:49:20.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "wll199566/Learning-to-Generate-Chairs-with-Convolutional-Neural-Networks", "max_issues_repo_path": "evaluate.py", "max_line_length": 76, "max_stars_count": 6, "max_stars_repo_head_hexsha": "122d975a67f683d72718a60febbde351764db8a6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "bmahlbrand/Learning-to-Generate-Chairs-with-Convolutional-Neural-Networks", "max_stars_repo_path": "evaluate.py", "max_stars_repo_stars_event_max_datetime": "2021-06-30T14:11:59.000Z", "max_stars_repo_stars_event_min_datetime": "2018-11-02T16:45:36.000Z", "num_tokens": 595, "path": null, "reason": "import numpy", "repo": null, "save_path": null, "sha": null, "size": 2307 }