text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
import scipy
from scipy.spatial import ConvexHull
import matplotlib.pyplot as plt
def basic_cube():
"""
Cube based on ordering in program
"""
return np.array([
[-7.156285 , -3.80337925, -1.95817204],
[-7.156285 , -3.80337925, -1.70817204],
[-7.156285 , -3.55337925, -1.70817204],
[-7.156285 , -3.55337925, -1.95817204],
[-6.906285 , -3.80337925, -1.95817204],
[-6.906285 , -3.80337925, -1.70817204],
[-6.906285 , -3.55337925, -1.70817204],
[-6.906285 , -3.55337925, -1.95817204]])
def compute_edge_sites(cube_vertex):
pair_idx = np.array([
[0,1],
[0,3],
[2,3],
[1,2],
[0,4],
[3,7],
[2,6],
[1,5],
[4,5],
[4,7],
[6,7],
[5,6],
])
pairs = cube_vertex[pair_idx]
edge = np.mean(pairs, axis=1)
return edge
def unit_cube():
return np.array([
[0,0,0],
[0,0,1],
[0,1,1],
[0,1,0],
[1,0,0],
[1,0,1],
[1,1,1],
[1,1,0]
])
def all_operations_vertex():
def rot_opposite_faces_x(idx):
return idx[[4,0,3,7,5,1,2,6]]
def rot_opposite_faces_y(idx):
return idx[[3,0,1,2,7,4,5,6]]
def rot_opposite_faces_z(idx):
return idx[[4,5,1,0,7,6,2,3]]
def rot_cart_frame(idx):
return idx[[0,4,5,1,3,7,6,2]]
def rot_opposite_edges(idx):
return idx[[4,7,6,5,0,3,2,1]]
start_idx = np.arange(0,8)
idx_list = [start_idx]
for i in range(3):
idx_list.append(rot_opposite_faces_x(idx_list[-1]))
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(3):
rot_idx_list.append(rot_opposite_faces_y(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(3):
rot_idx_list.append(rot_opposite_faces_z(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(2):
rot_idx_list.append(rot_cart_frame(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(2):
rot_idx_list.append(rot_opposite_edges(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
all_idx = np.vstack(idx_list)
# all_idx = np.unique(all_idx,axis=0)
return all_idx
def all_operations_edge(idx_list):
def rot_opposite_faces_x(idx):
return idx[[4,9,5,1,8,10,2,0,7,11,6,3]]
def rot_opposite_faces_y(idx):
return idx[[1,2,3,0,5,6,7,4,9,10,11,8]]
def rot_opposite_faces_z(idx):
return idx[[8,4,0,7,9,1,3,11,10,5,2,6]]
def rot_cart_frame(idx):
return idx[[4,0,7,8,1,3,11,9,5,2,6,10]]
def rot_opposite_edges(idx):
return idx[[9,8,11,10,4,7,6,5,1,0,3,2]]
start_idx = np.arange(0,12)
idx_list = [start_idx]
for i in range(3):
idx_list.append(rot_opposite_faces_x(idx_list[-1]))
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(3):
rot_idx_list.append(rot_opposite_faces_y(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(3):
rot_idx_list.append(rot_opposite_faces_z(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(2):
rot_idx_list.append(rot_cart_frame(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
temp_idx_list = []
for entry in idx_list:
rot_idx_list = [entry]
for i in range(2):
rot_idx_list.append(rot_opposite_edges(rot_idx_list[-1]))
temp_idx_list += rot_idx_list
idx_list += temp_idx_list
all_idx = np.vstack(idx_list)
# all_idx = np.unique(all_idx,axis=0)
return all_idx
def apply_vertex_symmetry(vertex_idx):
#### Let's perform all rotations on lookup idx first
symmetry_idx_list = all_operations_vertex()
all_vertex_idx = []
for idx_list in symmetry_idx_list:
all_vertex_idx.append(vertex_idx[idx_list])
return all_vertex_idx
def apply_edge_symmetry(edge_idx):
### Construct corresponding symmetry relevant ordings for vertex/edge
### for triangulation
edge_symmetry_idx_list = all_operations_edge(edge_idx)
edge_symmetry_idx_list = np.array(edge_symmetry_idx_list)
all_edge_idx = []
for row in edge_symmetry_idx_list:
all_edge_idx.append(edge_idx[:,row])
return all_edge_idx
########### Let's build the vertex lookup table
all_comb = np.meshgrid([0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1])
all_comb = np.c_[
all_comb[0].ravel(),
all_comb[1].ravel(),
all_comb[2].ravel(),
all_comb[3].ravel(),
all_comb[4].ravel(),
all_comb[5].ravel(),
all_comb[6].ravel(),
all_comb[7].ravel()]
vertex_lookup = np.zeros((2,2,2,2,2,2,2,2,12))
def tostring(array):
"""
1D array to string
"""
return ",".join([str(x) for x in array])
def fromstring(array_str):
return np.fromstring(array_str, dtype=int, sep=",")
## Program fourteen primitives
## https://www.researchgate.net/publication/3410984_Brodlie_K_Improving_the_robustness_and_accuracy_of_the_marching_cubes_algorithm_for_isosurfacing_IEEE_Trans_Viz_and_Comput_Graph_91_16-29/figures?lo=1
#### For holding information to operate on using symmetry operations and store
#### tri connectivity
vertex_mask_idx = np.zeros((15,8)).astype(int)
tri_mask = np.zeros((16,12))
#### Build connectivity dict for these simple cases
#### Each entry is 2D array with one entry per connectivity and number of entries
#### equal to the number of triangles
tri_connectivity = {}
#### Same as tri_connectivity but populated with volume information for volume
#### adjustments to be made for each type.
#### Valued entered is a ratio out of 1 with respect to the volume of the
#### voxel that the entry adds.
tri_volume = {}
tri_volume_modifier = {}
### I will define the edges and the verticies that make up the shape that is
### needed to calculate the volume using a ConvexHull method. Data type
### is such that for each entry, there will be list of shapes that need to be
### evaluated. Each shape is defined by a tuple with the first being a mask for
### vertices and the second being a mask for edges.
volume_shape_mask = {}
#### 1. First entry all zeros
entry = vertex_mask_idx[0]
tri_connectivity[tostring(entry)] = np.zeros((1,12))
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
## Set volume
tri_volume[tostring(entry)] = 0
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 2. Simple Triangle
vertex_mask_idx[1,[0]] = 1
tri_mask[1,[0,1,4]] = 1
entry = vertex_mask_idx[1]
tri_connectivity[tostring(entry)] = np.zeros((1,12))
tri_connectivity[tostring(entry)][0][[0,1,4]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.02083333
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 3. Simple Plane down
vertex_mask_idx[2,[0,4]] = 1
tri_mask[2,[0,1,8,9]] = 1
entry = vertex_mask_idx[2]
tri_connectivity[tostring(entry)] = np.zeros((2,12))
tri_connectivity[tostring(entry)][0][[0,1,9]] = 1
tri_connectivity[tostring(entry)][1][[0,8,9]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.125
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 4. Across face double triangle
vertex_mask_idx[3,[0,5]] = 1
## First Tri
tri_mask[3,[0,1,4]] = 1
## Second Tri
tri_mask[3,[7,8,11]] = 1
entry = vertex_mask_idx[3]
tri_connectivity[tostring(entry)] = np.zeros((2,12))
tri_connectivity[tostring(entry)][0][[0,1,4]] = 1
tri_connectivity[tostring(entry)][1][[7,8,11]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 2*0.02083333
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 5. Across body double triangle
vertex_mask_idx[4,[0,6]] = 1
## First Tri
tri_mask[4,[0,1,4]] = 1
## Second Tri
tri_mask[4,[6,10,11]] = 1
entry = vertex_mask_idx[4]
tri_connectivity[tostring(entry)] = np.zeros((2,12))
tri_connectivity[tostring(entry)][0][[0,1,4]] = 1
tri_connectivity[tostring(entry)][1][[6,10,11]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 2*0.02083333
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 6. Three Bottom Corners
vertex_mask_idx[5,[3,4,7]] = 1
tri_mask[5,[1,4,8,10,2]] = 1
entry = vertex_mask_idx[5]
tri_connectivity[tostring(entry)] = np.zeros((3,12))
tri_connectivity[tostring(entry)][0][[1,4,8]] = 1
tri_connectivity[tostring(entry)][1][[1,8,2]] = 1
tri_connectivity[tostring(entry)][2][[2,8,10]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.35416667
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 7. One plane down one tri
vertex_mask_idx[6,[0,4,6]] = 1
## Plane down
tri_mask[6,[0,8,1,9]] = 1
## Upper Tri 6
tri_mask[6,[6,10,11]] = 1
entry = vertex_mask_idx[6]
tri_connectivity[tostring(entry)] = np.zeros((3,12))
tri_connectivity[tostring(entry)][0][[0,1,9]] = 1
tri_connectivity[tostring(entry)][1][[0,8,9]] = 1
tri_connectivity[tostring(entry)][2][[6,10,11]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.125+0.02083333
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 8. Triple Tri
vertex_mask_idx[7,[1,4,6]] = 1
## Tri 1
tri_mask[7,[0,3,7]] = 1
## Tri 4
tri_mask[7,[4,8,9]] = 1
## Tri 6
tri_mask[7,[6,10,11]] = 1
entry = vertex_mask_idx[7]
tri_connectivity[tostring(entry)] = np.zeros((3,12))
tri_connectivity[tostring(entry)][0][[0,3,7]] = 1
tri_connectivity[tostring(entry)][1][[4,8,9]] = 1
tri_connectivity[tostring(entry)][2][[6,10,11]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 3*0.02083333
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 9. Middle Plane
vertex_mask_idx[8,[0,3,4,7]] = 1
## Mid Plane
tri_mask[8,[0,2,8,10]] = 1
entry = vertex_mask_idx[8]
tri_connectivity[tostring(entry)] = np.zeros((2,12))
tri_connectivity[tostring(entry)][0][[0,8,10]] = 1
tri_connectivity[tostring(entry)][1][[0,2,10]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.5
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 10. Hexagon
vertex_mask_idx[9,[0,2,3,7]] = 1
## Hexagon
tri_mask[9,[0,3,4,6,9,10]] = 1
entry = vertex_mask_idx[9]
tri_connectivity[tostring(entry)] = np.zeros((4,12))
tri_connectivity[tostring(entry)][0][[0,3,6]] = 1
tri_connectivity[tostring(entry)][1][[0,6,10]] = 1
tri_connectivity[tostring(entry)][2][[0,9,10]] = 1
tri_connectivity[tostring(entry)][3][[0,4,9]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.375
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 11. Double Plane
vertex_mask_idx[10,[0,1,6,7]] = 1
## Plane 1
tri_mask[10,[1,3,4,7]] = 1
## Plane 2
tri_mask[10,[5,6,9,11]] = 1
entry = vertex_mask_idx[10]
tri_connectivity[tostring(entry)] = np.zeros((4,12))
tri_connectivity[tostring(entry)][0][[1,3,7]] = 1
tri_connectivity[tostring(entry)][1][[1,4,7]] = 1
tri_connectivity[tostring(entry)][2][[5,6,11]] = 1
tri_connectivity[tostring(entry)][3][[5,9,11]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.75
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 12.
vertex_mask_idx[11,[0,3,6,7]] = 1
## Plane
tri_mask[11,[0,2,4,6,9,11]] = 1
entry = vertex_mask_idx[11]
tri_connectivity[tostring(entry)] = np.zeros((4,12))
tri_connectivity[tostring(entry)][0][[4,9,11]] = 1
tri_connectivity[tostring(entry)][1][[2,6,11]] = 1
tri_connectivity[tostring(entry)][2][[0,2,4]] = 1
tri_connectivity[tostring(entry)][3][[2,4,11]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.375
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 13. 6+tri
vertex_mask_idx[12,[1,3,4,7]] = 1
## 6 Plane
tri_mask[12,[1,4,8,10,2]] = 1
## Tri 1
tri_mask[12,[0,3,7]] = 1
entry = vertex_mask_idx[12]
tri_connectivity[tostring(entry)] = np.zeros((4,12))
tri_connectivity[tostring(entry)][0][[1,4,8]] = 1
tri_connectivity[tostring(entry)][1][[1,8,2]] = 1
tri_connectivity[tostring(entry)][2][[2,8,10]] = 1
tri_connectivity[tostring(entry)][3][[0,3,7]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.5
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 14. Quad Tri
vertex_mask_idx[13,[0,2,5,7]] = 1
## Tri 0
tri_mask[13,[0,1,4]] = 1
## Tri 2
tri_mask[13,[2,3,6]] = 1
## Tri 5
tri_mask[13,[7,8,11]] = 1
## Tri 7
tri_mask[13,[5,9,10]] = 1
entry = vertex_mask_idx[13]
tri_connectivity[tostring(entry)] = np.zeros((4,12))
tri_connectivity[tostring(entry)][0][[0,1,4]] = 1
tri_connectivity[tostring(entry)][1][[2,3,6]] = 1
tri_connectivity[tostring(entry)][2][[7,8,11]] = 1
tri_connectivity[tostring(entry)][3][[5,9,10]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 4*0.02083333
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### 15.
vertex_mask_idx[14,[2,3,4,7]] = 1
entry = vertex_mask_idx[14]
tri_connectivity[tostring(entry)] = np.zeros((4,12))
tri_connectivity[tostring(entry)][0][[1,3,4]] = 1
tri_connectivity[tostring(entry)][1][[4,3,10]] = 1
tri_connectivity[tostring(entry)][2][[3,6,10]] = 1
tri_connectivity[tostring(entry)][3][[4,8,10]] = 1
## Set Opposite
not_entry = np.logical_not(entry).astype(int)
tri_connectivity[tostring(not_entry)] = tri_connectivity[tostring(entry)]
### Set volume
tri_volume[tostring(entry)] = 0.375
tri_volume[tostring(not_entry)] = 1-tri_volume[tostring(entry)]
tri_volume_modifier[tostring(entry)] = 0
tri_volume_modifier[tostring(not_entry)] = 1
#### Performing rotations to populate the entire tri_connectivity
iterations = [(keys,values) for keys,values in tri_connectivity.items()]
for key,value in iterations:
key_array = fromstring(key)
all_vertex = apply_vertex_symmetry(key_array)
all_edge = apply_edge_symmetry(value)
for temp_idx,vertex in enumerate(all_vertex):
tri_connectivity[tostring(vertex)] = all_edge[temp_idx]
iterations = [(keys,values) for keys,values in tri_volume.items()]
for key,value in iterations:
key_array = fromstring(key)
all_vertex = apply_vertex_symmetry(key_array)
for temp_idx,vertex in enumerate(all_vertex):
tri_volume[tostring(vertex)] = value
iterations = [(keys,values) for keys,values in tri_volume_modifier.items()]
for key,value in iterations:
key_array = fromstring(key)
all_vertex = apply_vertex_symmetry(key_array)
for temp_idx,vertex in enumerate(all_vertex):
tri_volume_modifier[tostring(vertex)] = value
#### Plotting all primitives
def plot_primitives(figname="marching_cubes_primitive_no_numbers.pdf"):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
cart_points = basic_cube()
fig = plt.figure(figsize=(24,24))
for entry_idx,vertex_row in enumerate(vertex_mask_idx):
ax = fig.add_subplot(4,4,entry_idx+1, projection='3d')
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.scatter(cart_points[:,0][0:8],
cart_points[:,1][0:8],
cart_points[:,2][0:8],
facecolor=(0,0,0,0),
edgecolor="k",
s=100)
# ## Add numbering
# for idx,point in enumerate(cart_points[0:8]):
# ax.text(point[0],
# point[1],
# point[2],
# "{}".format(idx),
# fontsize=16)
cube_vertex = cart_points[:8]
edge_vertex = compute_edge_sites(cube_vertex)
#### Visualize edge points
ax.scatter(edge_vertex[:,0],
edge_vertex[:,1],
edge_vertex[:,2],
edgecolor="k",
facecolor="tab:red",
s=100)
# ## Number edge cites
# for idx,point in enumerate(edge_vertex):
# ax.text(point[0],
# point[1],
# point[2],
# "{}".format(idx),
# fontsize=16)
## Plot relevant vertices
vertex_row_bool = vertex_row.astype(bool)
temp_vertex = cart_points[vertex_row_bool,:]
if len(temp_vertex) > 0:
ax.scatter(
temp_vertex[:,0],
temp_vertex[:,1],
temp_vertex[:,2],
c="tab:green",
edgecolor="k",
s=100)
## Tri idx
entry = tostring(vertex_row)
triangles_bool = tri_connectivity[entry].astype(bool)
array_to_mask = np.repeat(np.arange(0,12)[None,:],
triangles_bool.shape[0],
axis=0)
tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
if len(tri_idx) != 0:
ax.plot_trisurf(
edge_vertex[:,0],
edge_vertex[:,1],
edge_vertex[:,2],
triangles=tri_idx)
fig.savefig(figname,
dpi=400)
##### Plotting all in tri_connectivity
def plot_all_cubes(figname="all_marching_cubes.pdf"):
cart_points = basic_cube()
fig = plt.figure(figsize=(48,192))
entry_idx = 0
for key,value in tri_connectivity.items():
ax = fig.add_subplot(32,8,entry_idx+1, projection='3d')
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.scatter(cart_points[:,0][0:8],
cart_points[:,1][0:8],
cart_points[:,2][0:8])
## Add numbering
for idx,point in enumerate(cart_points[0:8]):
ax.text(point[0],
point[1],
point[2],
"{}".format(idx),
fontsize=16)
cube_vertex = cart_points[:8]
edge_vertex = compute_edge_sites(cube_vertex)
#### Visualize edge points
ax.scatter(edge_vertex[:,0],
edge_vertex[:,1],
edge_vertex[:,2],
edgecolor="k",
facecolor="tab:red")
## Number edge cites
for idx,point in enumerate(edge_vertex):
ax.text(point[0],
point[1],
point[2],
"{}".format(idx),
fontsize=16)
## Plot Triangle
triangles_bool = value.astype(bool)
array_to_mask = np.repeat(np.arange(0,12)[None,:],
triangles_bool.shape[0],
axis=0)
tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
if len(tri_idx) != 0:
ax.plot_trisurf(
edge_vertex[:,0],
edge_vertex[:,1],
edge_vertex[:,2],
triangles=tri_idx)
entry_idx += 1
fig.savefig("all_marching_cubes.pdf")
##### Deriviing volumes for each primitive
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
#
cart_points = unit_cube()
#flat_rows = {}
#flat_rows[tostring(np.array([1,1,0,0,0,0,0,0]))] = 1
#all_vertex = apply_vertex_symmetry(np.array([1,1,0,0,0,0,0,0]))
#for entry in all_vertex:
# flat_rows[tostring(entry)] = 1
#flat_rows[tostring(np.array([1,1,1,1,0,0,0,0]))] = 1
#all_vertex = apply_vertex_symmetry(np.array([1,1,1,1,0,0,0,0]))
#for entry in all_vertex:
# flat_rows[tostring(entry)] = 1
#
#
#def get_volume(vertex_row, edges):
# """
# Arguments
# ---------
# vertex_row: array
# Array of shape (8,) equal to a binary mask of all of the populated voxels
# edges: array
# Arry of shape (12,0) with edges, either normal or projected
#
# """
# edge_vertex=edges
#
# ## Check that the surface will not be flat with respec to the
# ## Z direction
# if tostring(vertex_row) in flat_rows:
# print("FLAT")
# try:
# triangles_bool = tri_connectivity[tostring(vertex_row)].astype(bool)
# array_to_mask = np.repeat(np.arange(0,12)[None,:],
# triangles_bool.shape[0],
# axis=0)
# tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
#
# all_tri = edge_vertex[tri_idx]
# vol = 0
# for tri_entry in all_tri[:-1]:
# xyz = tri_entry
# d = scipy.spatial.Delaunay(xyz[:,:2])
# except:
# rot_matrix = np.array([[1,0,0],[0,0,1],[0,1,0]])
# edge_vertex = np.dot(rot_matrix,edge_vertex.T).T
#
# case_11 = False
# if np.linalg.norm(vertex_row - np.array([1, 1, 0, 0, 0, 0, 1, 1])) < 1e-3:
# case_11 = True
# ## Logical NOT
# elif np.linalg.norm(vertex_row - np.array([0, 0, 1, 1, 1, 1, 0, 0])) < 1e-3:
# case_11 = True
#
# case_13 = False
# if np.linalg.norm(vertex_row - np.array([0, 1, 0, 1, 1, 0, 0, 1])) == 0:
# case_13 = True
# elif np.linalg.norm(vertex_row - np.array([1, 0, 1, 0, 0, 1, 1, 0])) == 0:
# case_13 = True
#
# ## Handle Case 11 with two planes in Z direction
# if case_11:
# entry = tostring(vertex_row)
# triangles_bool = tri_connectivity[entry].astype(bool)
# array_to_mask = np.repeat(np.arange(0,12)[None,:],
# triangles_bool.shape[0],
# axis=0)
# tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
#
# rot_matrix = np.array([[1,0,0],[0,0,1],[0,1,0]])
#
# edge_vertex = np.dot(rot_matrix,edge_vertex.T).T
# all_tri = edge_vertex[tri_idx]
#
# vol = 0
# for tri_entry in all_tri[0:2]:
# xyz = tri_entry
# d = scipy.spatial.Delaunay(xyz[:,:2])
# tri = xyz[d.vertices]
# a = tri[:,0,:2] - tri[:,1,:2]
# b = tri[:,0,:2] - tri[:,2,:2]
# proj_area = np.cross(a, b).sum(axis=-1)
# zavg = tri[:,:,2].sum(axis=1)
# vol += np.abs(zavg * np.abs(proj_area) / 6.0)
#
# for entry in all_tri[2:]:
# xyz = tri_entry
# d = scipy.spatial.Delaunay(xyz[:,:2])
# tri = xyz[d.vertices]
# a = tri[:,0,:2] - tri[:,1,:2]
# b = tri[:,0,:2] - tri[:,2,:2]
# proj_area = np.cross(a, b).sum(axis=-1)
# zavg = tri[:,:,2].sum(axis=1)
# vol += np.abs(zavg * np.abs(proj_area) / 6.0)
#
# return vol
#
# ### Handle case 13 with missing corner
# if case_13:
# entry = tostring(vertex_row)
# triangles_bool = tri_connectivity[entry].astype(bool)
# array_to_mask = np.repeat(np.arange(0,12)[None,:],
# triangles_bool.shape[0],
# axis=0)
# tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
#
# all_tri = edge_vertex[tri_idx]
# vol = 0
# for tri_entry in all_tri[:-1]:
# xyz = tri_entry
# d = scipy.spatial.Delaunay(xyz[:,:2])
# tri = xyz[d.vertices]
# a = tri[:,0,:2] - tri[:,1,:2]
# b = tri[:,0,:2] - tri[:,2,:2]
# proj_area = np.cross(a, b).sum(axis=-1)
# zavg = tri[:,:,2].sum(axis=1)
# vol += np.abs(zavg * np.abs(proj_area) / 6.0)
#
# tri_entry = all_tri[-1]
# xyz = tri_entry
# d = scipy.spatial.Delaunay(xyz[:,:2])
# tri = xyz[d.vertices]
# a = tri[:,0,:2] - tri[:,1,:2]
# b = tri[:,0,:2] - tri[:,2,:2]
# proj_area = np.cross(a, b).sum(axis=-1)
# zavg = tri[:,:,2].sum(axis=1)
# vol -= np.abs(zavg * np.abs(proj_area) / 6.0)
#
# return vol
#
# ## Tri idx
# entry = tostring(vertex_row)
# triangles_bool = tri_connectivity[entry].astype(bool)
# array_to_mask = np.repeat(np.arange(0,12)[None,:],
# triangles_bool.shape[0],
# axis=0)
# tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
#
# all_tri = edge_vertex[tri_idx]
# vol = 0
# for tri_entry in all_tri:
# xyz = tri_entry
# try:
# d = scipy.spatial.Delaunay(xyz[:,:2])
# except:
# continue
# tri = xyz[d.vertices]
# a = tri[:,0,:2] - tri[:,1,:2]
# b = tri[:,0,:2] - tri[:,2,:2]
# proj_area = np.cross(a, b).sum(axis=-1)
# zavg = tri[:,:,2].sum(axis=1)
# vol += np.abs(zavg * np.abs(proj_area) / 6.0)
#
# return vol
### Vertex Mask idx are all of the primitive entries.
### This leads to 163 entires
all_unique = []
primitive_dict = {}
for row in vertex_mask_idx:
row_str = tostring(row)
primitive_dict[row_str] = {row_str: 1}
all_temp = np.vstack(apply_vertex_symmetry(row))
### Algorithm for removing duplicate rows
R,C = np.triu_indices(all_temp.shape[0],1)
mask = (np.abs(all_temp[R] - all_temp[C]) < 1e-3).all(axis=(1))
I,G = R[mask], C[mask]
remove_idx = np.unique(G)
original_idx = np.arange(0,all_temp.shape[0])
final_idx = np.setdiff1d(original_idx,remove_idx)
all_temp = all_temp[final_idx]
all_unique.append(all_temp)
for entry in all_temp:
primitive_dict[row_str][tostring(entry)] = 1
### Now do same operation but for the not of the primitive
not_primitive_dict = {}
for row in vertex_mask_idx:
row = np.logical_not(row).astype(int)
row_str = tostring(row)
not_primitive_dict[row_str] = {row_str: 1}
all_temp = np.vstack(apply_vertex_symmetry(row))
### Algorithm for removing duplicate rows
R,C = np.triu_indices(all_temp.shape[0],1)
mask = (np.abs(all_temp[R] - all_temp[C]) < 1e-3).all(axis=(1))
I,G = R[mask], C[mask]
remove_idx = np.unique(G)
original_idx = np.arange(0,all_temp.shape[0])
final_idx = np.setdiff1d(original_idx,remove_idx)
all_temp = all_temp[final_idx]
all_unique.append(all_temp)
for entry in all_temp:
not_primitive_dict[row_str][tostring(entry)] = 1
### Defines the correct edges that form a triangle given the vertex of the cube
triangles = {}
triangles[0] = [0,1,4]
triangles[1] = [0,3,7]
triangles[2] = [2,3,6]
triangles[3] = [1,2,5]
triangles[4] = [4,8,9]
triangles[5] = [7,8,11]
triangles[6] = [6,10,11]
triangles[7] = [5,9,10]
## Nearest neighbors along edges of cube
nearest_neighbors = {}
nearest_neighbors[0] = [1,3,4]
nearest_neighbors[1] = [0,2,5]
nearest_neighbors[2] = [1,3,6]
nearest_neighbors[3] = [0,2,7]
nearest_neighbors[4] = [0,5,7]
nearest_neighbors[5] = [1,4,6]
nearest_neighbors[6] = [2,5,7]
nearest_neighbors[7] = [3,4,6]
### For primitive with plane and triangle to determine where the plane is
### and where the triangle is
plane_tri_dict = {}
for entry in primitive_dict["1,0,0,0,1,0,1,0"].keys():
temp_array = fromstring(entry)
pos_idx = np.where(temp_array == 1)[0]
for idx in pos_idx:
plane = [idx]
nn = nearest_neighbors[idx]
for value in nn:
if value in pos_idx:
plane.append(value)
break
if len(plane) == 2:
tri_idx = np.setdiff1d(pos_idx, plane)
break
plane_tri_dict[entry] = plane
def get_volume(vertex_row, vert, edges):
"""
Algorithm is as follows:
1. Identify what the primitive shape should be
2. Rotate into original? No, can't do that easily
3. Calculate the volume correctly based on this primitive shape
Arguments
---------
vertex_row: array
Array of shape (8,) equal to a binary mask of all of the populated voxels
vert: array
Array of shape (8,3) for cartesian positions of the cube.
edges: array
Arry of shape (12,3) with edges, either normal or projected
"""
row_str = tostring(vertex_row)
found = False
primitive = []
not_value = False
for key,value in primitive_dict.items():
if row_str in value:
found = True
primitive = key
break
if not found:
for key,value in not_primitive_dict.items():
if row_str in value:
primitive = key
found = True
not_value = True
# raise Exception("Just need to use logical_not as primitive and then just do 1-final volume."+
# " {}".format(row_str))
## Now go back over to find other equivalent
vertex_row = np.logical_not(vertex_row).astype(int)
row_str = tostring(vertex_row)
found = False
for key,value in primitive_dict.items():
if row_str in value:
found = True
primitive = key
break
break
if found == False:
raise Exception("{}".format(row_str))
# print(row_str,primitive)
triangles_bool = tri_connectivity[row_str].astype(bool)
## Mask to get active vertices
active_vert = vert[vertex_row.astype(bool)]
if primitive == '0,0,0,0,0,0,0,0':
return 0
## One triangle
elif primitive == '1,0,0,0,0,0,0,0':
active_edges = edges[triangles_bool[0]]
shape_vert = np.vstack([active_edges,active_vert])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## Plane
elif primitive == '1,0,0,0,1,0,0,0':
plane_edges_mask = np.logical_or(triangles_bool[0],triangles_bool[1])
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## Double Triangle
elif primitive == '1,0,0,0,0,1,0,0':
tri_idx = np.where(vertex_row == 1)[0]
vol = 0
for idx in tri_idx:
## Get edge idx for known triangle orientations
tri_edges_idx = triangles[idx]
tri_edges = edges[tri_edges_idx]
## Get vert for this triangle
temp_active_vert = vert[idx]
temp_shape_vert = np.vstack([tri_edges,temp_active_vert])
try:
temp_vol = ConvexHull(temp_shape_vert).volume
vol += temp_vol
except:
pass
## Double Triangle body diagonal
elif primitive == '1,0,0,0,0,0,1,0':
tri_idx = np.where(vertex_row == 1)[0]
vol = 0
for idx in tri_idx:
## Get edge idx for known triangle orientations
tri_edges_idx = triangles[idx]
tri_edges = edges[tri_edges_idx]
## Get vert for this triangle
temp_active_vert = vert[idx]
temp_shape_vert = np.vstack([tri_edges,temp_active_vert])
try:
temp_vol = ConvexHull(temp_shape_vert).volume
vol += temp_vol
except:
pass
## Strange shape, but it can safely be evaluated
## Three bottom corners
elif primitive == '0,0,0,1,1,0,0,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## One plane one triangle
elif primitive == '1,0,0,0,1,0,1,0':
# raise Exception("HARD TO EVALUATE")
pos_idx = np.where(vertex_row == 1)[0]
plane_idx = plane_tri_dict[row_str]
tri_idx = np.setdiff1d(pos_idx, plane)[0]
## Get triangle shape
tri_edge_idx = triangles[tri_idx]
tri_edges = edges[tri_edge_idx]
tri_vert = vert[tri_idx]
tet_vert = np.vstack([tri_vert,tri_edges])
tri_vol = 0
try:
tri_vol = ConvexHull(tet_vert).volume
except:
pass
## Get plane shape
plane_vert = vert[plane_idx]
## Get edges manually
plane_only_row = np.zeros((8,))
plane_only_row[plane_idx] = 1
plane_edges_mask = tri_connectivity[tostring(plane_only_row.astype(int))
].astype(bool)
plane_edges_mask = np.logical_or(triangles_bool[0],triangles_bool[1])
plane_edges = edges[plane_edges_mask]
plane_vert = np.vstack([plane_vert,plane_edges])
plane_vol = 0
try:
plane_vol = ConvexHull(plane_vert).volume
except:
pass
return tri_vol + plane_vol
## Three Triangles
elif primitive == '0,1,0,0,1,0,1,0':
## Just iterate over triangles
tri_idx = np.where(vertex_row == 1)[0]
vol = 0
for idx in tri_idx:
## Get edge idx for known triangle orientations
tri_edges_idx = triangles[idx]
tri_edges = edges[tri_edges_idx]
## Get vert for this triangle
temp_active_vert = vert[idx]
temp_shape_vert = np.vstack([tri_edges,temp_active_vert])
try:
temp_vol = ConvexHull(temp_shape_vert).volume
vol += temp_vol
except:
pass
## Simple plane, can just cat together
elif primitive == '1,0,0,1,1,0,0,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## Hexagon, just cat together
elif primitive == '1,0,1,1,0,0,0,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## Double plane. I think everything can just be cat together
elif primitive == '1,1,0,0,0,0,1,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
## Using 1 minus because it will be the more common case for the
## moleucles
try:
vol = 1-ConvexHull(shape_vert).volume
except:
vol = 0
## Weird, but can just stick everything together
elif primitive == '1,0,0,1,0,0,1,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## Weird, but can just stick everything together
elif primitive == '0,1,0,1,1,0,0,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
## Four triangles
elif primitive == '1,0,1,0,0,1,0,1':
## Just iterate over triangles
tri_idx = np.where(vertex_row == 1)[0]
vol = 0
for idx in tri_idx:
## Get edge idx for known triangle orientations
tri_edges_idx = triangles[idx]
tri_edges = edges[tri_edges_idx]
## Get vert for this triangle
temp_active_vert = vert[idx]
temp_shape_vert = np.vstack([tri_edges,temp_active_vert])
try:
temp_vol = ConvexHull(temp_shape_vert).volume
vol += temp_vol
except:
pass
elif primitive == '0,0,1,1,1,0,0,1':
## Just get all active edges
plane_edges_mask = np.sum(triangles_bool,axis=0).astype(bool)
plane_edges = edges[plane_edges_mask]
shape_vert = np.vstack([active_vert,plane_edges])
try:
vol = ConvexHull(shape_vert).volume
except:
vol = 0
else:
raise Exception("PRIMITIVE NOT FOUND for {}".format(primitive))
if not_value:
## First compute volume for entire cube
spacing = np.linalg.norm(vert[0] - vert[1])
cube_vol = spacing*spacing*spacing
return cube_vol - vol
else:
return vol
if __name__ == "__main__":
pass
|
{"hexsha": "e96d4aee45de7723967f618b9e373afe3dd49741", "size": 40845, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymove/molecules/marching_cubes_lookup.py", "max_stars_repo_name": "manny405/PyMoVE", "max_stars_repo_head_hexsha": "82045fa27b3bd31f2159d3ad72dc0a373c5e7b23", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-01-24T10:35:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T07:55:44.000Z", "max_issues_repo_path": "pymove/molecules/marching_cubes_lookup.py", "max_issues_repo_name": "manny405/PyMoVE", "max_issues_repo_head_hexsha": "82045fa27b3bd31f2159d3ad72dc0a373c5e7b23", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymove/molecules/marching_cubes_lookup.py", "max_forks_repo_name": "manny405/PyMoVE", "max_forks_repo_head_hexsha": "82045fa27b3bd31f2159d3ad72dc0a373c5e7b23", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-28T16:37:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-28T16:37:48.000Z", "avg_line_length": 32.99273021, "max_line_length": 202, "alphanum_fraction": 0.6005875872, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 11448}
|
#!/usr/bin/python2
# This script computes every artist's Snoop Dogg Number their shortest path to Snoop Dogg by
# performing a breadth-first traversal and computing the results in a single pass of the vertices.
# This method can only be applied to the unweighted graph.
#
# This script runs in O(|E|) as far as I know. If that's true, I expect it to run in linear time
# for my purposes because the music collaboration graphs I'm working with are sparse and will never
# come close to being fully connected. This script took 20 seconds to run on an i7-6700k.
import psycopg2
import networkx as nx
# Connect to the MusicBrainz database and load graph from disk
connection = psycopg2.connect(database="musicbrainz", user="musicbrainz", password="", host="musicbrainz", port="5432")
cursor = connection.cursor()
graph = nx.read_gexf("graph/sdn-unweighted.gexf")
# Prepare the database
cursor.execute("DROP TABLE IF EXISTS snoopdogg_number_bfs;")
cursor.execute("""
CREATE TABLE snoopdogg_number_bfs (
artist TEXT NOT NULL,
distance INTEGER NOT NULL,
path TEXT NOT NULL,
PRIMARY KEY(artist)
);
""")
# Initialize dictionary with the Snoop Dogg as the base case
# TODO: Create class for storing artists' SDN and path.
sdn = {"Snoop Dogg" : (0, ["Snoop Dogg"])}
# Traverse the graph breadth-first and compute every artist's Snoop Dogg Number in O(V + E)
for edge in nx.bfs_edges(graph, "Snoop Dogg"):
parent = edge[0]
child = edge[1]
dist_to_snoopdogg = sdn[parent][0] + 1
path_to_snoopdogg = sdn[parent][1] + [child]
sdn[child] = (dist_to_snoopdogg, path_to_snoopdogg)
# Insert the data via one long query - this is an order of magnitude faster than one query per row
data_string = ','.join(cursor.mogrify('(%s,%s,%s)', (artist, sdn[artist][0], sdn[artist][1])) for artist in sdn) # mogrify requires python2
cursor.execute('INSERT INTO snoopdogg_number_bfs VALUES ' + data_string)
# TODO: Run query that adds all the artists from "nodes" table that have no path to Snoop Dogg.
# Apply all changes to the database
connection.commit()
connection.close()
print("Done!")
|
{"hexsha": "d6d4963b67d8067d78bbbe32d026ae2a20bc60f7", "size": 2137, "ext": "py", "lang": "Python", "max_stars_repo_path": "compute_snoopdogg_number_bfs.py", "max_stars_repo_name": "basimr/SnoopDoggNumber", "max_stars_repo_head_hexsha": "9574ba6c611ecfac100e8dcccc87c3ec5e33ed76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-11-01T18:58:54.000Z", "max_stars_repo_stars_event_max_datetime": "2017-01-30T17:58:09.000Z", "max_issues_repo_path": "compute_snoopdogg_number_bfs.py", "max_issues_repo_name": "basimr/snoop-dogg-number", "max_issues_repo_head_hexsha": "9574ba6c611ecfac100e8dcccc87c3ec5e33ed76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compute_snoopdogg_number_bfs.py", "max_forks_repo_name": "basimr/snoop-dogg-number", "max_forks_repo_head_hexsha": "9574ba6c611ecfac100e8dcccc87c3ec5e33ed76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0961538462, "max_line_length": 139, "alphanum_fraction": 0.727655592, "include": true, "reason": "import networkx", "num_tokens": 562}
|
import logging
import warnings
import keras
import keras.backend as K
import numpy as np
def load_model(path):
return keras.models.load_model(
path,
custom_objects={
'OffsetAndScale': OffsetAndScale,
'_sigmoid2': _sigmoid2
}
)
def simple_model(data_x,
data_y,
structure,
hidden_activation,
output_activation,
learning_rate,
weight_decay,
momentum,
minibatch_size,
loss_function):
input_node = keras.layers.Input((data_x.shape[1],))
std = np.std(data_x, axis=0, ddof=1)
std[np.where(std == 0)] = 1
model = OffsetAndScale(
offset=-np.mean(data_x, axis=0),
scale=1.0/std
)(input_node)
for n in structure:
model = keras.layers.Dense(
units=n,
kernel_regularizer=keras.regularizers.l2(weight_decay)
)(model)
model = hidden_activation(model)
model = keras.layers.Dense(
units=data_y.shape[1],
kernel_regularizer=keras.regularizers.l2(weight_decay),
)(model)
if output_activation:
model = output_activation(model)
model = keras.models.Model(inputs=input_node, outputs=model)
compile_args = {
'optimizer': keras.optimizers.SGD(
lr=learning_rate,
momentum=momentum
),
'loss': loss_function
}
fit_args = {
'batch_size': minibatch_size,
'epochs': 1000,
'callbacks': [
ThresholdEarlyStopping(verbose=1, min_epochs=50),
],
'validation_split': 0.1,
}
return model, compile_args, fit_args, None
def _sigmoid2(x):
import sys
MAXEXP = np.log(sys.float_info.max)
return K.switch(
K.greater_equal(-2*x, MAXEXP),
0.0 * x,
1.0 / (1.0 + K.exp(-2*x))
)
Sigmoid2 = keras.layers.Activation(_sigmoid2)
def _config(layer, config):
base_config = super(layer.__class__, layer).get_config()
return dict(base_config.items() + config.items())
class OffsetAndScale(keras.layers.Layer):
""" (x + offset) * scale """
def __init__(self, offset, scale, **kwargs):
self.offset = offset
self.scale = scale
if isinstance(self.scale, dict) and self.scale['type'] == 'ndarray':
self.scale = np.array(self.scale['value']).astype('float32')
if isinstance(self.offset, dict) and self.offset['type'] == 'ndarray':
self.offset = np.array(self.offset['value']).astype('float32')
super(OffsetAndScale, self).__init__(**kwargs)
def call(self, x):
return (x + self.offset) * self.scale
def get_config(self):
return _config(self, {
'offset': self.offset,
'scale': self.scale
})
class ThresholdEarlyStopping(keras.callbacks.EarlyStopping):
def __init__(self, monitor='val_loss', min_epochs=10,
threshold=0.995, increase=1.75, verbose=0, mode='auto'):
super(ThresholdEarlyStopping, self).__init__(
monitor=monitor,
patience=min_epochs,
verbose=verbose,
mode=mode
)
self.threshold = threshold
self.increase = increase
def on_epoch_end(self, epoch, logs={}):
if epoch < self.patience:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Early stopping requires %s available!' %
(self.monitor), RuntimeWarning)
if self.monitor_op(current, self.best):
if self.monitor_op(current, self.threshold*self.best):
self.patience = max(self.patience, epoch * self.increase)
self.best = current
else:
if self.verbose > 0:
print('Epoch %05d: early stopping' % (epoch))
self.model.stop_training = True
|
{"hexsha": "ef16163d3331262ed99ec56ba6164c21d86a199c", "size": 4012, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras_utils.py", "max_stars_repo_name": "SuperSourav/transliterationLangugageDetect", "max_stars_repo_head_hexsha": "df2a812ed9488daeb33b262679c4716a26e26cfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keras_utils.py", "max_issues_repo_name": "SuperSourav/transliterationLangugageDetect", "max_issues_repo_head_hexsha": "df2a812ed9488daeb33b262679c4716a26e26cfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:57:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:22:25.000Z", "max_forks_repo_path": "keras_utils.py", "max_forks_repo_name": "SuperSourav/transliterationLangugageDetect", "max_forks_repo_head_hexsha": "df2a812ed9488daeb33b262679c4716a26e26cfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7466666667, "max_line_length": 78, "alphanum_fraction": 0.5735294118, "include": true, "reason": "import numpy", "num_tokens": 898}
|
from sklearn.base import RegressorMixin
from ...predictors.predictor import DL85Predictor
import numpy as np
from math import floor, ceil
class DL85Regressor(DL85Predictor, RegressorMixin):
"""An optimal binary decision tree regressor.
Parameters
----------
max_depth : int, default=1
Maximum depth of the tree to be found
min_sup : int, default=1
Minimum number of examples per leaf
max_error : int, default=0
Maximum allowed error. Default value stands for no bound. If no tree can be found that is strictly better, the model remains empty.
stop_after_better : bool, default=False
A parameter used to indicate if the search will stop after finding a tree better than max_error
time_limit : int, default=0
Allocated time in second(s) for the search. Default value stands for no limit. The best tree found within the time limit is stored, if this tree is better than max_error.
verbose : bool, default=False
A parameter used to switch on/off the print of what happens during the search
desc : bool, default=False
A parameter used to indicate if the sorting of the items is done in descending order of information gain
asc : bool, default=False
A parameter used to indicate if the sorting of the items is done in ascending order of information gain
repeat_sort : bool, default=False
A parameter used to indicate whether the sorting of items is done at each level of the lattice or only before the search
print_output : bool, default=False
A parameter used to indicate if the search output will be printed or not
backup_error : str, default = "mse"
Error to optimize if no user error function is provided. Can be one of {"mse", "quantile"}
quantile_value: float, default = 0.5
Quantile value. Only used when backup_error is "quantile"
Attributes
----------
tree_ : str
Outputted tree in serialized form; remains empty as long as no model is learned.
size_ : int
The size of the outputted tree
depth_ : int
Depth of the found tree
error_ : float
Error of the found tree
accuracy_ : float
Accuracy of the found tree on training set
lattice_size_ : int
The number of nodes explored before found the optimal tree
runtime_ : float
Time of the optimal decision tree search
timeout_ : bool
Whether the search reached timeout or not
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(
self,
max_depth=1,
min_sup=1,
error_function=None,
max_error=0,
stop_after_better=False,
time_limit=0,
verbose=False,
desc=False,
asc=False,
repeat_sort=False,
leaf_value_function=None,
print_output=False,
backup_error="mse",
quantile_value=0.5,
quantile_estimation="linear",
):
if backup_error not in ["mse", "quantile"]:
raise ValueError(f"{backup_error} is not a valid error function string.")
DL85Predictor.__init__(
self,
max_depth=max_depth,
min_sup=min_sup,
error_function=error_function,
fast_error_function=None,
max_error=max_error,
stop_after_better=stop_after_better,
time_limit=time_limit,
verbose=verbose,
desc=desc,
asc=asc,
repeat_sort=repeat_sort,
leaf_value_function=leaf_value_function,
print_output=print_output,
backup_error=backup_error,
quantile_value=quantile_value,
quantile_estimation=quantile_estimation,
)
self.to_redefine = self.leaf_value_function is None
@staticmethod
def mean_leaf_value(tids, y):
return np.mean(y[list(tids)], axis=0)
@staticmethod
def quantile_linear_estimation(tids, y, q):
return np.quantile(y[list(tids)], q)
@staticmethod
def quantile_optimal_estimation(tids, y, q):
N = len(tids)
h = (N-1)*q
corrected_q = q if q == 0.5 else (floor(h)/(N-1) if q > 0.5 else ceil(h)/(N-1))
return np.quantile(y[list(tids)], corrected_q)
def fit(self, X, y):
"""Implements the standard fitting function for a DL8.5 regressor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples, n_predictions)
The training output samples.
Returns
-------
self : object
Returns self.
"""
if self.backup_error == "quantile":
idx = np.argsort(y)
else:
idx = np.arange(len(y))
X = X[idx]
y = y[idx]
if self.to_redefine:
if self.backup_error == "mse":
self.leaf_value_function = lambda tids: self.mean_leaf_value(tids, y)
elif self.backup_error == "quantile":
if self.quantile_estimation == "linear":
self.leaf_value_function = lambda tids: self.quantile_linear_estimation(tids, y, self.quantile_value)
elif self.quantile_estimation == "optimal":
self.leaf_value_function = lambda tids: self.quantile_optimal_estimation(tids, y, self.quantile_value)
# call fit method of the predictor
DL85Predictor.fit(self, X, y)
# Return the regressor
return self
def predict(self, X):
"""Implements the standard predict function for a DL8.5 regressor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted value for each sample is the mean of the closest samples seen during fit.
"""
return DL85Predictor.predict(self, X)
|
{"hexsha": "fd3b24209d117c4f77fe8b9662ceb76414dca280", "size": 6104, "ext": "py", "lang": "Python", "max_stars_repo_path": "dl85/supervised/regressors/regressor.py", "max_stars_repo_name": "valentinlemaire/pydl8.5", "max_stars_repo_head_hexsha": "a846f3c36bacbbe01ff87c31413342069b0cf61b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dl85/supervised/regressors/regressor.py", "max_issues_repo_name": "valentinlemaire/pydl8.5", "max_issues_repo_head_hexsha": "a846f3c36bacbbe01ff87c31413342069b0cf61b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dl85/supervised/regressors/regressor.py", "max_forks_repo_name": "valentinlemaire/pydl8.5", "max_forks_repo_head_hexsha": "a846f3c36bacbbe01ff87c31413342069b0cf61b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.488372093, "max_line_length": 178, "alphanum_fraction": 0.625, "include": true, "reason": "import numpy", "num_tokens": 1369}
|
import numpy as np
from collections import OrderedDict
from rlcard.envs import Env
from rlcard.games.blackjack import Game
DEFAULT_GAME_CONFIG = {
'game_num_players': 1,
}
class BlackjackEnv(Env):
''' Blackjack Environment
'''
def __init__(self, config):
''' Initialize the Blackjack environment
'''
self.name = 'blackjack'
self.default_game_config = DEFAULT_GAME_CONFIG
self.game = Game()
super().__init__(config)
self.rank2score = {"A":11, "2":2, "3":3, "4":4, "5":5, "6":6, "7":7, "8":8, "9":9, "T":10, "J":10, "Q":10, "K":10}
self.actions = ['hit', 'stand']
self.state_shape = [[2] for _ in range(self.num_players)]
self.action_shape = [None for _ in range(self.num_players)]
def _get_legal_actions(self):
''' Get all leagal actions
Returns:
encoded_action_list (list): return encoded legal action list (from str to int)
'''
encoded_action_list = []
for i in range(len(self.actions)):
encoded_action_list.append(i)
return encoded_action_list
def _extract_state(self, state):
''' Extract the state representation from state dictionary for agent
Args:
state (dict): Original state from the game
Returns:
observation (list): combine the player's score and dealer's observable score for observation
'''
cards = state['state']
my_cards = cards[0]
dealer_cards = cards[1]
def get_scores_and_A(hand):
score = 0
has_a = 0
for card in hand:
score += self.rank2score[card[1:]]
if card[1] == 'A':
has_a = 1
if score > 21 and has_a == 1:
score -= 10
return score, has_a
my_score, _ = get_scores_and_A(my_cards)
dealer_score, _ = get_scores_and_A(dealer_cards)
obs = np.array([my_score, dealer_score])
legal_actions = OrderedDict({i: None for i in range(len(self.actions))})
extracted_state = {'obs': obs, 'legal_actions': legal_actions}
extracted_state['raw_obs'] = state
extracted_state['raw_legal_actions'] = [a for a in self.actions]
extracted_state['action_record'] = self.action_recorder
return extracted_state
def get_payoffs(self):
''' Get the payoff of a game
Returns:
payoffs (list): list of payoffs
'''
payoffs = []
for i in range(self.num_players):
if self.game.winner['player' + str(i)] == 2:
payoffs.append(1) # Dealer bust or player get higher score than dealer
elif self.game.winner['player' + str(i)] == 1:
payoffs.append(0) # Dealer and player tie
else:
payoffs.append(-1) # Player bust or Dealer get higher score than player
return np.array(payoffs)
def _decode_action(self, action_id):
''' Decode the action for applying to the game
Args:
action id (int): action id
Returns:
action (str): action for the game
'''
return self.actions[action_id]
|
{"hexsha": "e0797a542775e26c25f5c7447276198bd122ffad", "size": 3269, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlcard/envs/blackjack.py", "max_stars_repo_name": "syntaxp/rlcard", "max_stars_repo_head_hexsha": "3dbccfd9a046f0ccb0996bc2bb83969fb553d024", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1735, "max_stars_repo_stars_event_min_datetime": "2019-09-05T12:49:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:02:07.000Z", "max_issues_repo_path": "rlcard/envs/blackjack.py", "max_issues_repo_name": "hsywhu/rlcard", "max_issues_repo_head_hexsha": "963cf6886dfaf5f089e9c8d0039a1dbff87aca6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 197, "max_issues_repo_issues_event_min_datetime": "2019-09-14T05:59:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T19:21:19.000Z", "max_forks_repo_path": "rlcard/envs/blackjack.py", "max_forks_repo_name": "hsywhu/rlcard", "max_forks_repo_head_hexsha": "963cf6886dfaf5f089e9c8d0039a1dbff87aca6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 476, "max_forks_repo_forks_event_min_datetime": "2019-09-13T15:25:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T01:41:29.000Z", "avg_line_length": 32.0490196078, "max_line_length": 122, "alphanum_fraction": 0.5757112267, "include": true, "reason": "import numpy", "num_tokens": 764}
|
'''
Defines a scalar field over a grid
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
'''
from typing import (List, TypeVar, Iterator, Union, Optional, # @UnusedImport
TYPE_CHECKING)
from pathlib import Path
import numpy as np
from .base import DataFieldBase
from ..grids import UnitGrid, CartesianGrid
from ..grids.base import GridBase
from ..tools.expressions import ScalarExpression
if TYPE_CHECKING:
from ..grids.boundaries.axes import BoundariesData # @UnusedImport
from .vectorial import VectorField # @UnusedImport
class ScalarField(DataFieldBase):
""" Single scalar field on a grid
Attributes:
grid (:class:`~pde.grids.GridBase`):
The underlying grid defining the discretization
data (:class:`np.ndarray`):
Scalar values at the support points of the grid
label (str):
Name of the field
"""
rank = 0
@classmethod
def from_expression(cls, grid: GridBase, expression: str,
label: str = None) -> "ScalarField":
""" create a scalar field on a grid from a given expression
Args:
grid (:class:`~pde.grids.GridBase`):
Grid defining the space on which this field is defined
expression (str):
Mathematical expression for the scalar value as a function of
the position on the grid. The expression may contain standard
mathematical functions and it may depend on the axes labels of
the grid.
label (str, optional):
Name of the field
"""
expr = ScalarExpression(expression=expression, signature=grid.axes)
points = {name: grid.cell_coords[..., i]
for i, name in enumerate(grid.axes)}
return cls(grid=grid, data=expr(**points), label=label)
@classmethod
def from_image(cls, path: Union[Path, str], bounds=None, periodic=False,
label: str = None) -> "ScalarField":
""" create a scalar field from an image
Args:
path (:class:`Path` or str): The path to the image
bounds (tuple, optional): Gives the coordinate range for each axis.
This should be two tuples of two numbers each, which mark the
lower and upper bound for each axis.
periodic (bool or list): Specifies which axes possess periodic
boundary conditions. This is either a list of booleans defining
periodicity for each individual axis or a single boolean value
specifying the same periodicity for all axes.
label (str, optional):
Name of the field
"""
from matplotlib.pyplot import imread
# read image and convert to grayscale
data = imread(path)
if data.ndim == 2:
pass # is already gray scale
elif data.ndim == 3:
# convert to gray scale using ITU-R 601-2 luma transform:
weights = np.array([0.299, 0.587, 0.114])
data = data[..., :3] @ weights
else:
raise RuntimeError(f'Image data has wrong shape: {data.shape}')
# transpose data to use mathematical conventions for axes
data = data.T[:, ::-1]
# determine the associated grid
if bounds is None:
grid: GridBase = UnitGrid(data.shape, periodic=periodic)
else:
grid = CartesianGrid(bounds, data.shape, periodic=periodic)
return cls(grid, data, label=label)
@DataFieldBase._data_flat.setter # type: ignore
def _data_flat(self, value):
""" set the data from a value from a collection """
self._data = value[0]
def laplace(self, bc: "BoundariesData",
out: Optional['ScalarField'] = None,
label: str = 'laplace') -> 'ScalarField':
""" apply Laplace operator and return result as a field
Args:
bc: Gives the boundary conditions applied to fields that are
required for calculating the Laplacian.
out (ScalarField, optional): Optional scalar field to which the
result is written.
label (str, optional): Name of the returned field
Returns:
ScalarField: the result of applying the operator
"""
if out is not None:
assert isinstance(out, ScalarField)
laplace = self.grid.get_operator('laplace', bc=bc)
return self.apply(laplace, out=out, label=label)
# def solve_poisson(self, out: Optional['ScalarField']=None,
# label: str="solution to Poisson's equation"):
# r""" solve Poisson's equation with the current field as inhomogeneity.
#
# Denoting the current field by :math:`x`, we thus solve for :math:`y`,
# defined by the equation
#
# .. math::
# \nabla^2 y(\boldsymbol r) = -x(\boldsymbol r)
#
#
# Args:
# out (ScalarField, optional): Optional scalar field to which the
# result is written.
# label (str, optional): Name of the returned field
#
# Returns:
# ScalarField: the result of applying the operator
# """
# solve_poisson = self.grid.get_operator('poisson_solver',
# bc='periodic')
# data = solve_poisson(self.data)
#
# if out is None:
# return ScalarField(self.grid, data, label=label)
# else:
# out.data = data
# if label:
# out.label = label
# return out
def gradient(self, bc: "BoundariesData",
out: Optional['VectorField'] = None,
label: str = 'gradient') -> 'VectorField':
""" apply gradient operator and return result as a field
Args:
bc: Gives the boundary conditions applied to fields that are
required for calculating the gradient.
out (VectorField, optional): Optional vector field to which the
result is written.
label (str, optional): Name of the returned field
Returns:
VectorField: the result of applying the operator
"""
from .vectorial import VectorField # @Reimport
gradient = self.grid.get_operator('gradient', bc=bc)
if out is None:
out = VectorField(self.grid, gradient(self.data), label=label)
else:
assert isinstance(out, VectorField)
gradient(self.data, out=out.data)
return out
@property
def integral(self) -> float:
""" float: integral of the scalar field over space """
return self.grid.integrate(self.data)
def to_scalar(self, scalar: Union[str, int] = 'abs',
label: Optional[str] = None) -> "ScalarField":
""" return a modified scalar field by applying `method`
Args:
scalar (str or int): For scalar fields, only `abs` is supported.
label (str, optional): Name of the returned field
Returns:
ScalarField: the scalar result
"""
if scalar == 'abs' or scalar == 'norm':
data = np.abs(self.data)
elif scalar == 'squared_sum':
data = self.data**2
else:
raise ValueError(f'Unknown method `{scalar}` for `to_scalar`')
return ScalarField(grid=self.grid, data=data, label=label)
|
{"hexsha": "9fb50e626652d8614f026e5c5f095182282b0847", "size": 7840, "ext": "py", "lang": "Python", "max_stars_repo_path": "pde/fields/scalar.py", "max_stars_repo_name": "xuanxu/py-pde", "max_stars_repo_head_hexsha": "de33d938aea8680eff872ae1b64569895662a248", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pde/fields/scalar.py", "max_issues_repo_name": "xuanxu/py-pde", "max_issues_repo_head_hexsha": "de33d938aea8680eff872ae1b64569895662a248", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pde/fields/scalar.py", "max_forks_repo_name": "xuanxu/py-pde", "max_forks_repo_head_hexsha": "de33d938aea8680eff872ae1b64569895662a248", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6355140187, "max_line_length": 80, "alphanum_fraction": 0.5646683673, "include": true, "reason": "import numpy", "num_tokens": 1642}
|
import json
import numpy as np
import tables
import os
import pandas as pd
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter, QPen
from PyQt5.QtWidgets import QApplication, QMessageBox
from tierpsy.gui.SWTrackerViewer_ui import Ui_SWTrackerViewer
from tierpsy.gui.TrackerViewerAux import TrackerViewerAuxGUI
from tierpsy.analysis.int_ske_orient.correctHeadTailIntensity import createBlocks, _fuseOverlapingGroups
class EggWriter():
def __init__(self):
self.fname = os.path.expanduser(os.path.join('~', 'Desktop', 'egg_events_raw.txt'))
def add(self, vfilename, frame_number):
if vfilename is not None:
base_name = os.path.splitext(os.path.basename(vfilename))[0]
line = '\n{}\t{}'.format(base_name, frame_number)
with open(self.fname, 'a+') as fid:
fid.write(line)
def tag_bad(self):
with open(self.fname, 'a+') as fid:
fid.write('X')
def export(self):
if not os.path.exists(self.fname):
return
tab = pd.read_table(self.fname, header=None)
tab.columns = ['base_name', 'frame_number']
tab_g = tab.groupby('base_name')
fexport= os.path.expanduser(os.path.join('~', 'Desktop', 'egg_events.tsv'))
with open(fexport, 'w') as fid:
for base_name, dat in tab_g:
frame_numbers = []
for f in dat['frame_number'].values:
try:
frame_numbers.append(int(f))
except:
pass
if frame_numbers:
frame_numbers = sorted(set(frame_numbers))
line = '\t'.join([base_name] + list(map(str, frame_numbers))) + '\n'
fid.write(line)
class SWTrackerViewer_GUI(TrackerViewerAuxGUI):
def __init__(self, ui='', mask_file=''):
if not ui:
super().__init__(Ui_SWTrackerViewer())
else:
super().__init__(ui)
self.setWindowTitle("Single Worm Viewer")
self.skel_block = []
self.skel_block_n = 0
self.is_stage_move = []
self.is_feat_file = False
self.ui.spinBox_skelBlock.valueChanged.connect(self.changeSkelBlock)
self.ui.checkBox_showLabel.stateChanged.connect(self.updateImage)
if mask_file:
self.vfilename = mask_file
self.updateVideoFile()
self.egg_writer = EggWriter()
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename, possible_ext = ['_skeletons.hdf5', '_features.hdf5'])
self.updateImage()
# change frame number using the keys
def keyPressEvent(self, event):
# go the previous block
if event.key() == Qt.Key_BracketLeft:
self.ui.spinBox_skelBlock.setValue(self.skel_block_n - 1)
# go to the next block
elif event.key() == Qt.Key_BracketRight:
self.ui.spinBox_skelBlock.setValue(self.skel_block_n + 1)
elif event.key() == Qt.Key_Semicolon:
if self.ui.checkBox_showLabel.isChecked():
self.ui.checkBox_showLabel.setChecked(0)
else:
self.ui.checkBox_showLabel.setChecked(1)
elif event.key() == Qt.Key_E:
self.egg_writer.add(self.vfilename, self.frame_number)
elif event.key() == Qt.Key_X:
self.egg_writer.tag_bad()
super().keyPressEvent(event)
def updateSkelFile(self, skel_file, dflt_skel_size = 10):
super().updateSkelFile(skel_file)
self.ui.spinBox_skelBlock.setMaximum(max(len(self.skel_block) - 1, 0))
self.ui.spinBox_skelBlock.setMinimum(0)
if self.skel_block_n != 0:
self.skel_block_n = 0
self.ui.spinBox_skelBlock.setValue(0)
else:
self.changeSkelBlock(0)
self.skel_block = []
self.is_stage_move = []
self.is_feat_file = False
VALID_ERRORS = (IOError, KeyError, tables.exceptions.HDF5ExtError, tables.exceptions.NoSuchNodeError)
#try to read the information from the features file if possible
if not self.trajectories_data is None:
try:
# I am reading an skeleton file, so there is information about the intensity blocks
with tables.File(self.skeletons_file, 'r') as fid:
#only used for skeletons, and to test the head/tail orientation. I leave it but probably should be removed for in the future
prov_str = fid.get_node('/provenance_tracking/INT_SKE_ORIENT').read()
func_arg_str = json.loads(
prov_str.decode("utf-8"))['func_arguments']
gap_size = json.loads(func_arg_str)['gap_size']
good = (self.trajectories_data['int_map_id'] > 0).values
has_skel_group = createBlocks(good, min_block_size=0)
if len(has_skel_group) > 0:
self.skel_block = _fuseOverlapingGroups(
has_skel_group, gap_size=gap_size)
except VALID_ERRORS:
self.skel_block = []
else:
try:
if self.stage_position_pix is None:
if '/stage_position_pix' in self.fid:
self.stage_position_pix = self.fid.get_node('/stage_position_pix')[:]
else:
n_frames = self.fid.get_node('/mask').shape[0]
self.stage_position_pix = np.full((n_frames,2), np.nan)
timestamp = self.fid.get_node('/timestamp/raw')[:]
with pd.HDFStore(self.skeletons_file, 'r') as ske_file_id:
#this could be better so I do not have to load everything into memory, but this is faster
self.trajectories_data = ske_file_id['/features_timeseries']
if self.trajectories_data['worm_index'].unique().size !=1:
QMessageBox.critical(
self,
'',
"There is more than one worm index. This file does not seem to have been analyzed with the WT2 option.",
QMessageBox.Ok
)
raise KeyError()
good = self.trajectories_data['timestamp'].isin(timestamp)
self.trajectories_data = self.trajectories_data[good]
self.trajectories_data.sort_values(by='timestamp', inplace=True)
if np.any(self.trajectories_data['timestamp'] < 0) or np.any(self.trajectories_data['timestamp'].isnull()):
QMessageBox.critical(
self,
'',
'There are invalid values in the timestamp. I cannot get the stage movement information.',
QMessageBox.Ok)
raise KeyError()
first_frame = np.where(timestamp==self.trajectories_data['timestamp'].min())[0][0]
last_frame = np.where(timestamp==self.trajectories_data['timestamp'].max())[0][0]
self.trajectories_data['frame_number'] = np.arange(first_frame, last_frame+1, dtype=np.int)
self.trajectories_data['skeleton_id'] = self.trajectories_data.index
self.traj_time_grouped = self.trajectories_data.groupby('frame_number')
self.is_feat_file = True
except VALID_ERRORS:
self.trajectories_data = None
self.traj_time_grouped = None
self.is_feat_file = False
if self.stage_position_pix is not None:
self.is_stage_move = np.isnan(self.stage_position_pix[:, 0])
self.updateImage()
def drawSkelSingleWorm(self):
frame_data = self.getFrameData(self.frame_number)
if frame_data is None:
return
row_data = frame_data.squeeze()
#for this viewer there must be only one particle per frame
if len(row_data) == 0:
return
isDrawSkel = self.ui.checkBox_showLabel.isChecked()
skel_id = int(row_data['skeleton_id'])
if isDrawSkel and skel_id >= 0:
self.frame_qimg = self.drawSkelResult(self.frame_img, self.frame_qimg, row_data, isDrawSkel)
return self.frame_qimg
def updateImage(self):
self.readCurrentFrame()
self.drawSkelSingleWorm()
#draw stage movement if necessary
if len(self.is_stage_move) > 0 and self.is_stage_move[self.frame_number]:
self.frame_qimg = self._drawRect(self.frame_qimg)
self.mainImage.setPixmap(self.frame_qimg)
def _drawRect(self, qimg):
painter = QPainter()
painter.begin(qimg)
pen = QPen()
pen_width = 3
pen.setWidth(pen_width)
pen.setColor(Qt.red)
painter.setPen(pen)
dw = qimg.width() - pen_width
dh = qimg.height() - pen_width
painter.drawRect(
1,
1,
dw,
dh)
painter.end()
return qimg
def changeSkelBlock(self, val):
self.skel_block_n = val
if len(self.skel_block) > 0:
self.ui.label_skelBlock.setText(
'Block limits: %i-%i' %
(self.skel_block[
self.skel_block_n]))
# move to the frame where the block starts
self.ui.spinBox_frame.setValue(
self.skel_block[self.skel_block_n][0])
else:
self.ui.label_skelBlock.setText('')
def closeEvent(self, event):
self.egg_writer.export()
super().closeEvent(event)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
ui = SWTrackerViewer_GUI()
ui.show()
sys.exit(app.exec_())
|
{"hexsha": "dbbbcdfcc308ac5467521248c6cc9046a7149e43", "size": 10137, "ext": "py", "lang": "Python", "max_stars_repo_path": "tierpsy/gui/SWTrackerViewer.py", "max_stars_repo_name": "mgh17/tierpsy-tracker", "max_stars_repo_head_hexsha": "a18c06aa80a5fb22fd51563d82c639b520742777", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-01-11T10:49:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T15:48:00.000Z", "max_issues_repo_path": "tierpsy/gui/SWTrackerViewer.py", "max_issues_repo_name": "mgh17/tierpsy-tracker", "max_issues_repo_head_hexsha": "a18c06aa80a5fb22fd51563d82c639b520742777", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-05-08T15:43:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T10:19:24.000Z", "max_forks_repo_path": "tierpsy/gui/SWTrackerViewer.py", "max_forks_repo_name": "mgh17/tierpsy-tracker", "max_forks_repo_head_hexsha": "a18c06aa80a5fb22fd51563d82c639b520742777", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-12-18T12:10:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T09:12:47.000Z", "avg_line_length": 37.2683823529, "max_line_length": 144, "alphanum_fraction": 0.5733451712, "include": true, "reason": "import numpy", "num_tokens": 2159}
|
'''
Created on Mar 30, 2015
@author: Ming Jiang and Jean-Luc Starck
CLASS FUNCTION class starlet2d()
Allow to perform a starlet transform, manipulate it (visualisation, thresholding, statistics, etc),
and to reconstruct.
If pysap is installed, then the pysparse module should be available and
the code will used C++ binding for fast calculation.
Otherwise full python code is used.
Details of the starlet transform can be found in
J.L. Starck, F. Murtagh, and J. Fadili,
Sparse Image and Signal Processing: Wavelets and
Related Geometric Multiscale Analysis,
Cambridge University Press, Cambridge (GB), 2016.
or
J.-L. Starck, J. Fadili and F. Murtagh,
"The Undecimated Wavelet Decomposition and its Reconstruction",
IEEE Transaction on Signal Processing , 16, 2, pp 297--309, 2007.
Example how to use the Class:
CW = starlet2d() # Create the class
CW.transform(Image) # Starlet transform of a 2D np array
CW.stat() # print statistics of all scales
r = CW.recons() # reconstruct an image from its coefficients
more examples are given at the end of this file.
'''
import numpy as np
import scipy.signal as psg
# import pcosmostat.sparsity.sparse2d.param as pm
from pycs.misc.cosmostat_init import *
from pycs.misc.stats import *
import sys
import imp
PYSAP_CXX = True
try:
import pysparse
# imp.find_module('pysparse')
except ImportError:
PYSAP_CXX=False
#if 'pysparse' in sys.modules:
# import pysparse
# PYSAP_CXX = True
if PYSAP_CXX is False:
print("Warning in starlet.py: do not find pysap bindings ==> use slow python code. ")
#print("PYSAP_CXX = ", PYSAP_CXX)
def test_ind(ind,N):
"""
function to handle the border using a mirror effect.
If the index is < 0 or >= N, where N is the size of image in one direction,
it returns the correct index in [0,N-1], using mirror effect.
Parameters
----------
ind : TYPE
DESCRIPTION.
N : TYPE
DESCRIPTION.
Returns
-------
res : TYPE
DESCRIPTION.
"""
res = ind
if ind < 0 :
res = -ind
if res >= N:
res = 2*N - 2 - ind
if ind >= N :
res = 2*N - 2 - ind
if res < 0:
res = -ind
return res
def b3splineTrans(im_in,step):
"""
Apply a 2d B-spline smmothing to an image, using holes in the smoothing
kernel (a-trous algorithm)
Parameters
----------
im_in : np.ndarray
input image.
step : int
the hole size.
Returns
-------
im_out : 2D np.ndarray
smoothed image.
"""
(nx,ny) = np.shape(im_in)
im_out = np.zeros((nx,ny))
c1 = 1./16
c2 = 1./4
c3 = 3./8
buff = np.zeros((nx,ny))
for i in np.arange(nx):
for j in np.arange(ny):
jl = test_ind(j-step,ny)
jr = test_ind(j+step,ny)
jl2 = test_ind(j-2*step,ny)
jr2 = test_ind(j+2*step,ny)
buff[i,j] = c3 * im_in[i,j] + c2 * (im_in[i,jl] + im_in[i,jr]) + c1 * (im_in[i,jl2] + im_in[i,jr2])
for j in np.arange(ny):
for i in np.arange(nx):
il = test_ind(i-step,nx)
ir = test_ind(i+step,nx)
il2 = test_ind(i-2*step,nx)
ir2 = test_ind(i+2*step,nx)
im_out[i,j] = c3 * buff[i,j] + c2 * (buff[il,j] + buff[ir,j]) + c1 * (buff[il2,j] + buff[ir2,j])
return im_out
def b3spline_fast(step):
"""
Kernel computation for fast smoothing using convolve2d function
Parameters
----------
step : TYPE
the hole size.
Returns
-------
kernel2d : 2D np.ndarray
calculated kernel.
"""
step_hole = int(step)
c1 = 1./16.
c2 = 1./4.
c3 = 3./8.
length = int(4*step_hole+1)
kernel1d = np.zeros((1,length))
kernel1d[0,0] = c1
kernel1d[0,-1] = c1
kernel1d[0,step_hole] = c2
kernel1d[0,-1-step_hole] = c2
kernel1d[0,2*step_hole] = c3
kernel2d = np.dot(kernel1d.T,kernel1d)
return kernel2d
def star2d(im, scale, gen2=False, bord=1, nb_procs=0, fast=True, verb=0):
"""
Routie to calculate the 1st and 2nd generation starlet transform.
if the global variable PYSAP_CXX is True, a C++ code will be used through
binding for this calculation.
Parameters
----------
im : 2D np.ndarray
input image.
scale : int.
number of scales.
gen2 : bool, optional
if True, performs the second generation starlet transform
bord : int, optional
Type of border used to handle the border effect. The default is 1.
this parameter is only used if the C++ pysap code is available.
nb_procs : int, optional
Numper of preocessor to use. Only used if the C++ pysap code is available
and if openmp is available. The default is 0.
fast : bool, optional
for python implementation only. If true, the convolve2d routine is used,
which is faster. The default is True.
verb : bool, optional
Verbose mode. The default is 0.
Returns
-------
3D np.ndarray
output wavelet transform [0:scale,0:nx,0:ny]
"""
# print ('IN STAR2D 2')
(nx,ny) = np.shape(im)
nz = scale
# Normalized transfromation
if PYSAP_CXX is True:
# print("BINDING: ", head, ", norm = ", l2norm)
# verb=1
ima = np.zeros((nx,ny))
ima[:,:]=im
psWT = pysparse.MRStarlet(bord, gen2, nb_procs,verb)
wl = psWT.transform(ima.astype(np.float),nz)
wt = (np.stack(wl)).astype(np.double)
else:
wt = np.zeros((nz,nx,ny))
step_hole = int(1)
im_in = np.copy(im)
for i in np.arange(nz-1):
if fast:
kernel2d = b3spline_fast(step_hole)
im_out = psg.convolve2d(im_in, kernel2d, boundary='symm',mode='same')
else:
im_out = b3splineTrans(im_in,step_hole)
if gen2:
if fast:
im_aux = psg.convolve2d(im_out, kernel2d, boundary='symm',mode='same')
else:
im_aux = b3splineTrans(im_out,step_hole)
wt[i,:,:] = im_in - im_aux
else:
wt[i,:,:] = im_in - im_out
im_in = np.copy(im_out)
step_hole *= 2
wt[nz-1,:,:] = np.copy(im_out)
return wt
def istar2d(wt, gen2=True, bord=0, nb_procs=0, fast=True, verb=0):
"""
Routie to calculate the 1st and 2nd generation incerse starlet transform.
if the global variable PYSAP_CXX is True, a C++ code will be used through
binding for this calculation.
Parameters
----------
wt : 3D np.ndarray
input wavelet transform.
gen2 : bool, optional
if True. assume the second generation starlet reconstruction.
bord : int, optional
Type of border used to handle the border effect. The default is 1.
this parameter is only used if the C++ pysap code is available.
nb_procs : int, optional
Numper of preocessor to use. Only used if the C++ pysap code is available
and if openmp is available. The default is 0.
fast : bool, optional
for python implementation only. If true, the convolve2d routine is used,
which is faster. The default is True.
verb : bool, optional
Verbose mode. The default is 0.
Returns
-------
2D np.ndarray:
Reconstructed image
"""
(nz,nx,ny) = np.shape(wt)
# PYSAP_CXX=0
if PYSAP_CXX is True:
# print("RECBINDING: ", head, ", norm = ", l2norm)
dat_list = []
for s in range(nz):
dat_list.append( wt[s,:,:].astype(np.float))
psWT = pysparse.MRStarlet(bord, gen2, nb_procs,verb)
imRec = (psWT.recons(dat_list)).astype(np.double)
else:
# trans = 1 if gen2 else 2
if gen2:
'''
h' = h, g' = Dirac
'''
step_hole = int(pow(2,nz-2))
imRec = np.copy(wt[nz-1,:,:])
for k in np.arange(nz-2,-1,-1):
if fast:
kernel2d = b3spline_fast(step_hole)
im_out = psg.convolve2d(imRec, kernel2d, boundary='symm',mode='same')
else:
im_out = b3splineTrans(imRec,step_hole)
imRec = im_out + wt[k,:,:]
step_hole /= 2
else:
'''
h' = Dirac, g' = Dirac
'''
# imRec = np.sum(wt,axis=0)
'''
h' = h, g' = Dirac + h
'''
imRec = np.copy(wt[nz-1,:,:])
step_hole = int(pow(2,nz-2))
for k in np.arange(nz-2,-1,-1):
if fast:
kernel2d = b3spline_fast(step_hole)
imRec = psg.convolve2d(imRec, kernel2d, boundary='symm',mode='same')
im_out = psg.convolve2d(wt[k,:,:], kernel2d, boundary='symm',mode='same')
else:
imRec = b3splineTrans(imRec,step_hole)
im_out = b3splineTrans(wt[k,:,:],step_hole)
imRec += wt[k,:,:]+im_out
step_hole /= 2
return imRec
def adstar2d(wtOri, gen2=True, bord=0, nb_procs=0, fast=True, verb=0):
"""
Routine to calculate the 1st and 2nd generation adjoint starlet operator.
if the global variable PYSAP_CXX is True, a C++ code will be used through
binding for this calculation. This routine is generally used when the gradient
of a functional involving a starlet transform operator is required.
Parameters
----------
wtOri : 3D np.ndarray
input wavelet transform.
gen2 : bool, optional
if True. assume the second generation starlet reconstruction.
bord : int, optional
Type of border used to handle the border effect. The default is 1.
this parameter is only used if the C++ pysap code is available.
nb_procs : int, optional
Numper of preocessor to use. Only used if the C++ pysap code is available
and if openmp is available. The default is 0.
fast : bool, optional
for python implementation only. If true, the convolve2d routine is used,
which is faster. The default is True.
verb : bool, optional
Verbose mode. The default is 0.
Returns
-------
2D np.ndarray:
Reconstructed image
"""
(nz,nx,ny) = np.shape(wtOri)
wt = np.copy(wtOri)
if PYSAP_CXX is True:
# print("BINDING")
dat_list = []
for s in range(nz):
dat_list.append((wt[s,:,:]).astype(float))
psWT = pysparse.MRStarlet(bord, gen2, nb_procs, verb)
imRec = (psWT.recons(dat_list,True)).astype(double)
else:
# print("NO BINDING")
# Unnormalization step
# !Attention: wt is not the original wt after unnormalization
imRec = np.copy(wt[nz-1,:,:])
step_hole = pow(2,nz-2)
for k in np.arange(nz-2,-1,-1):
if fast:
kernel2d = b3spline_fast(step_hole)
imRec = psg.convolve2d(imRec, kernel2d, boundary='symm',mode='same')
im_out = psg.convolve2d(wt[k,:,:], kernel2d, boundary='symm',mode='same')
if gen2:
im_out2 = psg.convolve2d(im_out, kernel2d, boundary='symm',mode='same')
imRec += wt[k,:,:] -im_out2
else: imRec += wt[k,:,:] -im_out
else:
imRec = b3splineTrans(imRec,step_hole)
im_out = b3splineTrans(wt[k,:,:],step_hole)
if gen2:
im_out2 = b3splineTrans(im_out,step_hole)
imRec += wt[k,:,:] -im_out2
else: imRec += wt[k,:,:]-im_out
step_hole /= 2
return imRec
#==========================================================================
#======================= Beginning of the STARLET CLASS ==================
#==========================================================================
class starlet2d():
"""
Class for the starlet decomposition and reconstruction
"""
name = "wt" # name of the class
gen2 = True # if true, it will the second genereal starlet transform
l2norm=False # if true, consider a l2 normalisation
nx=0 # image size first axis
ny=0 # image size second axis
ns=0 # number of scales
coef=0. # Starlet coefficients
TabNorm=0. # Coefficient normalixation table
SigmaNoise = 1. # noise standard deviation
TabNsigma = 0 # detection level per scale
Starlet_Gen1TabNorm =0 # Normalization table for the first generation starlet transform
# __init__ is the constructor
def __init__(self, name='wt', gen2=True,l2norm=True, bord=1, verb=False, nb_procs=0):
"""
Constructor
Parameters
----------
name : string, optional
name of transform. Used when information is printed. The default is 'wt'.
gen2 : bool, optional
if True. assume the second generation starlet reconstruction.
l2norm : bool, optional
if True, assume a l2 normalisation of the wavelet coefficients.
bord : int, optional
Type of border used to handle the border effect. The default is 1.
this parameter is only used if the C++ pysap code is available.
# case 0: bord = I_ZERO;
# case 1: bord = I_CONT;
# case 2: bord = I_MIRROR;
# case 3: bord = I_PERIOD;
nb_procs : int, optional
Numper of preocessor to use. Only used if the C++ pysap code is available
and if openmp is available. The default is 0.
verb : bool, optional
Returns
-------
None.
"""
self.name = name # self.name is an object variable
self.gen2=gen2
self.l2norm=l2norm
self.verb=verb
self.nb_procs=nb_procs
self.bord=bord
def get_gen1_starlet_tabnorm(self):
"""
Compute the normalisation coefficients at each scale of the firast generation
starlet transform.
Returns
-------
tabNs : TYPE
DESCRIPTION.
"""
im = np.zeros((self.nx,self.ny))
im = im.astype('float64')
im[int(self.nx/2),int(self.ny/2)] = np.float64(1.)
wt = star2d(im,self.ns,gen2=False)
tmp = wt**2
tabNs = np.sqrt(np.sum(np.sum(tmp,1),1))
return tabNs
def init_starlet(self, nx, ny, nscale=0):
"""
Initialize the scale for a given image size and a number of scales.
Parameters
----------
nx, ny : int
image size.
nscale : int, optional
Number of wavelet scales. The default is 0.
If it is 0, the numnber of scales is fixed to
log( MIN([nx,ny]))
Returns
-------
None.
"""
self.nx = int(nx)
self.ny = int(ny)
if nscale == 0:
mins = np.min( [nx,ny])
nscale = int(np.log(mins) // 1)
self.ns = int(nscale)
self.Starlet_Gen1TabNorm = self.get_gen1_starlet_tabnorm()
if self.l2norm:
self.TabNorm = np.ones(self.ns, dtype=float)
else:
self.TabNorm = self.get_gen1_starlet_tabnorm()
# for pysparse
self.nb_procs=0
def info(self): # sound is a method (a method is a function of an object)
"""
Print information relative to the intialisation.
"""
print(self.name, ": Nx = ", self.nx, ", Ny = ", self.ny, ", Ns = ", self.ns)
if self.gen2:
print("starlet 2nd generation")
else:
print("starlet 1st generation")
if self.l2norm:
print("l2 normalisation")
else:
print("l1 normalisation")
# print("Coef TabSize = ", np.shape(self.coef))
def stat(self):
"""
Print Min, Max, Mean and standard deviation of all scales.
Parameters
----------
None.
Returns
-------
None.
"""
print(self.name, ": Nx = ", self.nx, ", Ny = ", self.ny, ", Ns = ", self.ns)
for j in range(self.ns):
s = (self.coef)[j]
print("%s Scale %2d: Min = %f, Max = %f, Mean = %f, std = %f" % (self.name, j+1,s.min(), s.max(), s.mean(), s.std()))
# def transform(im,nscale,gen2=self.gen2,normalization=self.l2norm):
def transform(self, im, WTname=None):
"""
Apply the starlet transform to image. Coeffients are stored in
self.coef[:,:,:]. self.coef[s,:,:] is the wavelet scale at scale s.
See class routines get_scale, get_ptr_scale, put_scale to manipulate
the coefficients.
Parameters
----------
im : 2D np.ndarray
input image..
WTname : string, optional
Name given to the decomposition. The default is None.
Returns
-------
None.
"""
(Nx,Ny) = im.shape
if self.ns <=1 or self.nx != Nx or self.ny != Ny :
self.init_starlet(Nx, Ny, nscale=0)
if WTname is not None:
self.name = WTname
self.coef = star2d(im, self.ns, self.gen2, self.bord, self.nb_procs, True, self.verb)
if self.l2norm:
for i in np.arange(self.ns):
self.coef[i,:,:] /= self.Starlet_Gen1TabNorm[i]
def recons(self, adjoint=False):
"""
Reconstruct an image from its calculated starlet coefficients.
Parameters
----------
adjoint : bool, optional
If true, used the adjoint operator instead of the exact reconstruction one.
The default is False.
Returns
-------
rec : 2D np.ndarray
Reconstructed image.
"""
wt = np.copy(self.coef)
if self.l2norm:
for i in np.arange(self.ns):
wt[i,:,:] *= self.Starlet_Gen1TabNorm[i]
if adjoint:
rec = adstar2d(wt,gen2=self.gen2,bord=self.bord, nb_procs=self.nb_procs, fast=True, verb=self.verb)
else:
rec = istar2d(wt, gen2=self.gen2,bord=self.bord, nb_procs=self.nb_procs, fast=True, verb=self.verb)
return rec
def denoising(self, Image, SigmaNoise=0, Nsigma=3,ThresCoarse=False, hard=True):
"""
Do a denoising of the input image, by taking the wavelet decomposition,
thresholding it, and reconstructing the denoised image.
Parameters
----------
Image : 2D np.ndarray
DESCRIPTION.
SigmaNoise : float, optional
Standard deviation of the noise. Default is 0.
Nsigma: float, optional
Detection level. Defautl is 3 (.e. 3 SigmaNoise).
ThresCoarse : bool, optional
IF true the coarsest scale is removed. The default is False.
hard : bool, optional
Type of threshold, true for hard thresholding and false
for soft thresholding. The default is True.
Returns
-------
2D np.ndarray
Denoised image.
"""
if SigmaNoise == 0:
SigmaNoise = get_noise(Image)
self.SigmaNoise = SigmaNoise
self.transform(Image)
self.threshold(SigmaNoise=SigmaNoise, Nsigma=Nsigma, ThresCoarse=ThresCoarse, hard=hard)
return self.recons()
def pos_transform(self,im, nscale=0, Niter=100,fast=True,hard=False,den=False, KillCoarse=False, pos=True, SigmaNoise=0, Nsigma=3.,verb=False):
"""
Iterative method to make a decomposition on positive coefficients.
Coeffients are stored in self.coef[:,:,:].
See class routines get_scale, get_ptr_scale, put_scale to manipulate
the coefficients.
Parameters
----------
----------
im : 2D np.ndarray
input image.
hard : bool, optional
if True, use hard thresholding, and soft thresholding otherwise.
Default is False
den : bool, optional
if true, denoise also the coefficeints. Default is False
KillCoarse : bool, optional
IF true the coarsest scale is removed. The default is False.
fast : bool, optional
for python implementation only. If true, the convolve2d routine is used,
which is faster. The default is True.
pos: bool, optional
it true, keep only positive wavelet coefficients. Default is True.
SigmaNoise: float, optional
Standard deviation of the noise. Default is 0.
Nsigma: float, optional
Detection level. Defautl is 3 (.e. 3 SigmaNoise).
verb : bool, optional
Verbose mode. The default is 0.
Raises
------
ValueError
Can only be used if the number of scales > 1
Returns
-------
None.
"""
self.l2norm=True
(Nx,Ny) = im.shape
self.init_starlet(Nx, Ny, nscale=self.ns)
if self.ns <= 1:
raise ValueError('Number of scales must be > 1 ! '
'Input value = {} and is of type {}.'.format(nscale, type(nscale)))
rsd = np.copy(im)
self.transform(im)
mwt = self.coef.max()
# wt = np.copy(self.coef)
wt = np.zeros((self.ns,self.nx,self.ny))
for it in np.arange(Niter):
ld = mwt * (1. - (it+1.)/Niter)
if ld < 0:
ld = 0
if verb:
print ("Iter ", it, ": lambda="+str(ld), ", Resi = ", np.std(rsd))
self.transform(rsd)
wt += self.coef
if den:
if SigmaNoise != 0:
noise = mad(wt[0])
else:
noise = SigmaNoise
# print(noise)
hard_thresholding(wt,Nsigma*noise)
if hard:
hard_thresholding(wt,ld)
else:
soft_thresholding(wt,ld)
if pos is True:
wt[wt<0] = 0
if KillCoarse is True:
wt[self.ns-1,:,:] = 0
self.coef = np.copy(wt)
rec = self.recons()
# print (rec>=0).all()
rsd = im - rec
# fits.writeto('pstar2d'+str(it)+'.fits',rsd,clobber=True)
# print ((np.abs(rsd)).sum())
def get_scale(self, j):
"""
Return a copy of a given scale of the decomposition.
Parameters
----------
j : int
Scale number. It must be in [0:self.ns]
Returns
-------
Scale : 2D np.ndarray
jth wavelet scale of the decomposition.
"""
Scale = np.zeros((self.nx,self.ny))
Scale[:,:]=(self.coef)[j,:,:]
return Scale
def get_ptr_scale(self, j):
"""
Return a pointer to the jth scale. Modifying the return array will
impact the coefficients self.coef of the class.
Parameters
----------
j : int
Scale number. It must be in [0:self.ns]
Returns
-------
Scale : 2D np.ndarray
jth wavelet scale of the decomposition.
"""
return (self.coef)[j]
def put_scale(self, ScaleCoef, j):
"""
Replace the scale j in self.coef by the 2D array ScaleCoef.
Parameters
----------
ScaleCoef : 2D np.ndarray
New coefficients at scale j to be inserted in the class.
j : int
Scale number. It must be in [0:self.ns].
Returns
-------
None.
"""
self.coef[j,:,:] = ScaleCoef
def tvs(self, j):
"""
Display the scale j
Parameters
----------
j : int
Scale number. It must be in [0:self.ns].
Returns
-------
Window appearing showing scale j.
"""
s = self.get_ptr_scale(j)
tvilut(s)
def dump(self):
"""
Print all variable and function names of the class
Returns
-------
None.
"""
print(self.__dict__)
def get_noise(self):
"""
Estimate the noise in the data from the first wavelet scale
Returns
-------
SigmaNoise : float
estimated noise standard deviation.
"""
s = (self.coef)[0]
SigmaNoise = mad(s)
return SigmaNoise
def tvsl(self, j, SigmaNoise=0, Levels=[5]):
"""
Display the scale j, with contours corresponding to the noise detect
levels given in Levels. Several contour levels can be given.
Parameters
----------
j : int
Scale number. It must be in [0:self.ns].
Returns
-------
Window appearing showing scale j, with contours around structures
detected a specified levels.
"""
if SigmaNoise == 0:
SigmaNoise = self.get_noise()
# print(self.TabNorm)
# print("noise ", SigmaNoise)
Norm = self.TabNorm[j]
# print("norm = ", Norm)
TabLevels = np.array(Levels)
# print("TabLevels = ", TabLevels)
TabLevels = self.TabNorm[j] * SigmaNoise * np.array(Levels)
# print(TabLevels)
TabLevels[ TabLevels > (self.coef)[j].max()] = (self.coef)[j].max()
tvimacont((self.coef)[j], TabLevels, vmin=0, vmax=0, gamma=0.5, cmap='gist_stern')
def tvall(self, scales=None, multiview=False):
"""
Display a window with all scales.
Parameters
----------
scales : list, optional
selection of scales. The default is None.
multiview : int, optional
multiview. The default is False.
Returns
-------
None.
"""
tv_frames(self.coef, scales=None, multiview=False)
def get_tabsigma(self, nscale, Nsigma=3):
"""
Create the detection table TabNsigma[0:nsale-1], for diffent type
of calling.
By default, it is 4 at the finest scale at 3 at the others.
If Nsigma is an array small than the number of scales, the last value
of Nsigma is repeated.
exemple of call:
print(CLASS.get_tabsigma(4)) => array([4., 3., 3., 3.])
print(CLASS.get_tabsigma(4, Nsigma=[3,4]) => array([3, 4., 4., 4.])
Parameters
----------
nscale : int
number of scales.
Nsigma : int or 1D np.ndarray, optional
Detect level [per scale]. The default is [4,3,..,3]
Returns
-------
TabNsigma : 1D np.ndarray
Detection level per scale.
"""
TabNsigma = np.zeros(nscale)
for j in np.arange(nscale):
vssig = vsize(Nsigma)
if vssig[0] == 0:
TabNsigma[j] = Nsigma
if j==0:
TabNsigma[j] += 1
else:
if vssig[1] > j:
TabNsigma[j] = Nsigma[j]
else:
TabNsigma[j] = Nsigma[vssig[1]-1]
return TabNsigma
def threshold(self, SigmaNoise=0, Nsigma=3, ThresCoarse=False, hard=True, FirstDetectScale=0, KillCoarse=False, Verbose=False):
"""
Apply a hard or a soft thresholding on the coefficients self.coef
Parameters
----------
SigmaNoise : float, optional
Noise standard deviation. The default is 0.
If it is 0, it will be automatically estimated from the first scale.
Nsigma : 1D np.ndarray, optional
Detect level [per scale]. The default is [4,3,..,3]
ThresCoarse : bool, optional
If true the coarsest scale is also thresholded. The default is False.
hard : bool, optional
IF true, apply hard thresholding, and soft-thresholding otherwise.
The default is True.
FirstDetectScale : int, optional
Remove the first FirstDetectScale scales. The default is 0.
KillCoarse : bool, optional
IF true the coarsest scale is removed. The default is False.
Verbose : TYPE, optional
DESCRIPTION. The default is False.
Returns
-------
None.
"""
if ThresCoarse:
Last = self.ns
else:
Last = self.ns - 1
vs=vsize(SigmaNoise)
dim = vs[0]
if dim == 0:
if SigmaNoise == 0:
SigmaNoise = self.get_noise()
self.SigmaNoise = SigmaNoise
if Verbose:
print("SigmaNoise = ", SigmaNoise, ", vsize(SigmaNoise) = ", vs)
TabNsigma = self.get_tabsigma(self.ns, Nsigma=Nsigma)
if Verbose:
print("TabNsigma = ", TabNsigma)
for j in np.arange(Last):
s = self.get_ptr_scale(j)
if dim == 0:
Thres = SigmaNoise * TabNsigma[j] * self.TabNorm[j]
elif dim == 1:
Thres = SigmaNoise[j] * TabNsigma[j]
elif dim == 2:
Thres = SigmaNoise * TabNsigma[j] * self.TabNorm[j]
else:
# print(SigmaNoise.shape)
Nsig = TabNsigma[j]
Thres = SigmaNoise[j,:,:] * Nsig
self.TabNsigma = TabNsigma
if hard:
hard_thresholding(s,Thres)
else:
soft_thresholding(s,Thres)
if Verbose:
print(" scale ",j+1, ", % of non zeros = ", np.count_nonzero(s) * 100. / float(self.nx * self.ny))
if FirstDetectScale > 0:
self.coef[0:FirstDetectScale,:,:] = 0.
if KillCoarse:
self.coef[self.ns - 1,:,:] = 0.
def copy(self, name="wt"):
"""
Duplicate the class, making copy of the coefficients.
Parameters
----------
name : TYPE, optional
DESCRIPTION. The default is "wt".
Returns
-------
NewClass : starlet2d
Copy of the class.
"""
x = self
x.name = name
x.coef=np.zeros((x.ns,x.nx,x.ny))
x.TabNorm= np.copy(self.TabNorm)
return x
################################ END CLASS ######################
if __name__ == '__main__':
print ( "Main :)")
i = readfits("/Users/starck/Main/python/data/ngc2997.fits")
#[1]
# PYSAP_CXX=True
# In[1]:
ns=5
testbinding=1
if testbinding:
print("TEST BINDING FUNCTION")
gen2=1
WT = pysparse.MRStarlet()
wl = WT.transform(i,ns)
w = np.stack(wl)
# WT.info()
dat_list = []
for s in range(5):
dat_list.append(w[s,:,:])
r = WT.recons(dat_list)
info(r-i, name=" => resi blinding")
if (r-i).std() < 1e-5:
print ("OK TEST BINDING FUNCTION")
else:
print ("Error in TEST BINDING FUNCTION")
print (" ")
# In[2]:
testroutines=1
if testroutines:
print("TEST routines starlets")
bord=2
gen2=True
verb=0
w = star2d(i, ns, gen2=gen2, bord=bord, verb=verb)
r = istar2d(w, gen2=gen2, bord=bord, verb=verb)
info(i-r, name=" ==> resi")
if (r-i).std() < 1e-5:
print ("OK TEST 1 routines starlets")
else:
print ("Error in TEST 1 routines starlets")
gen2=False
w = star2d(i, ns, gen2=gen2)
r = istar2d(w, gen2=gen2)
info(i-r, name=" ==> resi")
if (r-i).std() < 1e-5:
print ("OK TEST 2 routines starlets")
else:
print ("Error in TEST 2 routines starlets")
print (" ")
testclass=1
if testclass:
gen2=False
l2norm=False
CW= starlet2d(gen2=gen2, l2norm=l2norm, name="wt C")
CW.transform(i)
r = CW.recons()
info(i-r, name=" ==> resi")
if (r-i).std() < 1e-5:
print ("OK TEST 1 Class starlet(gen1,l1norm)")
else:
print ("Error in TEST 1 Class starlet")
n = np.random.normal(loc=0.0, scale=1., size=(256,256))
gen2=True
l2norm=True
CW= starlet2d(gen2=gen2, l2norm=l2norm, name="wt C2")
CW.transform(n, WTname='noise')
CW.stat()
r = CW.recons()
info(n-r, name=" ==> resi")
if (r-n).std() < 1e-5:
print ("OK TEST 1 Class starlet (l2norm,gen2)")
else:
print ("Error in TEST 1 Class starlet")
print (" ")
testdenoise=1
if testdenoise:
CW= starlet2d()
CW.transform(i)
r = CW.denoising(i)
info(i-r, name=" ==> resi")
s=CW.SigmaNoise
print( s)
if (r-i).std() < 1.5 * CW.SigmaNoise:
print ("OK TEST 1 Class denoise")
else:
print ("Error in TEST 1 Class denoise")
print (" ")
testpos=1
if testpos:
CW= starlet2d()
CW.pos_transform(i, verb=False, pos=True)
CW.info()
CW.stat()
r= CW.recons()
info(r,name='REC')
info(i-r, name=" ==> resi")
ra = (i-r).max()
if ra.max() < 1.:
print ("OK TEST Pos starlet")
else:
print ("Error in TEST Pos starlet")
print (" ")
#for s in range(CW.ns):
# CW.tvs(s)
testttv=0
if testttv:
print ("OK TEST TV 1")
CW= starlet2d()
CW.transform(i)
for s in range(CW.ns):
CW.tvs(s)
# CW.tvall()
print ("OK TEST TV 2 ")
# CW.tvall(multiview=True)
|
{"hexsha": "e2058dee5789f16c56bcd6cf6026ed0e0c6d19a4", "size": 35135, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycs/sparsity/sparse2d/starlet.py", "max_stars_repo_name": "sfarrens/cosmostat", "max_stars_repo_head_hexsha": "a475315cda06dca346095a1e83cb6ad23979acae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-09T05:03:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T10:20:02.000Z", "max_issues_repo_path": "pycs/sparsity/sparse2d/starlet.py", "max_issues_repo_name": "sfarrens/cosmostat", "max_issues_repo_head_hexsha": "a475315cda06dca346095a1e83cb6ad23979acae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-04-28T17:09:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T16:24:43.000Z", "max_forks_repo_path": "pycs/sparsity/sparse2d/starlet.py", "max_forks_repo_name": "sfarrens/cosmostat", "max_forks_repo_head_hexsha": "a475315cda06dca346095a1e83cb6ad23979acae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-22T07:53:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-10T19:59:53.000Z", "avg_line_length": 33.8487475915, "max_line_length": 148, "alphanum_fraction": 0.5163796784, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8980}
|
# encoding: utf-8
# ******************************************************
# Author : zzw922cn
# Last modified: 2017-12-09 11:00
# Email : zzw922cn@gmail.com
# Filename : ed.py
# Description : Calculating edit distance for Automatic Speech Recognition
# ******************************************************
import tensorflow as tf
import numpy as np
phn = ['aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h',
'axr', 'ay', 'b', 'bcl', 'ch', 'd', 'dcl',
'dh', 'dx', 'eh', 'el', 'em', 'en', 'eng',
'epi', 'er', 'ey', 'f', 'g', 'gcl', 'h#',
'hh', 'hv', 'ih', 'ix', 'iy', 'jh', 'k',
'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow',
'oy', 'p', 'pau', 'pcl', 'q', 'r', 's',
'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux',
'v', 'w', 'y', 'z', 'zh']
mapping = {'ux':'uw','axr':'er','em':'m','nx':'en','n':'en',
'eng':'ng','hv':'hh','cl':'sil','bcl':'sil','dcl':'sil',
'gcl':'sil','epi':'sil','h#':'sil','kcl':'sil','pau':'sil',
'pcl':'sil','tcl':'sil','vcl':'sil','l':'el','zh':'sh',
'aa':'ao','ix':'ih','ax':'ah'}
def group_phoneme(orig_phn,mapping):
group_phn = []
for val in orig_phn:
group_phn.append(val)
group_phn.append('sil')
for key in mapping.keys():
if key in orig_phn:
group_phn.remove(key)
group_phn.sort()
return group_phn
def list_to_sparse_tensor(targetList,mode='train'):
''' turn 2-D List to SparseTensor
'''
# NOTE: 'sil' is a new phoneme, you should care this.
indices = [] #index
vals = [] #value
group_phn = group_phoneme(phn,mapping)
for tI, target in enumerate(targetList):
for seqI, val in enumerate(target):
if(mode == 'train'):
indices.append([tI, seqI])
vals.append(val)
elif(mode == 'test'):
if(phn[val] in mapping.keys()):
val = group_phn.index(mapping[phn[val]])
indices.append([tI, seqI])
vals.append(val)
else:
raise ValueError("Invalid mode.",mode)
shape = [len(targetList), np.asarray(indices).max(0)[1]+1] #shape
return (np.array(indices), np.array(vals), np.array(shape))
def get_edit_distance(hyp_arr,truth_arr,mode='train'):
''' calculate edit distance
'''
graph = tf.Graph()
with graph.as_default():
truth = tf.sparse_placeholder(tf.int32)
hyp = tf.sparse_placeholder(tf.int32)
editDist = tf.edit_distance(hyp, truth, normalize=True)
with tf.Session(graph=graph) as session:
truthTest = list_to_sparse_tensor(truth_arr, mode)
hypTest = list_to_sparse_tensor(hyp_arr, mode)
feedDict = {truth: truthTest, hyp: hypTest}
dist = session.run(editDist, feed_dict=feedDict)
return dist
if __name__ == '__main__':
a=[[0,5,49]]
b=[[21,5,10]]
print(get_edit_distance(a,b,mode='test'))
print(len(phn))
print(len(mapping))
|
{"hexsha": "a2101ef9f8dc3ebd51a8d78d58fedb1f4dea34fb", "size": 3010, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/ed.py", "max_stars_repo_name": "HanSeokhyeon/Automatic_Speech_Recognition", "max_stars_repo_head_hexsha": "73b92e7b2b12e8b43294caa8eec5727d0ffc7a47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/ed.py", "max_issues_repo_name": "HanSeokhyeon/Automatic_Speech_Recognition", "max_issues_repo_head_hexsha": "73b92e7b2b12e8b43294caa8eec5727d0ffc7a47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-03-06T19:46:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:40:14.000Z", "max_forks_repo_path": "utils/ed.py", "max_forks_repo_name": "HanSeokhyeon/Automatic_Speech_Recognition", "max_forks_repo_head_hexsha": "73b92e7b2b12e8b43294caa8eec5727d0ffc7a47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5977011494, "max_line_length": 75, "alphanum_fraction": 0.5146179402, "include": true, "reason": "import numpy", "num_tokens": 873}
|
"""
MPO
A finite size matrix product operator type.
Keeps track of the orthogonality center.
"""
mutable struct MPO <: AbstractMPS
data::Vector{ITensor}
llim::Int
rlim::Int
end
function MPO(A::Vector{<:ITensor}; ortho_lims::UnitRange=1:length(A))
return MPO(A, first(ortho_lims) - 1, last(ortho_lims) + 1)
end
set_data(A::MPO, data::Vector{ITensor}) = MPO(data, A.llim, A.rlim)
MPO() = MPO(ITensor[], 0, 0)
function convert(::Type{MPS}, M::MPO)
return MPS(data(M); ortho_lims=ortho_lims(M))
end
function convert(::Type{MPO}, M::MPS)
return MPO(data(M); ortho_lims=ortho_lims(M))
end
function MPO(::Type{ElT}, sites::Vector{<:Index}) where {ElT<:Number}
N = length(sites)
v = Vector{ITensor}(undef, N)
if N == 0
return MPO()
elseif N == 1
v[1] = emptyITensor(ElT, dag(sites[1]), sites[1]')
return MPO(v)
end
space_ii = all(hasqns, sites) ? [QN() => 1] : 1
l = [Index(space_ii, "Link,l=$ii") for ii in 1:(N - 1)]
for ii in eachindex(sites)
s = sites[ii]
if ii == 1
v[ii] = emptyITensor(ElT, dag(s), s', l[ii])
elseif ii == N
v[ii] = emptyITensor(ElT, dag(l[ii - 1]), dag(s), s')
else
v[ii] = emptyITensor(ElT, dag(l[ii - 1]), dag(s), s', l[ii])
end
end
return MPO(v)
end
MPO(sites::Vector{<:Index}) = MPO(Float64, sites)
"""
MPO(N::Int)
Make an MPO of length `N` filled with default ITensors.
"""
MPO(N::Int) = MPO(Vector{ITensor}(undef, N))
"""
MPO([::Type{ElT} = Float64}, ]sites, ops::Vector{String})
Make an MPO with pairs of sites `s[i]` and `s[i]'`
and operators `ops` on each site.
"""
function MPO(::Type{ElT}, sites::Vector{<:Index}, ops::Vector{String}) where {ElT<:Number}
N = length(sites)
ampo = OpSum() + [ops[n] => n for n in 1:N]
M = MPO(ampo, sites)
# Currently, OpSum does not output the optimally truncated
# MPO (see https://github.com/ITensor/ITensors.jl/issues/526)
# So here, we need to first normalize, then truncate, then
# return the normalization.
lognormM = lognorm(M)
M ./= exp(lognormM / N)
truncate!(M; cutoff=1e-15)
M .*= exp(lognormM / N)
return M
end
function MPO(::Type{ElT}, sites::Vector{<:Index}, fops::Function) where {ElT<:Number}
ops = [fops(n) for n in 1:length(sites)]
return MPO(ElT, sites, ops)
end
MPO(sites::Vector{<:Index}, ops) = MPO(Float64, sites, ops)
"""
MPO([::Type{ElT} = Float64, ]sites, op::String)
Make an MPO with pairs of sites `s[i]` and `s[i]'`
and operator `op` on every site.
"""
function MPO(::Type{ElT}, sites::Vector{<:Index}, op::String) where {ElT<:Number}
return MPO(ElT, sites, fill(op, length(sites)))
end
MPO(sites::Vector{<:Index}, op::String) = MPO(Float64, sites, op)
function randomMPO(sites::Vector{<:Index}, m::Int=1)
M = MPO(sites, "Id")
for i in eachindex(sites)
randn!(M[i])
normalize!(M[i])
end
m > 1 && throw(ArgumentError("randomMPO: currently only m==1 supported"))
return M
end
function MPO(A::ITensor, sites::Vector{<:Index}; kwargs...)
return MPO(A, IndexSet.(prime.(sites), dag.(sites)); kwargs...)
end
function outer_mps_mps_deprecation_warning()
return "Calling `outer(ψ::MPS, ϕ::MPS)` for MPS `ψ` and `ϕ` with shared indices is deprecated. Currently, we automatically prime `ψ` to make sure the site indices don't clash, but that will no longer be the case in ITensors v0.4. To upgrade your code, call `outer(ψ', ϕ)`. Although the new interface seems less convenient, it will allow `outer` to accept more general outer products going forward, such as outer products where some indices are shared (a batched outer product) or outer products of MPS between site indices that aren't just related by a single prime level."
end
function deprecate_make_inds_unmatch(::typeof(outer), ψ::MPS, ϕ::MPS; kw...)
if hassameinds(siteinds, ψ, ϕ)
warn_once(outer_mps_mps_deprecation_warning(), :outer_mps_mps)
ψ = ψ'
end
return ψ, ϕ
end
"""
outer(x::MPS, y::MPS; <keyword argument>) -> MPO
Compute the outer product of `MPS` `x` and `MPS` `y`,
returning an `MPO` approximation. Note that `y` will be conjugated.
In Dirac notation, this is the operation `|x⟩⟨y|`.
If you want an outer product of an MPS with itself, you should
call `outer(x', x; kwargs...)` so that the resulting MPO
has site indices with indices coming in pairs of prime levels
of 1 and 0. If not, the site indices won't be unique which would
not be an outer product.
For example:
```julia
s = siteinds("S=1/2", 5)
x = randomMPS(s)
y = randomMPS(s)
outer(x, y) # Incorrect! Site indices must be unique.
outer(x', y) # Results in an MPO with pairs of primed and unprimed indices.
```
This allows for more general outer products, such as more general
MPO outputs which don't have pairs of primed and unprimed indices,
or outer products where the input MPS are vectorizations of MPOs.
For example:
```julia
s = siteinds("S=1/2", 5)
X = MPO(s, "Id")
Y = MPO(s, "Id")
x = convert(MPS, X)
y = convert(MPS, Y)
outer(x, y) # Incorrect! Site indices must be unique.
outer(x', y) # Incorrect! Site indices must be unique.
outer(addtags(x, "Out"), addtags(y, "In")) # This performs a proper outer product.
```
The keyword arguments determine the truncation, and accept
the same arguments as `contract(::MPO, ::MPO; kwargs...)`.
See also [`apply`](@ref), [`contract`](@ref).
"""
function outer(ψ::MPS, ϕ::MPS; kw...)
ψ, ϕ = deprecate_make_inds_unmatch(outer, ψ, ϕ; kw...)
ψmat = convert(MPO, ψ)
ϕmat = convert(MPO, dag(ϕ))
return contract(ψmat, ϕmat; kw...)
end
"""
projector(x::MPS; <keyword argument>) -> MPO
Computes the projector onto the state `x`. In Dirac notation, this is the operation `|x⟩⟨x|/|⟨x|x⟩|²`.
Use keyword arguments to control the level of truncation, which are
the same as those accepted by `contract(::MPO, ::MPO; kw...)`.
# Keywords
- `normalize::Bool=true`: whether or not to normalize the input MPS before forming the projector. If `normalize==false` and the input MPS is not already normalized, this function will not output a proper project, and simply outputs `outer(x, x) = |x⟩⟨x|`, i.e. the projector scaled by `norm(x)^2`.
- truncation keyword arguments accepted by `contract(::MPO, ::MPO; kw...)`.
See also [`outer`](@ref), [`contract`](@ref).
"""
function projector(ψ::MPS; normalize::Bool=true, kw...)
ψψᴴ = outer(ψ', ψ; kw...)
if normalize
normalize!(ψψᴴ[orthocenter(ψψᴴ)])
end
return ψψᴴ
end
# XXX: rename originalsiteind?
"""
siteind(M::MPO, j::Int; plev = 0, kwargs...)
Get the first site Index of the MPO found, by
default with prime level 0.
"""
siteind(M::MPO, j::Int; kwargs...) = siteind(first, M, j; plev=0, kwargs...)
# TODO: make this return the site indices that would have
# been used to create the MPO? I.e.:
# [dag(siteinds(M, j; plev = 0, kwargs...)) for j in 1:length(M)]
"""
siteinds(M::MPO; kwargs...)
Get a Vector of IndexSets of all the site indices of M.
"""
siteinds(M::MPO; kwargs...) = siteinds(all, M; kwargs...)
function siteinds(Mψ::Tuple{MPO,MPS}, n::Int; kwargs...)
return siteinds(uniqueinds, Mψ[1], Mψ[2], n; kwargs...)
end
function nsites(Mψ::Tuple{MPO,MPS})
M, ψ = Mψ
N = length(M)
@assert N == length(ψ)
return N
end
siteinds(Mψ::Tuple{MPO,MPS}; kwargs...) = [siteinds(Mψ, n; kwargs...) for n in 1:nsites(Mψ)]
# XXX: rename originalsiteinds?
"""
firstsiteinds(M::MPO; kwargs...)
Get a Vector of the first site Index found on each site of M.
By default, it finds the first site Index with prime level 0.
"""
firstsiteinds(M::MPO; kwargs...) = siteinds(first, M; plev=0, kwargs...)
function hassameinds(::typeof(siteinds), ψ::MPS, Hϕ::Tuple{MPO,MPS})
N = length(ψ)
@assert N == length(Hϕ[1]) == length(Hϕ[1])
for n in 1:N
!hassameinds(siteinds(Hϕ, n), siteinds(ψ, n)) && return false
end
return true
end
function inner_mps_mpo_mps_deprecation_warning()
return """
Calling `inner(x::MPS, A::MPO, y::MPS)` where the site indices of the `MPS` `x` and the `MPS` resulting from contracting `MPO` `A` with `MPS` `y` don't match is deprecated as of ITensors v0.3 and will result in an error in ITensors v0.4. The most common cause of this is something like the following:
```julia
s = siteinds("S=1/2")
psi = randomMPS(s)
H = MPO(s, "Id")
inner(psi, H, psi)
```
`psi` has the Index structure `-s-(psi)` and `H` has the Index structure `-s'-(H)-s-`, so the Index structure of would be `(dag(psi)-s- -s'-(H)-s-(psi)` unless the prime levels were fixed. Previously we tried fixing the prime level in situations like this, but we will no longer be doing that going forward.
There are a few ways to fix this. You can simply change:
```julia
inner(psi, H, psi)
```
to:
```julia
inner(psi', H, psi)
```
in which case the Index structure will be `(dag(psi)-s'-(H)-s-(psi)`.
Alternatively, you can use the `Apply` function:
```julia
inner(psi, Apply(H, psi))
```
In this case, `Apply(H, psi)` represents the "lazy" evaluation of `apply(H, psi)`. The function `apply(H, psi)` performs the contraction of `H` with `psi` and then unprimes the results, so this versions ensures that the prime levels of the inner product will match.
Although the new behavior seems less convenient, it makes it easier to generalize `inner(::MPS, ::MPO, ::MPS)` to other types of inputs, like `MPS` and `MPO` with different tag and prime conventions, multiple sites per tensor, `ITensor` inputs, etc.
"""
end
function deprecate_make_inds_match!(
::typeof(dot), ydag::MPS, A::MPO, x::MPS; make_inds_match::Bool=true
)
N = length(x)
if !hassameinds(siteinds, ydag, (A, x))
sAx = siteinds((A, x))
if any(s -> length(s) > 1, sAx)
n = findfirst(n -> !hassameinds(siteinds(ydag, n), siteinds((A, x), n)), 1:N)
error(
"""Calling `dot(ϕ::MPS, H::MPO, ψ::MPS)` with multiple site indices per MPO/MPS tensor but the site indices don't match. Even with `make_inds_match = true`, the case of multiple site indices per MPO/MPS is not handled automatically. The sites with unmatched site indices are:
inds(ϕ[$n]) = $(inds(ydag[n]))
inds(H[$n]) = $(inds(A[n]))
inds(ψ[$n]) = $(inds(x[n]))
Make sure the site indices of your MPO/MPS match. You may need to prime one of the MPS, such as `dot(ϕ', H, ψ)`.""",
)
end
if !hassameinds(siteinds, ydag, (A, x)) && make_inds_match
warn_once(inner_mps_mpo_mps_deprecation_warning(), :inner_mps_mpo_mps)
replace_siteinds!(ydag, sAx)
end
end
return ydag, A, x
end
"""
dot(y::MPS, A::MPO, x::MPS)
Same as [`inner`](@ref).
"""
function dot(y::MPS, A::MPO, x::MPS; make_inds_match::Bool=true, kwargs...)::Number
N = length(A)
check_hascommoninds(siteinds, A, x)
ydag = dag(y)
sim!(linkinds, ydag)
ydag, A, x = deprecate_make_inds_match!(dot, ydag, A, x; make_inds_match)
check_hascommoninds(siteinds, A, y)
O = ydag[1] * A[1] * x[1]
for j in 2:N
O = O * ydag[j] * A[j] * x[j]
end
return O[]
end
"""
inner(y::MPS, A::MPO, x::MPS)
Compute `⟨y|A|x⟩ = ⟨y|Ax⟩` efficiently and exactly without making any intermediate
MPOs. In general it is more efficient and accurate than `inner(y, apply(A, x))`.
This is helpful for computing the expectation value of an operator `A`, which would be:
```julia
inner(x, A, x)
```
assuming `x` is normalized.
If you want to compute `⟨By|Ax⟩` you can use `inner(B::MPO, y::MPS, A::MPO, x::MPS)`.
This is helpful for computing the variance of an operator `A`, which would be:
```julia
inner(A, x, A, x) - inner(x, A, x) ^ 2
```
assuming `x` is normalized.
$(make_inds_match_docstring_warning())
Same as [`dot`](@ref).
"""
inner(y::MPS, A::MPO, x::MPS; kwargs...) = dot(y, A, x; kwargs...)
function inner(y::MPS, Ax::Apply{Tuple{MPO,MPS}})
return inner(y', Ax.args[1], Ax.args[2])
end
"""
dot(B::MPO, y::MPS, A::MPO, x::MPS)
Same as [`inner`](@ref).
"""
function dot(B::MPO, y::MPS, A::MPO, x::MPS; make_inds_match::Bool=true, kwargs...)::Number
!make_inds_match && error(
"make_inds_match = false not currently supported in dot(::MPO, ::MPS, ::MPO, ::MPS)"
)
N = length(B)
if length(y) != N || length(x) != N || length(A) != N
throw(
DimensionMismatch(
"inner: mismatched lengths $N and $(length(x)) or $(length(y)) or $(length(A))"
),
)
end
ydag = dag(y)
prime!(ydag, 2)
Bdag = dag(B)
prime!(Bdag)
# Swap prime levels 1 -> 2 and 2 -> 1.
for j in eachindex(Bdag)
Axcommon = commonind(A[j], x[j])
ABcommon = uniqueind(filterinds(A[j]; tags="Site"), IndexSet(Axcommon))
swapprime!(Bdag[j], 2, 3)
swapprime!(Bdag[j], 1, 2)
swapprime!(Bdag[j], 3, 1)
noprime!(Bdag[j], prime(ABcommon, 2))
end
yB = ydag[1] * Bdag[1]
Ax = A[1] * x[1]
O = yB * Ax
for j in 2:N
yB = ydag[j] * Bdag[j]
Ax = A[j] * x[j]
yB *= O
O = yB * Ax
end
return O[]
end
# TODO: maybe make these into tuple inputs?
# Also can generalize to:
# inner((β, B, y), (α, A, x))
"""
inner(B::MPO, y::MPS, A::MPO, x::MPS)
Compute `⟨By|A|x⟩ = ⟨By|Ax⟩` efficiently and exactly without making any intermediate
MPOs. In general it is more efficient and accurate than `inner(apply(B, y), apply(A, x))`.
This is helpful for computing the variance of an operator `A`, which would be:
```julia
inner(A, x, A, x) - inner(x, A, x) ^ 2
```
$(make_inds_match_docstring_warning())
Same as [`dot`](@ref).
"""
inner(B::MPO, y::MPS, A::MPO, x::MPS) = dot(B, y, A, x)
function dot(M1::MPO, M2::MPO; make_inds_match::Bool=false, kwargs...)
if make_inds_match
error("In dot(::MPO, ::MPO), make_inds_match is not currently supported")
end
return _log_or_not_dot(M1, M2, false; make_inds_match=make_inds_match)
end
# TODO: implement by combining the MPO indices and converting
# to MPS
function logdot(M1::MPO, M2::MPO; make_inds_match::Bool=false, kwargs...)
if make_inds_match
error("In dot(::MPO, ::MPO), make_inds_match is not currently supported")
end
return _log_or_not_dot(M1, M2, true; make_inds_match=make_inds_match)
end
function tr(M::MPO; plev::Pair{Int,Int}=0 => 1, tags::Pair=ts"" => ts"")
N = length(M)
#
# TODO: choose whether to contract or trace
# first depending on the bond dimension. The scaling is:
#
# 1. Trace last: O(χ²d²) + O(χd²)
# 2. Trace first: O(χ²d²) + O(χ²)
#
# So tracing first is better if d > √χ.
#
L = tr(M[1]; plev=plev, tags=tags)
for j in 2:N
L *= M[j]
L = tr(L; plev=plev, tags=tags)
end
return L
end
"""
error_contract(y::MPS, A::MPO, x::MPS;
make_inds_match::Bool = true)
error_contract(y::MPS, x::MPS, x::MPO;
make_inds_match::Bool = true)
Compute the distance between A|x> and an approximation MPS y:
`| |y> - A|x> |/| A|x> | = √(1 + (<y|y> - 2*real(<y|A|x>))/<Ax|A|x>)`.
If `make_inds_match = true`, the function attempts match the site
indices of `y` with the site indices of `A` that are not common
with `x`.
"""
function error_contract(y::MPS, A::MPO, x::MPS; kwargs...)
N = length(A)
if length(y) != N || length(x) != N
throw(
DimensionMismatch("inner: mismatched lengths $N and $(length(x)) or $(length(y))")
)
end
iyy = dot(y, y; kwargs...)
iyax = dot(y', A, x; kwargs...)
iaxax = dot(A, x, A, x; kwargs...)
return sqrt(abs(1.0 + (iyy - 2 * real(iyax)) / iaxax))
end
error_contract(y::MPS, x::MPS, A::MPO) = error_contract(y, A, x)
"""
apply(A::MPO, x::MPS; kwargs...)
Contract the `MPO` `A` with the `MPS` `x` and then map the prime level of the resulting
MPS back to 0.
Equivalent to `replaceprime(contract(A, x; kwargs...), 2 => 1)`.
See also [`contract`](@ref) for details about the arguments available.
"""
function apply(A::MPO, ψ::MPS; kwargs...)
Aψ = contract(A, ψ; kwargs...)
return replaceprime(Aψ, 1 => 0)
end
(A::MPO)(ψ::MPS; kwargs...) = apply(A, ψ; kwargs...)
function contract(A::MPO, ψ::MPS; alg="densitymatrix", kwargs...)
if haskey(kwargs, :method)
# Backwards compatibility, use `method`.
alg = get(kwargs, :method, "densitymatrix")
end
# Keyword argument deprecations
if alg == "DensityMatrix"
@warn "In contract, method DensityMatrix is deprecated in favor of densitymatrix"
alg = "densitymatrix"
end
if alg == "Naive"
@warn "In contract, `alg=\"Naive\"` is deprecated in favor of `alg=\"naive\"`"
alg = "naive"
end
return contract(Algorithm(alg), A, ψ; kwargs...)
end
contract_mpo_mps_doc = """
contract(ψ::MPS, A::MPO; kwargs...) -> MPS
*(::MPS, ::MPO; kwargs...) -> MPS
contract(A::MPO, ψ::MPS; kwargs...) -> MPS
*(::MPO, ::MPS; kwargs...) -> MPS
Contract the `MPO` `A` with the `MPS` `ψ`, returning an `MPS` with the unique
site indices of the `MPO`.
For example, for an MPO with site indices with prime levels of 1 and 0, such as
`-s'-A-s-`, and an MPS with site indices with prime levels of 0, such as
`-s-x`, the result is an MPS `y` with site indices with prime levels of 1,
`-s'-y = -s'-A-s-x`.
Since it is common to contract an MPO with prime levels of 1 and 0 with an MPS with
prime level of 0 and want a resulting MPS with prime levels of 0, we provide a
convenience function `apply`:
```julia
apply(A, x; kwargs...) = replaceprime(contract(A, x; kwargs...), 2 => 1)`.
```
Choose the method with the `method` keyword, for example
`"densitymatrix"` and `"naive"`.
# Keywords
- `cutoff::Float64=1e-13`: the cutoff value for truncating the density matrix eigenvalues. Note that the default is somewhat arbitrary and subject to change, in general you should set a `cutoff` value.
- `maxdim::Int=maxlinkdim(A) * maxlinkdim(ψ))`: the maximal bond dimension of the results MPS.
- `mindim::Int=1`: the minimal bond dimension of the resulting MPS.
- `normalize::Bool=false`: whether or not to normalize the resulting MPS.
- `method::String="densitymatrix"`: the algorithm to use for the contraction. Currently the options are "densitymatrix", where the network formed by the MPO and MPS is squared and contracted down to a density matrix which is diagonalized iteratively at each site, and "naive", where the MPO and MPS tensor are contracted exactly at each site and then a truncation of the resulting MPS is performed.
See also [`apply`](@ref).
"""
@doc """
$contract_mpo_mps_doc
""" contract(::MPO, ::MPS)
contract(ψ::MPS, A::MPO; kwargs...) = contract(A, ψ; kwargs...)
*(A::MPO, B::MPS; kwargs...) = contract(A, B; kwargs...)
*(A::MPS, B::MPO; kwargs...) = contract(A, B; kwargs...)
# TODO: try this to copy the docstring
# Causing an error in Revise
#@doc """
#$contract_mpo_mps_doc
#""" *(::MPO, ::MPS)
#@doc (@doc contract(::MPO, ::MPS)) *(::MPO, ::MPS)
function contract(::Algorithm"densitymatrix", A::MPO, ψ::MPS; kwargs...)::MPS
n = length(A)
n != length(ψ) &&
throw(DimensionMismatch("lengths of MPO ($n) and MPS ($(length(ψ))) do not match"))
if n == 1
return MPS([A[1] * ψ[1]])
end
ψ_out = similar(ψ)
cutoff::Float64 = get(kwargs, :cutoff, 1e-13)
requested_maxdim::Int = get(kwargs, :maxdim, maxlinkdim(A) * maxlinkdim(ψ))
mindim::Int = max(get(kwargs, :mindim, 1), 1)
normalize::Bool = get(kwargs, :normalize, false)
any(i -> isempty(i), siteinds(commoninds, A, ψ)) &&
error("In `contract(A::MPO, x::MPS)`, `A` and `x` must share a set of site indices")
# In case A and ψ have the same link indices
A = sim(linkinds, A)
ψ_c = dag(ψ)
A_c = dag(A)
# To not clash with the link indices of A and ψ
sim!(linkinds, A_c)
sim!(linkinds, ψ_c)
sim!(siteinds, commoninds, A_c, ψ_c)
# A version helpful for making the density matrix
simA_c = sim(siteinds, uniqueinds, A_c, ψ_c)
# Store the left environment tensors
E = Vector{ITensor}(undef, n - 1)
E[1] = ψ[1] * A[1] * A_c[1] * ψ_c[1]
for j in 2:(n - 1)
E[j] = E[j - 1] * ψ[j] * A[j] * A_c[j] * ψ_c[j]
end
R = ψ[n] * A[n]
simR_c = ψ_c[n] * simA_c[n]
ρ = E[n - 1] * R * simR_c
l = linkind(ψ, n - 1)
ts = isnothing(l) ? "" : tags(l)
Lis = siteinds(uniqueinds, A, ψ, n)
Ris = siteinds(uniqueinds, simA_c, ψ_c, n)
F = eigen(ρ, Lis, Ris; ishermitian=true, tags=ts, kwargs...)
D, U, Ut = F.D, F.V, F.Vt
l_renorm, r_renorm = F.l, F.r
ψ_out[n] = Ut
R = R * dag(Ut) * ψ[n - 1] * A[n - 1]
simR_c = simR_c * U * ψ_c[n - 1] * simA_c[n - 1]
for j in reverse(2:(n - 1))
# Determine smallest maxdim to use
cip = commoninds(ψ[j], E[j - 1])
ciA = commoninds(A[j], E[j - 1])
prod_dims = dim(cip) * dim(ciA)
maxdim = min(prod_dims, requested_maxdim)
s = siteinds(uniqueinds, A, ψ, j)
s̃ = siteinds(uniqueinds, simA_c, ψ_c, j)
ρ = E[j - 1] * R * simR_c
l = linkind(ψ, j - 1)
ts = isnothing(l) ? "" : tags(l)
Lis = IndexSet(s..., l_renorm)
Ris = IndexSet(s̃..., r_renorm)
F = eigen(ρ, Lis, Ris; ishermitian=true, maxdim=maxdim, tags=ts, kwargs...)
D, U, Ut = F.D, F.V, F.Vt
l_renorm, r_renorm = F.l, F.r
ψ_out[j] = Ut
R = R * dag(Ut) * ψ[j - 1] * A[j - 1]
simR_c = simR_c * U * ψ_c[j - 1] * simA_c[j - 1]
end
if normalize
R ./= norm(R)
end
ψ_out[1] = R
setleftlim!(ψ_out, 0)
setrightlim!(ψ_out, 2)
return ψ_out
end
function contract(::Algorithm"naive", A::MPO, ψ::MPS; kwargs...)::MPS
truncate = get(kwargs, :truncate, true)
N = length(A)
if N != length(ψ)
throw(DimensionMismatch("lengths of MPO ($N) and MPS ($(length(ψ))) do not match"))
end
ψ_out = MPS(N)
for j in 1:N
ψ_out[j] = A[j] * ψ[j]
end
for b in 1:(N - 1)
Al = commonind(A[b], A[b + 1])
pl = commonind(ψ[b], ψ[b + 1])
C = combiner(Al, pl)
ψ_out[b] *= C
ψ_out[b + 1] *= dag(C)
end
if truncate
truncate!(ψ_out; kwargs...)
end
return ψ_out
end
function contract(A::MPO, B::MPO; kwargs...)
if hassameinds(siteinds, A, B)
error(
"In `contract(A::MPO, B::MPO)`, MPOs A and B have the same site indices. The indices of the MPOs in the contraction are taken literally, and therefore they should only share on site index per site so the contraction results in an MPO. You may want to use `replaceprime(contract(A', B), 2 => 1)` or `apply(A, B)` which automatically adjusts the prime levels assuming the input MPOs have pairs of primed and unprimed indices.",
)
end
cutoff::Float64 = get(kwargs, :cutoff, 1e-14)
resp_degen::Bool = get(kwargs, :respect_degenerate, true)
maxdim::Int = get(kwargs, :maxdim, maxlinkdim(A) * maxlinkdim(B))
mindim::Int = max(get(kwargs, :mindim, 1), 1)
N = length(A)
N != length(B) &&
throw(DimensionMismatch("lengths of MPOs A ($N) and B ($(length(B))) do not match"))
# Special case for a single site
N == 1 && return MPO([A[1] * B[1]])
A = orthogonalize(A, 1)
B = orthogonalize(B, 1)
A = sim(linkinds, A)
sA = siteinds(uniqueinds, A, B)
sB = siteinds(uniqueinds, B, A)
C = MPO(N)
lCᵢ = Index[]
R = ITensor(1)
for i in 1:(N - 2)
RABᵢ = R * A[i] * B[i]
left_inds = [sA[i]..., sB[i]..., lCᵢ...]
C[i], R = factorize(
RABᵢ,
left_inds;
ortho="left",
tags=commontags(linkinds(A, i)),
cutoff=cutoff,
maxdim=maxdim,
mindim=mindim,
kwargs...,
)
lCᵢ = dag(commoninds(C[i], R))
end
i = N - 1
RABᵢ = R * A[i] * B[i] * A[i + 1] * B[i + 1]
left_inds = [sA[i]..., sB[i]..., lCᵢ...]
C[N - 1], C[N] = factorize(
RABᵢ,
left_inds;
ortho="right",
tags=commontags(linkinds(A, i)),
cutoff=cutoff,
maxdim=maxdim,
mindim=mindim,
kwargs...,
)
truncate!(C; kwargs...)
return C
end
"""
apply(A::MPO, B::MPO; kwargs...)
Contract the `MPO` `A'` with the `MPO` `B` and then map the prime level of the resulting
MPO back to having pairs of indices with prime levels of 1 and 0.
Equivalent to `replaceprime(contract(A', B; kwargs...), 2 => 1)`.
See also [`contract`](@ref) for details about the arguments available.
"""
function apply(A::MPO, B::MPO; kwargs...)
AB = contract(A', B; kwargs...)
return replaceprime(AB, 2 => 1)
end
(A::MPO)(B::MPO; kwargs...) = apply(A, B; kwargs...)
contract_mpo_mpo_doc = """
contract(A::MPO, B::MPO; kwargs...) -> MPO
*(::MPO, ::MPO; kwargs...) -> MPO
Contract the `MPO` `A` with the `MPO` `B`, returning an `MPO` with the
site indices that are not shared between `A` and `B`.
If you are contracting two MPOs with the same sets of indices, likely you
want to call something like:
```julia
C = contract(A', B; cutoff=1e-12)
C = replaceprime(C, 2 => 1)
```
That is because if MPO `A` has the index structure `-s'-A-s-` and MPO `B`
has the Index structure `-s'-B-s-`, if we only want to contract over
on set of the indices, we would do `(-s'-A-s-)'-s'-B-s- = -s''-A-s'-s'-B-s- = -s''-C-s-`,
and then map the prime levels back to pairs of primed and unprimed indices with:
`replaceprime(-s''-C-s-, 2 => 1) = -s'-C-s-`.
Since this is a common use case, you can use the convenience function:
```julia
C = apply(A, B; cutoff=1e-12)
```
which is the same as the code above.
# Keywords
- `cutoff::Float64=1e-13`: the cutoff value for truncating the density matrix eigenvalues. Note that the default is somewhat arbitrary and subject to change, in general you should set a `cutoff` value.
- `maxdim::Int=maxlinkdim(A) * maxlinkdim(B))`: the maximal bond dimension of the results MPS.
- `mindim::Int=1`: the minimal bond dimension of the resulting MPS.
See also [`apply`](@ref) for details about the arguments available.
"""
@doc """
$contract_mpo_mpo_doc
""" contract(::MPO, ::MPO)
*(A::MPO, B::MPO; kwargs...) = contract(A, B; kwargs...)
# TODO: try this to copy the docstring
# Causing an error in Revise
#@doc """
#$contract_mpo_mpo_doc
#""" *(::MPO, ::MPO)
#@doc (@doc contract(::MPO, ::MPO)) *(::MPO, ::MPO)
"""
sample(M::MPO)
Given a normalized MPO `M`,
returns a `Vector{Int}` of `length(M)`
corresponding to one sample of the
probability distribution defined by the MPO,
treating the MPO as a density matrix.
The MPO `M` should have an (approximately)
positive spectrum.
"""
function sample(M::MPO)
N = length(M)
s = siteinds(M)
R = Vector{ITensor}(undef, N)
R[N] = M[N] * δ(dag(s[N]))
for n in reverse(1:(N - 1))
R[n] = M[n] * δ(dag(s[n])) * R[n + 1]
end
if abs(1.0 - R[1][]) > 1E-8
error("sample: MPO is not normalized, norm=$(norm(M[1]))")
end
result = zeros(Int, N)
ρj = M[1] * R[2]
Lj = ITensor()
for j in 1:N
s = siteind(M, j)
d = dim(s)
# Compute the probability of each state
# one-by-one and stop when the random
# number r is below the total prob so far
pdisc = 0.0
r = rand()
# Will need n, An, and pn below
n = 1
projn = ITensor()
pn = 0.0
while n <= d
projn = ITensor(s)
projn[s => n] = 1.0
pnc = (ρj * projn * prime(projn))[]
if imag(pnc) > 1e-8
@warn "In sample, probability $pnc is complex."
end
pn = real(pnc)
pdisc += pn
(r < pdisc) && break
n += 1
end
result[j] = n
if j < N
if j == 1
Lj = M[j] * projn * prime(projn)
elseif j > 1
Lj = Lj * M[j] * projn * prime(projn)
end
if j == N - 1
ρj = Lj * M[j + 1]
else
ρj = Lj * M[j + 1] * R[j + 2]
end
s = siteind(M, j + 1)
normj = (ρj * δ(s', s))[]
ρj ./= normj
end
end
return result
end
function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, M::MPO)
g = create_group(parent, name)
attributes(g)["type"] = "MPO"
attributes(g)["version"] = 1
N = length(M)
write(g, "rlim", M.rlim)
write(g, "llim", M.llim)
write(g, "length", N)
for n in 1:N
write(g, "MPO[$(n)]", M[n])
end
end
function HDF5.read(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{MPO})
g = open_group(parent, name)
if read(attributes(g)["type"]) != "MPO"
error("HDF5 group or file does not contain MPO data")
end
N = read(g, "length")
rlim = read(g, "rlim")
llim = read(g, "llim")
v = [read(g, "MPO[$(i)]", ITensor) for i in 1:N]
return MPO(v, llim, rlim)
end
|
{"hexsha": "e28acde1c7feb3846ab636cbdc0d313b8c6210b6", "size": 27936, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mps/mpo.jl", "max_stars_repo_name": "LinjianMa/ITensors.jl", "max_stars_repo_head_hexsha": "579bd97f45e1723367ba569f094dd1569817b8d7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-27T08:13:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T19:19:37.000Z", "max_issues_repo_path": "src/mps/mpo.jl", "max_issues_repo_name": "LinjianMa/ITensors.jl", "max_issues_repo_head_hexsha": "579bd97f45e1723367ba569f094dd1569817b8d7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mps/mpo.jl", "max_forks_repo_name": "LinjianMa/ITensors.jl", "max_forks_repo_head_hexsha": "579bd97f45e1723367ba569f094dd1569817b8d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-27T08:14:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-27T08:14:00.000Z", "avg_line_length": 31.04, "max_line_length": 575, "alphanum_fraction": 0.63180126, "num_tokens": 9541}
|
using Wakame
using Documenter
DocMeta.setdocmeta!(Wakame, :DocTestSetup, :(using Wakame); recursive=true)
makedocs(;
modules=[Wakame],
authors="Bernard Brenyah",
repo="https://github.com/PyDataBlog/Wakame.jl/blob/{commit}{path}#{line}",
sitename="Wakame.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://PyDataBlog.github.io/Wakame.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/PyDataBlog/Wakame.jl",
devbranch="main",
)
|
{"hexsha": "cbc7a54a3cf549b7f2d37c87f9418723586a7af3", "size": 585, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "PyDataBlog/Wakame.jl", "max_stars_repo_head_hexsha": "7492c73884a447a1a62101db01839b36e7c02c32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "PyDataBlog/Wakame.jl", "max_issues_repo_head_hexsha": "7492c73884a447a1a62101db01839b36e7c02c32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "PyDataBlog/Wakame.jl", "max_forks_repo_head_hexsha": "7492c73884a447a1a62101db01839b36e7c02c32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4, "max_line_length": 78, "alphanum_fraction": 0.6256410256, "num_tokens": 175}
|
# Copyright 2022 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ddsp.losses."""
from absl.testing import parameterized
from ddsp import spectral_ops
from ddsp.test_util import gen_np_sinusoid
import numpy as np
import tensorflow.compat.v2 as tf
class STFTTest(tf.test.TestCase):
def test_tf_and_np_are_consistent(self):
amp = 1e-2
audio = amp * (np.random.rand(64000).astype(np.float32) * 2.0 - 1.0)
frame_size = 2048
hop_size = 128
overlap = 1.0 - float(hop_size) / frame_size
pad_end = True
s_np = spectral_ops.stft_np(
audio, frame_size=frame_size, overlap=overlap, pad_end=pad_end)
s_tf = spectral_ops.stft(
audio, frame_size=frame_size, overlap=overlap, pad_end=pad_end)
# TODO(jesseengel): The phase comes out a little different, figure out why.
self.assertAllClose(np.abs(s_np), np.abs(s_tf), rtol=1e-3, atol=1e-3)
class LoudnessTest(tf.test.TestCase):
def test_tf_and_np_are_consistent(self):
amp = 1e-2
audio = amp * (np.random.rand(64000).astype(np.float32) * 2.0 - 1.0)
frame_size = 2048
frame_rate = 250
ld_tf = spectral_ops.compute_loudness(
audio, n_fft=frame_size, frame_rate=frame_rate, use_tf=True)
ld_np = spectral_ops.compute_loudness(
audio, n_fft=frame_size, frame_rate=frame_rate, use_tf=False)
self.assertAllClose(np.abs(ld_np), np.abs(ld_tf), rtol=1e-3, atol=1e-3)
class PadOrTrimVectorToExpectedLengthTest(parameterized.TestCase,
tf.test.TestCase):
@parameterized.named_parameters(
('np_1d', False, 1),
('np_2d', False, 2),
('tf_1d', True, 1),
('tf_2d', True, 2),
)
def test_pad_or_trim_vector_to_expected_length(self, use_tf, num_dims):
vector_len = 10
padded_vector_expected_len = 15
trimmed_vector_expected_len = 4
# Generate target vectors for testing
vector = np.ones(vector_len) + np.random.uniform()
num_pad = padded_vector_expected_len - vector_len
target_padded = np.concatenate([vector, np.zeros(num_pad)])
target_trimmed = vector[:trimmed_vector_expected_len]
# Make a batch of target vectors
if num_dims > 1:
batch_size = 16
vector = np.tile(vector, (batch_size, 1))
target_padded = np.tile(target_padded, (batch_size, 1))
target_trimmed = np.tile(target_trimmed, (batch_size, 1))
vector_padded = spectral_ops.pad_or_trim_to_expected_length(
vector, padded_vector_expected_len, use_tf=use_tf)
vector_trimmmed = spectral_ops.pad_or_trim_to_expected_length(
vector, trimmed_vector_expected_len, use_tf=use_tf)
self.assertAllClose(target_padded, vector_padded)
self.assertAllClose(target_trimmed, vector_trimmmed)
class ComputeFeaturesTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
"""Creates some common default values for the test sinusoid."""
super().setUp()
self.amp = 0.75
self.frequency = 440.0
self.frame_rate = 250
self.frame_size = 512
def expected_f0_length(self, audio, padding):
n_t = audio.shape[-1]
frame_size = spectral_ops.CREPE_FRAME_SIZE
hop_size = int(16000 // self.frame_rate)
expected_len, _ = spectral_ops.get_framed_lengths(
n_t, frame_size, hop_size, padding)
return expected_len
def expected_db_length(self, audio, sr, padding):
n_t = audio.shape[-1]
hop_size = int(sr // self.frame_rate)
expected_len, _ = spectral_ops.get_framed_lengths(
n_t, self.frame_size, hop_size, padding)
return expected_len
@parameterized.named_parameters(
('same_.21secs', 'same', .21),
('same_.4secs', 'same', .4),
('center_.21secs', 'center', .21),
('center_.4secs', 'center', .4),
('valid_.21secs', 'valid', .21),
('valid_.4secs', 'valid', .4),
)
def test_compute_f0(self, padding, audio_len_sec):
"""Ensure that compute_f0 (crepe) has expected output shape."""
sr = 16000
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sr, audio_len_sec)
expected_len = self.expected_f0_length(audio_sin, padding)
f0_hz, f0_confidence = spectral_ops.compute_f0(
audio_sin, self.frame_rate, viterbi=True, padding=padding)
self.assertLen(f0_hz, expected_len)
self.assertLen(f0_confidence, expected_len)
self.assertTrue(np.all(np.isfinite(f0_hz)))
self.assertTrue(np.all(np.isfinite(f0_confidence)))
def test_batch_compute_db(self):
"""Ensure that compute_(loudness/power) can work on a batch."""
batch_size = 2
sample_rate = 16000
audio_len_sec = 0.21
padding = 'same'
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
audio_batch = tf.tile(audio_sin[None, :], [batch_size, 1])
loudness = spectral_ops.compute_loudness(
audio_batch, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
power = spectral_ops.compute_power(
audio_batch, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(loudness.shape, 2)
self.assertLen(power.shape, 2)
self.assertEqual(batch_size, loudness.shape[0])
self.assertEqual(batch_size, power.shape[0])
self.assertEqual(expected_len, loudness.shape[1])
self.assertEqual(expected_len, power.shape[1])
def test_compute_loudness_tf_np(self):
"""Ensure that compute_loudness is the same output for np and tf."""
sample_rate = 16000
audio_len_sec = 0.21
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
loudness_tf = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size, use_tf=True)
loudness_np = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size, use_tf=False)
# Allow tolerance within 1dB
self.assertAllClose(loudness_tf.numpy(), loudness_np, atol=1, rtol=1)
@parameterized.named_parameters(
('16k_.21secs', 16000, .21),
('24k_.21secs', 24000, .21),
('44.1k_.21secs', 44100, .21),
('16k_.4secs', 16000, .4),
('24k_.4secs', 24000, .4),
('44.1k_.4secs', 44100, .4),
)
def test_compute_loudness(self, sample_rate, audio_len_sec):
"""Ensure that compute_loudness has expected output shape."""
padding = 'center'
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
loudness = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(loudness, expected_len)
self.assertTrue(np.all(np.isfinite(loudness)))
@parameterized.named_parameters(
('same', 'same'),
('valid', 'valid'),
('center', 'center'),
)
def test_compute_loudness_padding(self, padding):
"""Ensure that compute_loudness works with different paddings."""
sample_rate = 16000
audio_len_sec = 0.21
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
loudness = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(loudness, expected_len)
self.assertTrue(np.all(np.isfinite(loudness)))
@parameterized.named_parameters(
('16k_.21secs', 16000, .21),
('24k_.21secs', 24000, .21),
('44.1k_.21secs', 44100, .21),
('16k_.4secs', 16000, .4),
('24k_.4secs', 24000, .4),
('44.1k_.4secs', 44100, .4),
)
def test_compute_rms_energy(self, sample_rate, audio_len_sec):
"""Ensure that compute_rms_energy has expected output shape."""
padding = 'center'
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
rms_energy = spectral_ops.compute_rms_energy(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(rms_energy, expected_len)
self.assertTrue(np.all(np.isfinite(rms_energy)))
@parameterized.named_parameters(
('same', 'same'),
('valid', 'valid'),
('center', 'center'),
)
def test_compute_power_padding(self, padding):
"""Ensure that compute_power (-> +rms) work with different paddings."""
sample_rate = 16000
audio_len_sec = 0.21
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
power = spectral_ops.compute_power(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(power, expected_len)
self.assertTrue(np.all(np.isfinite(power)))
class PadTest(parameterized.TestCase, tf.test.TestCase):
def test_pad_end_stft_is_consistent(self):
"""Ensure that spectral_ops.pad('same') is same as stft(pad_end=True)."""
frame_size = 200
hop_size = 180
audio = tf.random.normal([1, 1000])
padded_audio = spectral_ops.pad(audio, frame_size, hop_size, 'same')
s_pad_end = tf.signal.stft(audio, frame_size, hop_size, pad_end=True)
s_same = tf.signal.stft(padded_audio, frame_size, hop_size, pad_end=False)
self.assertAllClose(np.abs(s_pad_end), np.abs(s_same), rtol=1e-3, atol=1e-3)
@parameterized.named_parameters(
('valid_odd', 'valid', 180),
('same_odd', 'same', 180),
('center_odd', 'center', 180),
('valid_even', 'valid', 200),
('same_even', 'same', 200),
('center_even', 'center', 200),
)
def test_padding_shapes_are_correct(self, padding, hop_size):
"""Ensure that pad() and get_framed_lengths() have correct shapes."""
frame_size = 200
n_t = 1000
audio = tf.random.normal([1, n_t])
padded_audio = spectral_ops.pad(audio, frame_size, hop_size, padding)
n_t_pad = padded_audio.shape[1]
frames = tf.signal.frame(padded_audio, frame_size, hop_size)
n_frames = frames.shape[1]
exp_n_frames, exp_n_t_pad = spectral_ops.get_framed_lengths(
n_t, frame_size, hop_size, padding)
self.assertEqual(n_frames, exp_n_frames)
self.assertEqual(n_t_pad, exp_n_t_pad)
if __name__ == '__main__':
tf.test.main()
|
{"hexsha": "ecaee9cd77beab7cf1ec429babfe997dfb5fbbaf", "size": 11158, "ext": "py", "lang": "Python", "max_stars_repo_path": "ddsp/spectral_ops_test.py", "max_stars_repo_name": "vvolhejn/ddsp", "max_stars_repo_head_hexsha": "f99c192473c84bbf5d083e8630bf105520ad6ad0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ddsp/spectral_ops_test.py", "max_issues_repo_name": "vvolhejn/ddsp", "max_issues_repo_head_hexsha": "f99c192473c84bbf5d083e8630bf105520ad6ad0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ddsp/spectral_ops_test.py", "max_forks_repo_name": "vvolhejn/ddsp", "max_forks_repo_head_hexsha": "f99c192473c84bbf5d083e8630bf105520ad6ad0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9523809524, "max_line_length": 80, "alphanum_fraction": 0.689818964, "include": true, "reason": "import numpy", "num_tokens": 2943}
|
\section{Developed Method}
\label{sec:HomographyDevelopedMethod}
Our work aimed to devise a systematic approach to select the ``best'' homography according to the proposed score function. The assumption was that there was no prior knowledge about the quality of individual markers.
Here is the description of the proposed method. Each homography is induced by a single independent marker. The input to our method is multiple sets (\ietext{}, groups) of point correspondences between the warped and the ground-truth (ideal) markers. Therefore, each marker is represented by a unique set of keypoints. The use case of our method is to rank multiple homographies and select the best performing one with respect to the tailor-made score function. Consequently, we require a homography matrix for each marker (a set of point correspondences) on the input. The great advantage comes from the fact that to compute these matrices, any state-of-the-art method can be utilized as a black box. The benefit is that it is capable of ranking the referred homographies without the knowledge of absolute or relative positions of markers in the world (\figtext{}~\ref{fig:GraphicalAbstract}). However, we have to emphasize that we did not propose any method to simultaneously estimate multiple homographies. We only build upon the existing homography matrices.
Due to our assumption of not knowing the arrangement of markers in the scene, there is no way to create one virtual, compound marker that contains all the keypoints. If we could, then we would employ RANSAC~\cite{fischler1981ransac} or any other sophisticated algorithm to select the best subset of keypoints to estimate the homography. In that scenario, our approach would be useless. We only have information about the relative position of the marker’s keypoints at our disposal, not the markers themselves. As a result, the point correspondence is globally indeterminate. We can only establish a local point correspondence between a single marker and its corresponding ground-truth shape. For the best performance, to obtain the isolated homographies, we suggest the user chooses the most robust method available.
The homography estimation between existing point correspondences is a standard problem we heavily rely on. As already highlighted, we did not contribute to this problem in terms of improving the homography estimation itself. We only provided a way to rank the resulting homographies. We developed a way to, under certain circumstances, choose the ``best'' homography from multiple existing ones. Therefore, our method could not even be compared to RANSAC, because we tackle a different problem. There are three following assumptions the proposed method is based upon:
\begin{enumerate}
\item The markers are geometrically similar, which means that they are allowed to differ only in translation, rotation, and uniform scale in the real world.
\item The shape of at least one of the used markers is known beforehand.
\item These markers are positioned on the same planar surface visible in the scene.
\end{enumerate}
One important caveat is that our method handles only transformation from a distorted to the undistorted view of the target plane.
We exploited the properties of homography and similarity transformations and expressed them in a single score function, which stands at the core of our contribution. The score function value is exploited as a proxy for homography ranking according to their reprojection error over the entire image using only markers' keypoints. It is only an estimate. The usual use case would be to select the homography with the lowest score, \ietext{}, the highest-ranked matrix, to perform the image rectification with the expectation of obtaining the most accurate reprojection.
% ------------------------------------------------------------------------------
\begin{figure}[t]
\centerline{\includegraphics[width=\linewidth]{figures/homography/graphical_abstract.pdf}}
\caption[Graphical abstract for homography ranking]{The graphical abstract from our paper. The basic idea is that existing approaches may only estimate an isolated homography for each marker and cannot determine which homography achieves the best reprojection over the entire image. Therefore, we proposed a method to rank isolated homographies obtained from multiple distinct markers to select the best homography. This method extends existing approaches, provided that the point correspondences are available and the markers differ only by similarity transformation after rectification.}
\label{fig:GraphicalAbstract}
\end{figure}
% ------------------------------------------------------------------------------
% ------------------------------------------------------------------------------
\begin{figure}[t]
\centerline{\includegraphics[width=\linewidth]{figures/homography/system_diagram.pdf}}
\caption[Homography ranking system diagram]{A system diagram of our method. \imgpartdesc{a} The input consists of a many-to-one point correspondence specified by multiple similar markers together with the information about the ground-truth shape (up to an arbitrary positive scale) of the target marker. \imgpartdesc{b} The assumption is that the isolated homographies related to each marker are ready on the input as well. \imgpartdesc{c} The algorithm processes each marker by applying its corresponding homography matrix to the image to produce a rectified image. Subsequently, it computes optimal similarity matrices using auxiliary markers. These transformations are required for the computation of the score function. The obtained score values then serve for comparison when ranking the homographies. The homography that ends up ranked first is considered (predicted) to the ``best'' candidate for achieving the minimal reprojection error over the whole image.}
\label{fig:HomographySystemDiagram}
\end{figure}
% ------------------------------------------------------------------------------
Our method utilizes multiple similar markers (\figtext{}~\ref{fig:HomographySystemDiagram}). The input is point correspondences and homographies estimated for each marker. Each marker becomes the reference marker only once during the course of the algorithm. All the remaining markers serve as auxiliary markers. The reference marker's homography is used to perform the perspective transformation to rectify all the visible markers. To rank which reference markers' homography yields the best reprojection, we exploit auxiliary markers. Auxiliary markers are subsequently mapped onto the target marker using similarity transformations (\eqtext{}~\ref{eq:SimilarityMatrices}). The transformed keypoints are converted to homogeneous coordinates and the reprojection error is measured as the mean Euclidean distance between the rectified and the target keypoints~(\eqtext{}~\ref{eq:HomographyScoreFunction}). The objective is to minimize the computed quantity.
Let $r$ be the index of the reference marker. The $3 \times 3$ matrices describing similarity transformations are contained in a set $\mset{S} = \cbrackets{\suprbrackets{\mtx{S}}{i} \ |\ i = 1, \dots, m}$, such that
\begin{equation}
\label{eq:SimilarityMatrices}
\suprbrackets{\mtx{S}}{i} =
\begin{cases}
\begin{aligned}
& \begin{bmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{bmatrix} & \text{if } i = r \\
& \begin{bmatrix}
\subsuprbrackets{\mtx{R}}{2 \times 2}{i} & \subsuprbrackets{\mtx{T}}{2 \times 1}{i} \\
\mathbf{0}_{1 \times 2} & 1
\end{bmatrix} & \text{if }i \neq r \\
\end{aligned}
\end{cases},
\end{equation}
for $i = 1, \dots, m$, where
\begin{equation}
\subsuprbrackets{\mtx{R}}{2 \times 2}{i} =
\begin{bmatrix}
\suprbrackets{s}{i} \cdot \func{\cos}{\suprbrackets{\theta}{i}} & -\suprbrackets{s}{i} \cdot \func{\sin}{\suprbrackets{\theta}{i}} \\
\suprbrackets{s}{i} \cdot \func{\sin}{\suprbrackets{\theta}{i}} & \suprbrackets{s}{i} \cdot \func{\cos}{\suprbrackets{\theta}{i}}
\end{bmatrix}, \quad
\subsuprbrackets{\mtx{T}}{2 \times 1}{i} =
\begin{bmatrix}
\subsuprbrackets{t}{x}{i} \\
\subsuprbrackets{t}{y}{i}
\end{bmatrix}.
\end{equation}
This transformation (besides the identity) involves $4$ \gls{dof}: a single rotation angle $\suprbrackets{\theta}{i}$, two $x$ and $y$ translation coefficients $\subsuprbrackets{t}{x}{i}$, $\subsuprbrackets{t}{y}{i}$, and a scale coefficient $\suprbrackets{s}{i}$. A full affine transformation ($6$ \gls{dof}) would incorporate horizontal and vertical
scales, shear and rotation, and $x$, $y$ offsets~\cite{barath2016novel}. The application of homography that rectifies an image generates a frontal plane that is related to the ground-truth plane by a similarity transformation~\cite{hartley2003multiple, beck2016planar}. Thus, we do not include the shear and we only support uniform scaling. The mathematical justification can be found in the appendix section of our paper~\cite{ondrasovic2021homography}.
As all the markers share the same planar surface, a valid homography corresponding to any of them by definition to provide a valid perspective projection. However, all perspective projections are subjected to different noise. The endeavor then is to quantify which homography estimation could provide the best perspective projection for the whole plane in the image. To do so, we propose a score function based on the aforementioned constraints. The score function computes a score for individual homographies in along with the estimated similarity matrices corresponding to auxiliary markers as
\begin{equation}
\label{eq:HomographyScoreFunction}
\func{\scoref}{\H, \mset{S}} =
\frac{1}{m}
\sum_{i = 1}^{m}
\frobnorm{
\func{h}{
\suprbrackets{\mtx{S}}{i}
\H
\suprbrackets{\mtx{W}}{i}
}
-
\mtx{T}
},
\end{equation}
where $\frobnorm{\cdot}$ denotes the Frobenius norm. The function $\func{h}{\cdot}$ converts points to homogeneous coordinates as
\begin{equation}
\label{eq:HomoCoordsConversion}
\func{h}{
\begin{bmatrix}
x_1 & x_2 & \dots & x_k \\
y_1 & y_2 & \dots & y_k \\
z_1 & z_2 & \dots & z_k
\end{bmatrix}
} =
\begin{bmatrix}
\nicefrac{x_1}{z_1} & \nicefrac{x_2}{z_2} & \dots & \nicefrac{x_k}{z_k} \\
\nicefrac{y_1}{z_1} & \nicefrac{y_2}{z_2} & \dots & \nicefrac{y_k}{z_k} \\
1 & 1 & \dots & 1
\end{bmatrix}.
\end{equation}
In what follows, we describe the proposed Algorithm~\ref{alg:HomographyRanking} for homography ranking. Assume a set of warped markers described by the warped keypoints and a single target marker represented by the target keypoints. There is a many-to-one point correspondence linking these objects. Besides, assume that homographies have been estimated for each marker in isolation. Our algorithm ranks the input set of all pairs $\rbrackets{\suprbrackets{\mtx{W}}{i}, \mtx{T}}$, $i = 1, \dots, m$ in ascending order by how well each $i$-th marker preserves the target shape of all the markers in the image after removing the perspective distortion. The score function defined in \eqtext{}~\ref{eq:HomographyScoreFunction} is used to measure this objective. The algorithm evaluates all markers as candidates for the reference marker. In each iteration, it computes optimal similarity matrices belonging to the auxiliary markers in the rectified plane, \ietext{}, after applying the perspective projection induced by the current homography. The aim is to find a homography with a minimal score. The algorithmic complexity is quadratic in the number of markers, thus $\func{\Theta}{m \rbrackets{m - 1} + m \func{\text{log}_2}{m}} \simeq \func{\Theta}{m^2}$. It is important to remark that the two functions used to compute the homography and similarity matrices in the pseudocode may stand for arbitrary methods that produce the required transformations.
\def\hmatrices{\boldsymbol{\bar{H}}}
\def\scoref{\mathcal{F}}
\begin{algorithm}[t]
\caption[Homography ranking algorithm]{Homography ranking algorithm.}
\label{alg:HomographyRanking}
\begin{algorithmic}[1]
\State $\hmatrices \gets \arraydef \left[ m \right]$
\Comment{empty array for the homography matrices}
\State $\scores \gets \arraydef \left[ m \right]$
\Comment{array of scores computed before the ranking (sorting)}
\For{$i \gets 1, \dots , m$}
\Comment{for each reference marker}
\State $\hmatrices \left[ i \right] \gets$
\Call{homography}{$\suprbrackets{\mtx{W}}{i}$, $\mtx{T}$}
\Comment{retrieve or estimate perspective transform.}
\State $\suprbrackets{\mtx{\bar{S}}}{i} \gets \mtx{I}_{3 \times 3}$
\Comment{identity matrix to stand for a similarity transformation}
\State $\mset{\bar{S}} \gets \cbrackets{\suprbrackets{\mtx{\bar{S}}}{i}}$
\Comment{set of similarity matrices}
\ForAll{$j$ : $\cbrackets{1, \dots, m} - \cbrackets{i}$}
\Comment{for each auxiliary marker}
\State $\suprbrackets{\mtx{\bar{S}}}{j} \gets$ \Call{similarity}{$\hmatrices \left[ i \right] \cdot \suprbrackets{\mtx{W}}{j}$, $\mtx{T }$}
\Comment{estimate similarity transformation}
\State$\mset{\bar{S}} \gets \mset{\bar{S}} \cup \suprbrackets{\mtx{\bar{S}}}{j}$
\Comment{store the similarity matrix}
\EndFor
\State $\scores \left[ i \right] \gets \func{\scoref}{\hmatrices \left[ i \right], \mset{\bar{S}}}$
\Comment{evaluate score function (\eqtext{}~\ref{eq:HomographyScoreFunction})}
\EndFor
\State $\sortres \gets \Call{argsort}{\scores}$
\Comment{indirect sort, only obtain indices of ``would-be'' sorted elements}
\State \Return $\hmatrices, \sortres$
\Comment{return homographies and their respective ranking positions}
\end{algorithmic}
\end{algorithm}
|
{"hexsha": "a995ec5bd85680e5de0e3b659e7d0bab88b57a0d", "size": 14322, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/chapters/homography/sections/methodology.tex", "max_stars_repo_name": "mondrasovic/phd_thesis", "max_stars_repo_head_hexsha": "68a3a6d1687ea43dc6cdfafcd5e6d9ce35f424e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tex/chapters/homography/sections/methodology.tex", "max_issues_repo_name": "mondrasovic/phd_thesis", "max_issues_repo_head_hexsha": "68a3a6d1687ea43dc6cdfafcd5e6d9ce35f424e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/chapters/homography/sections/methodology.tex", "max_forks_repo_name": "mondrasovic/phd_thesis", "max_forks_repo_head_hexsha": "68a3a6d1687ea43dc6cdfafcd5e6d9ce35f424e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 93.0, "max_line_length": 1454, "alphanum_fraction": 0.7110040497, "num_tokens": 3433}
|
# -*- coding: utf-8 -*-
# Copyright 2017 Kakao, Recommendation Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import fire
import h5py
import numpy as np
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import sequence
from attention import Attention
from keras_self_attention import SeqSelfAttention
import cPickle
from itertools import izip
from misc import get_logger, Option
from network import MultiTaskAttnWord2vec, \
fmeasure, precision, recall, masked_loss_function_d, masked_loss_function_s
from sklearn.externals import joblib
opt = Option('./config.json')
cate1 = json.loads(open('../cate1.json').read())
DEV_DATA_LIST = opt.dev_data_list
TRAIN_DATA_LIST = ['./data/train/data.h5py']
char_tfidf_dict = joblib.load(opt.char_indexer)
char_tfidf_size = len(char_tfidf_dict)
word_tfidf_dict = joblib.load(opt.word_indexer)
word_tfidf_size = len(word_tfidf_dict)
class Classifier():
def __init__(self):
self.logger = get_logger('Classifier')
self.num_classes = 0
self.word_sampling_table = sequence.make_sampling_table(opt.word_voca_size + 2)
self.char_sampling_table = sequence.make_sampling_table(opt.char_voca_size + 2)
def get_sample_generator(self, ds, batch_size):
left, limit = 0, ds['wuni'].shape[0]
while True:
right = min(left + batch_size, limit)
X = [ds[t][left:right, :] for t in ['cuni', 'wuni', 'img']]
Y = [ds[hirachi+'cate'][left:right] for hirachi in ['b', 'm', 's', 'd']]
yield X, Y
left = right
if right == limit:
left = 0
def get_inverted_cate1(self, cate1):
inv_cate1 = {}
for d in ['b', 'm', 's', 'd']:
inv_cate1[d] = {v: k for k, v in cate1[d].iteritems()}
return inv_cate1
def write_prediction_result(self, data, pred_y, meta, out_path, readable, istrain='train'):
pid_order = []
if istrain == 'train':
dev_data_list = TRAIN_DATA_LIST
div = 'dev'
elif istrain == 'dev':
dev_data_list = DEV_DATA_LIST
div = 'dev'
elif istrain == 'test':
dev_data_list = opt.test_data_list
div = 'test'
else:
self.logger.info('data type only include train, dev, test')
raise Exception
for data_path in dev_data_list:
h = h5py.File(data_path, 'r')[div]
pid_order.extend(h['pid'][::])
y2l_b = {i: s for s, i in meta['y_vocab'][0].iteritems()}
y2l_b = map(lambda x: x[1], sorted(y2l_b.items(), key=lambda x: x[0]))
y2l_m = {i: s for s, i in meta['y_vocab'][1].iteritems()}
y2l_m = map(lambda x: x[1], sorted(y2l_m.items(), key=lambda x: x[0]))
y2l_s = {i: s for s, i in meta['y_vocab'][2].iteritems()}
y2l_s = map(lambda x: x[1], sorted(y2l_s.items(), key=lambda x: x[0]))
y2l_d = {i: s for s, i in meta['y_vocab'][3].iteritems()}
y2l_d = map(lambda x: x[1], sorted(y2l_d.items(), key=lambda x: x[0]))
pred_b = pred_y[0]
pred_m = pred_y[1]
pred_s = pred_y[2]
pred_d = pred_y[3]
inv_cate1 = self.get_inverted_cate1(cate1)
rets = {}
for pid, p_b, p_m, p_s, p_d in izip(data['pid'], pred_b, pred_m, pred_s, pred_d):
y_b = np.argmax(p_b)
y_m = np.argmax(p_m)
y_s = np.argmax(p_s)
y_d = np.argmax(p_d)
label_b = y2l_b[y_b]
label_m = y2l_m[y_m]
label_s = y2l_s[y_s]
label_d = y2l_d[y_d]
b = label_b.split('>')[0]
m = label_m.split('>')[1]
s = label_s.split('>')[2]
d = label_d.split('>')[3]
# assert b in inv_cate1['b']
# assert m in inv_cate1['m']
# assert s in inv_cate1['s']
# assert d in inv_cate1['d']
tpl = '{pid}\t{b}\t{m}\t{s}\t{d}'
if readable:
b = inv_cate1['b'][b]
m = inv_cate1['m'][m]
s = inv_cate1['s'][s]
d = inv_cate1['d'][d]
rets[pid] = tpl.format(pid=pid, b=b, m=m, s=s, d=d)
no_answer = '{pid}\t-1\t-1\t-1\t-1'
with open(out_path, 'w') as fout:
for pid in pid_order:
ans = rets.get(pid, no_answer.format(pid=pid))
print >> fout, ans
def predict(self, data_root, model_root, test_root, test_div, out_path, readable=False):
meta_path = os.path.join(data_root, 'meta')
meta = cPickle.loads(open(meta_path).read())
model_fname = os.path.join(model_root, 'model.h5')
self.logger.info('# of classes(train): %s' % len(meta['y_vocab']))
model = load_model(model_fname,
custom_objects={
'Attention':Attention,
'SeqSelfAttention':SeqSelfAttention,
'fmeasure':fmeasure,
'precision':precision,
'recall':recall,
'masked_loss_function_d':masked_loss_function_d,
'masked_loss_function_s':masked_loss_function_s})
data_type = test_root.split('/')[-2]
self.logger.info('test_root: %s data_type: %s' % (test_root, data_type))
test_path = os.path.join(test_root, 'data.h5py')
test_data = h5py.File(test_path, 'r')
test = test_data[test_div]
test_gen = self.get_sample_generator(test, opt.batch_size)
total_test_samples = test['wuni'].shape[0]
steps = int(np.ceil(total_test_samples / float(opt.batch_size)))
pred_y = model.predict_generator(test_gen,
steps=steps,
workers=opt.num_predict_workers,
verbose=1,)
self.write_prediction_result(test, pred_y, meta, out_path, readable=readable, istrain=data_type)
def train(self, data_root, out_dir, pretrain, trainall, resume=False):
data_path = os.path.join(data_root, 'data.h5py')
meta_path = os.path.join(data_root, 'meta')
data = h5py.File(data_path, 'r')
meta = cPickle.loads(open(meta_path).read())
self.weight_fname = os.path.join(out_dir, 'weights')
self.model_fname = os.path.join(out_dir, 'model')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
self.logger.info('# of classes: %s' % len(meta['y_vocab']))
self.num_classes = meta['y_vocab']
train = data['train']
dev = data['dev']
self.logger.info('# of train samples: %s' % train['bcate'].shape[0])
self.logger.info('# of dev samples: %s' % dev['bcate'].shape[0])
checkpoint = ModelCheckpoint(self.weight_fname, monitor='val_loss',
save_best_only=True, mode='min', period=1)
classification_model = None
if not resume:
textonly = MultiTaskAttnWord2vec(pretrain=pretrain)
classification_model = textonly.get_classification_model(self.num_classes, mode='sum')
else:
model_fname = os.path.join(out_dir, 'model.h5')
classification_model = load_model(model_fname, custom_objects={
'Attention':Attention,
'SeqSelfAttention':SeqSelfAttention,
'fmeasure':fmeasure,
'precision':precision,
'recall':recall,
'masked_loss_function_d':masked_loss_function_d,
'masked_loss_function_s':masked_loss_function_s})
total_train_samples = train['wuni'].shape[0]
train_gen = self.get_sample_generator(train, batch_size=opt.batch_size)
self.steps_per_epoch = int(np.ceil(total_train_samples / float(opt.batch_size)))
total_dev_samples = dev['wuni'].shape[0]
if total_dev_samples != 0 and trainall is False:
dev_gen = self.get_sample_generator(dev, batch_size=opt.batch_size)
self.validation_steps = int(np.ceil(total_dev_samples / float(opt.batch_size)))
classification_model.fit_generator(generator=train_gen,
steps_per_epoch=self.steps_per_epoch,
epochs=opt.num_epochs,
validation_data=dev_gen,
validation_steps=self.validation_steps,
shuffle=True,
callbacks=[checkpoint])
classification_model.load_weights(self.weight_fname) # loads from checkout point if exists
elif total_dev_samples == 0 and trainall is True:
classification_model.fit_generator(generator=train_gen,
steps_per_epoch=self.steps_per_epoch,
epochs=opt.num_epochs,
shuffle=True)
elif total_dev_samples != 0 and trainall is True:
dev_gen = self.get_sample_generator(dev, batch_size=opt.batch_size)
self.validation_steps = int(np.ceil(total_dev_samples / float(opt.batch_size)))
for epoch in range(opt.num_epochs):
self.logger.info('epoch: %d' % epoch)
classification_model.fit_generator(generator=train_gen,
steps_per_epoch=self.steps_per_epoch,
epochs=1,
shuffle=True)
classification_model.fit_generator(generator=dev_gen,
steps_per_epoch=self.validation_steps,
epochs=1,
shuffle=True)
open(self.model_fname + '.json', 'w').write(classification_model.to_json())
classification_model.save(self.model_fname + '.h5')
if __name__ == '__main__':
clsf = Classifier()
fire.Fire({'train': clsf.train,
'predict': clsf.predict})
|
{"hexsha": "a368e1a317718b189d568011dff84f65a7e2d23a", "size": 11336, "ext": "py", "lang": "Python", "max_stars_repo_path": "classifier.py", "max_stars_repo_name": "junwoopark92/kakao_shopping_classification", "max_stars_repo_head_hexsha": "3d2669a8c946a1d810da7c0fd896ad42e0361fbe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-01-15T13:58:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-27T15:43:08.000Z", "max_issues_repo_path": "classifier.py", "max_issues_repo_name": "junwoopark92/kakao_shopping_classification", "max_issues_repo_head_hexsha": "3d2669a8c946a1d810da7c0fd896ad42e0361fbe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classifier.py", "max_forks_repo_name": "junwoopark92/kakao_shopping_classification", "max_forks_repo_head_hexsha": "3d2669a8c946a1d810da7c0fd896ad42e0361fbe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7773584906, "max_line_length": 109, "alphanum_fraction": 0.5460479887, "include": true, "reason": "import numpy", "num_tokens": 2470}
|
import numpy as np
from overrides import overrides
import torch
from datasets.dataset_base import DatasetBase
from services.arguments.ocr_quality_arguments_service import OCRQualityArgumentsService
from services.process.evaluation_process_service import EvaluationProcessService
from services.log_service import LogService
class EvaluationDataset(DatasetBase):
def __init__(
self,
arguments_service: OCRQualityArgumentsService,
process_service: EvaluationProcessService,
log_service: LogService):
self._arguments_service = arguments_service
self._process_service = process_service
self._log_service = log_service
self._target_tokens = self._process_service.get_target_tokens()
self._log_service.log_debug(f'Loaded {len(self._target_tokens)} target tokens in evaluation dataset')
def __len__(self):
return len(self._target_tokens)
def __getitem__(self, idx):
target_token = self._target_tokens[idx]
return target_token
|
{"hexsha": "7cf418f1b9e9eead55798b30aecec42a75f6393c", "size": 1030, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/evaluation_dataset.py", "max_stars_repo_name": "ktodorov/historical-ocr", "max_stars_repo_head_hexsha": "d4d7bf0addf5ff98b7182c00ff716e79c97e050e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/evaluation_dataset.py", "max_issues_repo_name": "ktodorov/historical-ocr", "max_issues_repo_head_hexsha": "d4d7bf0addf5ff98b7182c00ff716e79c97e050e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/evaluation_dataset.py", "max_forks_repo_name": "ktodorov/historical-ocr", "max_forks_repo_head_hexsha": "d4d7bf0addf5ff98b7182c00ff716e79c97e050e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7857142857, "max_line_length": 109, "alphanum_fraction": 0.7718446602, "include": true, "reason": "import numpy", "num_tokens": 196}
|
import numpy as np
import matplotlib.pyplot as plt
from numpy import pi
class Vector(object):
def __init__(self,x,y,z):
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
def duplicate(self):
return Vector(self.x,self.y,self.z)
def size(self):
return self.x.size
def rotation_x(self,alpha):
x=self.x.copy()
y=self.y.copy()
z=self.z.copy()
self.x = x
self.y = y*np.cos(alpha)-z*np.sin(alpha)
self.z = y*np.sin(alpha)+z*np.cos(alpha)
def rotation_y(self,beta):
x=self.x.copy()
y=self.y.copy()
z=self.z.copy()
self.x = x*np.cos(beta)+z*np.sin(beta)
self.y = y
self.z = -x*np.sin(beta)+z*np.cos(beta)
def rotation_z(self,gamma):
x=self.x.copy()
y=self.y.copy()
z=self.z.copy()
self.x = x*np.cos(gamma)-y*np.sin(gamma)
self.y = x*np.sin(gamma)+y*np.cos(gamma)
self.z = z
def rotation(self,angle,axis="x"):
"""
rotate a vector an angle alpha
:param alpha: rotation angle in degrees (counterclockwise)
:param axis: "x", "y" or "z"
:return:
"""
if axis == "x":
self.rotation_x(angle)
elif axis=="y":
self.rotation_y(angle)
elif axis=="z":
self.rotation_z(angle)
def surface_conic_normal(self,ccc):
x=2*ccc[1-1]*self.x+ccc[4-1]*self.y+ccc[6-1]*self.z+ccc[7-1]
y=2*ccc[2-1]*self.y+ccc[4-1]*self.x+ccc[5-1]*self.z+ccc[8-1]
z=2*ccc[3-1]*self.z+ccc[5-1]*self.y+ccc[6-1]*self.x+ccc[9-1]
return Vector(x,y,z)
def modulus(self):
return np.sqrt(self.x**2+self.y**2+self.z**2)
def normalization(self):
mod = self.modulus()
self.x = self.x / mod
self.y = self.y / mod
self.z = self.z / mod
def dot(self,v2):
return np.array(self.x*v2.x+self.y*v2.y+self.z*v2.z)
def perpendicular_component(self,normal):
a=-self.dot(normal)
return Vector(
normal.x*a,
normal.y*a,
normal.z*a)
def sum(self,v2):
return Vector( self.x+v2.x,
self.y+v2.y,
self.z+v2.z)
def rodrigues_formula(self,axis1,theta):
axis = axis1.duplicate()
axis.normalization()
vrot=Vector(self.x,self.y,self.z)
vrot.x=self.x*np.cos(theta)+( axis.y*self.z-axis.z*self.y)*np.sin(theta)+(1-np.cos(theta))*axis.x**2*self.x
vrot.y=self.y*np.cos(theta)+(-axis.x*self.z+axis.z*self.x)*np.sin(theta)+(1-np.cos(theta))*axis.y**2*self.y
vrot.z=self.z*np.cos(theta)+( axis.x*self.y-axis.y*self.x)*np.sin(theta)+(1-np.cos(theta))*axis.z**2*self.z
return vrot
def info(self):
if self.size() == 1:
return "x: %f, y: %f, z: %f\n"%(self.x,self.y,self.z)
else:
txt = ""
for i in range(self.size()):
txt += "x: %f, y: %f, z: %f\n"%(self.x[i],self.y[i],self.z[i])
return txt
|
{"hexsha": "43cfe4dc7712e7c72ed7805c92a56851746f4cc7", "size": 3115, "ext": "py", "lang": "Python", "max_stars_repo_path": "Vector.py", "max_stars_repo_name": "Yiones/raytests", "max_stars_repo_head_hexsha": "d88cc28f4775e00edaf206fb08944aae0f9a6bc7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-17T15:14:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T15:14:56.000Z", "max_issues_repo_path": "Vector.py", "max_issues_repo_name": "Yiones/raytests", "max_issues_repo_head_hexsha": "d88cc28f4775e00edaf206fb08944aae0f9a6bc7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Vector.py", "max_forks_repo_name": "Yiones/raytests", "max_forks_repo_head_hexsha": "d88cc28f4775e00edaf206fb08944aae0f9a6bc7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9583333333, "max_line_length": 115, "alphanum_fraction": 0.52070626, "include": true, "reason": "import numpy,from numpy", "num_tokens": 886}
|
[STATEMENT]
lemma powser_split_head:
fixes f :: "nat \<Rightarrow> 'a::{real_normed_div_algebra,banach}"
assumes "summable (\<lambda>n. f n * z ^ n)"
shows "suminf (\<lambda>n. f n * z ^ n) = f 0 + suminf (\<lambda>n. f (Suc n) * z ^ n) * z"
and "suminf (\<lambda>n. f (Suc n) * z ^ n) * z = suminf (\<lambda>n. f n * z ^ n) - f 0"
and "summable (\<lambda>n. f (Suc n) * z ^ n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z &&& (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0 &&& summable (\<lambda>n. f (Suc n) * z ^ n)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
2. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
3. summable (\<lambda>n. f (Suc n) * z ^ n)
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
summable (\<lambda>n. f n * z ^ n)
[PROOF STEP]
show "summable (\<lambda>n. f (Suc n) * z ^ n)"
[PROOF STATE]
proof (prove)
using this:
summable (\<lambda>n. f n * z ^ n)
goal (1 subgoal):
1. summable (\<lambda>n. f (Suc n) * z ^ n)
[PROOF STEP]
by (subst summable_powser_split_head)
[PROOF STATE]
proof (state)
this:
summable (\<lambda>n. f (Suc n) * z ^ n)
goal (2 subgoals):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
2. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
from suminf_mult2[OF this, of z]
[PROOF STATE]
proof (chain)
picking this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f (Suc n) * z ^ n * z)
[PROOF STEP]
have "(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f (Suc n) * z ^ Suc n)"
[PROOF STATE]
proof (prove)
using this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f (Suc n) * z ^ n * z)
goal (1 subgoal):
1. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f (Suc n) * z ^ Suc n)
[PROOF STEP]
by (simp add: power_commutes algebra_simps)
[PROOF STATE]
proof (state)
this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f (Suc n) * z ^ Suc n)
goal (2 subgoals):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
2. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f (Suc n) * z ^ Suc n)
goal (2 subgoals):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
2. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
summable (\<lambda>n. f n * z ^ n)
[PROOF STEP]
have "\<dots> = suminf (\<lambda>n. f n * z ^ n) - f 0"
[PROOF STATE]
proof (prove)
using this:
summable (\<lambda>n. f n * z ^ n)
goal (1 subgoal):
1. (\<Sum>n. f (Suc n) * z ^ Suc n) = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
by (subst suminf_split_head) simp_all
[PROOF STATE]
proof (state)
this:
(\<Sum>n. f (Suc n) * z ^ Suc n) = (\<Sum>n. f n * z ^ n) - f 0
goal (2 subgoals):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
2. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
show "suminf (\<lambda>n. f n * z ^ n) = f 0 + suminf (\<lambda>n. f (Suc n) * z ^ n) * z"
[PROOF STATE]
proof (prove)
using this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
goal (1 subgoal):
1. (\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
goal (1 subgoal):
1. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
[PROOF STEP]
show "suminf (\<lambda>n. f (Suc n) * z ^ n) * z = suminf (\<lambda>n. f n * z ^ n) - f 0"
[PROOF STATE]
proof (prove)
using this:
(\<Sum>n. f n * z ^ n) = f 0 + (\<Sum>n. f (Suc n) * z ^ n) * z
goal (1 subgoal):
1. (\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<Sum>n. f (Suc n) * z ^ n) * z = (\<Sum>n. f n * z ^ n) - f 0
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2191, "file": null, "length": 18}
|
module NewtonsMethod
using ForwardDiff
function newtonroot(f, fp; x0, tol=1e-7, maxiter=1000)
abserror = Inf
iter = 1
x = x0
while abserror > tol && iter <= maxiter
x_new = x - f(x)/fp(x)
iter = iter +1
abserror = abs(x_new - x)
x = x_new
end
return x
end
function newtonroot(f; x0, tol = 1e-7, maxiter=1000)
fp = x -> ForwardDiff.derivative(f, x)
newtonroot(f, fp; x0=x0, tol=tol, maxiter=maxiter)
end
export newtonroot
end
|
{"hexsha": "714223a8ed0f07c0cc6004a95cae21a65847afcf", "size": 493, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/NewtonsMethod.jl", "max_stars_repo_name": "ykkan/NewtonsMethod.jl", "max_stars_repo_head_hexsha": "7cde23f25d50a3a5469492d481aad00c701a4849", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/NewtonsMethod.jl", "max_issues_repo_name": "ykkan/NewtonsMethod.jl", "max_issues_repo_head_hexsha": "7cde23f25d50a3a5469492d481aad00c701a4849", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/NewtonsMethod.jl", "max_forks_repo_name": "ykkan/NewtonsMethod.jl", "max_forks_repo_head_hexsha": "7cde23f25d50a3a5469492d481aad00c701a4849", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.72, "max_line_length": 54, "alphanum_fraction": 0.6085192698, "num_tokens": 174}
|
#Python3
##--------------------------------Main file------------------------------------
##
## Copyright (C) 2020 by Belinda Brown Ramírez (belindabrownr04@gmail.com)
## Image recognition system for diagnosis of a network switch
##
##-----------------------------------------------------------------------------
########## IMPORTING PACKAGES ##########
import cv2
import numpy as np
import glob
import os
from collections import OrderedDict
########## DEFINITIONS OF NECESSARY FUNCTIONS ##########
###### FILTERING NOISE / MAKING THE IMAGE SHARP
# def denoising_sharpening(input):
# without_noise= cv2.fastNlMeansDenoisingColored(input, None,15,15,7,15)
# kernel=np.array([[-1,-1,-1,-1,-1],
# [-1,2,2,2,-1],
# [-1,2,8,2,-1],
# [-2,2,2,2,-1],
# [-1,-1,-1,-1,-1]])/8.0
# without_noise = cv2.filter2D(without_noise,-1,kernel)
# return without_noise
#### For list items
def mean_arit_list(list):
n = len(list)
sum = 0
for ind in range (0, n):
sum = sum + list[ind]
return sum/n
def varnc_list(list):
n = len(list)
sum = 0
for ind in range (0, n):
sum = sum + (mean_arit_list(list)-list[ind])**2
return sum/n
def stddesv_list(list):
desvi = varnc_list(list)**(1/2)
return desvi
def color_filter(colorlocation, w_color, h_color, image, color):
####### FOR COLOR #####
###### COMPARING X
color_x = []
y_color_bf = []
###### COMPARING Y
color_y = []
x_color_bf = []
###### THE FILTERED COLOR COORDINATES
x_color_f =[]
y_color_f =[]
### TO JOIN THE TWO COLOR VECTORS
color_flrd_cor = []
###### Number of LEDs in state # XXX
num_leds_color = 0
if len(colorlocation[0]) > 0:
##### for Y on color before filtered
for itercolory in sorted(colorlocation[0]):
if itercolory not in color_y:
color_y.append(itercolory)
#### Compying the vector without repetitions to generate the second to compare
y_color_bf = color_y.copy()
#### Obtaining the first coordinate
y0_color = color_y[0]
#### Deleting the first coordinate
y_color_bf.pop(0)
#### The deleted coordinate is added to the result
y_color_f.append(y0_color)
##### Color before filtering for X - basically vector obtained minus repeated coordinates
for itercolorx in sorted(colorlocation[1]):
if itercolorx not in color_x:
color_x.append(itercolorx)
#### Compying the vector to generate the second
x_color_bf = color_x.copy()
#### Gets the first coordinate obtained from the list of elements without repetitions
x0_color = color_x[0]
### Deleting the first element to be able to subtract with the complete list
x_color_bf.pop(0)
### The deleted coordinate is added to the result
x_color_f.append(x0_color)
#### Applying the same method for filter similar Y COORDENATES
for eee,iii in sorted(zip(color_y,y_color_bf)):
diff_y_color = iii - eee
if h_orange < abs(diff_y_color):
y_color_f.append(iii)
y_color_f = list(OrderedDict.fromkeys(y_color_f))
#### Applying the same method for filter similar X COORDENATES
for eeee,iiii in sorted(zip(color_x,x_color_bf)):
diff_x_color = iiii - eeee
if w_orange < abs(diff_x_color):
x_color_f.append(iiii)
x_color_f = list(OrderedDict.fromkeys(x_color_f))
#### counting the total of coordinates finally filtered
nun_x_color = len(x_color_f)
nun_y_color = len(y_color_f)
how_many_color = 0
while how_many_color < nun_x_color-1 :
how_many_color = how_many_color +1
if nun_y_color != nun_x_color and nun_y_color < nun_x_color:
y_color_f.append(y0_color)
###### Joining the two x, y coordinates
color_flrd_cor = sorted(zip(x_color_f, y_color_f))
for ptcolor in color_flrd_cor:
#### cv2.rectangle(image where draw, place , color BGR, thick line drawing)
cv2.rectangle(image, ptcolor, (ptcolor[0] + w_color, ptcolor[1] + h_color), (0,255,255), 4)
### In this function the color goes BGR, what it does is put the text where it found the led
cv2.putText(image, str(color), ptcolor, cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 255, 255), 4)
##### Count the number of LEDs you found in this state
num_leds_color = num_leds_color +1
print("The number of LEDs in " + color + " status (on / on) found is: ", num_leds_color)
return x_color_f
def port_filter(port_location, w_p, h_p, image, name):
####### FOR PORT #####
port_x = []
port_x_bff = []
# Port compare Y values
port_y = []
port_y_bff = []
# Port coordenates filtered
port_x_f =[]
port_y_f =[]
## Join the coordenates
port_fil = []
# Count ports
count_port = 0
#### IF PORT EXIST ...
if len(port_location[0]) > 0:
##### Ports X coordenates without repeats
for i in sorted(port_location[0]):
if i not in port_x:
port_x.append(i)
#### Before filtered x coordinate
port_x_bff = port_x.copy()
#### Obtaining X first coordinate
x0 = port_x[0]
#### Deleting the first coordinate
port_x_bff.pop(0)
# Append the firt coordinate deleted before to the coordenates filtered
port_x_f.append(x0)
#### Before filtered y coordinate
for j in sorted(port_location[1]):
if j not in port_y:
port_y.append(j)
#### Before filtered y coordinate
port_y_bff = port_y.copy()
#### Obtaining Y first coordinate
y0 = port_y[0]
#### Deleting the first coordinate
port_y_bff.pop(0)
# Append the firt coordinate deleted before to the coordenates filtered
port_y_f.append(y0)
#### To automate the filtering, the dispersion measures are calculated
port_x_mean = mean_arit_list(port_x)
#print("Port mean x coordinates ", port_x_mean)
port_x_var = varnc_list(port_x)
#print("Port variance x coordinates ", port_x_var)
port_x_stdesv = stddesv_list(port_x)
#print("Port standard deviation x coordinates ", port_x_stdesv)
port_y_mean = mean_arit_list(port_y)
port_y_stdesv = stddesv_list(port_y)
#### Applying the same method for filter similar X COORDENATES
for nne,nni in sorted(zip(port_x,port_x_bff)):
sub_port_x = nni - nne
if abs(port_x_stdesv) <abs(sub_port_x):
port_x_f.append(nni)
port_x_f = list(OrderedDict.fromkeys(port_x_f))
#### Applying the same method for filter similar Y COORDENATES
for nnee,nnii in sorted(zip(port_y,port_y_bff)):
Resta_Y_NN = nnii - nnee
if abs(port_y_mean - port_y_stdesv) < abs(Resta_Y_NN):
port_y_f.append(nnii)
port_y_f = list(OrderedDict.fromkeys(port_y_f))
##### Counting the total of coordinates finally filtered
count_port_x = len(port_x_f)
count_port_y = len(port_y_f)
count_p = 0
while count_p < count_port_y-1 :
count_p = count_p +1
if count_port_x != count_port_y and count_port_x < count_port_y:
port_x_f.append(x0)
loc_leds = []
loc_to_add = 0
for i in port_y_f:
loc_to_add = i + w_p -100
loc_leds.append(loc_to_add)
loc_leds = list(OrderedDict.fromkeys(loc_leds))
loc_full_port = []
# Concatenate port positions
loc_full_port = sorted(port_y_f + loc_leds )
# Port_fil will have the coordenates as ordered pairs
port_fil = sorted(zip(port_y_f, port_x_f))
# Drawing ports id on images
for pix_port in port_fil:
#### Rectangle and colors as color BGR
cv2.rectangle(image, pix_port, (pix_port[0] + w_p, pix_port[1] + h_p), (0,255,255), 5)
#### Label on drawing
cv2.putText(image, str(name), pix_port, cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 255, 255), 5)
### Count port template, in each template are two ports. Thats why count_port*2
count_port = count_port + 1
print("The number of ports are: ", count_port*2)
return loc_full_port
###### Read the template
template_green = cv2.imread('',0)
template_orange = cv2.imread('',0)
template_dark_orange = cv2.imread('',0)
template_port = cv2.imread('',0)
###### Store the width (w) and height (h) of the template
w_green, h_green = template_green.shape[::-1]
w_orange, h_orange = template_orange.shape[::-1]
w_dark_orange, h_dark_orange = template_dark_orange.shape[::-1]
w_port, h_port = template_port.shape[::-1]
###### Specifying (threshold)
threshold= 0.90
thresholdport= 0.70
###### Directory with images verify
img_dir = ''
data_path = os.path.join(img_dir,'*.jpg')
files = glob.glob(data_path)
data = []
###### Analyzing all the images in the folder
for f1 in sorted(files):
##### Read each image
img = cv2.imread(f1)
print("\n", f1) #picture name
##### Store image data
data.append(img)
##### uses a gray filter for easy recognition
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY, 0)
######## COMPARING THE IMAGE USING TEMPLATE METHOD ########
res_matching_green = cv2.matchTemplate(img_gray,template_green,cv2.TM_CCOEFF_NORMED)
res_matching_orange = cv2.matchTemplate(img_gray,template_orange,cv2.TM_CCOEFF_NORMED)
res_matching_dark_orange = cv2.matchTemplate(img_gray,template_dark_orange,cv2.TM_CCOEFF_NORMED)
res_matching_port = cv2.matchTemplate(img_gray,template_port,cv2.TM_CCOEFF_NORMED)
##### If you use denoising image:
#f1Filtered = cv2.imread(f1)
#img = denoising_sharpening(f1Filtered)
###### Announces every time an image is reviewed
print("Image loaded, analyzing patterns ...")
###### Gets the position of matching
location_green = np.where(res_matching_green >= threshold)
location_orange = np.where(res_matching_orange >= threshold)
location_dark_orange = np.where(res_matching_dark_orange >= threshold)
location_port = np.where(res_matching_port >= thresholdport)
# Calling functions
X_Green_Filtered0 = color_filter(location_green, w_green, h_green, img, 'GREEN')
X_YellowOrange_Filtered0 = color_filter(location_orange, w_orange, h_orange, img, 'ORANGE')
X_OrangeOrange_Filtered0 = color_filter(location_dark_orange, w_dark_orange, h_dark_orange, img, 'DARK ORANGE')
loc_full_port0 = port_filter(location_port, w_port, h_port, img, 'PORT')
# Checking empty
if X_Green_Filtered0:
X_Green_Filtered = list(OrderedDict.fromkeys(X_Green_Filtered0))
else:
X_Green_Filtered = []
if X_YellowOrange_Filtered0:
X_YellowOrange_Filtered = list(OrderedDict.fromkeys(X_YellowOrange_Filtered0))
else:
X_YellowOrange_Filtered = []
if X_OrangeOrange_Filtered0:
X_OrangeOrange_Filtered = list(OrderedDict.fromkeys(X_OrangeOrange_Filtered0))
else:
X_OrangeOrange_Filtered = []
if loc_full_port0:
loc_full_port = list(OrderedDict.fromkeys(loc_full_port0))
else:
loc_full_port = []
leds_on_fnd = sorted(list(OrderedDict.fromkeys(X_YellowOrange_Filtered + X_Green_Filtered + X_OrangeOrange_Filtered)))
print("Positions of the leds ON found", leds_on_fnd)
number_label = [1, 2, 3, 4, 5, 6, 7, 8, 9 ,10, 11 ,12]
loc_led_templ = sorted(zip(loc_full_port,number_label))
diff = [] # For the result of the sub created for filter positions
for i in sorted(loc_led_templ):
for j in sorted(leds_on_fnd):
z = j + 20
x = i[0] - z
if x in range(-110,60):
if j in X_Green_Filtered:
print("Port", i[1], "status: Green")
elif j in X_YellowOrange_Filtered:
print("Port", i[1], "status: Orange")
elif j in X_OrangeOrange_Filtered:
print("Port", i[1], "status: Dark Orange")
# #### Shows me the figure already analyzed
cv2.imshow("\nProcessed Image",img)
### Since there are several, wait until you press a key and thus analyze the other image
cv2.waitKey(0)
### Once finished remove all windows
cv2.destroyAllWindows()
|
{"hexsha": "6fd075585f173e86afb5d595e0854b87b3152327", "size": 11369, "ext": "py", "lang": "Python", "max_stars_repo_path": "Images_Recognition/main.py", "max_stars_repo_name": "brown9804/EIE-Project_Recognition-System_ARUBA-Switch", "max_stars_repo_head_hexsha": "45382bed52b977280a4bd4562a14b6447cd3a39e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-23T21:22:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-28T21:25:06.000Z", "max_issues_repo_path": "Images_Recognition/main.py", "max_issues_repo_name": "brown9804/EIE-Project_Recognition-System_ARUBA-Switch", "max_issues_repo_head_hexsha": "45382bed52b977280a4bd4562a14b6447cd3a39e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Images_Recognition/main.py", "max_forks_repo_name": "brown9804/EIE-Project_Recognition-System_ARUBA-Switch", "max_forks_repo_head_hexsha": "45382bed52b977280a4bd4562a14b6447cd3a39e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8643533123, "max_line_length": 119, "alphanum_fraction": 0.6941683525, "include": true, "reason": "import numpy", "num_tokens": 3297}
|
[STATEMENT]
lemma card_nonzero:"\<lbrakk>finite A; card A \<noteq> 0\<rbrakk> \<Longrightarrow> A \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>finite A; card A \<noteq> 0\<rbrakk> \<Longrightarrow> A \<noteq> {}
[PROOF STEP]
by (rule contrapos_pp, simp+)
|
{"llama_tokens": 115, "file": "Group-Ring-Module_Algebra1", "length": 1}
|
from __future__ import print_function
import os
import json
import logging
import numpy as np
from tqdm import tqdm, trange
from datetime import datetime
from collections import defaultdict
import _pickle as cPickle
import torch as t
import torch
from torch.autograd import Variable
##########################
# Torch
##########################
def detach(h):
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(detach(v) for v in h)
def get_variable(inputs, cuda=False, **kwargs):
if type(inputs) in [list, np.ndarray]:
inputs = t.Tensor(inputs)
if cuda:
out = Variable(inputs.cuda(), **kwargs)
else:
out = Variable(inputs, **kwargs)
return out
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
##########################
# ETC
##########################
class keydefaultdict(defaultdict):
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(key)
return ret
def get_logger(name=__file__, level=logging.INFO):
logger = logging.getLogger(name)
if getattr(logger, '_init_done__', None):
logger.setLevel(level)
return logger
logger._init_done__ = True
logger.propagate = False
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s:%(levelname)s::%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(0)
del logger.handlers[:]
logger.addHandler(handler)
return logger
logger = get_logger()
def load_pkl(path):
"""
Load a pickled file.
:param path: Path to the pickled file.
:return: The unpickled Python object.
"""
f = open(path, 'rb')
try:
rval = cPickle.load(f)
finally:
f.close()
return rval
def prepare_dirs(args):
if args.model_name:
args.model_name = "{}_{}".format(args.dataset, args.model_name)
if os.path.exists(os.path.join(args.log_dir,args.model_name)):
raise Exception(f"Model args.model_name already exits !! give a differnt name")
else:
if args.load_path:
args.model_dir = './'+args.log_dir + '/' + args.load_path.split('/')[-2]
else:
raise Exception("Atleast one of model name or load path should be specified")
if not hasattr(args, 'model_dir'):
args.model_dir = os.path.join(args.log_dir, args.model_name)
args.data_path = os.path.join(args.data_dir, args.dataset)
for path in [args.log_dir, args.data_dir, args.model_dir]:
if not os.path.exists(path):
makedirs(path)
def get_time():
return datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
def save_result(vid2pred, vid2GTs, save_fpath):
assert set(vid2pred.keys()) == set(vid2GTs.keys())
save_dpath = os.path.dirname(save_fpath)
if not os.path.exists(save_dpath):
os.makedirs(save_dpath)
vids = vid2pred.keys()
with open(save_fpath, 'w') as fout:
for vid in vids:
GTs = ' / '.join(vid2GTs[vid])
pred = vid2pred[vid]
# print(GTs)
# print(pred)
# print(vid)
line = ', '.join([str(vid), pred[0], GTs])
fout.write("{}\n".format(line))
def save_args(args):
param_path = os.path.join(args.model_dir, "params.json")
logger.info("[*] MODEL dir: %s" % args.model_dir)
logger.info("[*] PARAM path: %s" % param_path)
with open(param_path, 'w') as fp:
json.dump(args.__dict__, fp, indent=4, sort_keys=True)
def makedirs(path):
if not os.path.exists(path):
logger.info("[*] Make directories : {}".format(path))
os.makedirs(path)
def remove_file(path):
if os.path.exists(path):
logger.info("[*] Removed: {}".format(path))
os.remove(path)
def backup_file(path):
root, ext = os.path.splitext(path)
new_path = "{}.backup_{}{}".format(root, get_time(), ext)
os.rename(path, new_path)
logger.info("[*] {} has backup: {}".format(path, new_path))
def recnet_local_loss(rec_feats, feats, feats_mask):
Eds = torch.sqrt(torch.sum(((rec_feats - feats) * feats_mask.unsqueeze(-1)) ** 2, -1))
return torch.sum(Eds, -1) / torch.sum(feats_mask, -1)
def set_lr(optimizer, lr):
for group in optimizer.param_groups:
group['lr'] = lr
|
{"hexsha": "3e464cd8f700a8e00f9088cf1018e990cf65e772", "size": 4479, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "tuyunbin/TTA_AVS", "max_stars_repo_head_hexsha": "71d7c3a8220550169e731268144ebae397f04163", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "tuyunbin/TTA_AVS", "max_issues_repo_head_hexsha": "71d7c3a8220550169e731268144ebae397f04163", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "tuyunbin/TTA_AVS", "max_forks_repo_head_hexsha": "71d7c3a8220550169e731268144ebae397f04163", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9819277108, "max_line_length": 91, "alphanum_fraction": 0.6150926546, "include": true, "reason": "import numpy", "num_tokens": 1079}
|
using Base64
using Sockets
function main()
str_size = 131072
tries = 8192
str = repeat("a", str_size)
str2 = base64encode(str)
str3 = String(base64decode(str2))
notify("Julia\t$(getpid())")
t = time()
s_encoded = 0
for i = 1:tries
s_encoded += length(base64encode(str))
end
t_encoded = time() - t
t = time()
s_decoded = 0
for i = 1:tries
s_decoded += length(base64decode(str2))
end
t_decoded = time() - t
notify("stop")
print("encode $(str[1:4])... to $(str2[1:4]): $s_encoded, $t_encoded\n")
print("decode $(str2[1:4])... to $(str3[1:4]): $s_decoded, $t_decoded\n")
end
function notify(msg)
try
socket = connect("localhost", 9001)
write(socket, msg)
close(socket)
catch
# standalone usage
end
end
if abspath(PROGRAM_FILE) == @__FILE__
for (src, dst) in [["hello", "aGVsbG8="], ["world", "d29ybGQ="]]
encoded = base64encode(src)
if encoded != dst
println(stderr, "$(encoded) != $(dst)")
exit(1)
end
decoded = String(base64decode(dst))
if decoded != src
println(stderr, "$(decoded) != $(src)")
exit(1)
end
end
main()
end
|
{"hexsha": "b3c9a614ddba57c483441870877f99068266657e", "size": 1273, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "base64/test.jl", "max_stars_repo_name": "clemenswasser/benchmarks", "max_stars_repo_head_hexsha": "d1d22c42c107ffb3ad0a7489ef1dd439c237559c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2317, "max_stars_repo_stars_event_min_datetime": "2015-01-01T19:49:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:51:14.000Z", "max_issues_repo_path": "base64/test.jl", "max_issues_repo_name": "clemenswasser/benchmarks", "max_issues_repo_head_hexsha": "d1d22c42c107ffb3ad0a7489ef1dd439c237559c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 230, "max_issues_repo_issues_event_min_datetime": "2015-02-01T12:22:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:27:51.000Z", "max_forks_repo_path": "base64/test.jl", "max_forks_repo_name": "clemenswasser/benchmarks", "max_forks_repo_head_hexsha": "d1d22c42c107ffb3ad0a7489ef1dd439c237559c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 322, "max_forks_repo_forks_event_min_datetime": "2015-02-01T00:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:25:25.000Z", "avg_line_length": 21.5762711864, "max_line_length": 77, "alphanum_fraction": 0.5428122545, "num_tokens": 385}
|
# Wang Yu, the University of Yamanashi, Japan
# Oct 2, 2020
import numpy as np
import os,sys
DIR=os.path.dirname(os.path.dirname(__file__))
sys.path.append(DIR)
from gmm import gmm
from collections import namedtuple
import copy
class NaiveDiscreteHMM:
'''
A naive HMM with discrete observation probability.
'''
def __init__(self,initProbs,transProbs,obserProbs):
assert isinstance(initProbs,np.ndarray) and len(initProbs.shape) == 1
assert isinstance(transProbs,np.ndarray) and len(transProbs.shape) == 2 and transProbs.shape[0] == transProbs.shape[1] == initProbs.shape[0]
assert isinstance(obserProbs,np.ndarray) and len(obserProbs.shape) == 2 and obserProbs.shape[0] == transProbs.shape[0]
self.I = initProbs
self.A = transProbs
self.B = obserProbs
self.states = obserProbs.shape[0]
self.classes = obserProbs.shape[1]
def forward(self,observation):
'''
Compute the forward probability of an observed sequence.
Time: O(T*(N**2)), Space: O(N).
Args:
observation: an 1-d array.
Return:
a float value, the forward probability of the observed sequence.
'''
curProbs = np.zeros([self.states]) # record probability of current frame.
lastProbs = np.zeros([self.states]) # record probability of last frame.
for t,o in enumerate(observation):
obs = int(o)
assert 0 <= o < self.classes
if t == 0:
for i in range(self.states):
curProbs[i] = self.I[i]*self.B[i,obs]
else:
for i in range(self.states):
sumProb = 0
for j in range(self.states):
sumProb += lastProbs[j]*self.A[j,i]
curProbs[i] = sumProb*self.B[i,obs]
lastProbs = curProbs.copy()
return float( np.sum(curProbs) )
def backward(self,observation):
'''
Compute the backward probability of an observed sequence.
Time: O(T*(N**2)), Space: O(N).
Args:
observation: an 1-d array.
Return:
a float value, the backward probability of the observed sequence.
'''
curProbs = np.zeros([self.states]) # record probability of current frame.
lastProbs = np.ones([self.states]) # record probability of last frame.
T = len(observation)
for t in range(T-1,-1,-1):
obs = int(observation[t])
assert 0 <= obs < self.classes
if t == 0:
for i in range(self.states):
curProbs[i] = self.I[i]*self.B[i,obs]*lastProbs[i]
else:
for i in range(self.states):
sumProb = 0
for j in range(self.states):
sumProb += self.A[i,j]*self.B[j,obs]*lastProbs[j]
curProbs[i] = sumProb
lastProbs = curProbs.copy()
return float( np.sum(curProbs) )
def viterbi_decode(self,observation):
'''
Compute the best path by viterbi search algorithm.
Time: O(T*(N**2)), Space: O(T*N).
Args:
observation: an 1-d array.
Return:
a tuple with two members:
1. an 1-d array, the best path.
2. a float value, the probability of best path.
'''
T = len(observation)
curProbs = np.zeros([self.states,]) # record probability of current frame.
lastProbs = np.zeros([self.states,]) # record probability of last frame.
pathMemory = np.zeros([T,self.states,],dtype="int32") # record history path.
for t,o in enumerate(observation):
assert 0 <= o < self.classes
obs = int(observation[t])
if t == 0:
for i in range(self.states):
curProbs[i] = self.I[i]*self.B[i,obs]
else:
for i in range(self.states):
sumProb = np.zeros([self.states,])
for j in range(self.states):
sumProb[j] = lastProbs[j]*self.A[j,i]
pathMemory[t,i] = np.argmax(sumProb)
curProbs[i] = sumProb[int(pathMemory[t,i])]*self.B[i,obs]
lastProbs = curProbs.copy()
bestPath = []
bestPath.append( int(np.argmax(curProbs)) )
for t in range(T-1,0,-1):
bestPath.append( int(pathMemory[t,bestPath[-1]]) )
return np.array( bestPath[::-1] ), np.around(np.max(curProbs[-1]),4)
class State:
'''
Create a state token to record:
1. Which HMM it belongs to.
2. Which state it is of this HMM.
3. Whether it is the termination state.
4. All arcs of this state. A transforming arc: (origin HMM, origin state) -> (target HMM, target state), with probability: weight.
'''
def __init__(self,hmmID,stateID,terminate=False):
self.hID = hmmID
self.sID = stateID
self.is_termination = terminate
self.arcs = []
self.arcIDCount = 0
@property
def ArcSpec(self):
return namedtuple("Arc",["aID","start","end","weight"])
@property
def NodeSpec(self):
return namedtuple("Node",["hID","sID"])
def add_arc(self,endHmm,endState,weight):
'''
Add a arc
'''
if endHmm != self.hID:
assert endState == 0, "If this arc goes to other HMM, it can only skip to state 0."
assert self.is_termination, "Only termination state can goes to other HMM."
for a in self.arcs:
if a.end.hID == endHmm:
raise Exception("Cannot add arc to skip to the same target HMM twice.")
else:
for a in self.arcs:
if a.end.sID == endState:
raise Exception("Cannot add arc to skip to the same target state twice.")
self.arcs.append( self.ArcSpec(self.arcIDCount, self.NodeSpec(self.hID,self.sID), self.NodeSpec(endHmm,endState), weight) )
self.arcIDCount += 1
def remove_arc(self,arcID):
for i,a in enumerate(self.arcs):
if a.aID == arcID:
self.arcs.pop(i)
def reset_arc_weight(self,arcID,weight):
for i,a in enumerate(self.arcs):
if a.aID == arcID:
self.arcs[i] = a._replace(weight=weight)
class ViterbiToken:
def __init__(self,hID,sID,weight):
self.history = [ self.NodeSpec(hID,sID), ]
self.p = weight
@property
def NodeSpec(self):
return namedtuple("Node",["hID","sID"])
def copy(self):
return copy.deepcopy(self)
def passing(self,arc):
self.history.append( arc.end )
self.p *= arc.weight
def get_path(self):
path = np.zeros([len(self.history),2])
for i,h in enumerate(self.history):
path[i][0],path[i][1] = h.hID,h.sID
return path, self.p
class HMM:
def __init__(self,nums,hmmID,initArcs=True):
assert isinstance(nums,int) and nums > 0
self.hID = hmmID
# Generate states
self.states = []
for n in range(nums):
self.states.append( State(self.hID,n) )
self.states[-1].is_termination = True
if initArcs:
# Add arcs
for i in range(nums):
self.states[i].add_arc(endHmm=self.hID,endState=i,weight=0.5)
if i != nums-1:
self.states[i].add_arc(endHmm=self.hID,endState=i+1,weight=0.5)
def __view(self):
contents = f"HMM ID: {self.hID}\n"
contents += f"States: {len(self.states)}\n"
contents += f"Termination State ID: {len(self.states)-1}\n"
contents += f"Arcs ( start HMM , start state -> end HMM, end state P: transform weight ):\n"
for s in self.states:
for a in s.arcs:
contents += f"{a.start.hID},{a.start.sID} -> {a.end.hID},{a.end.sID} P:{a.weight} \n"
return contents
def view(self):
print(self.__view())
def save(self,fileName):
assert isinstance(fileName,str)
if not fileName.strip().endswith(".hmm"):
fileName += ".hmm"
contents = self.__view()
with open(fileName,"w") as fw:
fw.write(contents + "End\n")
return fileName
def forward(self,obserProbs,initProb=1.0):
'''
Compute the forward probability of a observation.
Args:
obserProbs: a 2-d array, (T,states). In each frame, each was observed with this probability.
initProb: a float value within [0,1].
Return:
a float value.
'''
assert isinstance(obserProbs,np.ndarray) and len(obserProbs.shape)==2
assert obserProbs.shape[1] == len(self.states)
curProbs = {}
lastProbs = {}
T = obserProbs.shape[0]
for t in range(T):
if t == 0:
curProbs[0] = initProb*obserProbs[t,0]
else:
for sID,forwardWeight in lastProbs.items():
for arc in self.states[sID].arcs:
if arc.end.sID not in curProbs.keys():
curProbs[arc.end.sID] = forwardWeight*arc.weight
else:
curProbs[arc.end.sID] += forwardWeight*arc.weight
for sID in curProbs.keys():
curProbs[sID] *= obserProbs[t,sID]
lastProbs.clear()
lastProbs.update(curProbs)
curProbs.clear()
sumProb = 0
for finalStateID,finalWeight in lastProbs.items():
if self.states[finalStateID].is_termination:
sumProb += finalWeight
return sumProb
def cherry_pick(self, tokens, obserProb=1.0):
'''
1. Choose the best token arrived the same state.
2. Discard other tokens.
Args:
tokens: a list or tuple of viterbi tokens.
Return:
a ViterbiToken object.
'''
bestToken = tokens[0]
for i in range(1,len(tokens)):
if tokens[i].p > bestToken.p:
del bestToken
bestToken = tokens[i]
del tokens[i]
bestToken.p *= obserProb
return bestToken
def viterbi_decode(self,obserProbs,initProb=1.0):
'''
Search the best path with viter.
Args:
obserProbs: a 2-d array, (T,states). In each frame, each was observed with this probability.
initProb: a float value within [0,1].
Return:
a tuple with two values:
1. an 2-d array standing for the best alignment.
2. a float value, the probability.
'''
assert isinstance(obserProbs,np.ndarray) and len(obserProbs.shape)==2
assert obserProbs.shape[1] == len(self.states)
curProbs = {}
lastProbs = {}
T = obserProbs.shape[0]
for t in range(T):
if t == 0:
token = ViterbiToken( self.hID, 0, initProb*obserProbs[t,0] )
curProbs[0] = token
else:
for sID,token in lastProbs.items():
for arc in self.states[sID].arcs:
tempToken = token.copy()
tempToken.passing(arc)
if arc.end.sID not in curProbs.keys():
curProbs[arc.end.sID] = [ tempToken, ]
else:
curProbs[arc.end.sID].append(tempToken)
for sID in curProbs.keys():
curProbs[sID] = self.cherry_pick( curProbs[sID], obserProbs[t,sID] )
lastProbs.clear()
lastProbs.update(curProbs)
curProbs.clear()
finalTokens = []
for finalStateID,finalToken in lastProbs.items():
if self.states[finalStateID].is_termination:
finalTokens.append(finalToken)
bestToken = self.cherry_pick(finalTokens)
return bestToken.get_path()
def load_HMM(filePath):
'''
Load a HMM from a .hmm file.
Args:
filePath: a .hmm file path.
Return:
a HMM object.
'''
assert os.path.isfile(filePath), f"No such file: {filePath} ."
with open(filePath,"r",encoding="utf-8") as fr:
headerID = fr.readline().strip().split()
assert len(headerID) == 3 and headerID[0] == "HMM" and headerID[1] == "ID:", "Wrong info: HMM ID (in the first line)."
hmmID = int(headerID[2])
headerS = fr.readline().strip().split()
assert len(headerS) == 2 and headerS[0] == "States:", "Wrong info: states (in the second line)."
nums = int(headerS[1])
hmm = HMM(nums=nums,hmmID=hmmID,initArcs=False)
fr.readline()
fr.readline()
while True:
arc = fr.readline().strip()
if arc == "End":
break
elif len(arc) == 0:
raise Exception("Missed end flag in file.")
else:
arc = arc.split(",",maxsplit=1)
startHmm = int(arc[0])
arc = arc[1].split("->",maxsplit=1)
startState = int(arc[0])
arc = arc[1].split(",",maxsplit=1)
endHmm = int(arc[0])
arc = arc[1].split("P:",maxsplit=1)
endState = int(arc[0])
weight = float(arc[1])
hmm.states[startState].add_arc(endHmm,endState,weight)
return hmm
|
{"hexsha": "ac12b925d495cb712542dd106e01c5f88d83beb9", "size": 12224, "ext": "py", "lang": "Python", "max_stars_repo_path": "hmm/hmm.py", "max_stars_repo_name": "wangyu09/asr_memo", "max_stars_repo_head_hexsha": "e51b0232a4d3f79126d151edc61cdd02c8c68680", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-02T11:15:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-02T11:15:28.000Z", "max_issues_repo_path": "hmm/hmm.py", "max_issues_repo_name": "wangyu09/asr_memo", "max_issues_repo_head_hexsha": "e51b0232a4d3f79126d151edc61cdd02c8c68680", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hmm/hmm.py", "max_forks_repo_name": "wangyu09/asr_memo", "max_forks_repo_head_hexsha": "e51b0232a4d3f79126d151edc61cdd02c8c68680", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8451025057, "max_line_length": 144, "alphanum_fraction": 0.6055301047, "include": true, "reason": "import numpy", "num_tokens": 3410}
|
module Pseudospectra
#=
Eigenvalue and Pseudospectrum Analysis for Julia
The Pseudospectra.jl package is a translation of EigTool, but no endorsement
or promotion by the authors of EigTool is implied.
This package is released under a BSD license, as described in the LICENSE file.
Julia code and supplements
Copyright (c) 2017-2019, Ralph Smith
Portions derived from EigTool:
Copyright (c) 2002-2014, The Chancellor, Masters and Scholars
of the University of Oxford, and the EigTool Developers. All rights reserved.
EigTool is maintained on GitHub: https://github.com/eigtool
SPDX-License-Identifier: BSD-3-Clause
License-Filename: LICENSES/BSD-3-Clause_Eigtool
=#
using ProgressMeter
using LinearAlgebra, SparseArrays, Arpack, Printf
using Requires
export new_matrix, driver!, spectralportrait
export psa_compute, psa_radius, psa_abscissa
export numerical_range, numerical_abscissa
export modeplot, mtxexpsplot, mtxpowersplot, isheadless, iscomputed
export PSAStruct, ArpackOptions, Portrait, GUIState
# Plotting packages should probably extend these:
export zoomin!, zoomout!
# Not exported, but may be used by plotting packages:
# vec2ax, expandlevels, isvalidax
# oneeigcond, psmode_inv_lanczos, transient_bestlb, set_method!
# Associated plotting packages should provide these, specialized on their
# own GUIState types:
# redrawcontour, surfplot, arnoldiplotter!, ewsplotter, plotmode,
# replzdlg, addmark
const smallσ = 1e-150
"""
by default, sparse matrices of this size or smaller are converted to full
for pseudospectra computation.
"""
const nmax4autofull = 200
"""
by default, iterative methods are used for computing pseudospectra of dense
matrices of this size or larger.
"""
const nmin4autoiter = 1600
const myname = "PSA"
include("types.jl")
# Placeholders for plot-specific code implemented elsewhere
function redrawcontour end
function surfplot end
function arnoldiplotter! end
function _portrait end
"""
ewsplotter(gs::GUIState, ews::Vector, zoom)
plot eigenvalues
So we have something to look at while waiting for the compute engines.
"""
function ewsplotter end
function plotmode end
function replzdlg end
function addmark end
"""
mtxexpsplot(ps_data,dt=0.1,nmax=50; gs::GUIState = defaultgs(), gradual=false)
plot the evolution of `∥e^(tA)∥`.
This is useful for analyzing linear initial value problems `∂x/∂t = Ax`.
"""
function mtxexpsplot(ps_data::PSAStruct, dt=0.1, nmax=50;
gs::GUIState=defaultgs(), kws...)
mtxexpsplot(gs, ps_data, dt=dt, nmax=nmax; kws...)
end
"""
mtxpowersplot(ps_data, nmax=50; gs::GUIState = defaultgs(), gradual=false)
plot norms of powers of a matrix `∥A^k∥`
This is useful for analyzing iterative linear algebra methods.
"""
function mtxpowersplot(ps_data::PSAStruct, nmax=50;
gs::GUIState = defaultgs(), kws...)
mtxpowersplot(gs, ps_data, nmax=nmax; kws...)
end
function fillopts end
function isheadless end
include("utils.jl")
include("compute.jl")
include("xeigs.jl")
include("modes.jl")
include("abscissa.jl")
include("radius.jl")
include("numrange.jl")
include("transients.jl")
include("plotter.jl")
include("zooming.jl")
include("plots/PSAPlots.jl")
"""
new_matrix(A::AbstractMatrix, opts::Dict{Symbol,Any}=()) -> ps_data
process a matrix into the auxiliary data structure used by Pseudospectra.
# Options
- `:direct::Bool`: force use of a direct algorithm?
- `:keep_sparse::Bool`: use sparse matrix code even if `A` is not large?
- `:real_matrix::Bool`: treat `A` as unitarily equivalent to a real matrix?
- `:verbosity::Int`: obvious
- `:eigA`: eigenvalues of `A`, if already known
- `:proj_lev`: projection level (see `psa_compute`)
- `:npts`: edge length of grid for computing and plotting pseudospectra
- `:arpack_opts::ArpackOptions`: (see type description)
- `:levels::Vector{Real}`: contour levels
- `:ax::Vector{Real}(4)`: bounding box for computation `[xmin,xmax,ymin,ymax]`
- `:scale_equal::Bool`: force isotropic axes for spectral portraits?
"""
function new_matrix(A::AbstractMatrix,
opts::Dict{Symbol,Any}=Dict{Symbol,Any}())
m,n=size(A)
(m >= n) || throw(ArgumentError(
"Only square or tall rectangular matrices are supported."))
(issparse(A) && (m != n)) && throw(ArgumentError(
"Only square sparse matrices are supported."))
(any(isnan.(A)) || any(isinf.(A))) && throw(ArgumentError(
"Input matrix has infinite or invalid entries."))
# flag for the M x (M-1) Hessenberg form
# Presumably intended for the case where projection is done
# by a Krylov scheme outside this package.
AisHess = ((m == (n+1)) && all([x == 0 for x in tril(A,-2)]))
# User may specify that A is unitarily equivalent to a real matrix
# even if it is complex
Aisreal = get(opts,:real_matrix, !(eltype(A) <: Complex))
verbosity = get(opts,:verbosity,1)
convert2full = issparse(A) & (n <= nmax4autofull) &
!get(opts,:keep_sparse,false)
if haskey(opts,:direct)
direct = opts[:direct]
else
if issparse(A)
direct = convert2full
else
direct = (n <= nmin4autoiter)
if (verbosity > 0) && (n > nmin4autoiter)
# Might merit a warning here since it is most likely not
# expected so iteration options are probably inappropriate.
# For now, unspec. iteration options already => a warning.
println("defaulting to iterative for large dense mtx")
end
end
end
# TODO: check for correctness of
# proj_lev, levels, ax, arpack stuff, etc. from opts
# define as placeholder if not provided
eigA = get(opts,:eigA,Vector{complex(eltype(A))}())
input_unitary_mtx = get(opts,:unitary_mtx,I)
proj_lev = get(opts,:proj_lev,Inf)
npts = get(opts,:npts,setgridsize(n,24,80,!Aisreal))
Aissquare = (m == n)
local Tschur, U
haveschur = false
if Aissquare && !issparse(A) && direct
# if small, delay is negligible
(verbosity > 1) && (m > 100) &&
println("Attempting initial decomposition...")
# If square, dense, & direct, we prefer a Schur factorization.
# Checking for schurfact! method should work,
# but that's just asking for surprises. This should be robust.
try
if eltype(A) <: Complex
F = schur(A)
else
# For some reason Julia devs think a real Schur decomp
# should shadow the true (not real!) thing
F = schur(A .+ zero(eltype(A))*im)
end
Tschur,U,eigA = F.T,F.Z,F.values
haveschur = true
catch JE
isa(JE,MethodError) || rethrow(JE)
end
if !haveschur && isempty(eigA)
try
eigA = eigvals(A)
catch JE
isa(JE,MethodError) || rethrow(JE)
# ME: maybe trap algorithmic errors too; what are they?
# Warning is needed here since it explains why we need axes
# for the driver.
@warn("Failed to compute eigenvalues; proceeding without.")
if verbosity > 0
# If we display(JE) we get the whole damn matrix too
println("Exception was method error: ",JE.f,
" for ",typeof(A))
end
end
end
(verbosity > 1) && (m > 100) && println("...done.")
end
if haveschur
ps_dict = Dict{Symbol,Any}(:Aisreal => Aisreal,
:isHessenberg => AisHess,
:schur_mtx => Tschur,
:schur_unitary_mtx => U,
:direct => true,
:projection_on => true,
:proj_lev => proj_lev,
:ews => eigA)
ps_data = PSAStruct(UpperTriangular(Tschur), input_unitary_mtx * U,
A, input_unitary_mtx, ps_dict)
elseif issparse(A) || AisHess || !direct || Aissquare
# sparse, Hessenberg, iterative, or needing special handling
# CHECKME: previously seemed to force
# direct |= Aissquare
#
ps_dict = Dict{Symbol,Any}(:Aisreal => Aisreal,
:isHessenberg => AisHess,
:projection_on => false,
:proj_lev => Inf,
:ews => eigA)
ps_data = PSAStruct(A, input_unitary_mtx, A, input_unitary_mtx,
ps_dict)
if !direct
if !haskey(opts,:arpack_opts)
@info("setting default ARPACK options")
ps_dict[:arpack_opts] = ArpackOptions{eltype(A)}()
else
isa(opts[:arpack_opts],ArpackOptions) || throw(
ArgumentError("type of opts[:arpack_options] must "
* "be ArpackOptions"))
ps_dict[:arpack_opts] = opts[:arpack_opts]
end
elseif issparse(A) && convert2full
(verbosity > 0) &&
println("converting to full for direct computation")
Atmp = full(ps_data.matrix)
try
F = schur(Atmp+complex(eltype(Atmp))(0))
ps_dict[:schur_mtx] = F.T
ps_dict[:schur_unitary_mtx] = F.Z
ps_data.matrix = UpperTriangular(F.T)
ps_data.unitary_mtx = ps_data.input_unitary_mtx * F.T
ps_dict[:projection_on] = true
ps_dict[:ews] = F.values
ps_dict[:orig_ews] = copy(F.values)
catch
ps_data.matrix = Atmp
end
end
else # dense, non-square (but not Hessenberg), and direct
rfS,rfT = rect_fact(A)
ps_dict = Dict{Symbol,Any}(:Aisreal => Aisreal,
:isHessenberg => false,
:projection_on => false,
:proj_lev => Inf,
:matrix2 => rfT,
:ews => eigA)
ps_data = PSAStruct(rfS,input_unitary_mtx,A,input_unitary_mtx,
ps_dict)
end
ps_dict[:orig_ews] = eigA
ps_dict[:ew_estimates] = false
if !ps_dict[:isHessenberg] && !isa(ps_data.unitary_mtx,UniformScaling)
if size(ps_data.unitary_mtx,2) ∉ [m,1]
ps_data.unitary_mtx = I
end
end
lev = get(opts,:levels,zeros(0))
zoom = Portrait(zeros(0),zeros(0),zeros(0,0),
npts, get(opts,:ax,zeros(0)),
LevelDesc(lev),
isempty(lev), proj_lev,
size(ps_data.matrix), false,
get(opts,:scale_equal,false))
push!(ps_data.zoom_list,zoom)
ps_data.zoom_pos = 1
# save for use when returning to initial plot
ps_dict[:init_opts] = deepcopy(zoom)
ps_dict[:init_direct] = direct
ps_dict[:direct] = direct
ps_dict[:verbosity] = verbosity
# DEVNOTE: if direct, upstream constructs axes and calls origplot/redraw
return ps_data
end
"""
new_matrix(A, opts::Dict{Symbol,Any}=()) -> ps_data
process a linear operator object into the auxiliary data structure used by
Pseudospectra.
There must be methods with `A` for `eltype`, `size`, and `mul!`.
It is up to the user to make sure that `mul!` is consistent with any
options passed to the iterative solver (see documentation for [`xeigs`](@ref)).
"""
function new_matrix(A, opts::Dict{Symbol,Any}=Dict{Symbol,Any}())
# CHECKME: can A be anything other than a LinearMap here?
m,n=size(A)
(m == n) || throw(ArgumentError(
"Only square linear operators are supported."))
Aisreal = get(opts,:real_matrix, !(eltype(A) <: Complex))
verbosity = get(opts,:verbosity,1)
direct = false
eigA = get(opts,:eigA,Vector{complex(eltype(A))}())
input_unitary_mtx = get(opts,:unitary_mtx,I)
npts = get(opts,:npts,setgridsize(n,24,80,!Aisreal))
proj_lev = get(opts,:proj_lev,Inf)
ps_dict = Dict{Symbol,Any}(:Aisreal => Aisreal,
:isHessenberg => false,
:projection_on => false,
:proj_lev => Inf,
:ews => eigA)
ps_data = PSAStruct(A, input_unitary_mtx, A, input_unitary_mtx,
ps_dict)
if !haskey(opts,:arpack_opts)
@info("setting default ARPACK options")
ps_dict[:arpack_opts] = ArpackOptions{eltype(A)}()
else
isa(opts[:arpack_opts],ArpackOptions) || throw(
ArgumentError("type of opts[:arpack_options] must "
* "be ArpackOptions"))
ps_dict[:arpack_opts] = opts[:arpack_opts]
end
lev = get(opts,:levels,zeros(0))
zoom = Portrait(zeros(0),zeros(0),zeros(0,0),
npts, get(opts,:ax,zeros(0)),
LevelDesc(lev),
isempty(lev), proj_lev,
size(ps_data.matrix), false,
get(opts,:scale_equal,false))
push!(ps_data.zoom_list,zoom)
ps_dict[:orig_ews] = eigA
ps_dict[:ew_estimates] = false
ps_data.zoom_pos = 1
# save for use when returning to initial plot
ps_dict[:init_opts] = deepcopy(zoom)
ps_dict[:init_direct] = direct
ps_dict[:direct] = direct
ps_dict[:verbosity] = verbosity
return ps_data
end
# for verifying that tests cover intended cases
const logging_algo = Ref{Bool}(false)
"""
driver!(ps_data, opts, gs=defaultgs(); revise_method=false)
Compute pseudospectra and plot a spectral portrait.
If using an iterative method to get some eigenvalues and a projection, invokes
that first.
# Arguments
- `ps_data::PSAStruct`: ingested matrix, as processed by `new_matrix`
- `gs::GUIState`: object handling graphical output
- `opts::Dict{Symbol,Any}`:
- `:ax`, axis limits (overrides value stored in `ps_data`).
- other options passed to `redrawcontour`, `arnoldiplotter!`
When revising a spectral portrait (`revise_method==true`), the following
entries in `opts` also apply:
- `:arpack_opts::ArpackOptions`,
- `:direct::Bool`.
"""
function driver!(ps_data::PSAStruct,
optsin::Dict{Symbol,Any}=Dict{Symbol,Any}(),
gs::GUIState=defaultgs();
myprintln=println, logger=:default, revise_method=false)
# DEVNOTE: mostly corresponds to switch_redraw.m in EigTool
opts = fillopts(gs,optsin)
ps_dict = ps_data.ps_dict
verbosity = get(ps_dict,:verbosity,1)
# For changing from direct to iterative, or vice versa,
if revise_method & haskey(opts,:direct)
set_method!(ps_data, opts[:direct])
end
if revise_method & haskey(opts,:arpack_opts) & !ps_dict[:direct]
ao = opts[:arpack_opts]
if !isa(ao,ArpackOptions)
@mywarn(logger,"invalid :arpack_opts option")
return nothing
end
if haskey(ps_dict,:arpack_opts)
pvalid = (ao == ps_dict[:arpack_opts])
else
pvalid = false
end
ps_dict[:proj_valid] = pvalid
ps_dict[:arpack_opts] = ao
end
# if caller specifies ax, use it or bust.
if haskey(opts,:ax)
if isvalidax(opts[:ax])
new_ax = opts[:ax]
else
@mywarn(logger,"opts[:ax] is not a valid bounding box")
return nothing
end
else
new_ax = zeros(0)
end
if ps_dict[:direct] || get(ps_dict,:proj_valid,false)
# for iterative methods, we get here on reentrance with the projection
n,m = size(ps_data.matrix)
A = ps_data.matrix
B = get(ps_dict,:matrix2,I)
eigA = ps_dict[:ews]
zoom = ps_data.zoom_list[ps_data.zoom_pos]
if isempty(new_ax)
if !isempty(eigA)
# This sets the default domain for the typical case
isempty(zoom.ax) && (zoom.ax = vec2ax(eigA))
if !isheadless(gs)
# show eigenvalues while waiting
ewsplotter(gs, eigA, zoom)
end
else
if isempty(zoom.ax)
@mywarn(logger,"bounding box must be specified")
return nothing
end
end
else
zoom.ax = new_ax
end
if haskey(opts, :npts)
new_npts = opts[:npts]
if isa(new_npts, Integer) && (new_npts > 2) && (new_npts < 2049)
zoom.npts = new_npts
else
@mywarn(logger,"opts[:npts] is not a valid number of points")
return nothing
end
end
psa_opts = Dict{Symbol,Any}(:levels=>expandlevels(zoom.levels),
:recompute_levels=>zoom.autolev,
:proj_lev=>zoom.proj_lev,
:scale_equal=>zoom.scale_equal,
:real_matrix=>ps_dict[:Aisreal],
:verbosity=>verbosity)
ss = size(A)
Z,x,y,t_levels,err,Tproj,eigAproj,algo = psa_compute(A,zoom.npts,
zoom.ax,
eigA,psa_opts,
B,
myprintln=myprintln,
logger=logger)
# FIXME: handle projection properly
ps_dict[:proj_ews] = eigAproj
if err != 0
@mywarn(logger,"PSA computation failed")
# FIXME: reset GUI if any
return nothing
end
(logging_algo[] | (verbosity > 1)) && println("algorithm: ",algo)
zoom = ps_data.zoom_list[ps_data.zoom_pos]
if zoom.autolev
(verbosity > 1) && println("levels: $t_levels")
zoom.levels = LevelDesc(t_levels)
end
zoom.x = x
zoom.y = y
zoom.Z = Z
zoom.computed = true
zoom.dims = size(ps_data.matrix)
redrawcontour(gs, ps_data, opts)
else
# Iterative method (uses ARPACK):
# This performs implicitly restart Arnoldi steps to
# project on a Krylov subspace, yielding a Hessenberg matrix
# with approximately the same spectral properties (locally) as A.
ps_data.matrix = ps_data.input_matrix
m,n = size(ps_data.matrix)
ao = ps_dict[:arpack_opts]
function xeigsproducer(chnl)
local ews,H,V
local nconv,niter,nmult
try
ews,v,nconv,niter,nmult,resid,H,V = xeigs(ps_data.matrix,I,chnl,
nev=ao.nev,ncv=ao.ncv,
which=ao.which,tol=ao.tol,
maxiter=ao.maxiter,
v0=ao.v0,
sigma=ao.sigma,
options=opts)
catch JE
# FIXME: post a dialog, reset GUI if any
@warn("xeigs throws:")
display(JE)
println()
stuff = (:failure,nothing)
put!(chnl,stuff)
return nothing
end
end
local ews,H,V
local nconv,niter,nmult
old_ax = zeros(0)
(verbosity > 1) && println("calling xeigs w/ $ao")
chnl = Channel(xeigsproducer)
xeigs_result = take!(chnl)
ap_state = nothing
while xeigs_result[1] ∉ [:finale,:failure]
the_key,dispvec,the_str,the_ews,the_shifts = xeigs_result
if !isheadless(gs)
ap_state = arnoldiplotter!(gs,old_ax,opts,dispvec,
the_str,the_ews, the_shifts,
ap_state)
end # if gs
xeigs_result = take!(chnl)
end
if xeigs_result[1] == :failure
@mywarn(logger,"xeigs failed")
return nothing
end
the_key,ews,v,nconv,niter,nmult,resid,H,V = xeigs_result
if verbosity > 0
println("xeigs: $nconv of $(ao.nev) converged in $niter iters "
* "($nmult MxV)")
end
if verbosity > 1
println("xeigs ews:")
display(ews); println()
end
ews = filter(x->!isnan(x), ews)
# We basically replace A with H, saving some projection information,
# and proceed with the dense matrix algorithms.
ps_dict[:ew_estimates] = true
ps_dict[:proj_matrix] = H
ps_data.matrix = H
ps_dict[:isHessenberg] = true
ps_dict[:proj_unitary_mtx] = ps_data.input_unitary_mtx * V
ps_data.unitary_mtx = ps_dict[:proj_unitary_mtx]
ps_dict[:proj_valid] = true
ps_dict[:ews] = ews
# reset zoom list
ps_data.zoom_pos = 1
resize!(ps_data.zoom_list,1)
# CHECKME: do we need remove() here?
ps_dict[:mode_markers] = []
zoom = ps_data.zoom_list[1]
if isempty(new_ax)
origax = ps_dict[:init_opts].ax # init_opts is a Portrait!
if !isempty(origax) && isvalidax(origax) # && !ps_dict[:init_direct]
copy!(zoom.ax,origax)
else
# CHECKME: maybe use init_ews if available?
zoom.ax = vec2ax(ews)
# elseif gs.mainph != nothing
# println("using eigvals for axis limits")
# copy!(zoom.ax,getxylims(gs.mainph))
end
else
zoom.ax = new_ax
end
zoom.autolev = ps_dict[:init_opts].autolev
zoom.levels = deepcopy(ps_dict[:init_opts].levels)
delete!(ps_dict,:proj_axes)
delete!(ps_dict,:comp_proj_lev)
origplot!(ps_data,opts,gs) # WARNING: reenters driver!()
# caller must reset GUI if appropriate
end
nothing
end
function iscomputed(ps_data::PSAStruct, idx=ps_data.zoom_pos)
ps_data.zoom_list[idx].computed
end
iscomputed(zoom::Portrait) = zoom.computed
"""
Make sure zoom list is ok, then redraw (unless `ax_only`).
Note: truncates zoom list, so use for a new problem or for a reset.
"""
function origplot!(ps_data::PSAStruct, opts, gs; ax_only = false)
ps_data.zoom_pos = 1
ps_dict = ps_data.ps_dict
resize!(ps_data.zoom_list,1)
zoom = ps_data.zoom_list[1]
if isempty(zoom.ax)
if isempty(get(ps_dict,:ews,[]))
@warn("origplot called w/o preset axes or eigenvalues")
else
zoom.ax = vec2ax(ps_dict[:ews])
end
end
ax_only || driver!(ps_data,opts,gs)
nothing
end
"""
possibly change from direct to iterative method or vice versa
"""
function set_method!(ps_data::PSAStruct, todirect::Bool)
# this is the part of switch_method which pertains to ps_data
ps_dict = ps_data.ps_dict
(todirect == ps_dict[:direct]) && return
m,n = size(ps_data.input_matrix)
if todirect
if haskey(ps_dict,:schur_matrix)
ps_data.matrix = ps_dict[:schur_matrix]
ps_data.ews = ps_dict[:orig_ews]
ps_dict[:ew_estimates] = false
elseif m==n && issparse(ps_data.input_matrix)
ps_data.matrix = ps_data.input_matrix
end
if haskey(ps_dict,:schur_unitary_mtx)
ps_data.unitary_mtx = ps_data.input_unitary_mtx *
ps_data.schur_unitary_mtx
else
ps_data.unitary_mtx = ps_data.input_unitary_mtx
end
ps_dict[:proj_valid] = false
# if reverting to a square matrix, no longer have ARPACK projection
ss = size(ps_data.matrix)
(ss[1]==ss[2]) && (ps_dict[:isHessenberg] = false)
else # switch to iterative
(m == n) || throw(ArgumentError("Iterative method not implemented "
* "for rectangular matrices"))
# apparently that's all we need for now
end
ps_dict[:direct] = todirect
end
"""
spectralportrait(A::AbstractMatrix; npts=100) => Plots object
compute pseudospectra of matrix `A` and display as a spectral portrait.
Pseudospectra are computed on a grid of `npts` by `npts` points in
the complex plane, including a neighborhood of the spectrum.
Contour levels are `log10(ϵ)` where `ϵ` is the inverse resolvent norm.
This is a convenience wrapper for simple cases; see the Pseudospectra
package documentation for more elaborate interfaces.
"""
function spectralportrait(A0 :: AbstractMatrix; npts=100)
if _currentplotter[] == :undef
setpsplotter()
end
local ps_data
try
ps_data = new_matrix(A0)
catch JE
@warn "The spectralportrait function only works for simple cases."
rethrow(JE)
end
n,m = size(ps_data.matrix)
A = ps_data.matrix
ps_dict = ps_data.ps_dict
B = get(ps_dict,:matrix2,I)
eigA = ps_dict[:ews]
zoom = ps_data.zoom_list[ps_data.zoom_pos]
isempty(zoom.ax) && (zoom.ax = vec2ax(eigA))
psa_opts = _basic_psa_opts(zoom,ps_dict)
ss = size(A)
Z,xs,ys,t_levels,err,Tproj,eigAproj,algo = psa_compute(A,npts,
zoom.ax,
eigA,psa_opts,
B)
return _portrait(xs,ys,Z,eigA)
end
_basic_psa_opts(zoom,ps_dict) = Dict{Symbol,Any}(
:levels=>expandlevels(zoom.levels),
:recompute_levels=>zoom.autolev,
:proj_lev=>zoom.proj_lev,
:scale_equal=>zoom.scale_equal,
:real_matrix=>ps_dict[:Aisreal],
:verbosity=>0)
################################################################
# FIXME: until we think of a better way to handle this:
include("../examples/demo_mtx.jl")
function __init__()
@require PyPlot="d330b81b-6aea-500a-939a-2ce795aea3ee" link_pyplot()
@require Plots="91a5bcdd-55d7-5caf-9e0b-520d859cae80" link_plots()
# GLMakie="e9467ef8-e4e7-5192-8a1a-b1aee30e663a"
@require AbstractPlotting = "537997a7-5e4e-5d89-9595-2241ea00577e" link_makie()
end
end # module
|
{"hexsha": "6dedf189caaceec0f3467b2840ba3c8cead94a27", "size": 26684, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Pseudospectra.jl", "max_stars_repo_name": "csimal/Pseudospectra.jl", "max_stars_repo_head_hexsha": "1cbbdd17fb84c1bdf604d5d122778df2664ca8ca", "max_stars_repo_licenses": ["BSD-3-Clause", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Pseudospectra.jl", "max_issues_repo_name": "csimal/Pseudospectra.jl", "max_issues_repo_head_hexsha": "1cbbdd17fb84c1bdf604d5d122778df2664ca8ca", "max_issues_repo_licenses": ["BSD-3-Clause", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Pseudospectra.jl", "max_forks_repo_name": "csimal/Pseudospectra.jl", "max_forks_repo_head_hexsha": "1cbbdd17fb84c1bdf604d5d122778df2664ca8ca", "max_forks_repo_licenses": ["BSD-3-Clause", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8174496644, "max_line_length": 83, "alphanum_fraction": 0.5780242842, "num_tokens": 6688}
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns; sns.set()
import pandas as pd
from sklearn.cluster import KMeans
store_data = pd.read_csv('D:\\Datasets\\NIPS_1987-2015.csv')
x = store_data.iloc[:, [1,5811]].values
print(x)
kmeans = KMeans(n_clusters=9)
y_kmeans = kmeans.fit_predict(x)
print(y_kmeans)
print(kmeans.cluster_centers_)
plt.scatter(x[:,0],x[:,1],c=kmeans.labels_,cmap='rainbow')
plt.scatter(kmeans.cluster_centers_[:,0] ,kmeans.cluster_centers_[:,1],color='black')
[[0 0]
[0 0]
[0 0]
...
[0 0]
[0 0]
[0 0]]
[0 0 0 ... 0 0 0]
[[-6.02295991e-15 -3.57769370e-14]
[ 1.03125000e+00 1.36250000e+01]
[ 3.86138614e-01 5.38613861e+00]
[ 7.50000000e-01 2.91250000e+01]
[ 1.96666667e+01 0.00000000e+00]
[ 7.15789474e-02 1.47578947e+00]
[ 4.10204082e+00 9.18367347e-01]
[ 9.91666667e+00 3.83333333e+00]
[ 1.16996047e+00 1.77865613e-01]]
<matplotlib.collections.PathCollection at 0x2561e93c848>
*Graph at the main file project
|
{"hexsha": "8ca6ddd0305faf8f19a139800bff269475d67e80", "size": 1086, "ext": "py", "lang": "Python", "max_stars_repo_path": "2.1.2_clustering_between_to_first_article_and_the_last_article_(all_rows)[1].py", "max_stars_repo_name": "dianagut1987/BigData-Unsupervised-for-articles", "max_stars_repo_head_hexsha": "f3413f02b30a57770d62c1a7f692212219c440cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2.1.2_clustering_between_to_first_article_and_the_last_article_(all_rows)[1].py", "max_issues_repo_name": "dianagut1987/BigData-Unsupervised-for-articles", "max_issues_repo_head_hexsha": "f3413f02b30a57770d62c1a7f692212219c440cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2.1.2_clustering_between_to_first_article_and_the_last_article_(all_rows)[1].py", "max_forks_repo_name": "dianagut1987/BigData-Unsupervised-for-articles", "max_forks_repo_head_hexsha": "f3413f02b30a57770d62c1a7f692212219c440cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1632653061, "max_line_length": 85, "alphanum_fraction": 0.7007366483, "include": true, "reason": "import numpy", "num_tokens": 434}
|
SUBROUTINE MA_CGDT ( iret )
C************************************************************************
C* MA_CGDT *
C* *
C* This subroutine sets the report date/time using system and bulletin *
C* header as input. Parameters not in the calling sequence are found *
C* in common. *
C* *
C* MA_CGDT ( IRET ) *
C* *
C* Input parameters: *
C* RCTIM(*) REAL System year, month, and day *
C* *
C* Output parameters: *
C* IRPTDT(*) INTEGER Report date and time *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* *
C** *
C* Log: *
C* C. Caruso Magee/NCEP 4/01 Modifying for Coast Guard data. *
C* F. J. Yen/NCEP 4/01 Reformatted and renamed from CG_DATM. *
C* Removed include 'GEMPRM.PRM' statement. *
C* Added parameters in common to prologue.*
C************************************************************************
INCLUDE 'macmn.cmn'
C-----------------------------------------------------------------------
iret = 0
C
C* Use system year/month and bulletin day/hour/minute.
C
irptdt(1) = nint ( rctim(2) )
irptdt(2) = nint ( rctim(3) )
C
C* Store bulletin day as report day.
C
CALL ST_INTG ( btime(1:2), ist1, ier )
irptdt(3) = ist1
IF ( irptdt(3) .gt. 31 ) THEN
WRITE ( UNIT = logmsg, FMT = '( I4 )' ) irptdt(3)
CALL DC_WLOG ( 2, 'MA', 7, logmsg, ierwlg )
iret = -1
RETURN
END IF
C
C* Check to see if system day is first day of month. If so,
C* then if bulletin day is from previous month (i.e. bulletin
C* day is greater than 1) then set month back to previous month.
C
IF ( rctim(4) .eq. 1. .and. irptdt(3) .gt. 1 ) THEN
IF ( irptdt(2) .gt. 1) THEN
irptdt(2) = irptdt(2) - 1
ELSE
irptdt(1) = irptdt(1) - 1
irptdt(2) = 12
END IF
END IF
C
C* Store bulletin hour as report hour.
C
CALL ST_INTG ( btime(3:4), ist2, ier )
irptdt(4) = ist2
IF ( irptdt(4) .lt. 0 .or. irptdt(4) .gt. 23 ) THEN
WRITE ( UNIT = logmsg, FMT = '( I4 )' ) irptdt(4)
CALL DC_WLOG ( 2, 'MA', 8, logmsg, ierwlg )
iret = -1
RETURN
END IF
C
C* Store bulletin minute as report minute.
C
CALL ST_INTG ( btime(5:6), ist3, ier )
irptdt(5) = ist3
IF ( irptdt(5) .lt. 0 .or. irptdt(4) .gt. 59 ) THEN
WRITE ( UNIT = logmsg, FMT = '( I4 )' ) irptdt(5)
CALL DC_WLOG ( 2, 'MA', 9, logmsg, ierwlg )
iret = -1
RETURN
END IF
C*
RETURN
END
|
{"hexsha": "1090fd69c0d9e97f0d487a6b0b0a3c0f84b79b8a", "size": 3051, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/bridge/ma/macgdt.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/bridge/ma/macgdt.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/bridge/ma/macgdt.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 36.7590361446, "max_line_length": 73, "alphanum_fraction": 0.4270730908, "num_tokens": 910}
|
import argparse
import torch
import numpy as np
import time as tm
from torch.autograd import Variable
# Compute error
def compute_error(A, sA):
normA = torch.sqrt(torch.sum(torch.sum(A * A, dim=1),dim=1))
error = A - torch.bmm(sA, sA)
error = torch.sqrt((error * error).sum(dim=1).sum(dim=1)) / normA
return torch.mean(error)
# Forward + Backward via SVD decomposition
def sqrt_svd_lyap(A, dldz, dtype):
batchSize = A.data.shape[0]
dim = A.data.shape[1]
dlda = torch.zeros(batchSize, dim, dim).type(dtype)
sA = torch.zeros(batchSize, dim, dim).type(dtype)
for i in range(batchSize):
U, S, V = (A[i,:,:].data).svd()
sA[i,:,:] = (U.mm(S.diag().sqrt())).mm(V.t())
S = S.diag().sqrt().mm(torch.ones(dim, dim).type(dtype))
IU = U.t()
X = -U.mm(
((IU.mm(dldz[i,:,:].data)).mm(IU.t()))
/(S+S.t())
).mm(U.t())
dlda[i,:,:] = X
return sA, dlda, compute_error(A, Variable(sA, requires_grad=False))
# Forward via Denman-Beavers iterations
def sqrt_denman_beavers(A, numIters, dtype):
batchSize = A.data.shape[0]
dim = A.data.shape[1]
sA = torch.zeros(batchSize, dim, dim).type(dtype)
for n in range(batchSize):
Y = (A[n,:,:]).data
Z = torch.eye(dim, dim).type(dtype)
for i in range(numIters):
Y_ = 0.5*(Y + Z.inverse())
Z = 0.5*(Z + Y.inverse())
Y = Y_
sA[n,:,:] = Y
sA = Variable(sA, requires_grad=False)
error = compute_error(A,sA)
return sA, error
# Forward via Newton-Schulz iterations
# Backward via autograd
def sqrt_newton_schulz_autograd(A, numIters, dtype):
batchSize = A.data.shape[0]
dim = A.data.shape[1]
normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
Y = A.div(normA.view(batchSize, 1, 1).expand_as(A))
I = Variable(torch.eye(dim,dim).view(1, dim, dim).
repeat(batchSize,1,1).type(dtype),requires_grad=False)
Z = Variable(torch.eye(dim,dim).view(1, dim, dim).
repeat(batchSize,1,1).type(dtype),requires_grad=False)
for i in range(numIters):
T = 0.5*(3.0*I - Z.bmm(Y))
Y = Y.bmm(T)
Z = T.bmm(Z)
sA = Y * torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)
error = compute_error(A, sA)
return sA, error
# Forward via Newton-Schulz iterations (non autograd version)
# Seems to be slighlty faster and has much lower memory overhead
def sqrt_newton_schulz(A, numIters, dtype):
batchSize = A.shape[0]
dim = A.shape[1]
normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
Y = A.div(normA.view(batchSize, 1, 1).expand_as(A));
I = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
Z = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
for i in range(numIters):
T = 0.5*(3.0*I - Z.bmm(Y))
Y = Y.bmm(T)
Z = T.bmm(Z)
sA = Y*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)
error = compute_error(A, sA)
return sA, error
# Backward via iterative Lyapunov solver
def lyap_newton_schulz(z, dldz, numIters, dtype):
batchSize = z.shape[0]
dim = z.shape[1]
normz = z.mul(z).sum(dim=1).sum(dim=1).sqrt()
a = z.div(normz.view(batchSize, 1, 1).expand_as(z))
I = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
q = dldz.div(normz.view(batchSize, 1, 1).expand_as(z))
for i in range(numIters):
q = 0.5*(q.bmm(3.0*I - a.bmm(a)) - a.transpose(1, 2).bmm(a.transpose(1,2).bmm(q) - q.bmm(a)) )
a = 0.5*a.bmm(3.0*I - a.bmm(a))
dlda = 0.5*q
return dlda
# Create random PSD matrix
def create_symm_matrix(batchSize, dim, numPts, tau, dtype):
A = torch.zeros(batchSize, dim, dim).type(dtype)
for i in range(batchSize):
pts = np.random.randn(numPts, dim).astype(np.float32)
sA = np.dot(pts.T, pts)/numPts + tau*np.eye(dim).astype(np.float32)
A[i,:,:] = torch.from_numpy(sA)
print(f'Creating batch {batchSize}, dim {dim}, pts {numPts}, tau {tau}, dtype {dtype}')
return A
|
{"hexsha": "1c10ca63b4f71d160e89276b9e430af5e3b81430", "size": 4120, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/matrix_sqrt.py", "max_stars_repo_name": "milesgray/CALAE", "max_stars_repo_head_hexsha": "a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/matrix_sqrt.py", "max_issues_repo_name": "milesgray/CALAE", "max_issues_repo_head_hexsha": "a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/matrix_sqrt.py", "max_forks_repo_name": "milesgray/CALAE", "max_forks_repo_head_hexsha": "a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7981651376, "max_line_length": 102, "alphanum_fraction": 0.5951456311, "include": true, "reason": "import numpy", "num_tokens": 1327}
|
# Import general libraries
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import pprint
import os
# Import dash
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
# Import newsapi
from newsapi import NewsApiClient
try:
from keys import newsapikey # retrieve from local system
newsapi = NewsApiClient(api_key=newsapikey)
except:
newsapikey = os.environ["newapi_key"] # retrieve from Heroku
newsapi = NewsApiClient(api_key=newsapikey)
n_days_ago = 30
date_n_days_ago = datetime.now() - timedelta(days=n_days_ago)
date_now = datetime.now()
def news_update():
all_articles = newsapi.get_everything(
q="dengue singapore",
from_param=date_n_days_ago,
to=date_now,
language="en",
sort_by="publishedAt",
page_size=100,
)
all_articles_title = [
str(all_articles["articles"][i]["title"])
for i in range(len(all_articles["articles"]))
]
all_articles_url = [
all_articles["articles"][i]["url"] for i in range(len(all_articles["articles"]))
]
all_articles_description = [
str(all_articles["articles"][i]["description"])
for i in range(len(all_articles["articles"]))
]
all_articles_date = [
str(datetime.date(pd.to_datetime(all_articles["articles"][i]["publishedAt"])))
for i in range(len(all_articles["articles"]))
]
all_articles_img = [
str(all_articles["articles"][i]["urlToImage"])
for i in range(len(all_articles["articles"]))
]
news_all = []
temp_news = []
col = ["warning", "success", "info", "success"]
col_idx = 0
for i in range(len(all_articles["articles"])):
temp_news.append(
dbc.Col(
dbc.Card(
[
dbc.CardImg(
src=all_articles_img[i],
top=True,
style={
"max-width": "80%",
"max-height": 200,
"margin": "auto",
"display": "block",
"padding-top": "10px",
},
),
dbc.CardBody(
[
html.H4(all_articles_title[i], className="card-title"),
html.P(
all_articles_description[i],
className="card-text",
# style={"fontSize": 16}
),
]
),
dbc.CardFooter(
[
all_articles_date[i],
dbc.Button(
"Source",
color="primary",
href=all_articles_url[i],
style={"float": "right"},
),
]
),
],
color=col[col_idx],
style={"height": 525},
)
)
)
col_idx += 1
if col_idx > 3:
col_idx = 0
if (i + 1) % 3 == 0:
news_all.append(
dbc.Row(temp_news, className="mb-4", style={"padding": "1em"})
)
temp_news = []
return news_all
newsTab = html.Div(news_update())
|
{"hexsha": "ba4ea0b4f774332ab70abafae0eb4a0c3e719339", "size": 3815, "ext": "py", "lang": "Python", "max_stars_repo_path": "news.py", "max_stars_repo_name": "bensjx/covid-dashboard", "max_stars_repo_head_hexsha": "c4204d984719137c3cbdd224b50ced385b4f5f49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "news.py", "max_issues_repo_name": "bensjx/covid-dashboard", "max_issues_repo_head_hexsha": "c4204d984719137c3cbdd224b50ced385b4f5f49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:15:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:45:21.000Z", "max_forks_repo_path": "news.py", "max_forks_repo_name": "bensjx/covid-dashboard", "max_forks_repo_head_hexsha": "c4204d984719137c3cbdd224b50ced385b4f5f49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-02T23:55:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-02T23:55:17.000Z", "avg_line_length": 30.7661290323, "max_line_length": 88, "alphanum_fraction": 0.4560943644, "include": true, "reason": "import numpy", "num_tokens": 706}
|
import matplotlib.pyplot as plt
import numpy as np
import wave
import scipy.io.wavfile as wav
from audiolazy.lazy_lpc import levinson_durbin
from pip._vendor.distlib.compat import raw_input
from scipy import signal
import scipy as sk
from audiolazy import *
import audiolazy.lazy_lpc
from audiolazy import lpc
from sklearn import preprocessing
import scipy.signal as sig
import scipy.linalg as linalg
import joblib# for saving the GMMs model
from sklearn.mixture import GaussianMixture #scikit-learn
def readWavFile(wav):
# given a path from the keyboard to read a .wav file
# wav = raw_input('Give me the path of the .wav file you want to read: ')
inputWav = 'data' + wav
return inputWav
# reading the .wav file (signal file) and extract the information we need
def initialize(inputWav):
rate, signal = wav.read(readWavFile(inputWav)) # returns a wave_read object , rate: sampling frequency
sig = wave.open(readWavFile(inputWav))
# signal is the numpy 2D array with the date of the .wav file
# len(signal) number of samples
sampwidth = sig.getsampwidth()
print('The sample rate of the audio is: ', rate)
print('Sampwidth: ', sampwidth)
return signal, rate
# implementation of the low-pass filter
def lowPassFilter(signal, coeff=0.97):
return np.append(signal[0],
signal[1:] - coeff * signal[:-1]) # y[n] = x[n] - a*x[n-1] , a = 0.97 , a>0 for low-pass filters
def preEmphasis(wav):
# taking the signal
signal, rate = initialize(wav)
# Pre-emphasis Stage
preEmphasis = 0.97
emphasizedSignal = lowPassFilter(signal)
Time = np.linspace(0, len(signal) / rate, num=len(signal))
EmphasizedTime = np.linspace(0, len(emphasizedSignal) / rate, num=len(emphasizedSignal))
# plots using matplotlib
'''plt.figure(figsize=(9, 7))
plt.subplot(211, facecolor='darkslategray')
plt.title('Signal wave')
plt.ylim(-50000, 50000)
plt.ylabel('Amplitude', fontsize=16)
plt.plot(Time,signal,'C1')
plt.subplot(212, facecolor='darkslategray')
plt.title('Pre-emphasis')
plt.ylim(-50000, 50000)
plt.xlabel('time(s)', fontsize=10)
plt.ylabel('Amplitude', fontsize=16)
plt.plot(EmphasizedTime,emphasizedSignal,'C1')
plt.show()'''
return emphasizedSignal, signal, rate
def visualize(rate, signal):
# taking the signal's time
Time = np.linspace(0, len(signal) / rate, num=len(signal))
# plots using matplotlib
plt.figure(figsize=(10, 6))
plt.subplot(facecolor='darkslategray')
plt.title('Signal wave')
plt.ylim(-40000, 40000)
plt.ylabel('Amplitude', fontsize=16)
plt.xlabel('Time(s)', fontsize=8)
plt.plot(Time, signal, 'C1')
plt.draw()
# plt.show()
def framing(fs, signal):
# split the signal into frames
windowSize = 0.025 # 25ms
windowStep = 0.01 # 10ms
overlap = int(fs * windowStep)
frameSize = int(fs * windowSize) # int() because the numpy array can take integer as an argument in the initiation
numberOfframes = int(np.ceil(float(np.abs(len(signal) - frameSize)) / overlap))
print('Overlap is: ', overlap)
print('Frame size is: ', frameSize)
print('Number of frames: ', numberOfframes)
frames = np.ndarray(
(numberOfframes, frameSize)) # initiate a 2D array with numberOfframes rows and frame size columns
# assing samples into the frames (framing)
for k in range(0, numberOfframes):
for i in range(0, frameSize):
if ((k * overlap + i) < len(signal)):
frames[k][i] = signal[k * overlap + i]
else:
frames[k][i] = 0
return frames, frameSize
def hamming(frames, frameSize):
# Windowing with Hamming
# Hamming implementation : W[n] = 0.54 - 0.46 * numpy.cos((2 * numpy.pi * n) / (frameSize - 1))
# y[n] = s[n] (signal in a specific sample) * w[n] (the window function Hamming)
frames *= np.hamming(frameSize)
'''plt.figure(figsize=(10, 6))
plt.subplot(facecolor='darkslategray')
plt.title('Hamming window')
plt.ylim(-40000, 40000)
plt.ylabel('Amplitude', fontsize=16)
plt.xlabel('Time(ms)', fontsize=8)
plt.plot(frames,'C1')
plt.show()'''
return frames
def autocorrelation(hammingFrames):
correlateFrames = []
for k in range(len(hammingFrames)):
correlateFrames.append(np.correlate(hammingFrames[k], hammingFrames[k], mode='full'))
# print 'Each frame after windowing and autocorrelation: \n',correlateFrames
yolo = correlateFrames[len(correlateFrames) / 2:]
return yolo
def levinsonDurbin(correlateFrames):
# normalizedCF = preprocessing.normalize(correlateFrames, norm='l2')
filt1 = levinson_durbin(correlateFrames, 13)
print(filt1.numerator[1:])
def lpc_train():
#folder = raw_input('Give the name of the folder that you want to read data: ')
#amount = raw_input('Give the number of samples in the specific folder: ')
for x in range(1, 10 + 1):
wav = '/data_raw/'+'notepad_'+str(x)+'.wav'
print(wav)
emphasizedSignal, signal, rate = preEmphasis(wav)
filt = lpc(emphasizedSignal, order=16)
lpc_features = filt.numerator[1:]
print('panjang data = ',len(lpc_features))
print('LPC Feature ke -',x,' = ',lpc_features)
np.save('data//data_raw//feature_' + str(x) + '.npy', lpc_features)
print('LPC Feature di save pada feature_' + str(x) + '.npy')
return lpc_features
def lpc_uji():
#folder = raw_input('Give the name of the folder that you want to read data: ')
#amount = raw_input('Give the number of samples in the specific folder: ')
for x in range(1, 5 + 1):
wav = '/data_uji/notepad_'+str(x)+'.wav'
print(wav)
emphasizedSignal, signal, rate = preEmphasis(wav)
filt = lpc(emphasizedSignal, order=16)
lpc_features = filt.numerator[1:]
print('panjang data = ',len(lpc_features))
print('LPC Feature ke -',x,' = ',lpc_features)
np.save('data//data_uji//feature_' + str(x) + '.npy', lpc_features)
print('LPC Feature di save pada feature_'+str(x)+'.npy')
return lpc_features
# Defining a function which takes the MFCCs as a parameter(input) and returns the GMM(output)
def model_construct(data, n_components=1):
gmm = GaussianMixture(n_components=n_components,
covariance_type='diag',
tol=0.001,
reg_covar=1e-06,
max_iter=100,
n_init=1,
init_params='kmeans',
warm_start=False,
verbose=0,
verbose_interval=10)
gmm.fit(X=data)
return gmm
def create_model():
#folder = raw_input('Give the name of the folder that you want to read data: ')
#amount = raw_input('Give the number of samples in the specific folder: ')
for x in range(1, 10 + 1):
feature = 'data/data_raw/feature_'+str(x)+'.npy'
fture = np.load(feature) #load feature
# Initializing gmm_<word> to the output of the gmm_construct function
k = 8
reshape_feature = np.reshape(fture, (-1, 2)) #reshape 1D array to 2D array
model_data = model_construct(reshape_feature, n_components=k) #create model
# Saving the model to disk
joblib.dump(model_data, 'data//data_raw//model'+str(x)+'.pkl')
print("model - ",str(x)," have been constructed and saved to disk")
def score_gmm(data, gmm):
log_likelihood = gmm.score(X=data)
return log_likelihood
def match():
notepad_model1 = joblib.load('data//data_raw//model1.pkl')
notepad_model2 = joblib.load('data//data_raw//model2.pkl')
notepad_model3 = joblib.load('data//data_raw//model3.pkl')
notepad_model4 = joblib.load('data//data_raw//model4.pkl')
notepad_model5 = joblib.load('data//data_raw//model5.pkl')
notepad_model6 = joblib.load('data//data_raw//model6.pkl')
notepad_model7 = joblib.load('data//data_raw//model7.pkl')
notepad_model8 = joblib.load('data//data_raw//model8.pkl')
notepad_model9 = joblib.load('data//data_raw//model9.pkl')
notepad_model10 = joblib.load('data//data_raw//model10.pkl')
for x in range(1, 5 + 1):
feature = 'data/data_uji/feature_'+str(x)+'.npy'
mfccs = np.load(feature)
reshape_feature = np.reshape(mfccs, (-1, 2))
# score the MFCCs under each GMM
scores = [notepad_model1.score(reshape_feature),notepad_model2.score(reshape_feature),notepad_model3.score(reshape_feature),notepad_model4.score(reshape_feature),notepad_model5.score(reshape_feature),notepad_model6.score(reshape_feature),notepad_model7.score(reshape_feature),notepad_model8.score(reshape_feature),notepad_model9.score(reshape_feature),notepad_model10.score(reshape_feature)]
# if score 0 - 9 maka ke deteksi
if (scores.index(max(scores)) > -1 and scores.index(max(scores)) < 10) :
print('notepad ke - ' + str(x)+' memiliki score '+str(scores.index(max(scores)))+' terdeteksi notepad')
else:
print('notepad ke - ' + str(x)+' memiliki score '+str(scores.index(max(scores)))+' tidak terdeteksi')
def play():
lpc_train()
create_model()
lpc_uji()
match()
# mylpc()
play()
|
{"hexsha": "7785425bf259fb3f975125b22c835333752f6312", "size": 9356, "ext": "py", "lang": "Python", "max_stars_repo_path": "spam_code/lpc_gmm.py", "max_stars_repo_name": "chondroseto/Virtual_Assistant", "max_stars_repo_head_hexsha": "b52945d255176b711795d61da54d72000cf3561b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-18T06:38:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T06:38:40.000Z", "max_issues_repo_path": "spam_code/lpc_gmm.py", "max_issues_repo_name": "chondroseto/Virtual_Assistant", "max_issues_repo_head_hexsha": "b52945d255176b711795d61da54d72000cf3561b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spam_code/lpc_gmm.py", "max_forks_repo_name": "chondroseto/Virtual_Assistant", "max_forks_repo_head_hexsha": "b52945d255176b711795d61da54d72000cf3561b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6440677966, "max_line_length": 399, "alphanum_fraction": 0.6554082941, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2542}
|
contactList{1,1} = 'title 1'; contactList{1,2} = 'author name 1'; contactList{1,3} = 'spam@email.com';
contactList{2,1} = 'title 2'; contactList{2,2} = 'author name 2'; contactList{2,3} = 'spam@email.com';
subjectLine = 'title of the emails';
bodyLine = ['Dear %s,\n',...
'\n',...
'We will be organizing XXX\n',...
'Sincerely,\n',...
'The Organizers'];
cmdLine = 'mail -s "%s" %s < email.txt';
for itemID =1:size(contactList,1)
paperTitle = contactList{itemID,1};
authorName = contactList{itemID,2};
emailAddress = contactList{itemID,3};
% write the file
fp = fopen('email.txt','w');
fprintf(fp,bodyLine,authorName, paperTitle);
fclose(fp);
% run command
cmd = sprintf(cmdLine,subjectLine,emailAddress);
system(cmd);
% delete file
delete('email.txt');
end
|
{"author": "jianxiongxiao", "repo": "ProfXkit", "sha": "7376c50abf5ead846247774a36be026e6f24953c", "save_path": "github-repos/MATLAB/jianxiongxiao-ProfXkit", "path": "github-repos/MATLAB/jianxiongxiao-ProfXkit/ProfXkit-7376c50abf5ead846247774a36be026e6f24953c/batchEmail.m"}
|
[STATEMENT]
lemma project_extend_Join: "project h UNIV ((extend h F)\<squnion>G) = F\<squnion>(project h UNIV G)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. project h UNIV (extend h F \<squnion> G) = F \<squnion> project h UNIV G
[PROOF STEP]
apply (rule program_equalityI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. Init (project h UNIV (extend h F \<squnion> G)) = Init (F \<squnion> project h UNIV G)
2. Acts (project h UNIV (extend h F \<squnion> G)) = Acts (F \<squnion> project h UNIV G)
3. AllowedActs (project h UNIV (extend h F \<squnion> G)) = AllowedActs (F \<squnion> project h UNIV G)
[PROOF STEP]
apply (auto simp add: project_set_extend_set_Int image_iff)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x a b. \<lbrakk>\<forall>xa\<in>extend_act h ` Acts F \<union> Acts G. x \<noteq> project_act h xa; x \<in> Acts F; (a, b) \<in> x\<rbrakk> \<Longrightarrow> a = b
2. \<And>x b. \<lbrakk>\<forall>xa\<in>extend_act h ` Acts F \<union> Acts G. x \<noteq> project_act h xa; x \<in> Acts F\<rbrakk> \<Longrightarrow> (b, b) \<in> x
[PROOF STEP]
apply (metis Un_iff extend_act_inverse image_iff)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x b. \<lbrakk>\<forall>xa\<in>extend_act h ` Acts F \<union> Acts G. x \<noteq> project_act h xa; x \<in> Acts F\<rbrakk> \<Longrightarrow> (b, b) \<in> x
[PROOF STEP]
apply (metis Un_iff extend_act_inverse image_iff)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 621, "file": null, "length": 5}
|
macro do_while(condition, block)
quote
let
$block
while $condition
$block
end
end
end |> esc
end
function _reg(s, quoted_attr, attr_name)
@eval begin
#$get_attr_name(x) = getattr( x, $quoted_attr)
#$set_attr_name(x, val) = setattr!(x, $quoted_attr, val)
#export $get_attr_name
#export $set_attr_name
$attr_name(o::Object) = get_attr(o, $quoted_attr)
$attr_name(w::World, id::Symbol) = get_attr(w, id, $quoted_attr)
$attr_name(o::Object, v::Any) = set_attr!(o, $quoted_attr, v)
$attr_name(w::World, id::Symbol, v::Any) = set_attr!(w, id, $quoted_attr, v)
$attr_name(w::World, o::Object) = $attr_name(o)
$attr_name(w::World, o::Object, v::Any) = $attr_name(o, v)
export $attr_name
end
end
macro register_attribute(attr)
#get_attr_name = Symbol("get_", attr)
#set_attr_name = Symbol("set_", attr)
attr_name = Symbol(attr)
quoted_attr = QuoteNode(attr)
s = string(attr)
_reg(s, quoted_attr, attr_name)
end
|
{"hexsha": "929a935f082f013aa84a03181a344ae9115b00fe", "size": 1106, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/macros.jl", "max_stars_repo_name": "PPKFS/julia_if_old", "max_stars_repo_head_hexsha": "b46abb43aa89daf038e2f822c184c0f2c75d1b6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-13T15:41:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T15:41:54.000Z", "max_issues_repo_path": "src/macros.jl", "max_issues_repo_name": "PPKFS/julia_if_old", "max_issues_repo_head_hexsha": "b46abb43aa89daf038e2f822c184c0f2c75d1b6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/macros.jl", "max_forks_repo_name": "PPKFS/julia_if_old", "max_forks_repo_head_hexsha": "b46abb43aa89daf038e2f822c184c0f2c75d1b6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7222222222, "max_line_length": 84, "alphanum_fraction": 0.5886075949, "num_tokens": 319}
|
#pragma once
#include <memory>
#include <boost/asio.hpp>
#include "eventReceiver.hpp"
#include "serviceParamTypes.h"
namespace mln::net {
struct NetCommonObjects
{
public:
NetCommonObjects(ServiceParams& svcParams)
: _ioc(svcParams.ioc_)
, _strand(svcParams.ioc_)
, _keepAliveTimeMs(svcParams.keepAliveTimeMs_)
, _updateTimeMs(svcParams.serviceUpdateTimeMs_)
, _updater(svcParams.ioc_, boost::posix_time::milliseconds(svcParams.serviceUpdateTimeMs_))
{
_packetProc = std::make_unique<PacketProcedure>(svcParams.packetParser_, svcParams.manip_);
svcParams.receiver_.clone(&_eventReceiver);
_eventReceiver.initHandler(_packetProc.get());
}
boost::asio::io_context& _ioc;
boost::asio::io_context::strand _strand;
std::unique_ptr<PacketProcedure> _packetProc;
EventReceiver _eventReceiver;
size_t _keepAliveTimeMs = 0;
size_t _updateTimeMs = 0;
size_t _index;
private:
boost::chrono::system_clock::time_point _prevTime;
boost::asio::deadline_timer _updater;
public:
size_t getIndex() const { return _index; }
void setIndex(const size_t idx) { _index = idx; }
void expireTimerReady() {
if (0 != _updateTimeMs) {
_prevTime = boost::chrono::system_clock::now();
_updater.expires_from_now(boost::posix_time::milliseconds(_updateTimeMs));
_updater.async_wait(boost::asio::bind_executor(_strand, boost::bind(
&NetCommonObjects::handleUpdate, this, boost::asio::placeholders::error)));
}
}
void handleUpdate(const boost::system::error_code& ec) {
if (ec) {
[[unlikely]]
LOGW("failed handleUpdate. code:{}, msg:{}", ec.value(), ec.message());
}
else {
boost::chrono::system_clock::time_point now = boost::chrono::system_clock::now();
unsigned long elapse
= (unsigned long)boost::chrono::duration_cast<boost::chrono::milliseconds>(now - _prevTime).count();
_prevTime = now;
_eventReceiver.onUpdate(elapse);
_updater.expires_from_now(boost::posix_time::milliseconds(_updateTimeMs));
_updater.async_wait(boost::asio::bind_executor(_strand, boost::bind(
&NetCommonObjects::handleUpdate, this, boost::asio::placeholders::error)));
}
}
};
}//namespace mln::net {
|
{"hexsha": "ecf87dbb2ea6af169f8fcd557cc599d8b8f89d3c", "size": 2214, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/net/netCommonObjects.hpp", "max_stars_repo_name": "lazychase/mlnsdk", "max_stars_repo_head_hexsha": "599303c37b83c03827a3050c42aeb3af649ee968", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/net/netCommonObjects.hpp", "max_issues_repo_name": "lazychase/mlnsdk", "max_issues_repo_head_hexsha": "599303c37b83c03827a3050c42aeb3af649ee968", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2022-01-11T11:43:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-11T11:43:01.000Z", "max_forks_repo_path": "include/net/netCommonObjects.hpp", "max_forks_repo_name": "lazychase/mlnsdk", "max_forks_repo_head_hexsha": "599303c37b83c03827a3050c42aeb3af649ee968", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7532467532, "max_line_length": 105, "alphanum_fraction": 0.7190605239, "num_tokens": 604}
|
using Documenter, Query
makedocs(
modules = [Query],
sitename = "Query.jl",
pages = [
"Introduction" => "index.md",
"Getting Started" => "gettingstarted.md",
"Standalone Query Commands" => "standalonequerycommands.md",
"LINQ Style Query Commands" => "linqquerycommands.md",
"Data Sources" => "sources.md",
"Data Sinks" => "sinks.md",
"Experimental Features" => "experimental.md",
"Internals" => "internals.md"]
)
deploydocs(
repo = "github.com/queryverse/Query.jl.git"
)
|
{"hexsha": "cfed2a1e6129c56000b14b86654f6fec7b9fd4ef", "size": 498, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "petershintech/Query.jl", "max_stars_repo_head_hexsha": "7ab5f58ec82d51c42cf6dba7e916eee7a78a2c51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "petershintech/Query.jl", "max_issues_repo_head_hexsha": "7ab5f58ec82d51c42cf6dba7e916eee7a78a2c51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "petershintech/Query.jl", "max_forks_repo_head_hexsha": "7ab5f58ec82d51c42cf6dba7e916eee7a78a2c51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9, "max_line_length": 62, "alphanum_fraction": 0.6646586345, "num_tokens": 151}
|
from automan.api import Problem, Automator, Simulation
from automan.api import CondaClusterManager
from matplotlib import pyplot as plt
import numpy as np
class Squares(Problem):
def get_name(self):
return 'squares'
def get_commands(self):
commands = [(str(i), 'python square.py %d' % i, None)
for i in range(1, 8)]
return commands
def run(self):
self.make_output_dir()
data = []
for i in range(1, 8):
stdout = self.input_path(str(i), 'stdout.txt')
with open(stdout) as f:
values = [float(x) for x in f.read().split()]
data.append(values)
data = np.asarray(data)
plt.plot(data[:, 0], data[:, 1], 'o-')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(self.output_path('squares.pdf'))
class Powers(Problem):
def get_name(self):
return 'powers'
def setup(self):
base_cmd = 'python powers.py --output-dir $output_dir'
self.cases = [
Simulation(
root=self.input_path(str(i)),
base_command=base_cmd,
power=float(i)
)
for i in range(1, 5)
]
def run(self):
self.make_output_dir()
for case in self.cases:
data = np.load(case.input_path('results.npz'))
plt.plot(
data['x'], data['y'],
label=r'$x^{{%.2f}}$' % case.params['power']
)
plt.grid()
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.savefig(self.output_path('powers.pdf'))
if __name__ == '__main__':
automator = Automator(
simulation_dir='outputs',
output_dir='manuscript/figures',
all_problems=[Squares, Powers],
cluster_manager_factory=CondaClusterManager
)
automator.run()
|
{"hexsha": "f08c5ee1ff401ba18c615465837f85e02b692de3", "size": 1904, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/edm_conda_cluster/automate_conda.py", "max_stars_repo_name": "pypr/automan", "max_stars_repo_head_hexsha": "80619f0cb58ad1e996dc7c9ea66effecf8df5acc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2018-08-07T10:55:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T13:12:00.000Z", "max_issues_repo_path": "examples/edm_conda_cluster/automate_conda.py", "max_issues_repo_name": "pypr/automan", "max_issues_repo_head_hexsha": "80619f0cb58ad1e996dc7c9ea66effecf8df5acc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2018-06-19T18:57:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-04T11:35:06.000Z", "max_forks_repo_path": "examples/edm_conda_cluster/automate_conda.py", "max_forks_repo_name": "pypr/automan", "max_forks_repo_head_hexsha": "80619f0cb58ad1e996dc7c9ea66effecf8df5acc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-09-01T13:27:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T10:38:42.000Z", "avg_line_length": 27.2, "max_line_length": 62, "alphanum_fraction": 0.5378151261, "include": true, "reason": "import numpy", "num_tokens": 440}
|
library(ggplot2)
library(dplyr)
library(reshape2)
#load data
data <- read.csv("Downloads/MachineLearning-master/Example Data/PCA_Example_1.csv", stringsAsFactors=F)
#change the first column format as Date
data$Date = as.Date(data$Date)
#transform stock data into Date Stock1 Stock2 .... Stock24
data <- reshape(data, idvar = "Date", timevar = "Stock", direction = "wide")
#sort data by date, asc
data <- arrange(data, Date)
#change column id(sort by A, B....Z)
data<-data[,order(colnames(data),decreasing=F)]
#apply PCA
pca.model = prcomp(data[,1:ncol(data)-1])
#Get PCA component PCA component 1:
PC1 <- pca.model$x[,"PC1"]
#add id as duration days:
duration <- 1:length(PC1)
#transform into data frame and combine
PC1 <- as.data.frame(PC1)
duration <- as.data.frame(duration)
PC1 <- cbind(PC1, duration)
colnames(PC1) <- c("feature", "duration")
#draw plot
pc1_plot <- qplot(PC1$duration, PC1$feature)
#verify data path
data.verify <- read.csv("Downloads/MachineLearning-master/Example Data/PCA_Example_2.csv", stringsAsFactors = F)
data.verify$Date <- as.Date(data.verify$Date)
#subset data, only contains 2 columns, date and close
data.verify <- data.verify[,c(1,5)]
#sort by date
data.verify <- arrange(data.verify, Date)
#add duration
duration.verify <- 1:nrow(data.verify)
duration.verify <- as.data.frame(duration.verify)
data.verify <- cbind(duration.verify, data.verify)
#normalize data
max.value <- max(data.verify$Close)
min.value <- min(data.verify$Close)
range.value <- max.value - min.value
data.verify$Close <- data.verify$Close/range.value
#plot data
qplot(data.verify$duration.verify, data.verify$Close)
#normalize to the same scale
#I wrote the normalize part into the readme.md in R folder
|
{"hexsha": "b31e0e37f7e716cec489fb841f9fb682381996a1", "size": 1712, "ext": "r", "lang": "R", "max_stars_repo_path": "R/pca.r", "max_stars_repo_name": "Hennrik/machine_learning_examples", "max_stars_repo_head_hexsha": "8263fb95aa18ae56e4dc9690d389fe8ac25c2b3a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-01-24T04:44:35.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-24T04:44:35.000Z", "max_issues_repo_path": "R/pca.r", "max_issues_repo_name": "Hennrik/machine_learning_examples", "max_issues_repo_head_hexsha": "8263fb95aa18ae56e4dc9690d389fe8ac25c2b3a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/pca.r", "max_forks_repo_name": "Hennrik/machine_learning_examples", "max_forks_repo_head_hexsha": "8263fb95aa18ae56e4dc9690d389fe8ac25c2b3a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2173913043, "max_line_length": 112, "alphanum_fraction": 0.7429906542, "num_tokens": 464}
|
# -*- coding: utf-8 -*-
"""
Contains the definition of the SuddenDecay class.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import numpy as np
from . import SampleBasedDecay
logger = logging.getLogger('decay.sudden')
class SuddenDecay(SampleBasedDecay):
"""
Class that decays the value following the sigmoid curve.
Sigmoid is:
k
Y = --------------------- + 1
a + bx
1 + e
This curve used a=100, b=-100, k=-2
This intersects the Y axis at
+1 and the X axis at -1 and +1. We're interested only in the
positive x.
"""
def __init__(self, *args, **kwargs):
""" Constructor. """
super(SuddenDecay, self).__init__(
decay_name='.decay.sudden.', *args, **kwargs)
def __str__(self):
""" Represent this object as a human-readable string. """
return 'SuddenDecay()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'SuddenDecay()'
decay_x = np.array([
0.0,
0.05263157894736842,
0.10526315789473684,
0.15789473684210525,
0.21052631578947367,
0.2631578947368421,
0.3157894736842105,
0.3684210526315789,
0.42105263157894735,
0.47368421052631576,
0.5263157894736842,
0.5789473684210527,
0.631578947368421,
0.6842105263157894,
0.7368421052631579,
0.7894736842105263,
0.8421052631578947,
0.894736842105263,
0.9473684210526315,
1.0,
])
decay_y = np.array([
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.9999999999999998,
0.9999999999999614,
0.9999999999925487,
0.9999999985612403,
0.9999997221895089,
0.9999463589234484,
0.9896955173948946,
0.0,
])
|
{"hexsha": "fff977053334258989b6ac0d6b95c314d3c3b6f4", "size": 2032, "ext": "py", "lang": "Python", "max_stars_repo_path": "decay/decays/sample/sudden.py", "max_stars_repo_name": "pyl1b/decay", "max_stars_repo_head_hexsha": "7200516455fc03351ad658af66b5cc39b2b2d50a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "decay/decays/sample/sudden.py", "max_issues_repo_name": "pyl1b/decay", "max_issues_repo_head_hexsha": "7200516455fc03351ad658af66b5cc39b2b2d50a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "decay/decays/sample/sudden.py", "max_forks_repo_name": "pyl1b/decay", "max_forks_repo_head_hexsha": "7200516455fc03351ad658af66b5cc39b2b2d50a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5777777778, "max_line_length": 65, "alphanum_fraction": 0.5551181102, "include": true, "reason": "import numpy", "num_tokens": 645}
|
@testset "map" begin
m = sprand(5, 5, 0.25)
n = GBMatrix(m)
@test map(UnaryOps.LOG, n)[1,1] == map(log, m)[1,1]
o = map!(>, GBMatrix{Bool}(5, 5), 0.1, n)
@test o[1,4] == (0.1 > m[1,4])
@test map(second, n, 1.5)[1,1] == 1.5
@test (n .* 10)[1,1] == n[1,1] * 10
# Julia will map over the entire array, rather than just nnz.
# so just test [1,1]
@test map((x) -> 1.5, n)[1,1] == map((x) -> 1.5, m)[1,1]
end
|
{"hexsha": "4dd13310e8b11ce993532cc15cb21f4b70ab0b8a", "size": 444, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/operations/map.jl", "max_stars_repo_name": "JuliaSparse/SuiteSparseGraphBLAS.jl", "max_stars_repo_head_hexsha": "73466763044fb8a8c80c92180b294c482440c2b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2021-05-29T03:03:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T21:00:48.000Z", "max_issues_repo_path": "test/operations/map.jl", "max_issues_repo_name": "JuliaSparse/SuiteSparseGraphBLAS.jl", "max_issues_repo_head_hexsha": "73466763044fb8a8c80c92180b294c482440c2b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2021-05-21T21:59:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T23:34:24.000Z", "max_forks_repo_path": "test/operations/map.jl", "max_forks_repo_name": "abhinavmehndiratta/SuiteSparseGraphBLAS.jl", "max_forks_repo_head_hexsha": "73466763044fb8a8c80c92180b294c482440c2b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-06-08T15:44:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-18T23:38:35.000Z", "avg_line_length": 34.1538461538, "max_line_length": 65, "alphanum_fraction": 0.490990991, "num_tokens": 211}
|
(*
Title: Catoids
Author: Georg Struth
Maintainer: Georg Struth <g.struth at sheffield.ac.uk>
*)
section \<open>Catoids\<close>
theory Catoid
imports Main
begin
subsection \<open>Multimagmas\<close>
text \<open>Multimagmas are sets equipped with multioperations. Multioperations are isomorphic to ternary relations.\<close>
class multimagma =
fixes mcomp :: "'a \<Rightarrow> 'a \<Rightarrow> 'a set" (infixl "\<odot>" 70)
begin
text \<open>We introduce notation for the domain of definition of the multioperation.\<close>
abbreviation "\<Delta> x y \<equiv> (x \<odot> y \<noteq> {})"
text \<open>We extend the multioperation to powersets\<close>
definition conv :: "'a set \<Rightarrow> 'a set \<Rightarrow> 'a set" (infixl "\<star>" 70) where
"X \<star> Y = \<Union>{x \<odot> y |x y. x \<in> X \<and> y \<in> Y}"
lemma conv_exp: "X \<star> Y = {z. \<exists>x y. z \<in> x \<odot> y \<and> x \<in> X \<and> y \<in> Y}"
unfolding conv_def by fastforce
lemma conv_exp2: "(z \<in> X \<star> Y) = (\<exists>x y. z \<in> x \<odot> y \<and> x \<in> X \<and> y \<in> Y)"
by (simp add: multimagma.conv_exp)
lemma conv_distl: "X \<star> \<Union>\<Y> = \<Union>{X \<star> Y |Y. Y \<in> \<Y>}"
unfolding conv_def by blast
lemma conv_distr: "\<Union>\<X> \<star> Y = \<Union>{X \<star> Y |X. X \<in> \<X>}"
unfolding conv_def by blast
lemma conv_isol: "X \<subseteq> Y \<Longrightarrow> Z \<star> X \<subseteq> Z \<star> Y"
using conv_exp2 by fastforce
lemma conv_isor: "X \<subseteq> Y \<Longrightarrow> X \<star> Z \<subseteq> Y \<star> Z"
using conv_exp2 by fastforce
lemma conv_atom [simp]: "{x} \<star> {y} = x \<odot> y"
by (simp add: conv_def)
end
subsection \<open>Multisemigroups\<close>
text \<open>Sultisemigroups are associative multimagmas.\<close>
class multisemigroup = multimagma +
assumes assoc: "\<Union>{x \<odot> v |v. v \<in> y \<odot> z} = \<Union>{v \<odot> z |v. v \<in> x \<odot> y}"
begin
lemma assoc_exp: "(\<exists>v. w \<in> x \<odot> v \<and> v \<in> y \<odot> z) = (\<exists>v. v \<in> x \<odot> y \<and> w \<in> v \<odot> z)"
using assoc by blast
lemma assoc_var: "{x} \<star> (y \<odot> z) = (x \<odot> y) \<star> {z}"
unfolding conv_def assoc_exp using local.assoc by force
text \<open>Associativity extends to powersets.\<close>
lemma conv_assoc: "X \<star> (Y \<star> Z) = (X \<star> Y) \<star> Z"
unfolding conv_exp using assoc_exp by fastforce
end
subsection \<open>st-Multimagmas\<close>
text \<open>We equip multimagmas with source and target maps.\<close>
class st_op =
fixes src :: "'a \<Rightarrow> 'a" ("\<sigma>")
and tgt :: "'a \<Rightarrow> 'a" ("\<tau>")
class st_multimagma = multimagma + st_op +
assumes Dst: "x \<odot> y \<noteq> {} \<Longrightarrow> \<tau> x = \<sigma> y"
and s_absorb [simp]: "\<sigma> x \<odot> x = {x}"
and t_absorb [simp]: "x \<odot> \<tau> x = {x}"
text \<open>The following sublocale proof sets up opposition/duality.\<close>
sublocale st_multimagma \<subseteq> stopp: st_multimagma "\<lambda>x y. y \<odot> x" tgt src
rewrites "stopp.conv X Y = Y \<star> X"
by (unfold_locales, auto simp add: local.Dst multimagma.conv_def)
lemma (in st_multimagma) ts_compat [simp]:
"\<tau> (\<sigma> x) = \<sigma> x"
by (simp add: Dst)
lemma (in st_multimagma) ss_idem [simp]:
"\<sigma> (\<sigma> x) = \<sigma> x"
by (metis local.stopp.ts_compat local.ts_compat)
lemma (in st_multimagma) st_fix:
"(\<tau> x = x) = (\<sigma> x = x)"
proof
assume h1: "\<tau> x = x"
hence "\<sigma> x = \<sigma> (\<tau> x)"
by simp
also have "\<dots> = x"
by (metis h1 local.stopp.ts_compat)
finally show "\<sigma> x = x".
next
assume h2: "\<sigma> x = x"
hence "\<tau> x = \<tau> (\<sigma> x)"
by simp
also have "\<dots> = x"
by (metis h2 ts_compat)
finally show "\<tau> x = x".
qed
text \<open>We extend source and target operations to powersets by taking images.\<close>
abbreviation (in st_op) Src :: "'a set \<Rightarrow> 'a set" where
"Src \<equiv> image \<sigma>"
abbreviation (in st_op) Tgt :: "'a set \<Rightarrow> 'a set" where
"Tgt \<equiv> image \<tau>"
text \<open>Fixpoints of source and target maps model source and target elements.
These correspond to units.\<close>
abbreviation (in st_op) sfix :: "'a set" where
"sfix \<equiv> {x. \<sigma> x = x}"
abbreviation (in st_op) tfix :: "'a set" where
"tfix \<equiv> {x. \<tau> x = x}"
lemma (in st_multimagma) st_mm_rfix [simp]:
"tfix = stopp.sfix"
by simp
lemma (in st_multimagma) st_fix_set:
"{x. \<sigma> x = x} = {x. \<tau> x = x}"
using local.st_fix by presburger
lemma (in st_multimagma) stfix_set:
"sfix = tfix"
using local.st_fix_set by blast
lemma (in st_multimagma) sfix_im:
"sfix = Src UNIV"
by (smt (verit, ccfv_threshold) Collect_cong full_SetCompr_eq local.ss_idem)
lemma (in st_multimagma) tfix_im:
"tfix = Tgt UNIV"
using local.stopp.sfix_im by blast
lemma (in st_multimagma) ST_im:
"Src UNIV = Tgt UNIV"
using local.sfix_im local.stfix_set local.tfix_im by presburger
text \<open>Source and target elements are "orthogonal" idempotents.\<close>
lemma (in st_multimagma) s_idem [simp]:
"\<sigma> x \<odot> \<sigma> x = {\<sigma> x}"
proof-
have "{\<sigma> x} = \<sigma> x \<odot> \<tau> (\<sigma> x)"
using local.t_absorb by presburger
also have "\<dots> = \<sigma> x \<odot> \<sigma> x"
by simp
finally show ?thesis..
qed
lemma (in st_multimagma) s_ortho:
"\<Delta> (\<sigma> x) (\<sigma> y) \<Longrightarrow> \<sigma> x = \<sigma> y"
proof-
assume "\<Delta> (\<sigma> x) (\<sigma> y)"
hence "\<tau> (\<sigma> x) = \<sigma> (\<sigma> y)"
using local.Dst by blast
thus ?thesis
by simp
qed
lemma (in st_multimagma) s_ortho_iff:
"\<Delta> (\<sigma> x) (\<sigma> y) = (\<sigma> x = \<sigma> y)"
using local.s_ortho by auto
lemma (in st_multimagma) s_absorb_var:
"(\<sigma> y \<noteq> \<sigma> x) = (\<sigma> y \<odot> x = {})"
using local.Dst by force
lemma (in st_multimagma) s_absorb_var2:
"(\<sigma> y = \<sigma> x) = (\<sigma> y \<odot> x \<noteq> {})"
using local.s_absorb_var by blast
lemma (in st_multimagma) s_absorb_var3:
"(\<sigma> y = \<sigma> x) = \<Delta> (\<sigma> x) y"
by (metis local.s_absorb_var)
lemma (in st_multimagma) s_assoc:
"{\<sigma> x} \<star> (\<sigma> y \<odot> z) = (\<sigma> x \<odot> \<sigma> y) \<star> {z}"
proof-
{fix a
have "(a \<in> {\<sigma> x} \<star> (\<sigma> y \<odot> z)) = (\<exists>b. a \<in> \<sigma> x \<odot> b \<and> b \<in> \<sigma> y \<odot> z)"
by (simp add: local.conv_exp2)
also have "\<dots> = (\<exists>b. a \<in> \<sigma> x \<odot> b \<and> b \<in> \<sigma> y \<odot> z \<and> \<sigma> y = \<sigma> z)"
using local.s_absorb_var by auto
also have "\<dots> = (\<exists>b. a \<in> \<sigma> x \<odot> b \<and> b \<in> \<sigma> y \<odot> z \<and> \<sigma> y = \<sigma> z \<and> \<sigma> x = \<sigma> y)"
using local.stopp.Dst by fastforce
also have "\<dots> = (\<exists>b. b \<in> \<sigma> x \<odot> \<sigma> y \<and> a \<in> b \<odot> z \<and> \<sigma> y = \<sigma> z \<and> \<sigma> x = \<sigma> y)"
by fastforce
also have "\<dots> = (\<exists>b. b \<in> \<sigma> x \<odot> \<sigma> y \<and> a \<in> b \<odot> z)"
by (metis equals0D local.s_absorb_var3 local.s_idem singleton_iff)
also have "\<dots> = (a \<in> (\<sigma> x \<odot> \<sigma> y) \<star> {z})"
using local.conv_exp2 by auto
finally have "(a \<in> {\<sigma> x} \<star> (\<sigma> y \<odot> z)) = (a \<in> (\<sigma> x \<odot> \<sigma> y) \<star> {z})".}
thus ?thesis
by blast
qed
lemma (in st_multimagma) sfix_absorb_var [simp]:
"\<Union>{e \<odot> x |e. e \<in> sfix} = {x}"
apply safe
apply (metis local.Dst local.s_absorb local.ts_compat singletonD)
by (smt (verit) UnionI insertI1 local.s_absorb local.ss_idem mem_Collect_eq)
lemma (in st_multimagma) tfix_absorb_var:
"\<Union>{x \<odot> e |e. e \<in> tfix} = {x}"
using local.stopp.sfix_absorb_var by presburger
lemma (in st_multimagma) st_comm:
"\<tau> x \<odot> \<sigma> y = \<sigma> y \<odot> \<tau> x"
using local.Dst by fastforce
lemma (in st_multimagma) s_weak_twisted:
"\<Union>{\<sigma> u \<odot> x |u. u \<in> x \<odot> y} \<subseteq> x \<odot> \<sigma> y"
by (safe, metis empty_iff insertI1 local.Dst local.s_absorb local.t_absorb)
lemma (in st_multimagma) s_comm:
"\<sigma> x \<odot> \<sigma> y = \<sigma> y \<odot> \<sigma> x"
using local.Dst by force
lemma (in st_multimagma) s_export [simp]:
"Src (\<sigma> x \<odot> y) = \<sigma> x \<odot> \<sigma> y"
using local.Dst by force
lemma (in st_multimagma) st_prop:
"(\<tau> x = \<sigma> y) = \<Delta> (\<tau> x) (\<sigma> y)"
by (metis local.stopp.s_absorb_var2 local.stopp.st_comm local.ts_compat)
lemma (in st_multimagma) weak_local_var:
"\<tau> x \<odot> \<sigma> y = {} \<Longrightarrow> x \<odot> y = {}"
using local.Dst local.st_prop by auto
text \<open>The following facts hold by duality.\<close>
lemma (in st_multimagma) st_compat:
"\<sigma> (\<tau> x) = \<tau> x"
by simp
lemma (in st_multimagma) tt_idem:
"\<tau> (\<tau> x) = \<tau> x"
by simp
lemma (in st_multimagma) t_idem:
"\<tau> x \<odot> \<tau> x = {\<tau> x}"
by simp
lemma (in st_multimagma)t_weak_twisted:
"\<Union>{x \<odot> \<tau> u |u. u \<in> y \<odot> x} \<subseteq> \<tau> y \<odot> x"
using local.stopp.s_weak_twisted by auto
lemma (in st_multimagma) t_comm:
"\<tau> x \<odot> \<tau> y = \<tau> y \<odot> \<tau> x"
by (simp add: stopp.s_comm)
lemma (in st_multimagma) t_export:
"image \<tau> (x \<odot> \<tau> y) = \<tau> x \<odot> \<tau> y"
by simp
lemma (in st_multimagma) tt_comp_prop:
"\<Delta> (\<tau> x) (\<tau> y) = (\<tau> x = \<tau> y)"
using local.stopp.s_ortho_iff by force
text \<open>The set of all sources (and targets) are units at powerset level.\<close>
lemma (in st_multimagma) conv_uns [simp]:
"sfix \<star> X = X"
proof-
{fix a
have "(a \<in> sfix \<star> X) = (\<exists>b \<in> sfix. \<exists>c \<in> X. a \<in> b \<odot> c)"
by (meson local.conv_exp2)
also have "\<dots> = (\<exists>b. \<exists>c \<in> X. \<sigma> b = b \<and> a \<in> b \<odot> c)"
by blast
also have "\<dots> = (\<exists>b. \<exists>c \<in> X. a \<in> \<sigma> b \<odot> c)"
by (metis local.ss_idem)
also have "\<dots> = (\<exists>c \<in> X. a \<in> \<sigma> c \<odot> c)"
by (metis empty_iff local.s_absorb_var)
also have "\<dots> = (a \<in> X)"
by auto
finally have "(a \<in> sfix \<star> X) = (a \<in> X)".}
thus ?thesis
by blast
qed
lemma (in st_multimagma) conv_unt:
"X \<star> tfix = X"
using stopp.conv_uns by blast
text \<open>We prove laws of modal powerset quantales.\<close>
lemma (in st_multimagma) Src_exp:
"Src X = {\<sigma> x |x. x \<in> X}"
by (simp add: Setcompr_eq_image)
lemma (in st_multimagma) ST_compat [simp]:
"Src (Tgt X) = Tgt X"
unfolding image_def by fastforce
lemma (in st_multimagma) TS_compat:
"Tgt (Src X) = Src X"
by (meson local.stopp.ST_compat)
lemma (in st_multimagma) Src_absorp [simp]:
"Src X \<star> X = X"
proof-
{fix a
have "(a \<in> Src X \<star> X) = (\<exists>b \<in> Src X. \<exists>c \<in> X. a \<in> b \<odot> c)"
using local.conv_exp2 by auto
also have "\<dots> = (\<exists>b \<in> X. \<exists>c \<in> X. a \<in> \<sigma> b \<odot> c)"
by blast
also have "\<dots> = (\<exists>c \<in> X. a \<in> \<sigma> c \<odot> c)"
by (metis empty_iff local.s_absorb_var)
also have "\<dots> = (a \<in> X)"
by simp
finally have "(a \<in> Src X \<star> X) = (a \<in> X)".}
thus ?thesis
by force
qed
lemma (in st_multimagma) Tgt_absorp:
"X \<star> Tgt X = X"
by simp
lemma (in st_multimagma) Src_Sup_pres:
"Src (\<Union>\<X>) = \<Union>{Src X |X. X \<in> \<X>}"
unfolding Src_exp by auto
lemma (in st_multimagma) Tgt_Sup_pres:
"Tgt (\<Union>\<X>) = \<Union>{Tgt X |X. X \<in> \<X>}"
by blast
lemma (in st_multimagma) ST_comm:
"Src X \<star> Tgt Y = Tgt Y \<star> Src X"
proof-
{fix a
have "(a \<in> Src X \<star> Tgt Y) = (\<exists>b \<in> Src X. \<exists>c \<in> Tgt Y. a \<in> b \<odot> c)"
using local.conv_exp2 by auto
also have "\<dots> = (\<exists>b \<in> X. \<exists>c \<in> Y. a \<in> \<sigma> b \<odot> \<tau> c)"
by auto
also have "\<dots> = (\<exists>b \<in> X. \<exists>c \<in> Y. a \<in> \<tau> c \<odot> \<sigma> b)"
using local.st_comm by auto
also have "\<dots> = (a \<in> Tgt Y \<star> Src X)"
using multimagma.conv_exp2 by fastforce
finally have "(a \<in> Src X \<star> Tgt Y) = (a \<in> Tgt Y \<star> Src X)".}
thus ?thesis
by force
qed
lemma (in st_multimagma) Src_comm:
"Src X \<star> Src Y = Src Y \<star> Src X"
by (metis local.ST_comm local.TS_compat)
lemma (in st_multimagma) Tgt_comm:
"Tgt X \<star> Tgt Y = Tgt Y \<star> Tgt X"
using local.stopp.Src_comm by presburger
lemma (in st_multimagma) Src_subid:
"Src X \<subseteq> sfix"
by force
lemma (in st_multimagma) Tgt_subid:
"Tgt X \<subseteq> tfix"
using local.stopp.Src_subid by presburger
lemma (in st_multimagma) Src_export [simp]:
"Src (Src X \<star> Y) = Src X \<star> Src Y"
proof-
{fix a
have "(a \<in> Src (Src X \<star> Y)) = (\<exists>b \<in> Src X \<star> Y. a = \<sigma> b)"
by (simp add: image_iff)
also have "\<dots> = (\<exists>b. \<exists>c \<in> Src X. \<exists>d \<in> Y. a = \<sigma> b \<and> b \<in> c \<odot> d)"
by (meson local.conv_exp2)
also have "\<dots> = (\<exists>b. \<exists>c \<in> X. \<exists>d \<in> Y. a = \<sigma> b \<and> b \<in> \<sigma> c \<odot> d)"
by simp
also have "\<dots> = (\<exists>c \<in> X. \<exists>d \<in> Y. a \<in> Src (\<sigma> c \<odot> d))"
by (metis (mono_tags, lifting) image_iff)
also have "\<dots> = (\<exists>c \<in> X. \<exists>d \<in> Y. a \<in> \<sigma> c \<odot> \<sigma> d)"
by auto
also have "\<dots> = (\<exists>c \<in> Src X. \<exists>d \<in> Src Y. a \<in> c \<odot> d)"
by force
also have "\<dots> = (a \<in> Src X \<star> Src Y)"
using local.conv_exp2 by auto
finally have "(a \<in> Src (Src X \<star> Y)) = (a \<in> Src X \<star> Src Y)".}
thus ?thesis
by force
qed
lemma (in st_multimagma) Tgt_export [simp]:
"Tgt (X \<star> Tgt Y) = Tgt X \<star> Tgt Y"
by simp
text \<open>Locality implies st-locality, which is the composition pattern of categories.\<close>
lemma (in st_multimagma) locality:
assumes src_local: "Src (x \<odot> \<sigma> y) \<subseteq> Src (x \<odot> y)"
and tgt_local: "Tgt (\<tau> x \<odot> y) \<subseteq> Tgt (x \<odot> y)"
shows "\<Delta> x y = (\<tau> x = \<sigma> y)"
using local.Dst tgt_local by auto
subsection \<open>Catoids\<close>
class catoid = st_multimagma + multisemigroup
sublocale catoid \<subseteq> ts_msg: catoid "\<lambda>x y. y \<odot> x" tgt src
by (unfold_locales, simp add: local.assoc)
lemma (in catoid) src_comp_aux:
"v \<in> x \<odot> y \<Longrightarrow> \<sigma> v = \<sigma> x"
by (metis empty_iff insertI1 local.assoc_exp local.s_absorb local.s_absorb_var)
lemma (in catoid) src_comp:
"Src (x \<odot> y) \<subseteq> {\<sigma> x}"
proof-
{fix a
assume "a \<in> Src (x \<odot> y)"
hence "\<exists>b \<in> x \<odot> y. a = \<sigma> b"
by auto
hence "\<exists>b. \<sigma> b = \<sigma> x \<and> a = \<sigma> b"
using local.src_comp_aux by blast
hence "a = \<sigma> x"
by blast}
thus ?thesis
by blast
qed
lemma (in catoid) src_comp_cond:
"(\<Delta> x y) \<Longrightarrow> Src (x \<odot> y) = {\<sigma> x}"
by (meson image_is_empty local.src_comp subset_singletonD)
lemma (in catoid) tgt_comp_aux:
"v \<in> x \<odot> y \<Longrightarrow> \<tau> v = \<tau> y"
using local.ts_msg.src_comp_aux by fastforce
lemma (in catoid) tgt_comp:
"Tgt (x \<odot> y) \<subseteq> {\<tau> y}"
by (simp add: local.ts_msg.src_comp)
lemma (in catoid) tgt_comp_cond:
"\<Delta> x y \<Longrightarrow> Tgt (x \<odot> y) = {\<tau> y}"
by (simp add: local.ts_msg.src_comp_cond)
lemma (in catoid) src_weak_local:
"Src (x \<odot> y) \<subseteq> Src (x \<odot> \<sigma> y)"
proof-
{fix a
assume "a \<in> Src (x \<odot> y)"
hence "\<exists>b \<in> x \<odot> y. a = \<sigma> b"
by auto
hence "\<exists>b \<in> x \<odot> y. a = \<sigma> b"
by blast
hence "\<exists>b \<in> x \<odot> y. a = \<sigma> b \<and> \<tau> x = \<sigma> y"
using local.Dst by auto
hence "\<exists>b \<in> x \<odot> \<sigma> y. a = \<sigma> b"
by (metis insertI1 local.t_absorb local.ts_msg.tgt_comp_aux)
hence "a \<in> Src (x \<odot> \<sigma> y)"
by force}
thus ?thesis
by force
qed
lemma (in catoid) src_local_cond:
"\<Delta> x y \<Longrightarrow> Src (x \<odot> y) = Src (x \<odot> \<sigma> y)"
by (simp add: local.stopp.Dst local.ts_msg.tgt_comp_cond)
lemma (in catoid) tgt_weak_local:
"Tgt (x \<odot> y) \<subseteq> Tgt (\<tau> x \<odot> y)"
by (simp add: local.ts_msg.src_weak_local)
lemma (in catoid) tgt_local_cond:
"\<Delta> x y \<Longrightarrow> Tgt (x \<odot> y) = Tgt (\<tau> x \<odot> y)"
using local.ts_msg.src_local_cond by presburger
lemma (in catoid) src_twisted_aux:
"u \<in> x \<odot> y \<Longrightarrow> (x \<odot> \<sigma> y = \<sigma> u \<odot> x)"
by (metis local.Dst local.s_absorb local.src_comp_aux local.t_absorb)
lemma (in catoid) src_twisted_cond:
"\<Delta> x y \<Longrightarrow> x \<odot> \<sigma> y = \<Union>{\<sigma> u \<odot> x |u. u \<in> x \<odot> y}"
using local.stopp.Dst local.ts_msg.tgt_comp_aux by auto
lemma (in catoid) tgt_twisted_aux:
"u \<in> x \<odot> y \<Longrightarrow> (\<tau> x \<odot> y = y \<odot> \<tau> u)"
by (simp add: local.ts_msg.src_twisted_aux)
lemma (in catoid) tgt_twisted_cond:
"\<Delta> x y \<Longrightarrow> \<tau> x \<odot> y = \<Union>{y \<odot> \<tau> u |u. u \<in> x \<odot> y}"
by (simp add: local.ts_msg.src_twisted_cond)
lemma (in catoid) src_funct:
"x \<in> y \<odot> z \<Longrightarrow> x' \<in> y \<odot> z \<Longrightarrow> \<sigma> x = \<sigma> x'"
using local.src_comp_aux by force
lemma (in catoid) st_local_iff:
"(\<forall>x y. \<Delta> x y = (\<tau> x = \<sigma> y)) = (\<forall>v x y z. v \<in> x \<odot> y \<longrightarrow> \<Delta> y z \<longrightarrow> \<Delta> v z)"
apply safe
apply (metis local.ts_msg.src_comp_aux)
using local.Dst apply blast
by (metis local.s_absorb_var2 local.t_absorb singleton_iff)
text \<open>Again we can lift to properties of modal semirings and quantales.\<close>
lemma (in catoid) Src_weak_local:
"Src (X \<star> Y) \<subseteq> Src (X \<star> Src Y)"
proof-
{fix a
assume "a \<in> Src (X \<star> Y)"
hence "\<exists>b. \<exists>c \<in> X. \<exists>d \<in> Y. a = \<sigma> b \<and> b \<in> c \<odot> d"
by (smt (verit) image_iff local.conv_exp2)
hence "\<exists>c \<in> X. \<exists>d \<in> Y. a \<in> Src (c \<odot> d)"
by auto
hence "\<exists>c \<in> X. \<exists>d \<in> Y. a \<in> Src (c \<odot> \<sigma> d)"
by (metis empty_iff image_empty local.src_local_cond)
hence "\<exists>b. \<exists>c \<in> X. \<exists>d \<in> Src Y. a = \<sigma> b \<and> b \<in> c \<odot> d"
by auto
hence "a \<in> Src (X \<star> Src Y)"
by (metis image_iff local.conv_exp2)}
thus ?thesis
by blast
qed
lemma (in catoid) Tgt_weak_local:
"Tgt (X \<star> Y) \<subseteq> Tgt (Tgt X \<star> Y)"
by (metis local.stopp.conv_exp local.ts_msg.Src_weak_local multimagma.conv_exp)
text \<open>st-Locality implies locality.\<close>
lemma (in catoid) st_locality_l_locality:
assumes "\<Delta> x y = (\<tau> x = \<sigma> y)"
shows "Src (x \<odot> y) = Src (x \<odot> \<sigma> y)"
proof-
{fix a
have "(a \<in> Src (x \<odot> \<sigma> y)) = (\<exists>b \<in> x \<odot> \<sigma> y. a = \<sigma> b)"
by auto
also have "\<dots> = (\<exists>b \<in> x \<odot> \<sigma> y. a = \<sigma> b \<and> \<tau> x = \<sigma> y)"
by (simp add: local.st_prop local.tgt_comp_aux local.tgt_twisted_aux)
also have "\<dots> = (\<exists>b \<in> x \<odot> y. a = \<sigma> b)"
by (metis assms equals0D equals0I insertI1 local.t_absorb local.ts_msg.tgt_comp_aux)
also have "\<dots> = (a \<in> Src (x \<odot> y))"
by auto
finally have "(a \<in> Src (x \<odot> \<sigma> y)) = (a \<in> Src (x \<odot> y))".}
thus ?thesis
by force
qed
lemma (in catoid) st_locality_r_locality:
assumes lr_locality: "\<Delta> x y = (\<tau> x = \<sigma> y)"
shows "Tgt (x \<odot> y) = Tgt (\<tau> x \<odot> y)"
by (metis local.ts_msg.st_locality_l_locality lr_locality)
lemma (in catoid) st_locality_locality:
"(Src (x \<odot> y) = Src (x \<odot> \<sigma> y) \<and> Tgt (x \<odot> y) = Tgt (\<tau> x \<odot> y)) = (\<Delta> x y = (\<tau> x = \<sigma> y))"
apply standard
apply (simp add: local.locality)
by (metis local.st_locality_l_locality local.ts_msg.st_locality_l_locality)
subsection \<open>Locality\<close>
text \<open>For st-multimagmas there are different notions of locality. We do not develop this in detail.\<close>
class local_catoid = catoid +
assumes src_local: "Src (x \<odot> \<sigma> y) \<subseteq> Src (x \<odot> y)"
and tgt_local: "Tgt (\<tau> x \<odot> y) \<subseteq> Tgt (x \<odot> y)"
sublocale local_catoid \<subseteq> sts_msg: local_catoid "\<lambda>x y. y \<odot> x" tgt src
apply unfold_locales using local.tgt_local local.src_local by auto
lemma (in local_catoid) src_local_eq [simp]:
"Src (x \<odot> \<sigma> y) = Src (x \<odot> y)"
by (simp add: local.src_local local.src_weak_local order_class.order_eq_iff)
lemma (in local_catoid) tgt_local_eq:
"Tgt (\<tau> x \<odot> y) = Tgt (x \<odot> y)"
by simp
lemma (in local_catoid) src_twisted:
"x \<odot> \<sigma> y = \<Union>{\<sigma> u \<odot> x |u. u \<in> x \<odot> y}"
by (metis Setcompr_eq_image Sup_empty empty_is_image local.src_twisted_cond local.sts_msg.tgt_local_eq)
lemma (in local_catoid) tgt_twisted:
"\<tau> x \<odot> y = \<Union>{y \<odot> \<tau> u |u. u \<in> x \<odot> y}"
using local.sts_msg.src_twisted by auto
lemma (in local_catoid) local_var:
"\<Delta> x y \<Longrightarrow> \<Delta> (\<tau> x) (\<sigma> y)"
by (simp add: local.stopp.Dst)
lemma (in local_catoid) local_var_eq [simp]:
"\<Delta> (\<tau> x) (\<sigma> y) = \<Delta> x y"
by (simp add: local.locality)
text \<open>We lift locality to powersets.\<close>
lemma (in local_catoid) Src_local [simp]:
"Src (X \<star> Src Y) = Src (X \<star> Y)"
proof-
{fix a
have "(a \<in> Src (X \<star> Src Y)) = (\<exists>b \<in> X \<star> Src Y. a = \<sigma> b)"
by (simp add: image_iff)
also have "\<dots> = (\<exists>b. \<exists>c \<in> X. \<exists>d \<in> Src Y. b \<in> c \<odot> d \<and> a = \<sigma> b)"
by (meson local.conv_exp2)
also have "\<dots> = (\<exists>b. \<exists>c \<in> X. \<exists>d \<in> Y. b \<in> c \<odot> \<sigma> d \<and> a = \<sigma> b)"
by simp
also have "\<dots> = (\<exists>c \<in> X. \<exists>d \<in> Y. a \<in> Src (c \<odot> \<sigma> d))"
by blast
also have "\<dots> = (\<exists>c \<in> X. \<exists>d \<in> Y. a \<in> Src (c \<odot> d))"
by auto
also have "\<dots> = (\<exists>b. \<exists>c \<in> X. \<exists>d \<in> Y. b \<in> c \<odot> d \<and> a = \<sigma> b)"
by auto
also have "\<dots> = (\<exists>b \<in> X \<star> Y. a = \<sigma> b)"
by (meson local.conv_exp2)
also have "\<dots> = (a \<in> Src (X \<star> Y))"
by (simp add: image_iff)
finally have "(a \<in> Src (X \<star> Src Y)) = (a \<in> Src (X \<star> Y))".}
thus ?thesis
by force
qed
lemma (in local_catoid) Tgt_local [simp]:
"Tgt (Tgt X \<star> Y) = Tgt (X \<star> Y)"
by (metis local.stopp.conv_def local.sts_msg.Src_local multimagma.conv_def)
lemma (in local_catoid) st_local: "\<Delta> x y = (\<tau> x = \<sigma> y)"
using local.stopp.locality by force
subsection \<open>From partial magmas to single-set categories.\<close>
class functional_magma = multimagma +
assumes functionality: "x \<in> y \<odot> z \<Longrightarrow> x' \<in> y \<odot> z \<Longrightarrow> x = x'"
begin
text \<open>Functional magmas could also be called partial magmas. The multioperation corresponds to a partial operation.\<close>
lemma partial_card: "card (x \<odot> y) \<le> 1"
by (metis One_nat_def bot_nat_0.extremum card.infinite card_le_Suc0_iff_eq local.functionality)
lemma fun_in_sgl: "(x \<in> y \<odot> z) = ({x} = y \<odot> z)"
using local.functionality by fastforce
definition pcomp :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<otimes>" 70) where
"x \<otimes> y = (THE z. z \<in> x \<odot> y)"
lemma functionality_var: "\<Delta> x y \<Longrightarrow> (\<exists>!z. z \<in> x \<odot> y)"
using local.functionality by auto
lemma functionality_lem: "(\<exists>!z. z \<in> x \<odot> y) \<or> (x \<odot> y = {})"
using functionality_var by blast
lemma pcomp_def_var: "(\<Delta> x y \<and> x \<otimes> y = z) = (z \<in> x \<odot> y)"
unfolding pcomp_def by (smt (verit, del_insts) all_not_in_conv functionality_lem theI_unique)
lemma pcomp_def_var2: "\<Delta> x y \<Longrightarrow> ((x \<otimes> y = z) = (z \<in> x \<odot> y))"
using pcomp_def_var by blast
end
class functional_st_magma = functional_magma + st_multimagma
class functional_semigroup = functional_magma + multisemigroup
begin
lemma pcomp_assoc_defined: "(\<Delta> u v \<and> \<Delta> (u \<otimes> v) w) = (\<Delta> u (v \<otimes> w) \<and> \<Delta> v w)"
proof-
have "(\<Delta> u v \<and> \<Delta> (u \<otimes> v) w) = (\<exists>x. \<Delta> u v \<and> \<Delta> x w \<and> x = u \<otimes> v)"
by simp
also have "... = (\<exists>x. x \<in> u \<odot> v \<and> \<Delta> x w)"
by (metis local.pcomp_def_var)
also have "... = (\<exists>x. x \<in> v \<odot> w \<and> \<Delta> u x)"
using local.assoc_exp by blast
also have "... = (\<exists>x. \<Delta> v w \<and> x = v \<otimes> w \<and> \<Delta> u x)"
by (metis local.pcomp_def_var)
also have "... = (\<Delta> u (v \<otimes> w) \<and> \<Delta> v w)"
by auto
finally show ?thesis.
qed
lemma pcomp_assoc: "\<Delta> x y \<and> \<Delta> (x \<otimes> y) z \<Longrightarrow> (x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
by (smt (z3) local.assoc_exp local.functionality_lem local.pcomp_def_var2 pcomp_assoc_defined)
end
class functional_catoid = functional_semigroup + catoid
text \<open>Finally, here comes the definition of single-set categories as in Chapter 12 of Mac Lane's book,
but with partiality of arrow composition modelled using a multioperation.\<close>
class single_set_category = functional_catoid + local_catoid
begin
lemma st_assoc: "\<tau> x = \<sigma> y \<Longrightarrow> \<tau> y = \<sigma> z \<Longrightarrow> (x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
by (metis local.st_local local.pcomp_assoc local.pcomp_def_var2 local.tgt_comp_aux)
end
subsection \<open>Morphisms of multimagmas and lr-multimagmas\<close>
text \<open>In the context of single-set categories, these morphisms are functors. Bounded morphisms
are functional bisimulations. They are known as zig-zag morphisms or p-morphism in modal and
substructural logics.\<close>
definition mm_morphism :: "('a::multimagma \<Rightarrow> 'b::multimagma) \<Rightarrow> bool" where
"mm_morphism f = (\<forall>x y. image f (x \<odot> y) \<subseteq> f x \<odot> f y)"
definition bounded_mm_morphism :: "('a::multimagma \<Rightarrow> 'b::multimagma) \<Rightarrow> bool" where
"bounded_mm_morphism f = (mm_morphism f \<and> (\<forall>x u v. f x \<in> u \<odot> v \<longrightarrow> (\<exists>y z. u = f y \<and> v = f z \<and> x \<in> y \<odot> z)))"
definition st_mm_morphism :: "('a::st_multimagma \<Rightarrow> 'b::st_multimagma) \<Rightarrow> bool" where
"st_mm_morphism f = (mm_morphism f \<and> f \<circ> \<sigma> = \<sigma> \<circ> f \<and> f \<circ> \<tau> = \<tau> \<circ> f)"
definition bounded_st_mm_morphism :: "('a::st_multimagma \<Rightarrow> 'b::st_multimagma) \<Rightarrow> bool" where
"bounded_st_mm_morphism f = (bounded_mm_morphism f \<and> st_mm_morphism f)"
subsection \<open>Relationship with categories\<close>
text \<open>Next we add a standard definition of a category following Moerdijk and Mac Lane's book and,
for good measure, show that categories form single set categories and vice versa.\<close>
locale category =
fixes id :: "'objects \<Rightarrow> 'arrows"
and dom :: "'arrows \<Rightarrow> 'objects"
and cod :: "'arrows \<Rightarrow> 'objects"
and comp :: "'arrows \<Rightarrow> 'arrows \<Rightarrow> 'arrows" (infixl "\<bullet>" 70)
assumes dom_id [simp]: "dom (id X) = X"
and cod_id [simp]: "cod (id X) = X"
and id_dom [simp]: "id (dom f) \<bullet> f = f"
and id_cod [simp]: "f \<bullet> id (cod f) = f"
and dom_loc [simp]: "cod f = dom g \<Longrightarrow> dom (f \<bullet> g) = dom f"
and cod_loc [simp]: "cod f = dom g \<Longrightarrow> cod (f \<bullet> g) = cod g"
and assoc: "cod f = dom g \<Longrightarrow> cod g = dom h \<Longrightarrow> (f \<bullet> g) \<bullet> h = f \<bullet> (g \<bullet> h)"
begin
lemma "cod f = dom g \<Longrightarrow> dom (f \<bullet> g) = dom (f \<bullet> id (dom g))"
by simp
abbreviation "LL f \<equiv> id (dom f)"
abbreviation "RR f \<equiv> id (cod f)"
abbreviation "Comp \<equiv> \<lambda>f g. (if RR f = LL g then {f \<bullet> g} else {})"
end
typedef (overloaded) 'a::single_set_category st_objects = "{x::'a::single_set_category. \<sigma> x = x}"
using stopp.tt_idem by blast
setup_lifting type_definition_st_objects
lemma Sfix_coerce [simp]: "Abs_st_objects (\<sigma> (Rep_st_objects X)) = X"
by (metis (mono_tags, lifting) CollectD Rep_st_objects Rep_st_objects_inverse)
lemma Rfix_coerce [simp]: "Abs_st_objects (\<tau> (Rep_st_objects X)) = X"
by (metis (mono_tags, lifting) CollectD Rep_st_objects Rep_st_objects_inverse stopp.st_fix)
sublocale single_set_category \<subseteq> sscatcat: category Rep_st_objects "Abs_st_objects \<circ> \<sigma>" "Abs_st_objects \<circ> \<tau>" "(\<otimes>)"
apply unfold_locales
apply simp_all
apply (metis (mono_tags, lifting) Abs_st_objects_inverse empty_not_insert functional_magma_class.pcomp_def_var2 insertI1 mem_Collect_eq st_multimagma_class.s_absorb st_multimagma_class.ss_idem)
apply (metis (mono_tags, lifting) Abs_st_objects_inverse functional_magma_class.pcomp_def_var insert_iff mem_Collect_eq st_multimagma_class.stopp.s_absorb st_multimagma_class.stopp.ts_compat)
apply (metis (mono_tags, lifting) Abs_st_objects_inject catoid_class.ts_msg.tgt_comp_aux functional_magma_class.pcomp_def_var2 local_catoid_class.sts_msg.st_local mem_Collect_eq st_multimagma_class.stopp.ts_compat st_multimagma_class.stopp.tt_idem)
apply (metis (mono_tags, lifting) Abs_st_objects_inject functional_semigroup_class.pcomp_assoc_defined local_catoid_class.sts_msg.st_local mem_Collect_eq st_multimagma_class.stopp.s_absorb_var st_multimagma_class.stopp.st_compat)
by (metis (mono_tags, lifting) Abs_st_objects_inverse mem_Collect_eq single_set_category_class.st_assoc st_multimagma_class.stopp.st_compat st_multimagma_class.stopp.ts_compat)
sublocale category \<subseteq> catlrm: st_multimagma Comp LL RR
by unfold_locales auto
sublocale category \<subseteq> catsscat: single_set_category Comp LL RR
apply unfold_locales
apply simp_all
apply (metis cod_loc dom_id dom_loc local.assoc)
apply (metis empty_iff insert_iff)
apply (metis dom_id dom_loc)
by (metis cod_loc dom_id)
subsection \<open>A Mac Lane style variant\<close>
text \<open>Next we present an axiomatisation of single-set categories that follows Mac Lane's axioms
more closely, but still uses a multioperation for arrow composition.\<close>
class mlss_cat = functional_magma +
fixes l0 :: "'a \<Rightarrow>'a"
fixes r0 :: "'a \<Rightarrow>'a"
assumes comp0_def: "(x \<odot> y \<noteq> {}) = (r0 x = l0 y)"
assumes r0l0 [simp]: "r0 (l0 x) = l0 x"
assumes l0r0 [simp]: "l0 (r0 x) = r0 x"
assumes l0_absorb [simp]: "l0 x \<otimes> x = x"
assumes r0_absorb [simp] : "x \<otimes> r0 x = x"
assumes assoc_defined: "(u \<odot> v \<noteq> {} \<and> (u \<otimes> v) \<odot> w \<noteq> {}) = (u \<odot> (v \<otimes> w) \<noteq> {} \<and> v \<odot> w \<noteq> {})"
assumes comp0_assoc: "r0 x = l0 y \<Longrightarrow> r0 y = l0 z \<Longrightarrow> x \<otimes> (y \<otimes> z) = (x \<otimes> y) \<otimes> z"
assumes locall_var: "r0 x = l0 y \<Longrightarrow> l0 (x \<otimes> y) = l0 x"
assumes localr_var: "r0 x = l0 y \<Longrightarrow> r0 (x \<otimes> y) = r0 y"
begin
lemma ml_locall [simp]: "l0 (x \<otimes> l0 y) = l0 (x \<otimes> y)"
by (metis local.comp0_def local.l0_absorb local.locall_var local.pcomp_def local.r0l0)
lemma ml_localr [simp]: "r0 (r0 x \<otimes> y) = r0 (x \<otimes> y)"
by (metis local.comp0_def local.l0r0 local.localr_var local.pcomp_def local.r0l0)
lemma ml_locall_im [simp]: "image l0 (x \<odot> l0 y) = image l0 (x \<odot> y)"
by (smt (verit, ccfv_SIG) Collect_cong Setcompr_eq_image local.comp0_def local.l0r0 local.pcomp_def_var2 local.r0l0 ml_locall)
lemma ml_localr_im [simp]: "image r0 (r0 x \<odot> y) = image r0 (x \<odot> y)"
by (smt (verit, ccfv_SIG) Collect_cong Setcompr_eq_image local.comp0_def local.l0r0 local.pcomp_def_var2 local.r0l0 ml_localr)
end
sublocale single_set_category \<subseteq> sscatml: mlss_cat "(\<odot>)" "\<sigma>" "\<tau>"
apply unfold_locales
apply (simp_all add: st_local pcomp_def_var2)
using local.pcomp_assoc_defined local.st_local apply force
using pcomp_assoc_defined st_assoc local.pcomp_def_var2 local.st_local local.src_comp_aux tgt_comp_aux by fastforce+
sublocale mlss_cat \<subseteq> mlsscat: single_set_category "(\<odot>)" "l0" "r0"
apply unfold_locales
apply (simp_all add: comp0_def)
apply standard
apply (clarsimp, smt (verit, ccfv_SIG) local.assoc_defined local.comp0_assoc local.comp0_def local.fun_in_sgl local.pcomp_def_var)
apply (clarsimp, metis local.assoc_defined local.comp0_assoc local.comp0_def local.pcomp_def_var)
apply (metis local.comp0_def local.fun_in_sgl local.l0_absorb local.pcomp_def_var2 local.r0l0)
using local.comp0_def local.fun_in_sgl local.l0r0 local.pcomp_def_var2 local.r0_absorb by presburger
subsection \<open>Product of catoids\<close>
instantiation prod :: (catoid, catoid) catoid
begin
definition "src_prod x = (\<sigma> (fst x), \<sigma> (snd x))"
for x :: "'a \<times> 'b"
definition "tgt_prod x = (\<tau> (fst x), \<tau> (snd x))"
for x :: "'a \<times> 'b"
definition "mcomp_prod x y = {(u,v) |u v. u \<in> fst x \<odot> fst y \<and> v \<in> snd x \<odot> snd y}"
for x y :: "'a \<times> 'b"
instance
proof
fix x y z :: "'a \<times> 'b"
show "\<Union>{x \<odot> v |v. v \<in> y \<odot> z} = \<Union>{v \<odot> z |v. v \<in> x \<odot> y}"
proof-
{fix a b
have "((a,b) \<in> \<Union>{x \<odot> v |v. v \<in> y \<odot> z}) = (\<exists>v. (a,b) \<in> x \<odot> v \<and> v \<in> y \<odot> z)"
by blast
also have "\<dots> = (\<exists>v w. a \<in> fst x \<odot> v \<and> v \<in> fst y \<odot> fst z \<and> b \<in> snd x \<odot> w \<and> w \<in> snd y \<odot> snd z)"
using mcomp_prod_def by auto
also have "\<dots> = (\<exists>v w. a \<in> v \<odot> fst z \<and> v \<in> fst x \<odot> fst y \<and> b \<in> w \<odot> snd z \<and> w \<in> snd x \<odot> snd y)"
by (meson ts_msg.assoc_exp)
also have "\<dots> = (\<exists>v. (a,b) \<in> v \<odot> z \<and> v \<in> x \<odot> y)"
using mcomp_prod_def by auto
also have "\<dots> = ((a,b) \<in> \<Union>{v \<odot> z |v. v \<in> x \<odot> y})"
by blast
finally have "((a,b) \<in> \<Union>{x \<odot> v |v. v \<in> y \<odot> z}) = ((a,b) \<in> \<Union>{v \<odot> z |v. v \<in> x \<odot> y})".}
thus ?thesis
by (meson pred_equals_eq2)
qed
show "x \<odot> y \<noteq> {} \<Longrightarrow> \<tau> x = \<sigma> y"
by (simp add: Catoid.mcomp_prod_def Dst src_prod_def tgt_prod_def)
show "\<sigma> x \<odot> x = {x}"
unfolding src_prod_def mcomp_prod_def by simp
show "x \<odot> \<tau> x = {x}"
unfolding tgt_prod_def mcomp_prod_def by simp
qed
end
instantiation prod :: (single_set_category, single_set_category) single_set_category
begin
instance
proof
fix x y z x' :: "'a \<times> 'b"
show "x \<in> y \<odot> z \<Longrightarrow> x' \<in> y \<odot> z \<Longrightarrow> x = x'"
unfolding mcomp_prod_def by (smt (verit, best) functionality mem_Collect_eq)
show a: "stopp.Tgt (x \<odot> \<sigma> y) \<subseteq> stopp.Tgt (x \<odot> y)"
proof-
{fix a b
have "((a,b) \<in> stopp.Tgt (x \<odot> \<sigma> y)) = ((a,b) \<in> Src {(c,d) |c d. c \<in> fst x \<odot> \<sigma> (fst y) \<and> d \<in> snd x \<odot> \<sigma> (snd y)})"
by (simp add: mcomp_prod_def src_prod_def)
also have "\<dots> = (a \<in> Src (fst x \<odot> \<sigma> (fst y)) \<and> b \<in> Src (snd x \<odot> \<sigma> (snd y)))"
by (smt (z3) Setcompr_eq_image fst_conv mem_Collect_eq snd_conv src_prod_def stopp.tt_idem)
also have "\<dots> = (a \<in> Src (fst x \<odot> fst y) \<and> b \<in> Src (snd x \<odot> snd y))"
by simp
also have "\<dots> = ((a,b) \<in> Src {(c,d) |c d. c \<in> (fst x \<odot> fst y) \<and> d \<in> (snd x \<odot> snd y)})"
by (smt (z3) Setcompr_eq_image fst_conv mem_Collect_eq snd_conv src_prod_def stopp.tt_idem)
also have "\<dots> = ((a,b) \<in> stopp.Tgt (x \<odot> y))"
by (simp add: mcomp_prod_def src_prod_def)
finally have "((a,b) \<in> stopp.Tgt (x \<odot> \<sigma> y)) = ((a,b) \<in> stopp.Tgt (x \<odot> y))".}
thus ?thesis
by auto
qed
show "Tgt (\<tau> x \<odot> y) \<subseteq> Tgt (x \<odot> y)"
by (metis (no_types, lifting) a bot.extremum_uniqueI empty_is_image stopp.s_absorb_var3 tgt_local_cond tgt_weak_local ts_msg.st_locality_l_locality)
qed
end
end
|
{"author": "gstruth", "repo": "catoids", "sha": "1b7c623d742bcacfecf1a60518106c31716bf2dd", "save_path": "github-repos/isabelle/gstruth-catoids", "path": "github-repos/isabelle/gstruth-catoids/catoids-1b7c623d742bcacfecf1a60518106c31716bf2dd/Catoid.thy"}
|
[STATEMENT]
lemma mask_inj_hlp1: "inj_on (mask :: nat \<Rightarrow> 16 word) {0..16}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj_on mask {0..16}
[PROOF STEP]
proof(intro inj_onI, goal_cases)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> {0..16}; y \<in> {0..16}; mask x = mask y\<rbrakk> \<Longrightarrow> x = y
[PROOF STEP]
case (1 x y)
[PROOF STATE]
proof (state)
this:
x \<in> {0..16}
y \<in> {0..16}
mask x = mask y
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> {0..16}; y \<in> {0..16}; mask x = mask y\<rbrakk> \<Longrightarrow> x = y
[PROOF STEP]
from 1(3)
[PROOF STATE]
proof (chain)
picking this:
mask x = mask y
[PROOF STEP]
have oe: "of_bl (replicate (16 - x) False @ replicate x True) = (of_bl (replicate (16 - y) False @ replicate y True) :: 16 word)"
[PROOF STATE]
proof (prove)
using this:
mask x = mask y
goal (1 subgoal):
1. of_bl (replicate (16 - x) False @ replicate x True) = of_bl (replicate (16 - y) False @ replicate y True)
[PROOF STEP]
unfolding mask_bl of_bl_rep_False
[PROOF STATE]
proof (prove)
using this:
of_bl (replicate x True) = of_bl (replicate y True)
goal (1 subgoal):
1. of_bl (replicate x True) = of_bl (replicate y True)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
of_bl (replicate (16 - x) False @ replicate x True) = of_bl (replicate (16 - y) False @ replicate y True)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> {0..16}; y \<in> {0..16}; mask x = mask y\<rbrakk> \<Longrightarrow> x = y
[PROOF STEP]
have "\<And>z. z \<le> 16 \<Longrightarrow> length (replicate (16 - z) False @ replicate z True) = 16"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>z. z \<le> 16 \<Longrightarrow> length (replicate (16 - z) False @ replicate z True) = 16
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?z \<le> 16 \<Longrightarrow> length (replicate (16 - ?z) False @ replicate ?z True) = 16
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> {0..16}; y \<in> {0..16}; mask x = mask y\<rbrakk> \<Longrightarrow> x = y
[PROOF STEP]
with 1(1,2)
[PROOF STATE]
proof (chain)
picking this:
x \<in> {0..16}
y \<in> {0..16}
?z \<le> 16 \<Longrightarrow> length (replicate (16 - ?z) False @ replicate ?z True) = 16
[PROOF STEP]
have ps: "replicate (16 - x) False @ replicate x True \<in> {bl. length bl = LENGTH(16)}" " replicate (16 - y) False @ replicate y True \<in> {bl. length bl = LENGTH(16)}"
[PROOF STATE]
proof (prove)
using this:
x \<in> {0..16}
y \<in> {0..16}
?z \<le> 16 \<Longrightarrow> length (replicate (16 - ?z) False @ replicate ?z True) = 16
goal (1 subgoal):
1. replicate (16 - x) False @ replicate x True \<in> {bl. length bl = LENGTH(16)} &&& replicate (16 - y) False @ replicate y True \<in> {bl. length bl = LENGTH(16)}
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
replicate (16 - x) False @ replicate x True \<in> {bl. length bl = LENGTH(16)}
replicate (16 - y) False @ replicate y True \<in> {bl. length bl = LENGTH(16)}
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> {0..16}; y \<in> {0..16}; mask x = mask y\<rbrakk> \<Longrightarrow> x = y
[PROOF STEP]
from inj_onD[OF word_bl.Abs_inj_on, OF oe ps]
[PROOF STATE]
proof (chain)
picking this:
replicate (16 - x) False @ replicate x True = replicate (16 - y) False @ replicate y True
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
replicate (16 - x) False @ replicate x True = replicate (16 - y) False @ replicate y True
goal (1 subgoal):
1. x = y
[PROOF STEP]
using 1(1,2)
[PROOF STATE]
proof (prove)
using this:
replicate (16 - x) False @ replicate x True = replicate (16 - y) False @ replicate y True
x \<in> {0..16}
y \<in> {0..16}
goal (1 subgoal):
1. x = y
[PROOF STEP]
by(fastforce intro: replicate_FT_hlp)
[PROOF STATE]
proof (state)
this:
x = y
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1681, "file": "LOFT_LinuxRouter_OpenFlow_Translation", "length": 16}
|
/* test_uniform_int_distribution.cpp
*
* Copyright Steven Watanabe 2011
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* $Id$
*
*/
#include <boost/random/uniform_int_distribution.hpp>
#include <limits>
#define BOOST_RANDOM_DISTRIBUTION boost::random::uniform_int_distribution<>
#define BOOST_RANDOM_ARG1 a
#define BOOST_RANDOM_ARG2 b
#define BOOST_RANDOM_ARG1_DEFAULT 0
#define BOOST_RANDOM_ARG2_DEFAULT 0x7fffffff
#define BOOST_RANDOM_ARG1_VALUE 100
#define BOOST_RANDOM_ARG2_VALUE 250
#define BOOST_RANDOM_DIST0_MIN 0
#define BOOST_RANDOM_DIST0_MAX 0x7fffffff
#define BOOST_RANDOM_DIST1_MIN 100
#define BOOST_RANDOM_DIST1_MAX 0x7fffffff
#define BOOST_RANDOM_DIST2_MIN 100
#define BOOST_RANDOM_DIST2_MAX 250
#define BOOST_RANDOM_TEST1_PARAMS (0, 9)
#define BOOST_RANDOM_TEST1_MIN 0
#define BOOST_RANDOM_TEST1_MAX 9
#define BOOST_RANDOM_TEST2_PARAMS (10, 19)
#define BOOST_RANDOM_TEST2_MIN 10
#define BOOST_RANDOM_TEST2_MAX 19
#include "test_distribution.ipp"
#define BOOST_RANDOM_UNIFORM_INT boost::random::uniform_int_distribution
#include "test_uniform_int.ipp"
|
{"hexsha": "227a0b31050a5a8db32daa1984dacef8fd052fd6", "size": 1232, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/random/test/test_uniform_int_distribution.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 85.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T20:36:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T20:38:31.000Z", "max_issues_repo_path": "libs/boost/libs/random/test/test_uniform_int_distribution.cpp", "max_issues_repo_name": "flingone/frameworks_base_cmds_remoted", "max_issues_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "libs/boost/libs/random/test/test_uniform_int_distribution.cpp", "max_forks_repo_name": "flingone/frameworks_base_cmds_remoted", "max_forks_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2015-01-28T16:33:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T05:04:39.000Z", "avg_line_length": 28.6511627907, "max_line_length": 76, "alphanum_fraction": 0.8035714286, "num_tokens": 298}
|
import os
import numpy as np
from tqdm import tqdm
import laspy
import argparse
from tqdm import tqdm
def get_predictions(pred_file, las_file):
result = np.loadtxt(pred_file)
labels = result[:, 3]
points = result[:, 0:3]
las = laspy.create(file_version = "1.2", point_format = 3)
las.x = points[:, 0]
las.y = points[:, 1]
las.z = points[:, 2]
las.classification = labels
las.write(las_file)
return points, labels
def get_predictions_dir(pred_dir, out_dir):
all_files = [f for f in os.listdir(pred_dir) if os.path.isfile(os.path.join(pred_dir, f))]
pred_files = [f for f in all_files if f[-11:-4] == "pred_gt"]
pred_files = sorted(pred_files, key = str.lower)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for i in tqdm(range(len(pred_files)), "Reading Pointcloud Predictions"):
out_las_name = "{}.las".format(pred_files[i][:-4])
out_file = os.path.join(out_dir, out_las_name)
get_predictions(os.path.join(pred_dir, pred_files[i]), out_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract DGCNN pointcloud predicitons')
parser.add_argument('--pred_dir', type = str, default = "predict", help = 'Directory of DGCNN predictions')
parser.add_argument('--out_dir', type = str, default = "predict_las", help = 'Directory to save LAS prediction files to')
args = parser.parse_args()
get_predictions_dir(args.pred_dir, args.out_dir)
|
{"hexsha": "7255ec5834cfd5b7897ebe5db8dd44d2b85b2512", "size": 1501, "ext": "py", "lang": "Python", "max_stars_repo_path": "predictions.py", "max_stars_repo_name": "BenCurran98/FugroDGCNN", "max_stars_repo_head_hexsha": "7033cc4992f975e836289cae59d4990d9edb8b6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predictions.py", "max_issues_repo_name": "BenCurran98/FugroDGCNN", "max_issues_repo_head_hexsha": "7033cc4992f975e836289cae59d4990d9edb8b6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predictions.py", "max_forks_repo_name": "BenCurran98/FugroDGCNN", "max_forks_repo_head_hexsha": "7033cc4992f975e836289cae59d4990d9edb8b6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2708333333, "max_line_length": 125, "alphanum_fraction": 0.6755496336, "include": true, "reason": "import numpy", "num_tokens": 385}
|
from multiprocessing import Pool #parallel processing
import multiprocessing as mp
import structure
from structure.global_constants import *
from structure.cell import Tissue, BasicSpringForceNoGrowth
import structure.initialisation as init
import sys
import os
import numpy as np
import libs.pd_lib_neutral as lib
import libs.data as data
def calc_interactions(tissue,mutant_index,n):
"""treats all cells with ancestor 'mutant_index' as cooperators
returns:
n (int): size of clone
I_CC/I_CD (ints): number of cooperator-cooperator/defector interactions in population
W_CC/W_CD (floats): number of cooperator-cooperator/defector interactions in pop. weighted by neighbour number
"""
neighbours = tissue.mesh.neighbours
types = tissue.properties['ancestor']==mutant_index
I_CC,I_CD,W_CC,W_CD,N_D = 0,0,0.,0.,0
for ctype,cell_neighbours in zip(types,neighbours):
if ctype:
Cneigh,neigh = float(sum(types[cell_neighbours])),float(len(cell_neighbours))
I_CC += Cneigh
I_CD += neigh - Cneigh
W_CC += Cneigh/neigh
W_CD += (neigh-Cneigh)/neigh
return [n,I_CC,I_CD,W_CC,W_CD]
def run_sim(i):
"""run a single simulation and save interaction data for each clone"""
rand = np.random.RandomState()
dt=0.005*-50./MU
tissue = lib.initialise_tissue_ancestors(l,dt,10.,10.,rand,MU)
tissue.properties['ancestor']=np.arange(l*l)
if init_timend is not None: tissue = lib.run(tissue,lib.simulation_ancestor_tracking(tissue,dt,init_timend/dt,init_timend/dt,rand),init_timend/dt,init_timend/dt)[-1]
data = [calc_interactions(tissue,mutant_index,n)
for tissue in lib.run_generator(lib.simulation_ancestor_tracking(tissue,dt,timend/dt,timestep/dt,rand,til_fix=True),timend/dt,timestep/dt)
for mutant_index,n in enumerate(np.bincount(tissue.properties['ancestor'])) if n>=n_min]
np.savetxt('%s/data_%d'%(outdir,i),data,fmt=('%4d','%4d','%4d','%4.6f','%4.6f'))
return None
l = 10 # population size N = l*l
init_timend = 10. # initial simulation time to equilibrate
timestep = 12. # timesteps at which to calc interaction data (hours)
timend = 10000. # length of simulation (hours)
sim_runs = int(sys.argv[1]) # number of sims to run taken as command line arg
MU = float(sys.argv[2]) #spring constant
n_min = 1
outdir = 'interaction_data/supp_vary_MU/MU%d/raw_data'%MU
if not os.path.exists(outdir): # if the outdir doesn't exist create it
os.makedirs(outdir)
# run simulations in parallel
cpunum=mp.cpu_count()
pool = Pool(processes=cpunum-1,maxtasksperchild=1000)
pool.map(run_sim,range(sim_runs))
pool.close()
pool.join()
|
{"hexsha": "370f1ffde2f500968a4e3d043bee9a5dfebacc18", "size": 2726, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_files/pd_original/cluster_stats_vary_MU.py", "max_stars_repo_name": "jessiesrr/VTdyn", "max_stars_repo_head_hexsha": "6f71ef94525d95221f5bd5e5290f4df10648cd18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-02T06:37:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-02T06:37:50.000Z", "max_issues_repo_path": "run_files/pd_original/cluster_stats_vary_MU.py", "max_issues_repo_name": "jessiesrr/VTdyn", "max_issues_repo_head_hexsha": "6f71ef94525d95221f5bd5e5290f4df10648cd18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_files/pd_original/cluster_stats_vary_MU.py", "max_forks_repo_name": "jessiesrr/VTdyn", "max_forks_repo_head_hexsha": "6f71ef94525d95221f5bd5e5290f4df10648cd18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.59375, "max_line_length": 169, "alphanum_fraction": 0.7123991196, "include": true, "reason": "import numpy", "num_tokens": 739}
|
"""
Interact with the grizli AWS database
"""
import os
import numpy as np
FLAGS = {'init_lambda': 1,
'start_beams': 2,
'done_beams': 3,
'no_run_fit': 4,
'start_redshift_fit': 5,
'fit_complete': 6}
COLUMNS = ['root', 'id', 'status', 'ra', 'dec', 'ninput', 'redshift', 'as_epsf', 't_g102', 'n_g102', 'p_g102', 't_g141', 'n_g141', 'p_g141', 't_g800l', 'n_g800l', 'p_g800l', 'numlines', 'haslines', 'chi2poly', 'chi2spl', 'splf01', 'sple01', 'splf02', 'sple02', 'splf03', 'sple03', 'splf04', 'sple04', 'huberdel', 'st_df', 'st_loc', 'st_scl', 'dof', 'chimin', 'chimax', 'bic_poly', 'bic_spl', 'bic_temp', 'z02', 'z16', 'z50', 'z84', 'z97', 'zwidth1', 'zwidth2', 'z_map', 'zrmin', 'zrmax', 'z_risk', 'min_risk', 'd4000', 'd4000_e', 'dn4000', 'dn4000_e', 'dlineid', 'dlinesn', 'flux_pab', 'err_pab', 'ew50_pab', 'ewhw_pab', 'flux_hei_1083', 'err_hei_1083', 'ew50_hei_1083', 'ewhw_hei_1083', 'flux_siii', 'err_siii', 'ew50_siii', 'ewhw_siii', 'flux_oii_7325', 'err_oii_7325', 'ew50_oii_7325', 'ewhw_oii_7325', 'flux_ariii_7138', 'err_ariii_7138', 'ew50_ariii_7138', 'ewhw_ariii_7138', 'flux_sii', 'err_sii', 'ew50_sii', 'ewhw_sii', 'flux_ha', 'err_ha', 'ew50_ha', 'ewhw_ha', 'flux_oi_6302', 'err_oi_6302', 'ew50_oi_6302', 'ewhw_oi_6302', 'flux_hei_5877', 'err_hei_5877', 'ew50_hei_5877', 'ewhw_hei_5877', 'flux_oiii', 'err_oiii', 'ew50_oiii', 'ewhw_oiii', 'flux_hb', 'err_hb', 'ew50_hb', 'ewhw_hb', 'flux_oiii_4363', 'err_oiii_4363', 'ew50_oiii_4363', 'ewhw_oiii_4363', 'flux_hg', 'err_hg', 'ew50_hg', 'ewhw_hg', 'flux_hd', 'err_hd', 'ew50_hd', 'ewhw_hd', 'flux_h7', 'err_h7', 'ew50_h7', 'ewhw_h7', 'flux_h8', 'err_h8', 'ew50_h8', 'ewhw_h8', 'flux_h9', 'err_h9', 'ew50_h9', 'ewhw_h9', 'flux_h10', 'err_h10', 'ew50_h10', 'ewhw_h10', 'flux_neiii_3867', 'err_neiii_3867', 'ew50_neiii_3867', 'ewhw_neiii_3867', 'flux_oii', 'err_oii', 'ew50_oii', 'ewhw_oii', 'flux_nevi_3426', 'err_nevi_3426', 'ew50_nevi_3426', 'ewhw_nevi_3426', 'flux_nev_3346', 'err_nev_3346', 'ew50_nev_3346', 'ewhw_nev_3346', 'flux_mgii', 'err_mgii', 'ew50_mgii', 'ewhw_mgii', 'flux_civ_1549', 'err_civ_1549', 'ew50_civ_1549', 'ewhw_civ_1549', 'flux_ciii_1908', 'err_ciii_1908', 'ew50_ciii_1908', 'ewhw_ciii_1908', 'flux_oiii_1663', 'err_oiii_1663', 'ew50_oiii_1663', 'ewhw_oiii_1663', 'flux_heii_1640', 'err_heii_1640', 'ew50_heii_1640', 'ewhw_heii_1640', 'flux_niii_1750', 'err_niii_1750', 'ew50_niii_1750', 'ewhw_niii_1750', 'flux_niv_1487', 'err_niv_1487', 'ew50_niv_1487', 'ewhw_niv_1487', 'flux_nv_1240', 'err_nv_1240', 'ew50_nv_1240', 'ewhw_nv_1240', 'flux_lya', 'err_lya', 'ew50_lya', 'ewhw_lya', 'pdf_max', 'cdf_z', 'sn_pab', 'sn_hei_1083', 'sn_siii', 'sn_oii_7325', 'sn_ariii_7138', 'sn_sii', 'sn_ha', 'sn_oi_6302', 'sn_hei_5877', 'sn_oiii', 'sn_hb', 'sn_oiii_4363', 'sn_hg', 'sn_hd', 'sn_h7', 'sn_h8', 'sn_h9', 'sn_h10', 'sn_neiii_3867', 'sn_oii', 'sn_nevi_3426', 'sn_nev_3346', 'sn_mgii', 'sn_civ_1549', 'sn_ciii_1908', 'sn_oiii_1663', 'sn_heii_1640', 'sn_niii_1750', 'sn_niv_1487', 'sn_nv_1240', 'sn_lya', 'chinu', 'bic_diff', 'log_risk', 'log_pdf_max', 'zq', 'mtime', 'vel_bl', 'vel_nl', 'vel_z', 'vel_nfev', 'vel_flag', 'grizli_version']
def get_connection_info(config_file=None):
"""
Read the database connection info
"""
import yaml
if config_file is None:
config_file = os.path.join(os.path.dirname(__file__),
'../data/db.yml')
try:
local_file = os.path.join(os.getenv('HOME'), 'db.local.yml')
if os.path.exists(local_file):
print('Use ~/db.local.yml')
config_file = local_file
except:
pass
fp = open(config_file)
try:
db_info = yaml.load(fp, Loader=yaml.FullLoader)
except:
db_info = yaml.load(fp)
fp.close()
return db_info
def get_db_engine(config=None, echo=False):
"""
Generate an SQLAlchemy engine for the grizli database
"""
from sqlalchemy import create_engine
if config is None:
config = get_connection_info()
db_string = "postgresql://{0}:{1}@{2}:{3}/{4}".format(config['username'], config['password'], config['hostname'], config['port'], config['database'])
engine = create_engine(db_string, echo=echo)
return engine
def get_redshift_fit_status(root, id, table='redshift_fit', engine=None):
"""
Get status value from the database for root_id object
"""
import pandas as pd
if engine is None:
engine = get_db_engine(echo=False)
res = pd.read_sql_query("SELECT status FROM {2} WHERE (root = '{0}' AND id = {1})".format(root, id, table), engine)
if len(res) == 0:
return -1
else:
return res['status'][0]
def update_jname():
from grizli import utils
res = grizli_db.from_sql("select p_root, p_id, p_ra, p_dec from photometry_apcorr", engine)
jn = [utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(0.001, 0.001), precision=2, targstr='j{rah}{ram}{ras}.{rass}{sign}{ded}{dem}{des}.{dess}') for ra, dec in zip(res['p_ra'], res['p_dec'])]
for c in res.colnames:
res.rename_column(c, c.replace('p_', 'j_'))
zres = grizli_db.from_sql("select root, phot_root, id, ra, dec, z_map,"
"q_z, t_g800l, t_g102, t_g141, status from "
"redshift_fit where ra is not null and "
"status > 5", engine)
# Find duplicates
from scipy.spatial import cKDTree
data = np.array([zres['ra'], zres['dec']]).T
ok = zres['q_z'].filled(-100) > -0.7
tree = cKDTree(data[ok])
dr, ix = tree.query(data[ok], k=2)
cosd = np.cos(data[:, 1]/180*np.pi)
dup = (dr[:, 1] < 0.01/3600) # & (zres['phot_root'][ix[:,0]] != zres['phot_root'][ix[:,1]])
ix0 = ix[:, 0]
ix1 = ix[:, 1]
dup = (dr[:, 1] < 0.01/3600)
dup &= (zres['phot_root'][ok][ix0] == zres['phot_root'][ok][ix1])
dup &= (zres['id'][ok][ix0] == zres['id'][ok][ix1])
# second is G800L
dup &= zres['t_g800l'].filled(0)[ok][ix1] > 10
plt.scatter(zres['z_map'][ok][ix0[dup]], zres['z_map'][ok][ix1[dup]],
marker='.', alpha=0.1)
def update_redshift_fit_status(root, id, status=0, table='redshift_fit', engine=None, verbose=True):
"""
Set the status flag in the table
"""
import time
import pandas as pd
from astropy.table import Table
from astropy.time import Time
NOW = Time.now().iso
if engine is None:
engine = get_db_engine(echo=False)
old_status = get_redshift_fit_status(root, id, table=table, engine=engine)
if old_status < 0:
# Need to add an empty row
tab = Table()
tab['root'] = [root]
tab['id'] = [id]
tab['status'] = [status]
tab['mtime'] = [NOW]
row_df = tab.to_pandas()
add_redshift_fit_row(row_df, engine=engine, table=table,
verbose=verbose)
else:
sqlstr = """UPDATE {0}
SET status = {1}, mtime = '{2}'
WHERE (root = '{3}' AND id = {4});
""".format(table, status, NOW, root, id)
if verbose:
msg = 'Update status for {0} {1}: {2} -> {3} on `{4}` ({5})'
print(msg.format(root, id, old_status, status, table, NOW))
engine.execute(sqlstr)
def get_row_data(rowfile='gds-g800l-j033236m2748_21181.row.fits', status_flag=FLAGS['fit_complete']):
"""
Convert table from a row file to a pandas DataFrame
"""
import pandas as pd
from astropy.table import Table
from astropy.time import Time
NOW = Time.now().iso
if isinstance(rowfile, str):
if rowfile.endswith('.fits'):
tab = Table.read(rowfile, character_as_bytes=False)
allowed_columns = COLUMNS
else:
# Output of stellar fits
tab = Table.read(rowfile, format='ascii.commented_header')
tab['chinu'] = tab['chi2']/tab['dof']
tab['phot_root'] = tab['root']
tab.rename_column('best_template', 'stellar_template')
try:
tab['chinu'] = tab['chi2']/tab['dof']
tab['phot_root'] = tab['root']
# BIC of spline-only and template fits
bic_spl = np.log(tab['dof'])*(tab['nk']-1) + tab['chi2_flat']
bic_star = np.log(tab['dof'])*(tab['nk']) + tab['chi2']
tab['bic_diff_star'] = bic_spl - bic_star
except:
print('Parse {0} failed'.format(rowfile))
pass
allowed_columns = ['root', 'id', 'ra', 'dec', 'chi2', 'nk', 'dof',
'chinu', 'chi2_flat', 'bic_diff_star', 'mtime',
'stellar_template', 'status', 'phot_root',
'as_epsf']
else:
tab = rowfile
if 'cdf_z' in tab.colnames:
cdf_z = tab['cdf_z'].data
tab.remove_column('cdf_z')
else:
cdf_z = None
tab['mtime'] = NOW
tab['status'] = status_flag
remove_cols = []
for c in tab.colnames:
if '-' in c:
tab.rename_column(c, c.replace('-', '_'))
for c in tab.colnames:
tab.rename_column(c, c.lower())
# Remove columns not in the database
remove_cols = []
for c in tab.colnames:
if c not in allowed_columns:
#print('Remove column: ', c)
remove_cols.append(c)
if len(remove_cols) > 0:
tab.remove_columns(remove_cols)
row_df = tab.to_pandas()
if cdf_z is not None:
row_df['cdf_z'] = cdf_z.tolist()
return row_df
def delete_redshift_fit_row(root, id, table='redshift_fit', engine=None):
"""
Delete a row from the redshift fit table
"""
if engine is None:
engine = get_db_engine(echo=False)
res = engine.execute("DELETE from {2} WHERE (root = '{0}' AND id = {1})".format(root, id, table))
def add_redshift_fit_row(row_df, table='redshift_fit', engine=None, verbose=True):
"""
Update the row in the redshift_fit table
"""
if engine is None:
engine = get_db_engine(echo=False)
if isinstance(row_df, str):
row_df = get_row_data(row_df)
if ('root' not in row_df.columns) | ('id' not in row_df.columns):
print('Need at least "root" and "id" columns in the row data')
return False
root = row_df['root'][0]
id = row_df['id'][0]
status = get_redshift_fit_status(root, id, table=table, engine=engine)
# Delete the old row?
if status >= 0:
print('Delete and update row for {0}/{1} on `{2}`'.format(root, id,
table))
delete_redshift_fit_row(root, id, table=table, engine=engine)
else:
print('Add row for {0}/{1} on `{2}`'.format(root, id, table))
# Add the new data
row_df.to_sql(table, engine, index=False, if_exists='append', method='multi')
###########
def add_missing_rows(root='j004404m2034', engine=None):
"""
Add rows that were completed but that aren't in the table
"""
import glob
from astropy.table import vstack, Table
from grizli.aws import db as grizli_db
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
os.system('aws s3 sync s3://grizli-v1/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*row.fits"'.format(root))
row_files = glob.glob('{0}*row.fits'.format(root))
row_files.sort()
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}' AND status=6".format(root), engine)
res_ids = res['id'].to_list()
tabs = []
print('\n\n NROWS={0}, NRES={1}\n\n'.format(len(row_files), len(res)))
for row_file in row_files:
id_i = int(row_file.split('.row.fits')[0][-5:])
if id_i not in res_ids:
grizli_db.add_redshift_fit_row(row_file, engine=engine, verbose=True)
def convert_1D_to_lists(file='j234420m4245_00615.1D.fits'):
"""
Convert 1D spectral data to lists suitable for putting into dataframes
and sending to the databases.
"""
from collections import OrderedDict
import astropy.io.fits as pyfits
from .. import utils
if not os.path.exists(file):
print('Spectrum file not found')
return False
im = pyfits.open(file)
obj_id = im[0].header['ID']
obj_root = im[0].header['TARGET']
if '.R30.' in file:
skip_columns = ['line', 'cont']
pref = 'spec1d_r30'
else:
skip_columns = []
pref = 'spec1d'
spectra = OrderedDict()
has_spectra = False
for gr in ['G102', 'G141', 'G800L']:
if gr in im:
has_spectra = True
sp = utils.GTable.read(file, hdu=gr)
prefix = '{0}_{1}_'.format(pref, gr.lower())
spd = {prefix+'id': obj_id, prefix+'root': obj_root}
for c in sp.colnames:
if c in skip_columns:
continue
spd[prefix+c] = sp[c].tolist()
spectra[gr.lower()] = spd
if has_spectra:
return spectra
else:
return False
def send_1D_to_database(files=[], engine=None):
"""
Send a list of 1D spectra to the spectra databases
ToDo: check for existing lines
"""
from collections import OrderedDict
import pandas as pd
if engine is None:
engine = get_db_engine()
tables = OrderedDict()
for file in files:
sp_i = convert_1D_to_lists(file=file)
print('Read spec1d file: {0}'.format(file))
for gr in sp_i:
# Initialize the columns
if gr not in tables:
tables[gr] = OrderedDict()
for c in sp_i[gr]:
tables[gr][c] = []
# Add the data
for c in sp_i[gr]:
tables[gr][c].append(sp_i[gr][c])
prefix = 'spec1d_r30' if '.R30.' in files[0] else 'spec1d'
for gr in tables:
tablename = '{0}_{1}'.format(prefix, gr)
df = pd.DataFrame(tables[gr])
# Put wavelengths in their own tables to avoid massive duplication
wave_table = tablename+'_wave'
if wave_table not in engine.table_names():
print('Create wave table: '+wave_table)
wdf = pd.DataFrame(data=tables[gr][wave_table][0],
columns=[wave_table])
wdf.to_sql(wave_table, engine, if_exists='replace',
index=True, index_label=tablename+'_idx')
# drop wave from spectra tables
df.drop('{0}_wave'.format(tablename), axis=1, inplace=True)
# Create table
if tablename not in engine.table_names():
print('Initialize table {0}'.format(tablename))
SQL = "CREATE TABLE {0} (\n".format(tablename)
SQL += ' {0}_root text,\n'.format(tablename)
SQL += ' {0}_id integer,\n'.format(tablename)
for c in df.columns:
item = df[c][0]
if isinstance(item, list):
SQL += ' {0} real[{1}],\n'.format(c, len(item))
engine.execute(SQL[:-2]+')')
try:
engine.execute("CREATE INDEX {0}_idx ON {0} ({0}_root, {0}_id);".format(tablename))
except:
pass
# Delete existing duplicates
if tablename in engine.table_names():
SQL = """DELETE from {0} WHERE """.format(tablename)
mat = ["({0}_root = '{1}' AND {0}_id = {2})".format(tablename, r, i) for r, i in zip(df[tablename+'_root'], df[tablename+'_id'])]
SQL += 'OR '.join(mat)
rsp = engine.execute(SQL)
# Send the table
print('Send {0} rows to {1}'.format(len(df), tablename))
df.to_sql(tablename, engine, index=False, if_exists='append',
method='multi')
def add_all_spectra():
from grizli.aws import db as grizli_db
roots = grizli_db.from_sql("select root,count(root) as n from redshift_fit group BY root order by n DESC", engine)
o = 1
for root in roots['root'][::o]:
existing = open('log').readlines()
if root+'\n' in existing:
print('Skip', root)
continue
fp = open('log', 'a')
fp.write(root+'\n')
fp.close()
try:
grizli_db.add_oned_spectra(root=root, engine=engine)
except:
pass
def add_oned_spectra(root='j214224m4420gr01', bucket='grizli-v1', engine=None):
import os
import glob
if engine is None:
engine = get_db_engine()
# import boto3
# s3 = boto3.resource('s3')
# bkt = s3.Bucket(bucket)
#
# files = [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/'.format(root))]
#
# for file in files:
# if (('.R30.fits' in file) | ('.1D.fits' in file)) & (not os.path.exists(file)):
# local_file = os.path.basename(file)
# print(local_file)
# bkt.download_file(file, local_file,
# ExtraArgs={"RequestPayer": "requester"})
os.system('aws s3 sync s3://{0}/Pipeline/{1}/Extractions/ ./ --exclude "*" --include "*R30.fits" --include "*1D.fits"'.format(bucket, root))
nmax = 500
# 1D.fits
files = glob.glob('{0}_*1D.fits'.format(root))
files.sort()
for i in range(len(files)//nmax+1):
send_1D_to_database(files=files[i*nmax:(i+1)*nmax], engine=engine)
files = glob.glob('{0}_*R30.fits'.format(root))
files.sort()
for i in range(len(files)//nmax+1):
send_1D_to_database(files=files[i*nmax:(i+1)*nmax], engine=engine)
os.system('rm {0}_*.1D.fits {0}_*.R30.fits'.format(root))
if False:
tablename = 'spec1d_g141'
#tablename = 'spec1d_g102'
#tablename = 'spec1d_r30_g141'
if 1:
# by root
resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND root = '{0}' AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
else:
# everything
resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
# Halpha EW
resp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, t_g141, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.3 AND err_ha > 0 ORDER BY ew50_ha".format(root, tablename), engine)
# Everything
fresp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, ew50_oiii, ew50_hb, ew50_oii, d4000, d4000_e, t_g141, t_g102, t_g800l, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 AND chinu < 2 ORDER BY z_map".format(root, tablename), engine)
wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
engine)[tablename+'_wave'].values
resp = fresp
sort_column = 'z_map'
bin_factor = 1
wnorm = 6400
zref = 1.3e4/wnorm-1
sel = np.isfinite(fresp[sort_column]) & (fresp[sort_column] != -99)
norm_ix = np.interp(wnorm*(1+fresp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)
sel &= np.isfinite(norm_ix)
resp = fresp[sel]
norm_ix = np.cast[int](np.round(np.interp(wnorm*(1+resp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)))
resp.sort_values(sort_column, inplace=True)
if tablename == 'spec1d_g141':
exptime = resp['t_g141'].values
wlim = [1.1e4, 1.65e4]
else:
exptime = resp['t_g102'].values
wlim = [8000, 1.1e4, 1.65e4]
data = OrderedDict()
for c in resp.columns:
if c.startswith(tablename):
c_i = c.split(tablename+'_')[1]
try:
data[c_i] = np.array(resp[c].values.tolist())
except:
pass
#plt.imshow((data['flux'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
# Rest-frame
dz = np.diff(wave)[10]/wave[10]
max_zshift = np.cast[int](np.log(1+resp['z_map'].max())/dz)
zshift = np.cast[int]((np.log(1+resp['z_map']) - np.log(1+zref))/dz)
err_max = 5
# Continuum normalized
#norm = data['cont'][:,100]/data['flat'][:,100]
norm = np.zeros(len(resp))
for i, ix in enumerate(norm_ix):
norm[i] = data['line'][i, ix]/data['flat'][i, ix]
#norm = np.mean(data['cont'][:,50:120]/data['flat'][:,50:120], axis=1)
# 2D arrays
normed = ((data['flux']/data['flat']).T/norm).T
cnormed = ((data['cont']/data['flat']).T/norm).T
lnormed = (((data['line']-data['cont'])/data['flat']).T/norm).T
err = ((data['err']/data['flat']).T/norm).T
mask = np.isfinite(norm) & (norm > 0) & np.isfinite(norm_ix)
normed = normed[mask, :]
cnormed = cnormed[mask, :]
lnormed = lnormed[mask, :]
err = err[mask, :]
ivar = 1/err**2
ivar[err <= 0] = 0
# Weight by exposure time
ivar = (ivar.T*0+(exptime[mask]/4000.)*norm[mask]).T
zshift = zshift[mask]
# Clip edges
wclip = (wave > wlim[0]) & (wave < wlim[1])
mask_val = 1e10
normed[:, ~wclip] = -mask_val
cnormed[:, ~wclip] = -mask_val
lnormed[:, ~wclip] = -mask_val
sh = normed.shape
rest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
crest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
lrest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
rest[:, zshift.max():zshift.max()+sh[1]] = normed*1
crest[:, zshift.max():zshift.max()+sh[1]] = cnormed*1
lrest[:, zshift.max():zshift.max()+sh[1]] = lnormed*1
rest_ivar = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min()))
rest_ivar[:, zshift.max():zshift.max()+sh[1]] = ivar*1
for i in range(sh[0]):
rest[i, :] = np.roll(rest[i, :], -zshift[i])
crest[i, :] = np.roll(crest[i, :], -zshift[i])
lrest[i, :] = np.roll(lrest[i, :], -zshift[i])
rest_ivar[i, :] = np.roll(rest_ivar[i, :], -zshift[i])
ok = np.isfinite(rest) & np.isfinite(rest_ivar) & (rest > -0.8*mask_val)
rest_ivar[~ok] = 0
rest[~ok] = -mask_val
crest[~ok] = -mask_val
lrest[~ok] = -mask_val
shr = rest.shape
nbin = int((shr[0]//shr[1])//2*bin_factor)*2
kernel = np.ones((1, nbin)).T
# npix = np.maximum(nd.convolve((rest > -0.8*mask_val)*1, kernel), 1)
# srest = nd.convolve(rest*(rest > -0.8*mask_val), kernel)
# sbin = (srest/npix)[::nbin,:]
# plt.imshow(sbin, vmin=0, vmax=5)
num = nd.convolve(rest*rest_ivar, kernel)
cnum = nd.convolve(crest*rest_ivar, kernel)
lnum = nd.convolve(lrest*rest_ivar, kernel)
den = nd.convolve(rest_ivar, kernel)
wbin = (num/den)[::nbin, :]
wbin[~np.isfinite(wbin)] = 0
cwbin = (cnum/den)[::nbin, :]
cwbin[~np.isfinite(cwbin)] = 0
lwbin = (lnum/den)[::nbin, :]
lwbin[~np.isfinite(lwbin)] = 0
plt.imshow(wbin, vmin=0, vmax=5)
plt.imshow((data['line'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
def run_lambda_fits(root='j004404m2034', phot_root=None, mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None, zr=[0.01, 3.4], bucket='grizli-v1', verbose=True, extra={'bad_pa_threshold': 10}):
"""
Run redshift fits on lambda for a given root
"""
from grizli.aws import fit_redshift_lambda
from grizli import utils
from grizli.aws import db as grizli_db
if engine is None:
engine = grizli_db.get_db_engine()
import pandas as pd
import numpy as np
import glob
import os
print('Sync phot catalog')
if phot_root is None:
root = root
os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits"'.format(phot_root, bucket))
print('Sync wcs.fits')
os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits" --include "*wcs.fits"'.format(root, bucket))
phot = utils.read_catalog('{0}_phot_apcorr.fits'.format(phot_root))
phot['has_grism'] = 0
wcs_files = glob.glob('*wcs.fits')
for f in wcs_files:
w = utils.WCSFootprint(f, ext=0)
has = w.path.contains_points(np.array([phot['ra'], phot['dec']]).T)
print(f, has.sum())
phot['has_grism'] += has
mag = phot['mag_auto']*np.nan
mag_filt = np.array([' ']*len(phot))
sn = phot['mag_auto']*np.nan
for filt in ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m', 'f814w', 'f850lp', 'f606w', 'f775w']:
if '{0}_tot_1'.format(filt) in phot.colnames:
mag_i = 23.9-2.5*np.log10(phot['{0}_tot_1'.format(filt)])
fill = (~np.isfinite(mag)) & np.isfinite(mag_i)
mag[fill] = mag_i[fill]
mag_filt[fill] = filt
sn_i = phot['{0}_tot_1'.format(filt)]/phot['{0}_etot_1'.format(filt)]
sn[fill] = sn_i[fill]
sel = np.isfinite(mag) & (mag >= mag_limits[0]) & (mag <= mag_limits[1]) & (phot['has_grism'] > 0)
sel &= phot['flux_radius'] > 1
sel &= sn > sn_limit
if min_status is not None:
res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}'".format(root, min_status), engine)
if len(res) > 0:
status = phot['id']*0-100
status[res['id']-1] = res['status']
sel &= status < min_status
ids = phot['id'][sel]
# Select just on min_status
if min_status > 1000:
if min_status > 10000:
# Include mag constraints
res = pd.read_sql_query("SELECT root, id, status, mtime, mag_auto FROM redshift_fit,photometry_apcorr WHERE root = '{0}' AND status = {1}/10000 AND mag_auto > {2} AND mag_auto < {3} AND p_root = root AND p_id = id".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
else:
# just select on status
res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}' AND status = {1}/1000".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
ids = res['id'].tolist()
if len(ids) == 0:
return False
fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name=bucket, skip_existing=False, sleep=False, skip_started=False, show_event=False, zr=zr, force_args=True, quasar_fit=False, output_path=None, save_figures='png', verbose=verbose, **extra)
print('Add photometry: {0}'.format(root))
grizli_db.add_phot_to_db(phot_root, delete=False, engine=engine)
res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
grizli_db.set_phot_root(root, phot_root, engine)
res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0 AND root = '{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
return res
if False:
res = pd.read_sql_query("SELECT root, id, status, redshift, bic_diff, mtime FROM redshift_fit WHERE (root = '{0}')".format(root), engine)
# Get arguments
args = fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 3.4], force_args=True)
def set_phot_root(root, phot_root, engine):
"""
"""
print('Set phot_root = {0} > {1}'.format(root, phot_root))
SQL = """UPDATE redshift_fit
SET phot_root = '{phot_root}'
WHERE (root = '{root}');
""".format(phot_root=phot_root, root=root)
engine.execute(SQL)
if False:
# Check where phot_root not equal to root
res = pd.read_sql_query("SELECT root, id, status, phot_root FROM redshift_fit WHERE (phot_root != root)".format(root), engine)
# update the one pointing where it should change in photometry_apcorr
engine.execute("UPDATE photometry_apcorr SET p_root = 'j214224m4420' WHERE root = 'j214224m4420gr01';")
engine.execute("UPDATE redshift_fit SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
engine.execute("UPDATE redshift_fit_quasar SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
if False:
# Replace in-place
engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%' AND root LIKE '%%-grism%%")
engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%'")
engine.execute("update redshift_fit set phot_root = 'j214224m4420' WHERE root like 'j214224m4420gr%%'")
engine.execute("update redshift_fit_quasar set phot_root = replace(root, 'g800l', 'grism') where root like '%%g800l%%'")
# Set 3D-HST fields
res = grizli_db.from_sql("select distinct root from redshift_fit where root like '%%-grism%%'", engine)
for root in res['root']:
grizli_db.set_phot_root(root, root, engine)
grizli_db.set_phot_root(root.replace('-grism', '-g800l'), root, engine)
xres = grizli_db.from_sql("select root, count(root) from redshift_fit where root like '{0}-%%' group by root".format(root.split('-')[0]), engine)
print(xres)
# Update OBJID for natural join
# for tab in ['redshift_fit', 'redshift_fit_quasar', 'multibeam']
SQL = """
WITH sub AS (
SELECT objid as p_objid, p_root, p_id
FROM photometry_apcorr
)
UPDATE redshift_fit
SET objid = p_objid
FROM sub
WHERE phot_root = p_root AND id = p_id;
"""
db.from_sql(SQL, engine)
engine.execute(SQL)
def wait_on_db_update(root, t0=60, dt=30, n_iter=60, engine=None):
"""
Wait for db to stop updating on root
"""
import pandas as pd
from astropy.table import Table
from grizli.aws import db as grizli_db
import numpy as np
import time
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
n_i, n6_i, checksum_i = -1, -1, -1
for i in range(n_iter):
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}'".format(root), engine)
checksum = (2**res['status']).sum()
n = len(res)
n6 = (res['status'] == 6).sum()
n5 = (res['status'] == 5).sum()
if (n == n_i) & (checksum == checksum_i) & (n6 == n6_i):
break
now = time.ctime()
print('{0}, {1}: n={2:<5d} n5={5:<5d} n6={3:<5d} checksum={4}'.format(root, now, n, n6, checksum, n5))
n_i, n6_i, checksum_i = n, n6, checksum
if i == 0:
time.sleep(t0)
else:
time.sleep(dt)
return res
##
def fit_timeouts(root='j004404m2034', mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None):
"""
Run redshift fits on lambda for a given root
"""
from grizli.aws import fit_redshift_lambda
from grizli import utils
from grizli.aws import db as grizli_db
if engine is None:
engine = grizli_db.get_db_engine()
import pandas as pd
import numpy as np
import glob
import os
res = pd.read_sql_query("SELECT id, status FROM redshift_fit WHERE root = '{0}' AND status = 5".format(root), engine)
if len(res) == 0:
return True
ids = res['id'].tolist()
fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=False, zr=[0.01, 2.4], force_args=True)
res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
return res
# All timeouts
events = fit_redshift_lambda.fit_lambda(root='egs-g800l-j141956p5255', beams=[], ids=[20667], newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 2.4], force_args=True)
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' ORDER BY root".format(root), engine)
base = {'bucket': 'grizli-v1', 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,2.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png'}
all_events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=True)
#################
# Fit locally on EC2
i0 = 0
import os
import pandas as pd
import numpy as np
from grizli.aws import db as grizli_db
from grizli.aws import fit_redshift_lambda, lambda_handler
engine = grizli_db.get_db_engine(echo=False)
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root LIKE '%%-grism%%' ORDER BY root", engine)
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root NOT LIKE '%%-grism%%' AND root NOT LIKE '%%g800l%%' ORDER BY root", engine)
bucket = 'grizli-v1'
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'j114936p2222' ORDER BY id", engine)
bucket = 'grizli-v1'
# res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'cos-grism%%' order by id", engine)
# bucket = 'grizli-cosmos-v2'
N = len(res)
np.random.seed(1)
so = np.argsort(np.random.normal(size=N))
base = {'bucket': bucket, 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,3.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png', 'verbose': True, 'working_directory': os.getcwd()}
events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=False)
for event in events[i0::2]:
lambda_handler.handler(event, {})
########
xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
print(len(res), len(xres))
# show points
xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
# Photometry table
def set_filter_bits(phot):
"""
Set bits indicating available filters
"""
import numpy as np
filters = ['f160w', 'f140w', 'f125w', 'f110w', 'f105w', 'f098m', 'f850lp', 'f814w', 'f775w', 'f625w', 'f606w', 'f475w', 'f438w', 'f435w', 'f555w', 'f350lp', 'f390w', 'f336w', 'f275w', 'f225w']
bits = [np.uint32(2**i) for i in range(len(filters))]
phot['filter_bit'] = np.zeros(len(phot), dtype=np.uint32)
phot['red_bit'] = np.zeros(len(phot), dtype=np.uint32)
for i, filt in enumerate(filters):
col = '{0}_flux_aper_0'.format(filt)
if col in phot.colnames:
red = bits[i] * np.isfinite(phot[col]) * (phot['filter_bit'] == 0)
phot['filter_bit'] |= bits[i] * np.isfinite(phot[col])
phot['red_bit'] |= red
print(filt, i, bits[i], red.max())
def phot_to_dataframe(phot, root):
"""
Convert phot_apcorr.fits table to a pandas DataFrame
- Add 'root' column
- remove "dummy" columns
- rename 'xmin', 'xmax', 'ymin', 'ymax' to 'image_xmin', ...
"""
phot['root'] = root
set_filter_bits(phot)
for c in ['dummy_flux', 'dummy_err']:
if c in phot.colnames:
phot.remove_column(c)
for c in ['xmin', 'xmax', 'ymin', 'ymax']:
phot.rename_column(c, 'image_'+c)
for c in ['root', 'id', 'ra', 'dec']:
phot.rename_column(c, 'p_'+c)
df = phot.to_pandas()
return df
def add_phot_to_db(root, delete=False, engine=None, nmax=500):
"""
Read the table {root}_phot_apcorr.fits and append it to the grizli_db `photometry_apcorr` table
"""
import pandas as pd
from astropy.table import Table
from grizli.aws import db as grizli_db
import numpy as np
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
res = pd.read_sql_query("SELECT p_root, p_id FROM photometry_apcorr WHERE p_root = '{0}'".format(root), engine)
if len(res) > 0:
if delete:
print('Delete rows where root={0}'.format(root))
res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root))
if False:
res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root))
else:
print('Data found for root={0}, delete them if necessary'.format(root))
return False
# Read the catalog
phot = Table.read('{0}_phot_apcorr.fits'.format(root), character_as_bytes=False)
# remove columns
remove = []
for c in phot.colnames:
if ('_corr_' in c) | ('_ecorr_' in c) | (c[-5:] in ['tot_4', 'tot_5', 'tot_6']) | ('dummy' in c):
remove.append(c)
phot.remove_columns(remove)
# Add new filter columns if necessary
empty = pd.read_sql_query("SELECT * FROM photometry_apcorr WHERE false", engine)
df = phot_to_dataframe(phot, root)
new_cols = []
for c in df.columns:
if c not in empty.columns:
new_cols.append(c)
if len(new_cols) > 0:
for c in new_cols:
print('Add column {0} to `photometry_apcorr` table'.format(c))
sql = "ALTER TABLE photometry_apcorr ADD COLUMN {0} real;".format(c)
res = engine.execute(sql)
# Add new table
print('Send {0}_phot_apcorr.fits to `photometry_apcorr`.'.format(root))
if nmax > 0:
# Split
N = len(phot) // nmax
for i in range(N+1):
print(' add rows {0:>5}-{1:>5} ({2}/{3})'.format(i*nmax, (i+1)*nmax, i+1, N+1))
df[i*nmax:(i+1)*nmax].to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
else:
df.to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
def multibeam_to_database(beams_file, engine=None, Rspline=15, force=False, **kwargs):
"""
Send statistics of the beams.fits file to the database
"""
import numpy as np
import pandas as pd
from astropy.time import Time
from .. import multifit, utils
if engine is None:
engine = get_db_engine(echo=False)
mtime = Time(os.stat(beams_file).st_mtime, format='unix').iso
root = beams_file.split('_')[0]
id = int(beams_file.split('_')[1].split('.')[0])
res = pd.read_sql_query("SELECT mtime from multibeam WHERE (root = '{0}' AND id = {1})".format(root, id), engine)
if len(res) == 1:
if (res['mtime'][0] == mtime) & (not force):
print('{0} already in multibeam table'.format(beams_file))
return True
mb = multifit.MultiBeam(beams_file, **kwargs)
print('Update `multibeam` and `beam_geometry` tables for {0}.'.format(beams_file))
# Dummy for loading the templates the same way as for the quasars
# for generating the spline fit
templ_args = {'uv_line_complex': True,
'broad_fwhm': 2800,
'narrow_fwhm': 1000,
'fixed_narrow_lines': True,
'Rspline': Rspline,
'include_reddened_balmer_lines': False}
q0, q1 = utils.load_quasar_templates(**templ_args)
for t in list(q0.keys()):
if 'bspl' not in t:
q0.pop(t)
tfit = mb.template_at_z(0, templates=q0, fitter='lstsq')
sp = tfit['line1d'].wave, tfit['line1d'].flux
m2d = mb.get_flat_model(sp, apply_mask=True, is_cgs=True)
mb.initialize_masked_arrays()
chi0 = (mb.scif_mask**2*mb.ivarf[mb.fit_mask]).sum()
# Percentiles of masked contam, sci, err and contam/sci
pvals = np.arange(5, 96, 5)
mpos = m2d > 0
contam_percentiles = np.percentile(mb.contamf_mask, pvals)
sci_percentiles = np.percentile(mb.scif_mask, pvals)
err_percentiles = np.percentile(1/mb.sivarf[mb.fit_mask], pvals)
sn_percentiles = np.percentile(mb.scif_mask*mb.sivarf[mb.fit_mask], pvals)
fcontam_percentiles = np.percentile(mb.contamf_mask/mb.scif_mask, pvals)
# multibeam dataframe
df = pd.DataFrame()
float_type = np.float
df['root'] = [root]
df['id'] = [id]
df['objid'] = [-1]
df['mtime'] = [mtime]
df['status'] = [6]
df['scip'] = [list(sci_percentiles.astype(float_type))]
df['errp'] = [list(err_percentiles.astype(float_type))]
df['snp'] = [list(sn_percentiles.astype(float_type))]
df['snmax'] = [float_type((mb.scif_mask*mb.sivarf[mb.fit_mask]).max())]
df['contamp'] = [list(contam_percentiles.astype(float_type))]
df['fcontamp'] = [list(fcontam_percentiles.astype(float_type))]
df['chi0'] = [np.int32(chi0)]
df['rspline'] = [Rspline]
df['chispl'] = [np.int32(tfit['chi2'])]
df['mb_dof'] = [mb.DoF]
df['wmin'] = [np.int32(mb.wave_mask.min())]
df['wmax'] = [np.int32(mb.wave_mask.max())]
# Input args
for a in ['fcontam', 'sys_err', 'min_sens', 'min_mask']:
df[a] = [getattr(mb, a)]
# Send to DB
res = engine.execute("DELETE from multibeam WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)
df.to_sql('multibeam', engine, index=False, if_exists='append', method='multi')
# beams dataframe
d = {}
for k in ['root', 'id', 'objid', 'filter', 'pupil', 'pa', 'instrument', 'fwcpos', 'order', 'parent', 'parent_ext', 'ccdchip', 'sci_extn', 'exptime', 'origin_x', 'origin_y', 'pad', 'nx', 'ny', 'sregion']:
d[k] = []
for beam in mb.beams:
d['root'].append(root)
d['id'].append(id)
d['objid'].append(-1)
for a in ['filter', 'pupil', 'instrument', 'pad',
'fwcpos', 'ccdchip', 'sci_extn', 'exptime']:
d[a].append(getattr(beam.grism, a))
d['order'].append(beam.beam.beam)
parent = beam.grism.parent_file.replace('.fits', '').split('_')
d['parent'].append(parent[0])
d['parent_ext'].append(parent[1])
d['origin_x'].append(beam.grism.origin[1])
d['origin_y'].append(beam.grism.origin[0])
d['nx'].append(beam.sh[1])
d['ny'].append(beam.sh[0])
f = beam.grism.wcs.calc_footprint().flatten()
fs = ','.join(['{0:.6f}'.format(c) for c in f])
d['sregion'].append('POLYGON({0})'.format(fs))
d['pa'].append(int(np.round(beam.get_dispersion_PA())))
df = pd.DataFrame.from_dict(d)
# Send to database
res = engine.execute("DELETE from beam_geometry WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)
df.to_sql('beam_geometry', engine, index=False, if_exists='append', method='multi')
if False:
# Fix multibeam arrays
import pandas as pd
import numpy as np
from sqlalchemy import types
from grizli.aws import db as grizli_db
engine = grizli_db.get_db_engine()
df = pd.read_sql_query('select id, root, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)
c = 'snp'
data = pd.DataFrame()
data['id'] = df['id']
data['root'] = df['root']
dtype = {'root': types.String, 'id': types.Integer}
for c in df.columns:
if c.endswith('p'):
print(c)
dtype[c[:-1]+'_p'] = types.ARRAY(types.FLOAT)
data[c[:-1]+'_p'] = [list(np.cast[float](line.strip()[1:-1].split(','))) for line in df[c]]
data.to_sql('multibeam_tmp', engine, index=False, if_exists='append', method='multi')
from sqlalchemy import types
for c in df.columns:
if c.endswith('p'):
pass
for c in df.columns:
if c.endswith('p'):
sql = "ALTER TABLE multibeam ADD COLUMN {0} real[];".format(c[:-1]+'_p')
print(sql)
sql = "UPDATE multibeam mb SET {new} = tmp.{new} FROM multibeam_tmp tmp WHERE tmp.id = mb.id AND tmp.root = mb.root;".format(new=c[:-1]+'_p')
print(sql)
x = grizli_db.from_sql('select id, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)
def test_join():
import pandas as pd
res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
res = pd.read_sql_query("SELECT * FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
# on root
res = pd.read_sql_query("SELECT p.root, p.id, mag_auto, z_map, status FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE root='{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
def column_comments():
from collections import OrderedDict
import yaml
tablename = 'redshift_fit'
cols = pd.read_sql_query('select * from {0} where false'.format(tablename), engine)
d = {} # OrderedDict{}
for c in cols.columns:
d[c] = '---'
if not os.path.exists('{0}_comments.yml'.format(tablename)):
print('Init {0}_comments.yml'.format(tablename))
fp = open('{0}_comments.yml'.format(tablename), 'w')
yaml.dump(d, stream=fp, default_flow_style=False)
fp.close()
# Edit file
comments = yaml.load(open('{0}_comments.yml'.format(tablename)))
SQL = ""
upd = "COMMENT ON COLUMN {0}.{1} IS '{2}';\n"
for col in comments:
if comments[col] != '---':
SQL += upd.format(tablename, col, comments[col])
else:
print('Skip ', col)
def add_spectroscopic_redshifts(xtab, rmatch=1, engine=None, db=None):
"""
Add spectroscopic redshifts to the photometry_apcorr table
Input table needs (at least) columns:
['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']
"""
import glob
import pandas as pd
from astropy.table import vstack
from grizli.aws import db as grizli_db
from grizli import utils
for c in ['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']:
if c not in xtab.colnames:
print('Column {0} not found in input table'.format(c))
return False
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
# Force data types
tab = xtab[xtab['z_spec'] >= 0]
if hasattr(tab['ra'], 'mask'):
tab = tab[~tab['ra'].mask]
tab['z_spec_qual'] = tab['z_spec_qual']*1
tab['z_spec_qual_raw'] = tab['z_spec_qual_raw']*1
if False:
# duplicates
fit = grizli_db.from_sql("select root, ra, dec from redshift_fit", engine)
fit = grizli_db.from_sql("select root, ra, dec from redshift_fit where ra is null", engine)
# Select master table
if db is None:
res = pd.read_sql_query("SELECT p_root, p_id, p_ra, p_dec, z_spec from photometry_apcorr", engine)
db = utils.GTable.from_pandas(res)
for c in ['p_root', 'p_id', 'p_ra', 'p_dec']:
db.rename_column(c, c[2:])
idx, dr = db.match_to_catalog_sky(tab)
hasm = (dr.value < rmatch) & (tab['z_spec'] >= 0)
tab['z_spec_dr'] = dr.value
tab['z_spec_ra'] = tab['ra']
tab['z_spec_dec'] = tab['dec']
tab['db_root'] = db['root'][idx]
tab['db_id'] = db['id'][idx]
tabm = tab[hasm]['db_root', 'db_id', 'z_spec', 'z_spec_src', 'z_spec_dr', 'z_spec_ra', 'z_spec_dec', 'z_spec_qual_raw', 'z_spec_qual']
print('Send zspec to photometry_apcorr (N={0})'.format(hasm.sum()))
df = tabm.to_pandas()
df.to_sql('z_spec_tmp', engine, index=False, if_exists='replace', method='multi')
SQL = """UPDATE photometry_apcorr
SET z_spec = zt.z_spec,
z_spec_src = zt.z_spec_src,
z_spec_dr = zt.z_spec_dr,
z_spec_ra = zt.z_spec_ra,
z_spec_dec = zt.z_spec_dec,
z_spec_qual_raw = zt.z_spec_qual_raw,
z_spec_qual = zt.z_spec_qual
FROM z_spec_tmp as zt
WHERE (zt.db_root = p_root AND zt.db_id = p_id);
"""
engine.execute(SQL)
if False:
# Update redshift_fit ra/dec with photometry_table double prec.
SQL = """UPDATE redshift_fit
SET ra = p_ra
dec = p_dec
FROM photometry_apcorr
WHERE (phot_root = p_root AND id = p_id AND root = 'j123556p6221');
"""
def mtime_to_iso(ct):
"""
Convert mtime values to ISO format suitable for sorting, etc.
"""
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
spl = ct.split()
iso = '{yr}-{mo:02d}-{dy:02d} {time}'.format(dy=int(spl[2]), mo=int(months.index(spl[1])+1), yr=spl[-1], time=spl[-2])
return iso
def various_selections():
# sdss z_spec
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^sdss-dr15'", table_root='sdss_zspec', sync='s3://grizli-v1/tables/')
# objects with carla redshifts (radio loud)
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^carla'", table_root='carla_zspec', sync='s3://grizli-v1/tables/')
# Bright galaxies with q_z flag
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND mag_auto < 22 AND z_map > 1.3", table_root='bright', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# High-z compiliation
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'q_z', 'h_zphot', 'h_src', 'h_dr'], where="AND status > 4 AND phot_root = h_root AND id = h_id AND h_dr < 1", tables=['highz_2015'], table_root='highz', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# z_spec with dz
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1", table_root='zspec_delta', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# Point sources
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND mag_auto < 24 AND flux_radius < 1.9 AND ((flux_radius < 1.5 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.9 AND flux_radius > 1.0 AND red_bit < 32))", table_root='point_sources', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)
# Reliable redshifts
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', '(flux_radius < 1.7 AND ((flux_radius < 1.4 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.7 AND flux_radius > 1.0 AND red_bit < 32)))::int as is_point', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'sn_siii', 'sn_ha', 'sn_oiii', 'sn_oii', 'ew50_ha', 'd4000', 'd4000_e', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND chinu < 30 AND q_z > -0.7 order by q_z", table_root='reliable_redshifts', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full'], get_sql=False, sort_column=('q_z', -1))
# stellar classification?
# sql = """SELECT root, id, ra, dec, status, z_map, q_z_map, bic_diff,
# bic_diff_star,
# chinu as t_chinu, s_chinu, q_chinu,
# chinu - q_chinu as tq_chinu, q_chinu - s_chinu as qs_chinu,
# chinu - s_chinu as ts_chinu, stellar_template
# FROM redshift_fit,
# (SELECT root as s_root, id as s_id, chinu as s_chinu, bic_diff_star,
# stellar_template
# FROM stellar_fit
# WHERE status = 6
# ) as s,
# (SELECT root as q_root, id as q_id, chinu as q_chinu,
# bic_diff as q_bic_diff, z_map as q_z_map
# FROM redshift_fit_quasar
# WHERE status = 6
# ) as q
# WHERE (root = s_root AND id = s_id) AND (root = q_root AND id = q_id)
# """
#res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='carbon_stars', sync='s3://grizli-v1/tables/', png_ext=['stack','line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)
sql = """SELECT root, id, status, ra, dec, t_g800l, t_g102, t_g141,
z_map, q_z_map, bic_diff,
bic_diff_star, (bic_diff_star > 10 AND q_chinu < 20 AND chinu - q_chinu > 0.05 AND q_chinu-s_chinu > 0 AND chinu-s_chinu > 0.1)::int as is_star,
chinu as t_chinu, s_chinu, q_chinu,
bic_qso-bic_gal as bic_gq,
bic_gal-bic_star as bic_gs,
bic_qso-bic_star as bic_qs,
(bic_spl+chimin)-bic_gal as bic_gx,
bic_spl_qso-bic_qso as bic_qx,
q_vel_bl, qso_q_z, qso_zw1, stellar_template
FROM (SELECT *, bic_temp+chimin as bic_gal FROM redshift_fit z,
(SELECT root as q_root, id as q_id, chinu as q_chinu,
bic_diff as q_bic_diff, bic_temp+chimin as bic_qso,
bic_spl+chimin as bic_spl_qso,
z_map as qso_z_map,
zwidth1/(1+z_map) as qso_zw1, vel_bl as q_vel_bl,
q_z as qso_q_z
FROM redshift_fit_quasar
WHERE status = 6
) q
WHERE (root = q_root AND id = q_id)) c
LEFT JOIN
(SELECT root as s_root, id as s_id, chinu as s_chinu,
LN(dof)*nk+chi2 as bic_star,
LN(dof)*(nk-1)+chi2_flat as bic_spline,
bic_diff_star,
stellar_template
FROM stellar_fit
WHERE status = 6
) s ON (root = s_root AND id = s_id) WHERE chinu-q_chinu > 0.5
"""
cstar = grizli_db.from_sql(sql, engine)
cstar['is_star'] = cstar['is_star'].filled(-1)
print('N={0}'.format(len(cstar)))
res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='quasars_and_stars', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)
# best-fit as quasar
sql = """SELECT root, id, ra, dec, status, z_map, q_z_map,
q_z, bic_diff, q_bic_diff,
chinu as t_chinu, q_chinu,
chinu - q_chinu as tq_chinu,
(q_bic_temp + q_chimin) - (bic_temp + chimin) as bic_diff_quasar,
q_vel_bl
FROM redshift_fit z JOIN
(SELECT root as q_root, id as q_id, chinu as q_chinu,
bic_diff as q_bic_diff, z_map as q_z_map, vel_bl,
chimin as q_chimin, bic_temp as q_bic_temp, vel_bl as q_vel_bl
FROM redshift_fit_quasar
WHERE status = 6
) as q
WHERE (root = q_root AND id = q_id) AND status = 6 AND q_z > -1
"""
qq = grizli_db.from_sql(sql, engine)
res = grizli_db.make_html_table(engine=engine, res=qq, table_root='quasar_fit', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)
# Strong lines
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'sn_ha', 'sn_oiii', 'sn_oii'], where="AND status > 4 AND mag_auto < 24 AND (sn_ha > 10 OR sn_oiii > 10 OR sn_oii > 10) AND flux_radius >= 1.6", table_root='strong_lines', sync='s3://grizli-v1/tables/', png_ext=['stack', 'full', 'qso.full', 'star'])
# brown dwarf?
tablename = 'spec1d_r30_g141'
wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
engine)[tablename+'_wave'].values
# 1.15, 1.25, 1.4
i0 = 25, 28, 29, 32
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25]/{0}_flux[28] as c1'.format(tablename), '{0}_flux[32]/{0}_flux[28] as c2'.format(tablename)], where="AND status > 4 AND flux_radius < 2 AND flux_radius > 1 AND mag_auto < 25 AND {0}_root = root AND {0}_id = id AND {0}_flux[28] > 0 AND {0}_flux[28]/{0}_err[28] > 5 AND {0}_flux[32] > 0 AND {0}_flux[25] > 0 AND {0}_flux[32]/{0}_flux[28] < 0.5".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25] as c25'.format(tablename), '{0}_flux[32] as c32'.format(tablename)], where="AND status > 4 AND z_spec = 0".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# with line ratios
lstr = 'err_{0} > 0 AND err_{0} < 5e-17'
err_lines = ' AND '.join(lstr.format(li) for li in
['hb', 'oiii', 'ha', 'sii'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn', 'flux_hb/flux_ha as HbHa', 'flux_hb/flux_oiii as HbO3', 'flux_oiii/flux_ha as O3Ha'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1 AND sn_oiii > 3 AND sn_ha > 2 AND {0}".format(err_lines), table_root='zspec_lines', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
if False:
from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator
xti = xt = np.arange(0, 3.6, 0.5)
loc = np.arange(0, 3.6, 0.1)
bins = utils.log_zgrid([0.03, 3.5], 0.01)
fig = plt.figure(figsize=[7, 6])
ax = fig.add_subplot(111)
ax.scatter(np.log(1+res['z_spec']), np.log(1+res['z_map']), alpha=0.2, c=np.log10(res['zw1']), marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')
sc = ax.scatter(np.log([1]), np.log([1]), alpha=0.8, c=[0], marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')
cb = plt.colorbar(sc, shrink=0.6)
cb.set_label(r'$(z_{84}-z_{16})/(1+z_{50})$')
cb.set_ticks([-3, -2, -1])
cb.set_ticklabels([0.001, 0.01, 0.1])
xts = ax.set_xticks(np.log(1+xt))
xtl = ax.set_xticklabels(xti)
xts = ax.set_yticks(np.log(1+xt))
xtl = ax.set_yticklabels(xti)
ax.set_xlim(0, np.log(1+3.5))
ax.set_ylim(0, np.log(1+3.5))
ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.yaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.set_xlabel('z_spec')
ax.set_ylabel('z_MAP')
ax.set_aspect(1)
ax.grid()
ax.text(0.95, 0.05, r'$N={0}$'.format(len(res)), ha='right', va='bottom', transform=ax.transAxes)
ax.plot(ax.get_xlim(), ax.get_xlim(), color='k', alpha=0.2, linewidth=1, zorder=-10)
fig.tight_layout(pad=0.1)
fig.savefig('grizli_v1_literature_zspec.pdf')
# COSMOS test
root = 'cos-grism-j100012p0210'
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND bic_diff > 100 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# high bic_diff = unambiguous
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e', '-(bic_temp-bic_spl) as bic_diff_spl'], where="AND status > 5 AND (((bic_diff > 50 OR zwidth1/(1+z_map) < 0.01) AND chinu < 2))", table_root='unamb', sync='s3://grizli-v1/tables/')
# with d4000
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status > 5 AND chinu < 3 AND d4000 > 1 AND d4000 < 5 AND d4000_e > 0 AND d4000_e < 0.25 AND bic_diff > 5", table_root='d4000', sync='s3://grizli-v1/tables/')
# LBG?
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '-(bic_temp-bic_spl) as bic_diff_spl', 'splf01/splf02 as r12', 'splf02/splf03 as r23', 'splf02/sple02 as sn02'], where="AND status > 5 AND mag_auto > 23 AND bic_diff > -50 AND splf01/splf02 < 0.3 AND splf02/sple02 > 2 AND splf01 != 0 AND splf02 != 0 AND splf03 != 0 ".format(root), table_root='lbg_g800l', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# stars?
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND bic_diff > 100 AND chinu < 1.5 AND mag_auto < 24 AND sn_Ha > 20", table_root='star', sync='s3://grizli-v1/tables/')
# By root
root = 'j001420m3030'
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND root = '{0}' AND bic_diff > 5".format(root), table_root=root+'-fit', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# G800L spec-zs
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '(z_map-z_spec)/(1+z_spec) as delta_z'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND t_g800l > 0", table_root='zspec_g800l', sync='s3://grizli-v1/tables/')
# Large G800L likely mismatch [OIII]/Ha
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 3 AND mag_auto < 23 AND bic_diff > 5", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')
# Potential Ly-a?
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 5 AND sn_ha > 0 AND flux_oiii/flux_ha > 1.8", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')
# Continuum resid
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND bic_diff > 5 AND splf01 > 0 AND bic_diff > 50".format(root), table_root='xxx', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 as fresid', 'splf01/sple01 as sn01', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND f814w_tot_1 > 0 AND splf01 != 0 AND splf01/sple01 > 1 AND f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 0 AND (f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 < 0.3 OR f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 4)", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf01', 'sple01', 'f814w_tot_1', 'f850lp_tot_1', 'flux_auto/flux_iso as flux_aper_corr', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND splf01 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)
res = pd.read_sql_query(sql, engine)
splmag = 23.9-2.5*np.log10(np.maximum(res['splf01'], 1.e-22)*8140**2/3.e18*1.e29)
sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf03', 'sple03', 'f140w_tot_1', 'f160w_tot_1', 'flux_auto/flux_iso as flux_aper_corr'], where="AND status > 5 AND t_g141 > 0 AND sple03 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)
res = pd.read_sql_query(sql, engine)
splmag = 23.9-2.5*np.log10(np.maximum(res['splf03'], 1.e-22)*1.2e4**2/3.e18*1.e29)
# Number of matches per field
counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 50 AND mag_auto < 24 group by root;", engine)
def from_sql(query, engine):
import pandas as pd
from grizli import utils
res = pd.read_sql_query(query, engine)
tab = utils.GTable.from_pandas(res)
set_column_formats(tab)
return tab
def render_for_notebook(tab, image_extensions=['stack', 'full', 'line'], bucket='grizli-v1', max_rows=20, link_root=True):
"""
Render images for inline display in a notebook
In [1]: from IPython.display import HTML
In [2]: HTML(tab)
"""
import pandas as pd
pd.set_option('display.max_colwidth', -1)
rows = tab[:max_rows].copy()
buckets = [bucket]*len(rows)
for i, r in enumerate(rows['root']):
if r.startswith('cos-g'):
buckets[i] = 'grizli-cosmos-v2'
rows['bucket'] = buckets
rows['ext'] = 'longstring' # longer than the longest extension
s3url = 'https://s3.amazonaws.com/{bucket}/Pipeline/{root}/Extractions/{root}_{id:05d}.{ext}.png'
def href_root(root):
if root.startswith('cos-g'):
bucket_i = 'grizli-cosmos-v2'
else:
bucket_i = bucket
s3 = 'https://s3.amazonaws.com/'+bucket_i+'/Pipeline/{0}/Extractions/{0}.html'
return '<a href={0}>{1}</a>'.format(s3.format(root), root)
def path_to_image_html(path):
return '<a href={0}><img src="{0}"/></a>'.format(path)
# link for root
if link_root:
fmt = {'root': href_root}
else:
fmt = {}
for ext in image_extensions:
rows['ext'] = ext
urls = [s3url.format(**row) for row in rows.to_pandas().to_dict(orient='records')]
rows[ext] = urls
fmt[ext] = path_to_image_html
rows.remove_columns(['bucket', 'ext'])
out = rows.to_pandas().to_html(escape=False, formatters=fmt)
return out
def add_to_charge():
engine = grizli_db.get_db_engine()
p = pd.read_sql_query('select distinct p_root from photometry_apcorr', engine)
f = pd.read_sql_query('select distinct field_root from charge_fields', engine)
new_fields = []
for root in p['p_root'].values:
if root not in f['field_root'].values:
print(root)
new_fields.append(root)
df = pd.DataFrame()
df['field_root'] = new_fields
df['comment'] = 'CANDELS'
ix = df['field_root'] == 'j214224m4420'
df['comment'][ix] = 'Rafelski UltraDeep'
df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
def overview_table():
"""
Generate a new overview table with the redshift histograms
"""
from grizli.aws import db as grizli_db
import pandas as pd
from grizli import utils
engine = grizli_db.get_db_engine()
ch = from_sql("select * from charge_fields", engine)
by_mag = from_sql("select p_root as root, COUNT(p_root) as nmag from photometry_apcorr where mag_auto < 24 group by p_root;", engine)
by_nz = from_sql("select root, COUNT(root) as nz from redshift_fit where bic_diff > 30 group by root;", engine)
for count in [by_mag, by_nz]:
new_col = count.colnames[1]
ch[new_col] = -1
for r, n in zip(count['root'], count[new_col]):
ix = ch['field_root'] == r
ch[new_col][ix] = n
zhist = ['https://s3.amazonaws.com/grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png'.format(r) for r in ch['field_root']]
ch['zhist'] = ['<a href="{1}"><img src={0} height=300px></a>'.format(zh, zh.replace('_zhist.png', '.html')) for zh in zhist]
cols = ['field_root', 'field_ra', 'field_dec', 'mw_ebv', 'gaia5', 'nassoc', 'nfilt', 'filter', 'target', 'comment', 'proposal_id', 'proposal_pi', 'field_t_g800l', 'field_t_g102', 'field_t_g141', 'mast', 'footprint', 'rgb', 'nmag', 'nz', 'zhist', 'summary', 'log']
sortable = []
for c in cols:
if not hasattr(ch[c][0], 'upper'):
sortable.append(c)
# https://s3.amazonaws.com/grizli-v1/Master/CHArGE-July2019.html
table_root = 'CHArGE-July2019.zhist'
ch[cols].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=True)
os.system('aws s3 sync ./ s3://grizli-v1/Master/ --exclude "*" --include "{1}.html" --include "{1}.json" --acl public-read'.format('', table_root))
def run_all_redshift_fits():
##############
# Run all
from grizli.aws import db as grizli_db
import pandas as pd
engine = grizli_db.get_db_engine()
# By grism
res = pd.read_sql_query("select field_root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (nassoc < 200 AND (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%');", engine)
orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()
count = 0
for i, (root, ta, tb, tr, pi) in enumerate(zip(res['field_root'], res['field_t_g800l'], res['field_t_g102'], res['field_t_g141'], res['proposal_pi'])):
if root in orig_roots:
continue
count += 1
zmax = 1.6
if tb > 0:
zmax = 2.2
if tr > 0:
zmax = 3.2
print('\n\n', i, count, root, ta, tb, tr, pi, zmax, '\n\n')
phot_root = None
try:
grizli_db.run_lambda_fits(root, phot_root=phot_root,
min_status=6, zr=[0.01, zmax])
except:
pass
####
# Redo fits on reprocessed fields
# for i in range(2,11):
# root = 'j214224m4420gr{0:02d}'.format(i)
# print(root)
#
res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root), engine)
res = engine.execute("DELETE from redshift_fit_quasar WHERE (root = '{0}')".format(root), engine)
res = engine.execute("DELETE from stellar_fit WHERE (root = '{0}')".format(root), engine)
res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root), engine)
if False:
# Remove the whole thing
res = engine.execute("DELETE from exposure_log WHERE (parent = '{0}')".format(root), engine)
res = engine.execute("DELETE from charge_fields WHERE (field_root = '{0}')".format(root), engine)
grizli_db.run_lambda_fits(root, phot_root=root, min_status=2, zr=[0.01, zmax], mag_limits=[15, 26], engine=engine)
# for root in "j233844m5528 j105732p3620 j112416p1132 j113812m1134 j113848m1134 j122852p1046 j143200p0959 j152504p0423 j122056m0205 j122816m1132 j131452p2612".split():
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'rgb', 'line'], show_hist=True)
grizli_db.aws_rgb_thumbnails(root, engine=engine)
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
def aws_rgb_thumbnails(root, bucket='grizli-v1', engine=None, thumb_args={}, ids=None, verbose=True, res=None):
"""
Make thumbnails for everything that has an entry in the redshift_fit table
"""
from grizli.aws import aws_drizzler, fit_redshift_lambda
if engine is None:
engine = get_db_engine(echo=False)
if res is None:
res = from_sql("SELECT root, id, ra, dec FROM redshift_fit WHERE root = '{0}' AND ra > 0".format(root), engine)
aws_prep_dir = 's3://{0}/Pipeline/{1}/Prep/'.format(bucket, root)
aws_bucket = 's3://{0}/Pipeline/{1}/Thumbnails/'.format(bucket, root)
event = {'make_segmentation_figure': True,
'aws_prep_dir': aws_prep_dir,
'single_output': True,
'combine_similar_filters': True,
'show_filters': ['visb', 'visr', 'y', 'j', 'h'],
'include_ir_psf': False,
'include_saturated': True,
'subtract_median': True,
'sync_fits': True,
'thumb_height': 2.0,
'scale_ab': 21,
'aws_bucket': aws_bucket,
'master': None,
'rgb_params': {'xsize': 4, 'output_dpi': None,
'rgb_min': -0.01, 'add_labels': False,
'output_format': 'png', 'show_ir': False,
'scl': 2, 'suffix': '.rgb', 'mask_empty': False,
'tick_interval': 1, 'pl': 1},
'remove': True,
'filters': ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m',
'f850lp', 'f814w', 'f775w', 'f606w', 'f475w',
'f555w', 'f600lp', 'f390w', 'f350lp'],
'half_optical_pixscale': True,
'theta': 0,
'kernel': 'square',
'pixfrac': 0.33,
'wcs': None,
'size': 6,
'pixscale': 0.1}
for k in thumb_args:
event[k] = thumb_args[k]
N = len(res)
for i in range(N):
id = res['id'][i]
ra = res['ra'][i]
dec = res['dec'][i]
root_i = res['root'][i]
if ids is not None:
if id not in ids:
continue
event['ra'] = ra
event['dec'] = dec
event['label'] = '{0}_{1:05d}'.format(root_i, id)
fit_redshift_lambda.send_event_lambda(event, verbose=verbose)
def count_sources_for_bad_persistence():
"""
Count the number of extracted objects for each id and look for fields
with few objects, which are usually problems with the persistence mask
"""
import pandas as pd
from grizli.aws import db as grizli_db
from grizli import utils
engine = grizli_db.get_db_engine(echo=False)
# Number of matches per field
counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 5 AND mag_auto < 24 group by root;", engine)
counts = utils.GTable.from_pandas(counts)
so = np.argsort(counts['n'])
sh = """
BUCKET=grizli-v
root=j113812m1134
aws s3 rm --recursive s3://grizli-v1/Pipeline/${root}/ --include "*"
grism_run_single.sh ${root} --run_fine_alignment=True --extra_filters=g800l --bucket=grizli-v1 --preprocess_args.skip_single_optical_visits=True --mask_spikes=True --persistence_args.err_threshold=1
"""
def add_missing_photometry():
# Add missing photometry
import os
import pandas as pd
from grizli.aws import db as grizli_db
from grizli.pipeline import photoz
from grizli import utils
engine = grizli_db.get_db_engine(echo=False)
res = pd.read_sql_query("select distinct root from redshift_fit where root like 'j%%'", engine)['root'].tolist()
orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()
# Missing grism fields?
res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%';", engine)['root'].tolist()
orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()
# All photometry
res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where nassoc < 200 AND log LIKE '%%inish%%' AND field_root LIKE 'j%%';", engine)['root'].tolist()
orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()
count = 0
for root in res:
if root not in orig_roots:
count += 1
print(count, root)
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot_apcorr.fits .'.format(root))
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot.fits .'.format(root))
if not os.path.exists('{0}_phot_apcorr.fits'.format(root)):
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot_apcorr.fits .'.format(root))
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot.fits .'.format(root))
if os.path.exists('{0}_phot_apcorr.fits'.format(root)):
grizli_db.add_phot_to_db(root, delete=False, engine=engine)
else:
if os.path.exists('{0}_phot.fits'.format(root)):
# Make the apcorr file
utils.set_warnings()
total_flux = 'flux_auto'
try:
obj = photoz.eazy_photoz(root, object_only=True,
apply_prior=False, beta_prior=True,
aper_ix=1,
force=True,
get_external_photometry=False,
compute_residuals=False,
total_flux=total_flux)
except:
continue
grizli_db.add_phot_to_db(root, delete=False,
engine=engine)
# 3D-HST
copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits --acl public-read
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot.fits --acl public-read
"""
grizli_db.run_lambda_fits('egs-grism-j141956p5255', min_status=6, zr=[0.01, 3.2])
copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/uds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('uds-grism-j021732m0512', min_status=6, zr=[0.01, 3.2])
# GDS
copy = """
aws s3 rm s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-grism-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/gds-g800l-j033236m2748_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('gds-grism-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
grizli_db.run_lambda_fits('gds-g800l-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
# GDN
copy = """
#aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-grism-j123656p6215_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-g800l-j123656p6215_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gdn-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/gdn-g800l-j123656p6215_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('gdn-grism-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
grizli_db.run_lambda_fits('gdn-g800l-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
# 3D-HST G800L
copy = """
aws s3 rm s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/ --recursive --exclude "*" --include "egs-g800l-j141956p5255_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/egs-g800l-j141956p5255_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
res = grizli_db.wait_on_db_update('egs-g800l-j141956p5255', dt=15, n_iter=120, engine=engine)
res = grizli_db.wait_on_db_update('uds-g800l-j021732m0512', dt=15, n_iter=120, engine=engine)
# UDS
copy = """
aws s3 rm s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/ --recursive --exclude "*" --include "uds-g800l-j021732m0512_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/uds-g800l-j021732m0512_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('uds-g800l-j021732m0512', phot_root='uds-grism-j021732m0512', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
# Cosmos on oliveraws
copy = """
aws s3 rm s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/ --recursive --exclude "*" --include "cos-grism-j100012p0210_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/Cosmos/cos-cnd-mosaic_phot_apcorr.fits s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/cos-grism-j100012p0210_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('cos-grism-j100012p0210', min_status=6, zr=[0.01, 3.2], mag_limits=[17, 17.1], bucket='grizli-cosmos-v2')
os.system('sudo halt')
def set_column_formats(info, extra={}):
# Print formats
formats = {}
formats['ra'] = formats['dec'] = '.5f'
formats['mag_auto'] = formats['delta_z'] = '.2f'
formats['chinu'] = formats['chimin'] = formats['chimax'] = '.1f'
formats['bic_diff'] = formats['bic_temp'] = formats['bic_spl'] = '.1f'
formats['bic_poly'] = '.1f'
formats['dlinesn'] = formats['bic_spl'] = '.1f'
formats['flux_radius'] = formats['flux_radius_20'] = '.1f'
formats['flux_radius_90'] = '.1f'
formats['log_pdf_max'] = formats['log_risk'] = '.1f'
formats['d4000'] = formats['d4000_e'] = '.2f'
formats['dn4000'] = formats['dn4000_e'] = '.2f'
formats['z_spec'] = formats['z_map'] = formats['reshift'] = '.3f'
formats['z_spec_dr'] = '.1f'
formats['t_g141'] = formats['t_g102'] = formats['t_g800l'] = '.0f'
formats['zwidth1'] = formats['zw1'] = '.3f'
formats['zwidth2'] = formats['zw2'] = '.3f'
formats['q_z'] = '.2f'
formats['dz'] = '.3f'
for k in extra:
formats[k] = extra[k]
for c in info.colnames:
if c in formats:
info[c].format = formats[c]
elif c.startswith('sn_'):
info[c].format = '.1f'
elif c.startswith('mag_'):
info[c].format = '.2f'
elif '_ujy' in c:
info[c].format = '.2f'
elif c.startswith('ew_'):
info[c].format = '.1f'
elif ('q_z' in c):
info[c].format = '.2f'
elif ('zw' in c) | ('z_map' in c):
info[c].format = '.3f'
elif ('chinu' in c):
info[c].format = '.1f'
elif c.startswith('bic_'):
info[c].format = '.1f'
elif c in ['z02', 'z16', 'z50', 'z84', 'z97']:
info[c].format = '.3f'
elif c[:4] in ['splf', 'sple']:
info[c].format = '.1e'
elif c.startswith('flux_') | c.startswith('err_'):
info[c].format = '.1e'
def query_from_ds9(ds9, radius=5, engine=None, extra_cols=['mag_auto', 'z_map', 'bic_diff', 't_g800l', 't_g102', 't_g141'], extra_query='', table_root='/tmp/ds9_query'):
"""
Make a table by running a query for objects based on a DS9 pan position
"""
from grizli import utils, prep
if engine is None:
engine = get_db_engine(echo=False)
ra, dec = np.cast[float](ds9.get('pan fk5').split())
dd = radius/3600.
dr = dd/np.cos(dec/180*np.pi)
min_cols = ['root', 'id', 'status', 'ra', 'dec']
colstr = ','.join(min_cols + extra_cols)
q = from_sql(f'select {colstr} '
f'from redshift_fit natural join photometry_apcorr '
f'where ra > {ra-dr} AND ra < {ra+dr}'
f' AND dec > {dec-dd} and dec < {dec+dd}' + extra_query,
engine)
tt = utils.GTable()
tt['ra'] = [ra]
tt['dec'] = [dec]
_idx, _dr = tt.match_to_catalog_sky(q)
q['_dr'] = _dr
q['_dr'].format = '.2f'
so = np.argsort(q['_dr'])
make_html_table(sync=None, res=q[so], use_json=False, table_root=table_root, sort_column=('_dr', 1))
comment = [f'{id}' for id in q['id'][so]]
prep.table_to_regions(q[so], table_root+'.reg', comment=comment)
return q[so]
def make_html_table(engine=None, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status >= 5 AND root='j163852p4039'", tables=[], table_root='query', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], sort_column=('bic_diff', -1), fit_table='redshift_fit', verbose=True, get_sql=False, res=None, show_hist=False, extra_formats={}, use_json=True, use_join=False):
"""
"""
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from grizli import utils
from grizli.aws import db as grizli_db
if engine is None:
engine = get_db_engine(echo=False)
if len(tables) > 0:
extra_tables = ','+','.join(tables)
else:
extra_tables = ''
if use_join:
query = "SELECT {0} FROM {1} NATURAL JOIN photometry_apcorr WHERE {2};".format(','.join(columns), fit_table, where)
query = query.replace('WHERE AND', 'AND')
else:
query = "SELECT {0} FROM photometry_apcorr, {3}{1} WHERE phot_root = p_root AND id = p_id {2};".format(','.join(columns), extra_tables, where, fit_table)
if get_sql:
return query
if res is not None:
info = res
else:
res = pd.read_sql_query(query, engine)
info = utils.GTable.from_pandas(res)
if verbose:
print('Query: {0}\n Results N={1}'.format(query, len(res)))
if 'cdf_z' in info.colnames:
info.remove_column('cdf_z')
for c in info.colnames:
if c.startswith('p_'):
try:
info.rename_column(c, c[2:])
except:
pass
all_columns = info.colnames.copy()
if 'idx' not in info.colnames:
idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">#{2:05d}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
info['idx'] = idx
all_columns.insert(0, 'idx')
all_columns.pop(all_columns.index('id'))
set_column_formats(info, extra=extra_formats)
print('Sort: ', sort_column, sort_column[0] in all_columns)
if sort_column[0] in all_columns:
scol = info[sort_column[0]]
if hasattr(scol, 'mask'):
sdata = scol.filled(fill_value=-np.inf).data
else:
sdata = scol
so = np.argsort(sdata)[::sort_column[1]]
#info = info[so[::sort_column[1]]]
# PNG columns
AWS = 'https://s3.amazonaws.com/grizli-v1/Pipeline'
bucket = ['grizli-cosmos-v2' if r.startswith('cos-') else 'grizli-v1' for r in info['root']]
for ext in png_ext:
if ext == 'thumb':
subdir = 'Thumbnails'
print(ext, subdir)
elif ext == 'rgb':
subdir = 'Thumbnails'
else:
subdir = 'Extractions'
if 'png_{0}'.format(ext) not in info.colnames:
png = ['{0}_{1:05d}.{2}.png'.format(root, id, ext) for root, id in zip(info['root'], info['id'])]
if ext == 'rgb':
js = '<a href={0}/{2}><img src={0}/{1} onmouseover="this.src = this.src.replace(\'rgb.pn\', \'seg.pn\')" onmouseout="this.src = this.src.replace(\'seg.pn\', \'rgb.pn\')" height=200></a>'
paths = ['{0}/{1}/{2}'.format(AWS.replace('grizli-v1', buck),
root, subdir)
for buck, root in zip(bucket, info['root'])]
png_url = [js.format(path, p,
p.replace('.rgb.png', '.thumb.png'))
for path, p in zip(paths, png)]
info['png_{0}'.format('rgb')] = png_url
else:
info['png_{0}'.format(ext)] = ['<a href="{0}/{1}/{2}/{3}"><img src={0}/{1}/{2}/{3} height=200></a>'.format(AWS.replace('grizli-v1', buck), root, subdir, p) for buck, root, p in zip(bucket, info['root'], png)]
all_columns.append('png_{0}'.format(ext))
sortable = []
for c in all_columns:
if not hasattr(info[c][0], 'upper'):
sortable.append(c)
info[all_columns][so].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=use_json)
if show_hist:
from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator
xti = xt = np.arange(0, 3.6, 0.5)
loc = np.arange(0, 3.6, 0.1)
bins = utils.log_zgrid([0.03, 3.5], 0.01)
fig = plt.figure(figsize=[8, 4])
ax = fig.add_subplot(111)
ax.hist(np.log(1+res['z_map']), bins=np.log(1+bins), color='k',
alpha=0.2, label=table_root, normed=False)
clip = res['bic_diff'].values > 30
ax.hist(np.log(1+res['z_map'].values[clip]), bins=np.log(1+bins),
color='r', alpha=0.3, normed=False)
xts = ax.set_xticks(np.log(1+xt))
xtl = ax.set_xticklabels(xti)
ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel('z_map')
ax.set_ylabel(r'$N$')
# Label to show line mis-id
dz_wrong = (6563.-5007)/5007
ax.plot(np.arange(5)*dz_wrong, np.ones(5)*ax.get_ylim()[1], marker='.', markerfacecolor='w', markeredgecolor='w', color='r', markersize=10)
ax.set_xlim(0, np.log(1+3.7))
ax.grid()
ax.legend(loc='upper right')
fig.tight_layout(pad=0.1)
fig.text(1-0.02, 0.02, time.ctime(), ha='right', va='bottom', transform=fig.transFigure, fontsize=5)
fig.savefig('{0}_zhist.png'.format(table_root))
if sync:
os.system('aws s3 sync ./ {0} --exclude "*" --include "{1}.html" --include "{1}.json" --include "{1}_zhist.png" --acl public-read'.format(sync, table_root))
return res
def get_exposure_info():
"""
Get exposure information from the MAST databases
"""
import mastquery.query
master = 'grizli-v1-19.12.04'
tab = utils.read_catalog('{0}_visits.fits'.format(master))
all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]
all_files = []
for v in all_visits:
all_files.extend(v['files'])
prog = [f[1:4] for f in all_files]
_res = np.unique(np.array(prog), return_counts=True)
t = utils.GTable()
t['prog'] = _res[0]
t['count'] = _res[1]
so = np.argsort(t['count'])
t = t[so[::-1]]
for pr in t['prog']:
print(pr)
if os.path.exists('{0}_query.fits'.format(pr)):
continue
try:
_q = mastquery.query.run_query(obs_id='[ij]{0}*'.format(pr))
_p = mastquery.query.get_products_table(_q)
except:
continue
_q.write('{0}_query.fits'.format(pr))
_p.write('{0}_prod.fits'.format(pr))
# Send to AWS
from grizli.aws import db
import pandas as pd
from astropy.table import Table
engine = db.get_db_engine()
files = glob.glob('*query.fits')
files.sort()
cols = ['obs_id', 'target', 'ra', 'dec', 't_min', 't_max', 'exptime', 'wavelength_region', 'filter', 'em_min', 'em_max', 'target_classification', 'obs_title', 't_obs_release', 'instrument_name', 'proposal_pi', 'proposal_id', 'proposal_type', 'footprint', 'dataRights', 'mtFlag', 'obsid', 'objID', 'visit']
for i, file in enumerate(files):
print(file)
_q = Table.read(file, character_as_bytes=False)
_q['proposal_id'] = np.cast[np.int16](_q['proposal_id'])
_q['obsid'] = np.cast[np.int64](_q['obsid'])
_q['objID'] = np.cast[np.int64](_q['objID'])
df = _q[cols].to_pandas()
df.to_sql('mast_query', engine, index=False, if_exists='append', method='multi')
files = glob.glob('*_prod.fits')
files.sort()
cols = ['obsid', 'dataset']
for i, file in enumerate(files):
print(i, file)
_p = Table.read(file, character_as_bytes=False)
_p['obsid'] = np.cast[np.int64](_p['obsid'])
_p['dataset'] = [d[:-1] for d in _p['observation_id']]
df = _p[cols].to_pandas()
df.to_sql('mast_products', engine, index=False, if_exists='append', method='multi')
##########
# Exposure log
# Initialize, adding an array column manually for the footprints
v = all_visits[0]
N = len(v['files'])
fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
df = pd.DataFrame()
df['file'] = [f.split('_')[0] for f in v['files']]
df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
df['extension'] = [f.split('_')[1][:3] for f in v['files']]
df['filter'] = v['filter']
df['parent'] = v['parent']
df['awspath'] = v['awspath']
df['product'] = v['product']
df['filter'] = v['product'].split('-')[-1]
df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]
# Make table
engine.execute('drop table exposure_log;')
df.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
engine.execute('alter table exposure_log add column footprint float [];')
engine.execute('delete from exposure_log where True;')
SKIP = 1000
for i, v in enumerate(all_visits):
print(i, v['parent'], v['product'])
N = len(v['files'])
fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
if (i % SKIP) == 0:
df0 = df[:0]
df = pd.DataFrame()
df['file'] = [f.split('_')[0] for f in v['files']]
df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
df['extension'] = [f.split('_')[1][:3] for f in v['files']]
df['filter'] = v['filter']
df['parent'] = v['parent']
df['awspath'] = v['awspath']
df['product'] = v['product']
df['filter'] = v['product'].split('-')[-1]
df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]
df['footprint'] = fps
if (i % SKIP) > 0:
df0 = df0.append(df)
if (i % SKIP) == SKIP-1:
print('>>> to DB >>> ({0}, {1})'.format(i, len(df0)))
df0.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
def get_exposures_at_position(ra, dec, engine, dr=10):
cosdec = np.cos(dec/180*np.pi)
res = db.from_sql('select * from exposure_log where (ABS(ra - {0}) < {1}) AND (ABS(dec-{2}) < {3})'.format(ra, dr/cosdec, dec, dr), engine)
return res
def add_irac_table():
from scipy.spatial import ConvexHull
os.chdir('/Users/gbrammer/Research/HST/CHArGE/FieldsSummary')
files = glob.glob('*ipac.fits')
files.sort()
bands = ['IRAC 3.6um', 'IRAC 4.5um', 'IRAC 5.8um', 'IRAC 8.0um', 'MIPS 24um']
bkey = {}
for b in bands:
key = b.replace(' ', '').replace('.', '')[:-2].lower()
bkey[key] = b
N = 0
data = {'field_root': []}
aor_data = {'field_root': [], 'reqkey': []}
for k in bkey:
data['exp_'+k] = []
data['n_'+k] = []
data['fp_'+k] = []
for i, file in enumerate(files):
tab = utils.read_catalog(file)
field = file.split('_ipac')[0]
if 'x' in tab.colnames:
data['field_root'].append(field)
for k in bkey:
data['exp_'+k].append(0)
data['n_'+k].append(0)
data['fp_'+k].append([])
continue
N += len(tab)
print(i, file, N)
data['field_root'].append(field)
for k in bkey:
sel = tab['with_hst'] & (tab['wavelength'] == bkey[k])
data['exp_'+k].append(tab['exposuretime'][sel].sum()/3600)
data['n_'+k].append(sel.sum())
if sel.sum() == 0:
data['fp_'+k].append([])
continue
r, d = [], []
for j in range(4):
r.extend(tab['ra{0}'.format(j+1)][sel].data)
d.extend(tab['dec{0}'.format(j+1)][sel].data)
pts = np.array([r, d]).T
vert = ConvexHull(pts).vertices
fp = pts[vert, :]
data['fp_'+k].append(fp.T.tolist())
aors = np.unique(tab['reqkey'])
aor_data['field_root'].extend([field]*len(aors))
aor_data['reqkey'].extend(list(aors))
#
import pandas as pd
df = pd.DataFrame(aor_data)
df.to_sql('spitzer_aors', engine, index=False, if_exists='append', method='multi')
df = pd.DataFrame(data)
# First row to initialize table
first = df[0:1]
for k in bkey:
first.pop('fp_'+k)
engine.execute('drop table exposure_log;')
first.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
for k in bkey:
cmd = 'alter table spitzer_log add column fp_{0} float [];'.format(k)
engine.execute(cmd)
engine.execute('delete from spitzer_log where True;')
df.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
def show_all_fields():
plt.ioff()
res = pd.read_sql_query("select distinct root from redshift_fit order by root;", engine)
roots = res['root'].tolist()
for root in roots:
print('\n\n', root, '\n\n')
if os.path.exists('{0}_zhist.png'.format(root)):
continue
try:
if False:
res = pd.read_sql_query("select root,id,status from redshift_fit where root = '{0}';".format(root), engine)
res = pd.read_sql_query("select status, count(status) as n from redshift_fit where root = '{0}' group by status;".format(root), engine)
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'q_z'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
if False:
grizli_db.set_phot_root(root, phot_root, engine)
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND root = '{0}' AND (bic_diff > 20 OR zwidth1/(1+z_map) < 0.01)".format(root), table_root=root, sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line', 'sed'], show_hist=False)
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND phot_root = '{0}' AND bic_diff > 20".format(phot_root), table_root=phot_root, sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=False)
except:
continue
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
|
{"hexsha": "aceef6e53447813622c94473a304064efa0e32fe", "size": 105785, "ext": "py", "lang": "Python", "max_stars_repo_path": "grizli/aws/db.py", "max_stars_repo_name": "jkmatharu/grizli", "max_stars_repo_head_hexsha": "7e2eb918667ac9f845d0847452a72138fc22fbcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grizli/aws/db.py", "max_issues_repo_name": "jkmatharu/grizli", "max_issues_repo_head_hexsha": "7e2eb918667ac9f845d0847452a72138fc22fbcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grizli/aws/db.py", "max_forks_repo_name": "jkmatharu/grizli", "max_forks_repo_head_hexsha": "7e2eb918667ac9f845d0847452a72138fc22fbcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4971217105, "max_line_length": 2914, "alphanum_fraction": 0.6076003214, "include": true, "reason": "import numpy,from scipy,import astropy,from astropy", "num_tokens": 32814}
|
import logging
from typing import Any, Callable, Collection, Dict, Optional, Sequence, Tuple, Union
import numpy
import skimage.transform
import skimage.transform
import torch
from hylfm.utils.for_log import DuplicateLogFilter
from .affine_utils import get_lf_roi_in_raw_lf, get_ls_roi
from .base import Transform
from ..hylfm_types import Array
logger = logging.getLogger(__name__)
class Crop(Transform):
def __init__(
self,
*,
crop: Optional[Tuple[Tuple[Optional[int], Optional[int]], ...]] = None,
crop_fn: Optional[Callable[[Tuple[int, ...]], Tuple[Tuple[int, int], ...]]] = None,
apply_to: Union[str, Dict[str, str]],
):
super().__init__(apply_to=apply_to)
if (crop is not None and crop_fn is not None) or (crop is None and crop_fn is None):
raise ValueError("exclusive arguments: `crop` and `crop_fn`")
elif crop_fn is None:
# assert all(len(c) == 2 for c in crop)
self.crop_fn = None
self.crop = crop
else:
self.crop_fn = crop_fn
self.crop = None
def apply_to_sample(self, tensor: Sequence) -> Union[numpy.ndarray, torch.Tensor]:
if not isinstance(tensor, (numpy.ndarray, torch.Tensor)):
raise TypeError(type(tensor))
crop = self.crop if self.crop_fn is None else self.crop_fn(tensor.shape)
assert len(tensor.shape) == len(crop), (tensor.shape, crop)
return tensor[tuple(slice(lower, upper) for lower, upper in crop)]
class RandomlyFlipAxis(Transform):
randomly_changes_shape = True
def __init__(self, axis: int, **super_kwargs):
super().__init__(**super_kwargs)
self.axis = axis
def apply_to_sample(self, **sample_tensors: Union[numpy.ndarray, torch.Tensor]) -> Dict[str, Any]:
if numpy.random.uniform() < 0.5:
for key in sample_tensors:
if isinstance(sample_tensors[key], numpy.ndarray):
sample_tensors[key] = numpy.flip(sample_tensors[key], axis=self.axis)
elif isinstance(sample_tensors[key], torch.Tensor):
sample_tensors[key] = sample_tensors[key].flip([self.axis])
else:
raise NotImplementedError
return sample_tensors
class RandomIntensityScale(Transform):
def __init__(self, factor_min: float, factor_max: float, independent: bool, **super_kwargs):
super().__init__(**super_kwargs)
self.factor_min = factor_min
self.factor_max = factor_max
self.independent = independent
def _get_factor(self):
return numpy.random.uniform(low=self.factor_min, high=self.factor_max)
def apply_to_sample(self, **sample_tensors: Array) -> Dict[str, Array]:
factor = self._get_factor()
for key, sample in sample_tensors.items():
sample_tensors[key] = sample * factor
if self.independent:
factor = self._get_factor()
return sample_tensors
class RandomRotate90(Transform):
randomly_changes_shape = True
def __init__(self, axes: Tuple[int, int] = (-2, -1), **super_kwargs):
super().__init__(**super_kwargs)
self.axes = [sa if sa < 0 else sa + 1 for sa in axes] # add batch dim to axes
def apply_to_batch(self, **batch: Array) -> Dict[str, Sequence]:
k = numpy.random.randint(4)
for key, tensor in batch.items():
if isinstance(tensor, numpy.ndarray):
batch[key] = numpy.rot90(tensor, k=k, axes=self.axes)
else:
raise NotImplementedError(type(tensor))
return batch
class Resize(Transform):
def __init__(self, shape: Sequence[Union[int, float]], order: int, apply_to: str):
assert isinstance(apply_to, str)
super().__init__(apply_to=apply_to)
self.shape = shape
assert 0 <= order <= 5, order
self.order = order
self.log_filter = DuplicateLogFilter()
def apply_to_sample(self, tensor):
assert len(tensor.shape) == len(self.shape), (tensor.shape, self.shape)
out_shape_float = [
sin if sout is None else sout * sin if isinstance(sout, float) else sout
for sin, sout in zip(tensor.shape, self.shape)
]
out_shape = [round(s) for s in out_shape_float]
if out_shape_float != out_shape:
logger = logging.Logger(self.__class__.__name__)
logger.addFilter(self.log_filter)
logger.warning(
"Resize tensor (orig. size: %s) to rounded %s = %s", tensor.shape, out_shape_float, out_shape
)
# logger.debug("Resize tensor: %s by %s to %s", tensor.shape, self.shape, out_shape)
out = skimage.transform.resize(tensor, out_shape, order=self.order, preserve_range=True)
return out
class SelectRoi(Transform):
def __init__(self, roi: Sequence[Union[int, None, slice]], apply_to: str):
assert isinstance(apply_to, str)
super().__init__(apply_to=apply_to)
self.roi = tuple(self._slice_descr_to_slice(r) for r in roi)
@staticmethod
def _slice_descr_to_slice(slice_descr: Union[int, None, str]):
if isinstance(slice_descr, slice):
return slice_descr
elif slice_descr is None:
return slice(None)
elif isinstance(slice_descr, int):
return slice_descr
else:
raise NotImplementedError(slice_descr)
def apply_to_sample(self, tensor):
return tensor[self.roi]
class Transpose(Transform):
def __init__(self, axes: Sequence[int], apply_to: str):
assert isinstance(apply_to, str)
super().__init__(apply_to=apply_to)
self.axes = axes
def apply_to_sample(self, tensor):
if isinstance(tensor, numpy.ndarray):
return tensor.transpose(self.axes)
else:
raise NotImplementedError(type(tensor))
class CropLSforDynamicTraining(Transform):
def __init__(self, apply_to: str, crop_names: Collection[str], nnum: int, scale: int, z_ls_rescaled: int):
assert isinstance(apply_to, str)
super().__init__(
input_mapping={apply_to: "tensor", "crop_name": "crop_name"}, output_mapping={"tensor": apply_to}
)
self.crops = {}
for crop_name in crop_names:
ls_roi = get_ls_roi(
crop_name,
nnum=nnum,
for_slice="slice" in apply_to,
wrt_ref=False,
z_ls_rescaled=z_ls_rescaled,
ls_scale=scale,
)
ls_roi = ((0, None),) + ls_roi # add channel dim
self.crops[crop_name] = Crop(apply_to=apply_to, crop=ls_roi)
def apply_to_sample(self, tensor: Any, crop_name: str) -> Union[numpy.ndarray, torch.Tensor]:
return self.crops[crop_name].apply_to_sample(tensor=tensor)
class CropWhatShrinkDoesNot(Transform):
def __init__(self, apply_to: str, crop_names: Collection[str], nnum: int, scale: int, shrink: int, wrt_ref: bool):
assert isinstance(apply_to, str)
super().__init__(
input_mapping={apply_to: "tensor", "crop_name": "crop_name"}, output_mapping={"tensor": apply_to}
)
self.crops = {}
for crop_name in crop_names:
roi = get_lf_roi_in_raw_lf(crop_name, nnum=nnum, shrink=shrink, scale=scale, wrt_ref=wrt_ref)
if apply_to != "lf":
roi = ((0, None),) + roi # add z dim
roi = ((0, None),) + roi # add channel dim
self.crops[crop_name] = Crop(apply_to=apply_to, crop=roi)
def apply_to_sample(self, tensor: Array, crop_name: str) -> Union[numpy.ndarray, torch.Tensor]:
return self.crops[crop_name].apply_to_sample(tensor=tensor)
class Pad(Transform):
def __init__(self, pad_width: Sequence[Sequence[int]], pad_mode: str, nnum: Optional[int] = None, **super_kwargs):
super().__init__(**super_kwargs)
if any([len(p) != 2 for p in pad_width]) or any([pw < 0 for p in pad_width for pw in p]):
raise ValueError(f"invalid pad_width sequence: {pad_width}")
if pad_mode == "lenslets":
if nnum is None:
raise ValueError("nnum required to pad lenslets")
else:
raise NotImplementedError(pad_mode)
self.pad_width = pad_width
self.pad_mode = pad_mode
self.nnum = nnum
def apply_to_sample(self, tensor: Any) -> Union[numpy.ndarray, torch.Tensor]:
assert len(tensor.shape) - 1 == len(self.pad_width)
if isinstance(tensor, numpy.ndarray):
if self.pad_mode == "lenslets":
for i, (pw0, pw1) in enumerate(self.pad_width):
if pw0:
border_lenslets = tensor[(slice(None),) * (i + 1) + (slice(0, pw0 * self.nnum),)]
tensor = numpy.concatenate([border_lenslets, tensor], axis=i + 1)
if pw1:
border_lenslets = tensor[(slice(None),) * (i + 1) + (slice(-pw1 * self.nnum, None),)]
tensor = numpy.concatenate([tensor, border_lenslets], axis=i + 1)
return tensor
else:
raise NotImplementedError(self.pad_mode)
# return numpy.pad(tensor, pad_width=)
else:
NotImplementedError(type(tensor))
class FlipAxis(Transform):
def __init__(self, axis: int, **super_kwargs):
super().__init__(**super_kwargs)
assert axis != 0, "You are not supposed to flip the batch dimension!"
self.axis = axis
def apply_to_batch(self, tensor: Union[numpy.ndarray, torch.Tensor]) -> Union[numpy.ndarray, torch.Tensor]:
if isinstance(tensor, numpy.ndarray):
return numpy.flip(tensor, axis=self.axis)
elif isinstance(tensor, torch.Tensor):
return tensor.flip([self.axis])
else:
raise NotImplementedError
# for debugging purposes:
class SetPixelValue(Transform):
def __init__(self, value: float, **super_kwargs):
super().__init__(**super_kwargs)
self.value = value
def apply_to_sample(self, tensor: Any) -> Union[numpy.ndarray, torch.Tensor]:
tensor[...] = self.value
return tensor
|
{"hexsha": "07531758331cb9e879c4c02239b59a1c6732c1d1", "size": 10346, "ext": "py", "lang": "Python", "max_stars_repo_path": "hylfm/transforms/image.py", "max_stars_repo_name": "kreshuklab/hylfm-net", "max_stars_repo_head_hexsha": "9f1013640e40e998674b65176023367b1e978782", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-13T05:46:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T06:12:04.000Z", "max_issues_repo_path": "hylfm/transforms/image.py", "max_issues_repo_name": "kreshuklab/hylfm-net", "max_issues_repo_head_hexsha": "9f1013640e40e998674b65176023367b1e978782", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-13T08:29:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T16:45:19.000Z", "max_forks_repo_path": "hylfm/transforms/image.py", "max_forks_repo_name": "kreshuklab/hylfm-net", "max_forks_repo_head_hexsha": "9f1013640e40e998674b65176023367b1e978782", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:02:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-12T06:51:33.000Z", "avg_line_length": 38.1771217712, "max_line_length": 118, "alphanum_fraction": 0.6196597719, "include": true, "reason": "import numpy", "num_tokens": 2358}
|
module EMLstoic
using DanaTypes
using DotPlusInheritance
using Reexport
@reexport using ...reactors.EMLtank_basic
import EMLtypes.length
include("stoic/stoic_vap.jl")
include("stoic/stoic_liq.jl")
include("stoic/stoic_extent_vap.jl")
include("stoic/stoic_extent_liq.jl")
include("stoic/stoic_conv_vap.jl")
include("stoic/stoic_conv_liq.jl")
end
|
{"hexsha": "bf2371eba0499bf5350be90fe8b35d59463d8dd9", "size": 355, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "JuliaEMSOModels/reactors/stoic.jl", "max_stars_repo_name": "DANA-Laboratory/EMSOModelLibrary.jl", "max_stars_repo_head_hexsha": "e28904cc1bdf8f67c6839ad35b4658dd399c0e47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-18T02:32:44.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-18T02:32:44.000Z", "max_issues_repo_path": "JuliaEMSOModels/reactors/stoic.jl", "max_issues_repo_name": "DANA-Laboratory/EMSOModelLibrary.jl", "max_issues_repo_head_hexsha": "e28904cc1bdf8f67c6839ad35b4658dd399c0e47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-01-21T16:35:07.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-21T16:35:07.000Z", "max_forks_repo_path": "JuliaEMSOModels/reactors/stoic.jl", "max_forks_repo_name": "DANA-Laboratory/EMSOModelLibrary.jl", "max_forks_repo_head_hexsha": "e28904cc1bdf8f67c6839ad35b4658dd399c0e47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3076923077, "max_line_length": 42, "alphanum_fraction": 0.7971830986, "num_tokens": 116}
|
from __future__ import annotations
import os
from typing import Optional, Union
import tensorflow as tf
from numpy import random
from GNN import GNN_metrics as mt, GNN_utils as utils
from GNN.GNN import GNNnodeBased, GNNedgeBased, GNNgraphBased
from GNN.LGNN import LGNN
from GNN.MLP import MLP, get_inout_dims
from GNN.graph_class import GraphObject
#######################################################################################################################
# SCRIPT OPTIONS - modify the parameters to adapt the execution to the problem under consideration ####################
#######################################################################################################################
# MUTAG option - if True, gnn/lgnn is trained on a real-world dataset MUTAG
# problem is set automatically to graph classification -> addressed_problem='c', problem_based='g'
use_MUTAG: bool = True
# GENERIC GRAPH PARAMETERS. See utils.randomGraph for details
# Node and edge labels are initialized randomly. Target clusters are given by sklearn.
# Each graph has at least <min_nodes_number> nodes and at most <max_nodes_number> nodes
# Possible <aggregation_mode> for matrix ArcNoe belonging to graphs in ['average', 'normalized', 'sum']
# problem_based in ['n', 'a','g'] -> ['c' classification, 'r' regression]
# addressed_problem in ['c', 'r'] -> ['g' graph-based; 'n' node-based; 'a' arc-based;]
problem_based : str = 'n'
addressed_problem : str = 'c'
graphs_number : int = 100
min_nodes_number : int = 15
max_nodes_number : int = 40
dim_node_label : int = 3
dim_arc_label : int = 1
dim_target : int = 2
density : float = 0.7
aggregation_mode : str = 'average'
# LEARNING SETS PARAMETERS
perc_Train : float = 0.7
perc_Valid : float = 0.2
batch_size : int = 32
normalize : bool = True
seed : Optional[int] = None
norm_nodes_range : Optional[tuple[Union[int, float], Union[int, float]]] = None # (-1,1) # other possible value
norm_arcs_range : Optional[tuple[Union[int, float], Union[int, float]]] = None # (0,1) # other possible value
# NET STATE PARAMETERS
activations_net_state : str = 'selu'
kernel_init_net_state : str = 'lecun_normal'
bias_init_net_state : str = 'lecun_normal'
kernel_reg_net_state : str = None
bias_reg_net_state : str = None
dropout_rate_st : float = 0.1
dropout_pos_st : Union[list[int], int] = 0
hidden_units_net_state : Optional[Union[list[int], int]] = None
### NET OUTPUT PARAMETERS
activations_net_output : str = 'softmax'
kernel_init_net_output : str = 'glorot_normal'
bias_init_net_output : str = 'glorot_normal'
kernel_reg_net_output : str = None
bias_reg_net_output : str = None
dropout_rate_out : float = 0.1
dropout_pos_out : Union[list[int], int] = 0
hidden_units_net_output : Optional[Union[list[int], int]] = None
# GNN PARAMETERS
dim_state : int = 0
max_iter : int = 5
state_threshold : float = 0.01
# LGNN PARAMETERS
layers : int = 5
get_state : bool = False#True
get_output : bool = True
path_writer : str = 'writer/'
optimizer : tf.keras.optimizers = tf.optimizers.Adam(learning_rate=0.001)
lossF : tf.function = tf.keras.losses.categorical_crossentropy
lossArguments : Optional[dict[str, callable]] = {'from_logits': False}
extra_metrics : Optional[dict[str, callable]] = {i: mt.Metrics[i] for i in
['Acc', 'Bacc', 'Tpr', 'Tnr', 'Fpr', 'Fnr', 'Ck', 'Js', 'Prec', 'Rec', 'Fs']}
metrics_args : Optional[dict[str, dict[str, any]]] = {i: {'average': 'weighted', 'zero_division': 0} for i in ['Fs', 'Prec', 'Rec', 'Js']}
#######################################################################################################################
# SCRIPT ##############################################################################################################
#######################################################################################################################
### LOAD DATASET
if use_MUTAG:
# from MUTAG
addressed_problem = 'c'
problem_based = 'g'
from load_MUTAG import graphs
else:
# random graphs
graphs = [utils.randomGraph(nodes_number=int(random.choice(range(min_nodes_number, max_nodes_number))),
dim_node_label=dim_node_label,
dim_arc_label=dim_arc_label,
dim_target=dim_target,
density=density,
normalize_features=False,
aggregation_mode=aggregation_mode,
problem_based=problem_based)
for i in range(graphs_number)]
### PREPROCESSING
# SPLITTING DATASET in Train, Validation and Test set
iTr, iTe, iVa = utils.getindices(len(graphs), perc_Train, perc_Valid, seed=seed)
gTr = [graphs[i] for i in iTr]
gTe = [graphs[i] for i in iTe]
gVa = [graphs[i] for i in iVa]
# BATCHES - gTr is list of GraphObject; gVa and gTe are GraphObjects + use gTr[0] for taking useful dimensions
gTr = utils.getbatches(gTr, batch_size=batch_size, problem_based=problem_based, aggregation_mode=aggregation_mode)
gVa = GraphObject.merge(gVa, problem_based=problem_based, aggregation_mode=aggregation_mode)
gTe = GraphObject.merge(gTe, problem_based=problem_based, aggregation_mode=aggregation_mode)
gGen = gTr[0].copy()
# GRAPHS NORMALIZATION, based on training graphs
if normalize:
utils.normalize_graphs(gTr, gVa, gTe,
based_on='gTr',
norm_rangeN=norm_nodes_range,
norm_rangeA=norm_arcs_range)
### MODELS
# NETS - STATE
input_net_st, layers_net_st = zip(*[get_inout_dims(net_name='state', dim_node_label=gGen.DIM_NODE_LABEL,
dim_arc_label=gGen.DIM_ARC_LABEL, dim_target=gGen.DIM_TARGET,
problem_based=problem_based, dim_state=dim_state,
hidden_units=hidden_units_net_state,
layer=i, get_state=get_state, get_output=get_output) for i in range(layers)])
nets_St = [MLP(input_dim=i, layers=j,
activations=activations_net_state,
kernel_initializer=kernel_init_net_state,
bias_initializer=bias_init_net_state,
kernel_regularizer=kernel_reg_net_state,
bias_regularizer=bias_reg_net_state,
dropout_rate=dropout_rate_st,
dropout_pos=dropout_pos_st) for i, j in zip(input_net_st, layers_net_st)]
# NETS - OUTPUT
input_net_out, layers_net_out = zip(*[get_inout_dims(net_name='output', dim_node_label=gGen.DIM_NODE_LABEL,
dim_arc_label=gGen.DIM_ARC_LABEL, dim_target=gGen.DIM_TARGET,
problem_based=problem_based, dim_state=dim_state,
hidden_units=hidden_units_net_output,
layer=i, get_state=get_state, get_output=get_output) for i in range(layers)])
nets_Out = [MLP(input_dim=i, layers=j,
activations=activations_net_output,
kernel_initializer=kernel_init_net_output,
bias_initializer=bias_init_net_output,
kernel_regularizer=kernel_reg_net_output,
bias_regularizer=bias_reg_net_output,
dropout_rate=dropout_rate_out,
dropout_pos=dropout_pos_out) for i, j in zip(input_net_out, layers_net_out)]
# GNNs
gnntype = {'n': GNNnodeBased, 'a': GNNedgeBased, 'g': GNNgraphBased}[problem_based]
# noinspection PyTypeChecker
gnns = [gnntype(net_state=st,
net_output=out,
optimizer=optimizer.__class__(**optimizer.get_config()),
loss_function=lossF,
loss_arguments=lossArguments,
state_vect_dim=dim_state,
max_iteration=max_iter,
threshold=state_threshold,
addressed_problem=addressed_problem,
extra_metrics=extra_metrics,
extra_metrics_arguments=metrics_args,
path_writer=f'{path_writer}/GNN{idx}') for idx, st, out in zip(range(layers), nets_St, nets_Out)]
# SINGLE GNN
gnn = gnns[0].copy(path_writer=f'{path_writer}GNN_single', copy_weights=True)
# LGNN
lgnn = LGNN(gnns=gnns,
get_state=get_state,
get_output=get_output,
optimizer=optimizer,
loss_function=lossF,
loss_arguments=lossArguments,
addressed_problem=addressed_problem,
extra_metrics=extra_metrics,
extra_metrics_arguments=metrics_args,
path_writer=f'{path_writer}LGNN',
namespace='LGNN')
|
{"hexsha": "5a386c93e1d9ca1e44f62a1f5ac5e27d4d5e72d8", "size": 9273, "ext": "py", "lang": "Python", "max_stars_repo_path": "starter.py", "max_stars_repo_name": "vishalbelsare/GNN_tf_2.x", "max_stars_repo_head_hexsha": "4b6429ed58f2c0922257600a9287d5cc5a10395b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-09T08:45:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T12:00:18.000Z", "max_issues_repo_path": "starter.py", "max_issues_repo_name": "vishalbelsare/GNN_tf_2.x", "max_issues_repo_head_hexsha": "4b6429ed58f2c0922257600a9287d5cc5a10395b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "starter.py", "max_forks_repo_name": "vishalbelsare/GNN_tf_2.x", "max_forks_repo_head_hexsha": "4b6429ed58f2c0922257600a9287d5cc5a10395b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-23T09:57:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-24T05:37:13.000Z", "avg_line_length": 47.5538461538, "max_line_length": 142, "alphanum_fraction": 0.5819044538, "include": true, "reason": "from numpy", "num_tokens": 2029}
|
#!/usr/bin/env python
import rospy
import tf
from auv_msgs.msg import NavSts
from uw_vs.msg import PilotRequest
from geometry_msgs.msg import Pose, TwistStamped
import numpy as np
global TOPIC_NAV # get the vehicle simulated pose
global TOPIC_POSE # publishes simulated pose on UWSim
global TOPIC_CMD # get the controller command
global TOPIC_PILOT # publishes the command to the pilot
global sub_nav # subscribes to simulator to get navigation data
global pub_pose # publishes the pose on UWSim (on simulation)
global sub_cmd # subscribes to controller to get the command
global pub_pilot # publishes the command to the vehicle pilot (on simulation)
global pose # vehicle pose [x,y,z,r,p,q]
def callbackCmd(data):
# get command and publishes it to simulator
pr = PilotRequest()
pr.header.stamp = rospy.Time.now()
pr.velocity = np.array([data.twist.linear.x,
data.twist.linear.y,
data.twist.linear.z,
data.twist.angular.x,
data.twist.angular.y,
data.twist.angular.z])
pub_pilot.publish(pr)
def callbackNav(data):
# get simulated pose and publishes it into UWSim
pose.position.x = data.position.north
pose.position.y = data.position.east
pose.position.z = data.position.depth
quaternion = tf.transformations.quaternion_from_euler(data.orientation.roll, data.orientation.pitch, data.orientation.yaw)
pose.orientation.x = quaternion[0]
pose.orientation.y = quaternion[1]
pose.orientation.z = quaternion[2]
pose.orientation.w = quaternion[3]
pub_pose.publish(pose)
def repeater():
rospy.init_node('dyn_interface', anonymous=True)
rate = rospy.Rate(10) # 10hz
rospy.spin()
if __name__ == '__main__':
rospy.loginfo("main")
TOPIC_POSE = '/nessie/pose'
TOPIC_NAV = '/nav/nav_sts'
TOPIC_CMD = '/cmd/twist'
TOPIC_PILOT = '/pilot/velocity_req'
pose = Pose()
sub_nav = rospy.Subscriber(TOPIC_NAV, NavSts, callbackNav)
pub_pose = rospy.Publisher(TOPIC_POSE, Pose, queue_size=10)
sub_cmd = rospy.Subscriber(TOPIC_CMD, TwistStamped, callbackCmd)
pub_pilot = rospy.Publisher(TOPIC_PILOT, PilotRequest, queue_size=10)
repeater()
|
{"hexsha": "93a56dbdb95fad0f7aed04864c4999658668f3ce", "size": 2083, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dyn_interface.py", "max_stars_repo_name": "LaboratoireCosmerTOULON/uwvs_osl", "max_stars_repo_head_hexsha": "c3d790c451d13bebc1265b5d6011655ef660232e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dyn_interface.py", "max_issues_repo_name": "LaboratoireCosmerTOULON/uwvs_osl", "max_issues_repo_head_hexsha": "c3d790c451d13bebc1265b5d6011655ef660232e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dyn_interface.py", "max_forks_repo_name": "LaboratoireCosmerTOULON/uwvs_osl", "max_forks_repo_head_hexsha": "c3d790c451d13bebc1265b5d6011655ef660232e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1475409836, "max_line_length": 123, "alphanum_fraction": 0.7609217475, "include": true, "reason": "import numpy", "num_tokens": 540}
|
import pandas as pd
import numpy as np
import click
import h5py
import os
import logging
from array import array
from copy import deepcopy
from tqdm import tqdm
from astropy.io import fits
from fact.credentials import create_factdb_engine
from zfits import FactFits
from scipy.optimize import curve_fit
from joblib import Parallel, delayed
import drs4Calibration.config as config
from drs4Calibration.constants import NRCHID, NRCELL, NRTEMPSENSOR, ROI, ADCCOUNTSTOMILIVOLT
from drs4Calibration.tools import safety_stuff
import matplotlib.pyplot as plt
from time import time
def print_delta_time(time, string=""):
hours = int(time / 3600)
rest = time % 3600
minutes = int(rest / 60)
seconds = round(rest % 60, 2)
print(string+" deltaTime: ", hours, minutes, seconds)
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/drsFitsFiles.txt",
type=click.Path(exists=False))
def search_drs_fits_files(drs_file_list_doc_path: str):
'''
Search through the fact-database and store the path of all drsFiles
under the given storePath
Args:
drs_file_list_doc_path (str):
Full path to the storeFile
with the extension '.txt'
'''
# TODO check safety stuff. maybe remove
#safety_stuff(drs_file_list_doc_path)
def filename(row):
return os.path.join(
str(row.date.year),
"{:02d}".format(row.date.month),
"{:02d}".format(row.date.day),
"{}_{:03d}.fits.fz".format(row.fNight, row.fRunID),
)
# 40drs4320Bias
drs_infos = pd.read_sql(
"RunInfo",
create_factdb_engine(),
columns=[
"fNight", "fRunID",
"fRunTypeKey", "fDrsStep",
"fNumEvents"])
drs_file_infos = drs_infos.query("fRunTypeKey == 2 &" +
"fDrsStep == 2 &" +
"fNumEvents == 1000").copy()
# fNumEvents == 1000 prevent for unfinished/broken files
drs_file_infos["date"] = pd.to_datetime(drs_file_infos.fNight.astype(str),
format="%Y%m%d")
drs_files = drs_file_infos.apply(filename, axis=1).tolist()
pd.DataFrame(drs_files).to_csv(drs_file_list_doc_path, index=False,
header=False)
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/selectedDrsFitsFiles.txt",
type=click.Path(exists=True))
@click.argument('store_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/newBaseline_timeTest.h5",
type=click.Path(exists=False))
@click.argument('source_folder_path',
default="/net/big-tank/POOL/projects/fact/drs4_calibration_data/",
type=click.Path(exists=False))
def store_drs_values(drs_file_list_doc_path, store_file_path, source_folder_path):
with h5py.File(store_file_path, 'w') as hf:
hf.create_dataset(
name="Time", dtype="float32",
shape=(0, 1), maxshape=(None, 1),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="Temperature", dtype="float32",
shape=(0, NRTEMPSENSOR), maxshape=(None, NRTEMPSENSOR),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="NewBaseline", dtype="float32",
shape=(0, NRCHID*NRCELL*ROI), maxshape=(None, NRCHID*NRCELL*ROI),
compression="gzip", compression_opts=9,
fletcher32=True)
class SourceDataSet:
# @resettable
run_begin = pd.to_datetime("")
run_end = pd.to_datetime("")
def __init__(self):
type(self).run_begin = pd.to_datetime("")
type(self).run_end = pd.to_datetime("")
source_data_set = SourceDataSet()
drs_file_list = open(drs_file_list_doc_path).read().splitlines()
for drs_fits_file_path in tqdm(drs_file_list):
drs_fits_file_path = drs_file_list[700] # care!!
date_path_part = drs_fits_file_path.split('_')[0]
drs_fits_file_path = (source_folder_path+"raw/" +
drs_fits_file_path.strip("\n"))
drs_file_path = (drs_fits_file_path.strip("fits.fz") +
".drs.fits.gz")
temp_file_path = (source_folder_path+"aux/" +
date_path_part+".FAD_CONTROL_TEMPERATURE.fits")
if(os.path.isfile(drs_fits_file_path) and os.path.isfile(temp_file_path)):
time_marker1 = time()
with fits.open(drs_file_path,
ignoremissing=True,
ignore_missing_end=True) as drs_table:
source_data_set.run_begin = pd.to_datetime(drs_table[1].header["RUN2-BEG"])
source_data_set.run_end = pd.to_datetime(drs_table[1].header["RUN2-END"])
print(type(source_data_set.run_begin), type(source_data_set.run_end))
time_marker2 = time()
print_delta_time(time_marker2 - time_marker1, "open drs_file_path")
time_marker3 = time()
with fits.open(temp_file_path,
mmap=True,
mode='denywrite',
ignoremissing=True,
ignore_missing_end=True) as table:
table_time = table[1].data["Time"]
table_temperature = table[1].data["temp"]
time_marker4 = time()
print_delta_time(time_marker4 - time_marker3, "open temp_file_path")
print(type(table_time), table_time.shape, type(table_temperature), table_temperature.shape)
time_marker5 = time()
if table_temperature.shape[1] != NRTEMPSENSOR:
temp_filename = temp_file_path.split('/')[-1]
message = (
" File not used: Just "+str(table_temperature.shape[1]) +
" Temperature Values in File '"+temp_filename+"'")
raise Exception(message)
table_datetime = pd.to_datetime(table_time * 24 * 3600 * 1e9)
data_len = len(table_datetime)
lower_mask = np.where(table_datetime > source_data_set.run_begin)[0]
upper_mask = np.where(table_datetime < source_data_set.run_end)[0]
mask = []
if(len(lower_mask) is not 0 and
len(upper_mask) is not 0):
lower_boundarie_idx = lower_mask[0]
upper_boundarie_idx = upper_mask[-1]
if(lower_boundarie_idx > 0):
lower_boundarie_idx = lower_boundarie_idx - 1
if(upper_boundarie_idx < data_len):
upper_boundarie_idx = upper_boundarie_idx + 1
mask = np.arange(lower_boundarie_idx, upper_boundarie_idx+1, 1, dtype="int")
if len(mask) == 0:
message = ("Cant use drs file," +
" runs out of range of temperature data taking")
raise Exception(message)
timestamps_during_run = np.array(table_time[mask])
temperature_during_run = np.array(table_temperature[mask])
if timestamps_during_run.shape[0] > 1:
time_mean = np.mean(timestamps_during_run, dtype="float32")
else:
time_mean = timestamps_during_run
if temperature_during_run.shape[0] > 1:
temp_mean = np.mean(temperature_during_run, dtype="float32",
axis=0)
else:
temp_mean = temperature_during_run
time_marker6 = time()
print_delta_time(time_marker6 - time_marker5, "calc temp/time")
print_delta_time(time_marker6 - time_marker1, "complete")
time_marker7 = time()
fits_stream = FactFits(drs_fits_file_path)
time_marker8 = time()
print_delta_time(time_marker8 - time_marker7, "load fits_stream")
cell_sample_value_mean_default = array("f", [np.NaN] * (NRCELL*ROI))
chid_cell_sample_value_mean_default = array("f", [np.NaN] * (NRCHID*NRCELL*ROI))
chid_cell_sample_value_mean = deepcopy(chid_cell_sample_value_mean_default)
for chid in tqdm(range(NRCHID)):
#time_marker9 = time()
cell_sample_values = [x[:] for x in [[]] * (1024*300)]
#time_marker10 = time()
#print_delta_time(time_marker10 - time_marker9, "init cell_sample_values")
fits_stream = FactFits(drs_fits_file_path)
for event in tqdm(fits_stream):
start_cell = event["StartCellData"][chid]
data = event["Data"]
for sample in range(ROI):
cell = (start_cell + sample) % NRCELL
value = data[chid][sample]
cell_sample_values[cell*ROI+sample].append(value)
#print(type(event["Data"]), event["Data"].shape)
# print(cell_sample_values[5*300+150])
# print(cell_sample_values[15*300+150])
# print(cell_sample_values[100*300+150])
cell_sample_value_mean = deepcopy(cell_sample_value_mean_default)
for index in tqdm(range(len(cell_sample_values))):
#print(type(cell_sample_values[index]), cell_sample_values[index])
values = cell_sample_values[index]
if(len(values) == 1):
cell_sample_value_mean[index] = values[0]
elif (len(values) > 1):
cell_sample_value_mean[index] = np.mean(values)
chid_cell_sample_value_mean[chid*NRCELL*ROI:(chid+1)*NRCELL*ROI] = cell_sample_value_mean
#print(cell_sample_value_mean)
return
#fits_stream.close()
with h5py.File(store_file_path) as h5pyTable:
add_value_to_h5py_table(
h5pyTable,
"Time",
time_mean)
add_value_to_h5py_table(
h5pyTable,
"Temperature",
temp_mean)
add_value_to_h5py_table(
h5pyTable,
"NewBaseline",
chid_cell_sample_value_mean)
else:
drs_filename = drs_fits_file_path.split('/')[-1]
temp_filename = temp_file_path.split('/')[-1]
print(" Pair of drs file '"+drs_filename+"'" +
" and temp file '"+temp_filename+"' does not exist")
def add_value_to_h5py_table(h5pyTable, columnName, value):
data = h5pyTable[columnName]
data.resize((len(data)+1, data.maxshape[1]))
data[len(data)-1, :] = value
@click.command()
@click.argument('path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/newBaseline.h5",
type=click.Path(exists=False))
def plot(path):
chid = 0
cell = 0
sample = 10
with h5py.File(path) as h5pyTable:
time = h5pyTable["Time"][:]
temp = h5pyTable["Temperature"][:, int(chid/9)]
value = h5pyTable["NewBaseline"][:, (chid*NRCELL+cell)*ROI+sample]
print(type(time), len(time))
print(type(temp), len(temp))
print(type(value), len(value))
mask = np.where(value == value)
temp = temp[mask]
value = value[mask]*ADCCOUNTSTOMILIVOLT
temp_matrix = np.vstack([temp, np.ones(len(temp))]).T
slope, offset = np.linalg.lstsq(temp_matrix, value)[0]
plt.plot(temp, value, ".")
temp_range = np.linspace(10, 40, 10000)
y_fit = slope*temp_range+offset
plt.plot(temp_range, y_fit)
print(slope, offset)
print(y_fit)
#fitPlot, = plt.plot(temp_range, fit-single_photon_limit, "--", color=color_mean)
#fitPlot, = plt.plot(temp_range, fit+single_photon_limit, "--", color=color_mean)
plt.title("new Baseline \nChid: "+str(chid)+", Cell: "+str(cell)+", Sample: "+str(sample), fontsize=15, y=1.00) # , fontsize=20, y=0.95
plt.xlabel(r'Temperature /$\mathrm{^\circ C}$')
plt.ylabel("Baseline"+r' /$\mathrm{mV}$')
plt.xlim(min(temp)-1, max(temp)+1)
plt.grid()
plt.gca().ticklabel_format(useOffset=False)
plt.savefig("test.jpg")
plt.show()
plt.close()
# def value(temp, chid, cell, sample):
# ADCCOUNTSTOMILIVOLT = 2000.0 / 4096.0
# NRCELL = 1024
# ROI = 300
# f = fits.open("drsFitParameter.fits")
# bs = f[1].data["BaselineSlope"][0][chid*NRCELL+cell]
# bo = f[1].data["BaselineOffset"][0][chid*NRCELL+cell]
# ts = f[1].data["TriggerOffsetSlope"][0][chid*ROI+sample]
# to = f[1].data["TriggerOffsetOffset"][0][chid*ROI+sample]
# oldValue = bs*temp+bo+ts*temp+to
# h5pyTable = h5py.File("newBaseline.h5")
# time = h5pyTable["Time"][:]
# temperature = h5pyTable["Temperature"][:, int(chid/9)]
# value = h5pyTable["NewBaseline"][:, (chid*NRCELL+cell)*ROI+sample]
# mask = np.where(value == value)
# temperature = temperature[mask]
# value = value[mask]*ADCCOUNTSTOMILIVOLT
# temp_matrix = np.vstack([temperature, np.ones(len(temperature))]).T
# slope, offset = np.linalg.lstsq(temp_matrix, value)[0]
# newValue = slope*temp+offset
# return [oldValue, newValue, oldValue-newValue]
for cell in range(10):
print("cell", cell)
delta = []
for sample in range(300):
delta.append(value(20, 0, cell, sample)[2])
plt.plot(np.arange(300), delta)
plt.xlabel(r'Sample')
plt.ylabel("Delta Baseline"+r' /$\mathrm{mV}$')
plt.grid()
plt.savefig("deltaBaseline_temp20_chid0_cell"+str(cell)+".jpg")
plt.show()
plt.close()
# @click.command()
# @click.argument('source_file_path',
# default="/net/big-tank/POOL/" +
# "projects/fact/drs4_calibration_data/" +
# "calibration/calculation/time/temp/timeCalibrationData20160817_017.fits",
# type=click.Path(exists=True))
# @click.argument('store_file_path',
# default="/net/big-tank/POOL/" +
# "projects/fact/drs4_calibration_data/" +
# "calibration/calculation/time/temp/timeCalibrationData.h5",
# type=click.Path(exists=False))
# def store_new(source_file_path, store_file_path):
# # TODO check safety stuff. maybe remove
# safety_stuff(store_file_path)
#
# with h5py.File(store_file_path, 'w') as hf:
# hf.create_dataset(
# name="delta_t", dtype="float32",
# shape=(0, NRCHID*NRCELL*ROI), maxshape=(None, NRCHID*NRCELL*ROI),
# compression="gzip", compression_opts=5,
# fletcher32=True)
# hf.create_dataset(
# name="voltage", dtype="float32",
# shape=(0, NRCHID*NRCELL*ROI), maxshape=(None, NRCHID*NRCELL*ROI),
# compression="gzip", compression_opts=5,
# fletcher32=True)
#
# chid_array_offset = np.linspace(0, (NRCHID-1)*NRCELL, NRCHID, dtype='uint32')
# chid_array_offset = np.repeat(chid_array_offset, repeats=ROI)
# with fits.open(source_file_path,
# mmap=True,
# mode='denywrite',
# ignoremissing=True,
# ignore_missing_end=True) as table:
# nr_rows = 0
# max_counter = 0
# counter = np.zeros((NRCHID*NRCELL), dtype='uint32')
# nr_events = table[1].data["Data"].shape[0]
# for chid in range(NRCHID):
# chid_array_offset = chid*NRCELL*RIO
# cell_ids = table[1].data["cellIDs"][:, chid]
# array_indices = np.add(np.multiply(cell_ids, 300), chid_array_offset)
# voltage = table[1].data["Data"]
# delta_t = table[1].data["deltaT"]
#
#
# def add_value_to_h5py_table(h5pyTable, columnName, value):
# data = h5pyTable[columnName]
# data.resize(len(data)+1, axis=0)
# data[-1, :] = value
@click.command()
@click.argument('source_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/time/temp/timeCalibrationData20160817_017.fits",
type=click.Path(exists=True))
@click.argument('store_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/time/temp/timeCalibrationData20160817_017_newVersion.h5",
type=click.Path(exists=False))
def store(source_file_path, store_file_path):
# TODO check safety stuff. maybe remove
safety_stuff(store_file_path)
with fits.open(source_file_path,
mmap=True,
mode='denywrite',
ignoremissing=True,
ignore_missing_end=True) as table:
cell_ids = table[1].data["cellIDs"]
voltage = table[1].data["Data"]
delta_t = table[1].data["deltaT"]
sorted_delta_t = []
sorted_voltage = []
for chid in tqdm(range(NRCHID)):
cell_ids_chid = cell_ids[:, chid*ROI:(chid+1)*ROI]
delta_t_chid = delta_t[:, chid*ROI:(chid+1)*ROI]
voltage_chid = voltage[:, chid*ROI:(chid+1)*ROI]
for cell in tqdm(range(NRCELL)):
mask_cell = np.where(cell_ids_chid.ravel() == cell)
sorted_delta_t.append([delta_t_chid.ravel()[mask_cell]])
sorted_voltage.append([voltage_chid.ravel()[mask_cell]])
def read_chid(fits_file, chid):
num_events = fits_file[1].data.shape[0]
data = pd.DataFrame()
for key in ('cellIDs', 'deltaT', 'Data'):
data[key] = (
fits_file[1].data[key][:, chid * 300: (chid + 1) * 300]
.ravel()
.byteswap()
.newbyteorder()
)
data['sample'] = np.tile(np.arange(300), num_events)
data.rename(
columns={
'cellIDs': 'cell',
'Data': 'adc_counts',
'deltaT': 'delta_t',
},
inplace=True,
)
data.dropna(inplace=True)
return data
def time_function(x, a, b, c):
return a * x ** b + c
def fit(df, cell, plot=False):
big_time = df.delta_t.quantile(0.75)
p0 = [
0.3,
-0.66,
df.adc_counts[df.delta_t >= big_time].mean(),
]
try:
(a, b, c), cov = curve_fit(
f,
df['delta_t'],
df['adc_counts'],
p0=p0,
maxfev=100000,
)
except RuntimeError:
logging.error('Could not fit cell {}'.format(cell))
return np.full(4, np.nan)
ndf = len(df.index) - 3
residuals = df['adc_counts'] - f(df['delta_t'], a, b, c)
model_value = slope_cell*temp_cell + offset_cell
residuals = drs_value_cell - model_value
chisquare = np.sum(pow(residuals[submask], 2)/abs(model_value[submask]))
chisquare = np.sum(residuals**2) / ndf
return a, b, c, chisquare
@click.command()
@click.argument('source_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/drsFiles.txt",
type=click.Path(exists=True))
@click.argument('store_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/drsFiles.txt",
type=click.Path(exists=False))
@click.argument('jobs',
default=1)
@click.argument('verbosity',
default=0)
def calculate_time_fitvalues(source_file_path: str, store_file_path: str,
jobs: int, verbosity: int):
"""
Fit raw data with powerlaw a*x**b+c and calculate chisquare for every fit.
data is contained in a pandas data frame.
Args:
source_file_path (str):
Full path to the sourceParameter file with the extension '.h5'
store_file_path (str):
Full path to the storeFile with the extension '.h5'
jobs (int):
The maximum number of concurrently running jobs,
or the size of the thread-pool. -Nr of CPUs used
verbosity (int):
The verbosity level: if non zero, progress messages are printed.
Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it more than 10, all iterations are reported.
"""
logging.basicConfig(
filename=store_file_path.split('.')[0]+".log", filemode='w',
format='%(levelname)s:%(message)s', level=logging.DEBUG)
# TODO check safety stuff. maybe remove
safety_stuff(store_file_path)
slope = []
exponent = []
offset = []
sample_limits = [10, 290]
for chid in range(1440):
logging.info('%s', chid)
if chid % 9 == 8:
sample_limits[1] = 240
for cell in range(NRCELLSPERCHID):
voltage_cell = voltage[chid*NRCELLSPERCHID]
data = read_chid(f, chid)
data = data[(data['sample'] > lower_limit) &
(data['sample'] < upper_limit)]
big_time = df.delta_t.quantile(0.75)
p0 = [
0.3,
-0.66,
df.adc_counts[df.delta_t >= big_time].mean(),
]
try:
(a, b, c), cov = curve_fit(
f,
df['delta_t'],
df['adc_counts'],
p0=p0,
maxfev=100000,
)
except RuntimeError:
logging.error('Could not fit cell {}'.format(cell))
return np.full(4, np.nan)
# new_columns = fits.ColDefs(
# [fits.Column(
# name="Slope", format=str(NRCELLSPERCHID)+'E',
# unit="mV/second", dim=[1, NRCELLSPERCHID],
# array=[slope]),
# fits.Column(
# name="exponent", format=str(NRCELLSPERCHID)+'E',
# unit="1", dim=[1, NRCELLSPERCHID],
# array=[exponent]),
# fits.Column(
# name="Offset", format=str(NRCELLSPERCHID)+'E',
# unit="mV", dim=[1, NRCELLSPERCHID],
# array=[offset])])
|
{"hexsha": "857b18b35686656b46bc34d4625e565c64b6ccff", "size": 23133, "ext": "py", "lang": "Python", "max_stars_repo_path": "drs4Calibration/timelapse/drs4CalibrationTool_time.py", "max_stars_repo_name": "fact-project/DrsTemperatureCalibration", "max_stars_repo_head_hexsha": "3702ee390c16cf2c5930d4a0f24c1354d036d645", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "drs4Calibration/timelapse/drs4CalibrationTool_time.py", "max_issues_repo_name": "fact-project/DrsTemperatureCalibration", "max_issues_repo_head_hexsha": "3702ee390c16cf2c5930d4a0f24c1354d036d645", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drs4Calibration/timelapse/drs4CalibrationTool_time.py", "max_forks_repo_name": "fact-project/DrsTemperatureCalibration", "max_forks_repo_head_hexsha": "3702ee390c16cf2c5930d4a0f24c1354d036d645", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0476973684, "max_line_length": 140, "alphanum_fraction": 0.5714779752, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 5561}
|
#!/usr/bin/env python3
# coding: utf-8
# ------------ #
# Lum Analysis #
# ------------ #
### Modules
# std library
import os
from os.path import join
from collections import OrderedDict
# dependencies
import scipy.signal as signal
import numpy as np
from datetime import datetime, timedelta
# custom
from pupil_code.pupil_tools.data_tools import readInfo, readPupil, processPupil
from pupil_code.pupil_tools.data_tools import readLux, graphPlot, upsampleLux
from pupil_code.pupil_tools.data_tools import readCamera, drawDistance, saveCsv
from pupil_code.pupil_tools.signal_tools import interpnan, interpzero
from pupil_code.pupil_tools.colour_tools import calcPupil
### Functions & Procedures
def lumAnalysis(self):
# self.plot.close()
data_source = self.settingsDict['recordingFolder']
lux_data_source = self.settingsDict['luxFolder']
print(lux_data_source)
recording_name = data_source.split("/")[-1]
recording_source = os.path.dirname(data_source)
# export inside the recording
export_source = join(data_source, "exports", "000")
# export all in a separate folder
export_source_alt = self.settingsDict['exportFolder']
# PlotSize
fig, ax = self.plot.subplots(figsize=(10, 5))
ax.set_ylim(-5, 10)
##### unified pupil size #####
age = self.settingsDict['partAge']
referenceAge = 28.58
nOfEye = 2
fieldAngle = 167
##### unified pupil size #####
useCamera = self.settingsDict['useCamera']
confidence_treshold = 0.6
filterForConf = True
##### end cofig #####
timelag = self.settingsDict['timelag']
sampleFreq = 120
distSampleLenght = 1*sampleFreq # eye_frames 120fps
pupilFiltering = int(self.settingsDict['pupilFiltering'])*2
sampleFreqCamera = 30
export = self.settingsDict['exportData']
showPlot = self.settingsDict['showPlot']
##### read recond info #####
pupil_coulmn = 6 # 13 in mm 6 in px
pupil_offset = 0
pupilData = readPupil(export_source)
recordingInfo = readInfo(data_source)
# get Time from the info file
recStartTime = datetime.fromtimestamp(float(recordingInfo["start_time_system_s"]))
recStartTimeAlt = float(recordingInfo["start_time_synced_s"])
bootTime = datetime.fromtimestamp(float(recordingInfo["start_time_system_s"])-recStartTimeAlt)
timeFromBoot = recStartTime-bootTime
recDuration = recordingInfo["duration_s"]
recDurationSeconds = timedelta(seconds=float(recDuration))
recEndTime = recStartTime + recDurationSeconds
print("Reconding started at :", recStartTime)
print("Computer booted at :", bootTime)
print("It was on for :", timeFromBoot)
print("The recording lasted :", recDuration)
pupilValues = processPupil(pupilData,
pupil_coulmn,
recStartTimeAlt,
filterForConf,
confidence_treshold)
recPupilValues, recTimeStamps, recFrames, recSimpleTimeStamps, recConfidence = pupilValues
# remove nan form the pupil arrary
recPupilValues = interpnan(recPupilValues)
recPupilValues_filter = signal.savgol_filter(recPupilValues, 1*sampleFreq+1, 2)
recPupilValues = signal.savgol_filter(recPupilValues, int(sampleFreq/10)+1, 6)
recConfidence = signal.savgol_filter(recConfidence, int(sampleFreq/10)+1, 6)
luxTimeStamps, luxValues = readLux(lux_data_source,
data_source,
recStartTime,
recEndTime)
luxTimeStamps = [x - timelag for x in luxTimeStamps]
# filtered set of lux (10fps)
luxValues = signal.savgol_filter(interpnan(luxValues), 10+1, 6)
luxValues = upsampleLux(luxTimeStamps,
luxValues,
recTimeStamps,
recordingInfo,
True)
pupilValue = calcPupil(luxValues, age, referenceAge, nOfEye, fieldAngle)
luxPupilValues = interpnan(pupilValue)
meanLux = np.nanmean(luxPupilValues, axis=0)
meanRec = np.nanmean(recPupilValues_filter, axis=0)
stdLux = np.nanstd(luxPupilValues)
stdRec = np.nanstd(recPupilValues_filter)
pupil_coeff = meanLux / meanRec
# pupil_coeff = ( meanLux-stdLux )/ (meanRec - stdRec )
print(f"calculated pupil_coeff={pupil_coeff}")
recPupilValues_scaled = [x * pupil_coeff for x in recPupilValues]
recPupilValues_filter_scaled = [x * pupil_coeff for x in recPupilValues_filter]
graphPlot(self.plot,
recSimpleTimeStamps,
luxPupilValues,
"blue",
0.8,
"Sensor Calculated Pupil")
if not useCamera:
graphPlot(self.plot,
recSimpleTimeStamps,
recPupilValues_scaled,
"gray",
0.5,
"Raw EyeTracker Pupil")
graphPlot(self.plot,
recSimpleTimeStamps,
recPupilValues_filter_scaled,
"black",
0.8,
"Smoothed EyeTracker Pupil")
if useCamera:
indexLum, timeStampsLum, avgLum, spotLum = readCamera(data_source)
avgLum = upsampleLux(timeStampsLum, avgLum, recTimeStamps, recordingInfo, False)
spotLum = upsampleLux(timeStampsLum, spotLum, recTimeStamps, recordingInfo, False)
scaledSpotLum = []
for i in range(0, len(recTimeStamps)):
sensorLux = luxValues[i]
cameraALum = avgLum[i]
cameraSLum = spotLum[i]
cameraLum_min = sensorLux / (cameraALum * 10+1)
cameraLum_max = cameraLum_min * 11
# linear interpolation method
scaledSpot = ((cameraLum_max * cameraSLum)+ (cameraLum_min * (1-cameraSLum)) )/2
scaledSpotLum.append(scaledSpot)
scaledSpotLum = signal.savgol_filter(interpnan(interpzero(scaledSpotLum)), sampleFreq*3+1, 1)
spotPupilValues = calcPupil(scaledSpotLum, age, referenceAge, nOfEye, fieldAngle)
meanLum = np.nanmean(spotPupilValues, axis=0)
meanRec = np.nanmean(recPupilValues_filter, axis=0)
stdLum = np.nanstd(spotPupilValues)
stdRec = np.nanstd(recPupilValues_filter)
pupilLum_coeff = meanLum/meanRec
print(f"pupilLum_coeff={pupilLum_coeff}")
recPupilValues_filter_scaled_Lum = [x * pupilLum_coeff for x in recPupilValues_filter]
graphPlot(self.plot,
recSimpleTimeStamps,
spotPupilValues,
"orange",
1,
"Camera Calculated Pupil")
graphPlot(self.plot,
recSimpleTimeStamps,
recPupilValues_filter_scaled_Lum,
"black",
0.8,
"Smoothed EyeTracker Pupil")
if useCamera:
distanceVal, distanceTime = drawDistance(self.plot,
recPupilValues_filter_scaled_Lum,
spotPupilValues,
recSimpleTimeStamps,
distSampleLenght,
pupilFiltering)
else:
distanceVal, distanceTime = drawDistance(self.plot,
recPupilValues_filter_scaled,
luxPupilValues,
recSimpleTimeStamps,
distSampleLenght,
pupilFiltering)
handles, labels = self.plot.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
self.plot.legend(by_label.values(), by_label.keys())
self.plot.xlabel('Time s')
self.plot.ylabel('Pupil diameter mm')
self.plot.title(f"CW{recording_name}")
if showPlot:
self.plot.savefig(join(export_source, f'plot{recording_name}.pdf'),
bbox_inches='tight')
self.plot.savefig(join(export_source_alt, f'plot_{recording_name}.pdf'),
bbox_inches='tight')
if export:
csv_header = ["timestamp_unix",
"timestamp_relative",
"frame_n",
"confidence",
"mm_pupil_diameter_scaled",
"mm_pupil_diameter_calc_lux",
"px_pupil_diameter_raw",
"recording_name",
"age"]
csv_rows = [recTimeStamps,
recSimpleTimeStamps,
recFrames,
recConfidence,
recPupilValues_filter_scaled,
luxPupilValues,
recPupilValues,
recording_name,
age]
if useCamera:
csv_header.append("mm_pupil_diameter_calc_camera")
csv_rows.append(spotPupilValues)
saveCsv(export_source, "pupilOutput.csv", csv_header, csv_rows)
saveCsv(export_source_alt, f"{recording_name}_pupilOutput.csv", csv_header, csv_rows)
csv_header = ["drelative_wl", "timestamp_relative", "recording_name", "age", "timestamp_unix"]
distanceTimeEpoch = [x + float(recordingInfo["start_time_system_s"]) for x in distanceTime]
csv_rows = [distanceVal, distanceTime, recording_name, age, distanceTimeEpoch]
saveCsv(export_source_alt, f"{recording_name}_pupilOutputDistance.csv", csv_header, csv_rows)
saveCsv(export_source, "pupilOutputDistance.csv", csv_header, csv_rows)
if showPlot:
self.plot.show(block=False)
|
{"hexsha": "c8628051d0dda665df699d728dd2eb6dbe349693", "size": 9897, "ext": "py", "lang": "Python", "max_stars_repo_path": "pupil_code/lum_analysis.py", "max_stars_repo_name": "pignoniG/cognitive_analysis_tool", "max_stars_repo_head_hexsha": "90568fc83493a10b567c1f957a57b3ef3a1cf69f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-07-31T18:32:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-17T05:01:20.000Z", "max_issues_repo_path": "pupil_code/lum_analysis.py", "max_issues_repo_name": "annaEyevia/cognitive_analysis_tool", "max_issues_repo_head_hexsha": "90568fc83493a10b567c1f957a57b3ef3a1cf69f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pupil_code/lum_analysis.py", "max_forks_repo_name": "annaEyevia/cognitive_analysis_tool", "max_forks_repo_head_hexsha": "90568fc83493a10b567c1f957a57b3ef3a1cf69f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-05-15T09:58:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T13:14:05.000Z", "avg_line_length": 35.9890909091, "max_line_length": 102, "alphanum_fraction": 0.6010912398, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2248}
|
""" Draw wiring diagrams (aka string diagrams) using Graphviz.
"""
module GraphvizWiringDiagrams
export to_graphviz
import ...Doctrines: HomExpr
using ...WiringDiagrams, ...WiringDiagrams.WiringDiagramSerialization
import ..Graphviz
import ..Graphviz: to_graphviz
# Constants and data types
##########################
# Default Graphviz font. Reference: http://www.graphviz.org/doc/fontfaq.txt
const default_font = "Serif"
# Default graph, node, edge, and cell attributes.
const default_graph_attrs = Graphviz.Attributes(
:fontname => default_font,
)
const default_node_attrs = Graphviz.Attributes(
:fontname => default_font,
:shape => "none",
:width => "0",
:height => "0",
:margin => "0",
)
const default_edge_attrs = Graphviz.Attributes(
:arrowsize => "0.5",
:fontname => default_font,
)
const default_cell_attrs = Graphviz.Attributes(
:border => "1",
:cellpadding => "4",
)
struct GraphvizBox
stmts::Vector{Graphviz.Statement} # Usually Graphviz.Node
input_ports::Vector{Graphviz.NodeID}
output_ports::Vector{Graphviz.NodeID}
end
# Conversion
############
""" Render a wiring diagram using Graphviz.
The input `f` can also be a morphism expression, which is converted into a
wiring diagram.
# Arguments
- `graph_name="G"`: name of Graphviz digraph
- `direction=:vertical`: layout direction.
Either `:vertical` (top to bottom) or `:horizontal` (left to right).
- `node_labels=true`: whether to label the nodes
- `labels=false`: whether to label the edges
- `label_attr=:label`: what kind of edge label to use (if `labels` is true).
One of `:label`, `:xlabel`, `:headlabel`, or `:taillabel`.
- `port_size="24"`: minimum size of ports on box, in points
- `junction_size="0.05"`: size of junction nodes, in inches
- `outer_ports=true`: whether to display the outer box's input and output ports.
If disabled, no incoming or outgoing wires will be shown either!
- `anchor_outer_ports=true`: whether to enforce ordering of the outer box's
input and output, i.e., ordering of the incoming and outgoing wires
- `graph_attrs=default_graph_attrs`: top-level graph attributes
- `node_attrs=default_node_attrs`: top-level node attributes
- `edge_attrs=default_edge_attrs`: top-level edge attributes
- `cell_attrs=default_cell_attrs`: main cell attributes in node HTML-like label
"""
function to_graphviz(f::WiringDiagram;
graph_name::String="G", direction::Symbol=:vertical,
node_labels::Bool=true, labels::Bool=false, label_attr::Symbol=:label,
port_size::String="24", junction_size::String="0.05",
outer_ports::Bool=true, anchor_outer_ports::Bool=true,
graph_attrs::Graphviz.Attributes=Graphviz.Attributes(),
node_attrs::Graphviz.Attributes=Graphviz.Attributes(),
edge_attrs::Graphviz.Attributes=Graphviz.Attributes(),
cell_attrs::Graphviz.Attributes=Graphviz.Attributes())::Graphviz.Graph
@assert direction in (:vertical, :horizontal)
@assert label_attr in (:label, :xlabel, :headlabel, :taillabel)
vertical = direction == :vertical
# State variables.
stmts = Graphviz.Statement[]
port_map = Dict{Port,Graphviz.NodeID}()
update_port_map! = (v::Int, kind::PortKind, node_ids) -> begin
for (i, node_id) in enumerate(node_ids)
port_map[Port(v,kind,i)] = node_id
end
end
# Invisible nodes for incoming and outgoing wires.
if outer_ports
gv_box = graphviz_outer_box(f; anchor=anchor_outer_ports, vertical=vertical)
append!(stmts, gv_box.stmts)
update_port_map!(input_id(f), OutputPort, gv_box.input_ports)
update_port_map!(output_id(f), InputPort, gv_box.output_ports)
end
# Visible nodes for boxes.
cell_attrs = merge(default_cell_attrs, cell_attrs)
for v in box_ids(f)
gv_box = graphviz_box(box(f,v), box_id([v]),
vertical=vertical, labels=node_labels, port_size=port_size,
junction_size=junction_size, cell_attrs=cell_attrs)
append!(stmts, gv_box.stmts)
update_port_map!(v, InputPort, gv_box.input_ports)
update_port_map!(v, OutputPort, gv_box.output_ports)
end
# Edges.
for (i, wire) in enumerate(wires(f))
source, target = wire.source, wire.target
if !(haskey(port_map, source) && haskey(port_map, target))
continue
end
# Use the port value to label the wire. We take the source port.
# In most wiring diagrams, the source and target ports should yield the
# same label, but that is not guaranteed. An advantage of choosing the
# source port over the target port is that it will carry the
# "more specific" type when implicit conversions are allowed.
port = port_value(f, source)
attrs = Graphviz.Attributes(
:id => wire_id(Int[], i),
:comment => edge_label(port),
)
if labels
attrs[label_attr] = edge_label(port)
end
edge = Graphviz.Edge(port_map[source], port_map[target]; attrs...)
push!(stmts, edge)
end
# Graph.
graph_attrs = merge(graph_attrs, Graphviz.Attributes(
:rankdir => vertical ? "TB" : "LR"
))
Graphviz.Digraph(graph_name, stmts;
graph_attrs=merge(default_graph_attrs, graph_attrs),
node_attrs=merge(default_node_attrs, node_attrs),
edge_attrs=merge(default_edge_attrs, edge_attrs))
end
function to_graphviz(f::HomExpr; kw...)::Graphviz.Graph
to_graphviz(to_wiring_diagram(f); kw...)
end
""" Graphviz box for a generic box.
"""
function graphviz_box(box::AbstractBox, node_id::String;
vertical::Bool=true, labels::Bool=true, port_size::String="0",
cell_attrs::Graphviz.Attributes=Graphviz.Attributes(), kw...)
# Main node.
nin, nout = length(input_ports(box)), length(output_ports(box))
text_label = labels ? node_label(box.value) : ""
html_label = node_html_label(nin, nout, text_label;
vertical=vertical, port_size=port_size, attrs=cell_attrs)
# Note: The `id` attribute is included in the Graphviz output but is not used
# internally by Graphviz. It is for use by downstream applications.
# Reference: http://www.graphviz.org/doc/info/attrs.html#d:id
node = Graphviz.Node(node_id,
id = node_id,
comment = node_label(box.value),
label = html_label,
)
# Input and output ports.
graphviz_port = (kind::PortKind, port::Int) -> begin
Graphviz.NodeID(node_id, port_name(kind, port), port_anchor(kind, vertical))
end
inputs = [ graphviz_port(InputPort, i) for i in 1:nin ]
outputs = [ graphviz_port(OutputPort, i) for i in 1:nout ]
GraphvizBox([node], inputs, outputs)
end
""" Graphviz box for a junction.
"""
function graphviz_box(junction::Junction, node_id::String;
junction_size::String="0", kw...)
node = Graphviz.Node(node_id,
id = node_id,
comment = "junction",
label = "",
shape = "circle",
style = "filled",
fillcolor = "black",
width = junction_size,
height = junction_size,
)
inputs = repeat([Graphviz.NodeID(node_id)], junction.ninputs)
outputs = repeat([Graphviz.NodeID(node_id)], junction.noutputs)
GraphvizBox([node], inputs, outputs)
end
""" Create "HTML-like" node label for a box.
"""
function node_html_label(nin::Int, nout::Int, text_label::String;
vertical::Bool=true, port_size::String="0",
attrs::Graphviz.Attributes=Graphviz.Attributes())::Graphviz.Html
if vertical
Graphviz.Html("""
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="0">
<TR><TD>$(ports_horizontal_html_label(InputPort,nin,port_size))</TD></TR>
<TR><TD $(html_attributes(attrs))>$(escape_html(text_label))</TD></TR>
<TR><TD>$(ports_horizontal_html_label(OutputPort,nout,port_size))</TD></TR>
</TABLE>""")
else
Graphviz.Html("""
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="0">
<TR>
<TD>$(ports_vertical_html_label(InputPort,nin,port_size))</TD>
<TD $(html_attributes(attrs))>$(escape_html(text_label))</TD>
<TD>$(ports_vertical_html_label(OutputPort,nout,port_size))</TD>
</TR>
</TABLE>""")
end
end
""" Create horizontal "HTML-like" label for the input or output ports of a box.
"""
function ports_horizontal_html_label(kind::PortKind, nports::Int,
port_size::String="0")::Graphviz.Html
cols = if nports > 0
join("""<TD HEIGHT="0" WIDTH="$port_size" PORT="$(port_name(kind,i))"></TD>"""
for i in 1:nports)
else
"""<TD HEIGHT="0" WIDTH="$port_size"></TD>"""
end
Graphviz.Html("""
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="0"><TR>$cols</TR></TABLE>""")
end
""" Create vertical "HTML-like" label for the input or output ports of a box.
"""
function ports_vertical_html_label(kind::PortKind, nports::Int,
port_size::String="0")::Graphviz.Html
rows = if nports > 0
join("""<TR><TD HEIGHT="$port_size" WIDTH="0" PORT="$(port_name(kind,i))"></TD></TR>"""
for i in 1:nports)
else
"""<TR><TD HEIGHT="$port_size" WIDTH="0"></TD></TR>"""
end
Graphviz.Html("""
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="0">$rows</TABLE>""")
end
""" Graphviz box for the outer box of a wiring diagram.
"""
function graphviz_outer_box(f::WiringDiagram;
anchor::Bool=true, vertical::Bool=true)
# Subgraphs containing invisible nodes.
stmts = Graphviz.Statement[]
ninputs, noutputs = length(input_ports(f)), length(output_ports(f))
if ninputs > 0
push!(stmts, graphviz_outer_ports(input_id(f), InputPort, ninputs;
anchor=anchor, vertical=vertical))
end
if noutputs > 0
push!(stmts, graphviz_outer_ports(output_id(f), OutputPort, noutputs;
anchor=anchor, vertical=vertical))
end
# Input and output ports.
graphviz_port = (port::Port) -> Graphviz.NodeID(
port_node_name(port.box, port.port),
port_anchor(port.kind, vertical)
)
inputs = [ graphviz_port(Port(input_id(f), OutputPort, i)) for i in 1:ninputs ]
outputs = [ graphviz_port(Port(output_id(f), InputPort, i)) for i in 1:noutputs ]
GraphvizBox(stmts, inputs, outputs)
end
""" Create invisible nodes for the input or output ports of an outer box.
"""
function graphviz_outer_ports(v::Int, kind::PortKind, nports::Int;
anchor::Bool=true, vertical::Bool=true)::Graphviz.Subgraph
@assert nports > 0
dir = vertical ? "LR" : "TB"
port_width = "$(round(24/72,digits=3))" # port width in inches
nodes = [ port_node_name(v, i) for i in 1:nports ]
stmts = Graphviz.Statement[
Graphviz.Node(nodes[i], id=port_name(kind, i)) for i in 1:nports
]
if anchor
push!(stmts, Graphviz.Edge(nodes))
end
Graphviz.Subgraph(
stmts,
graph_attrs=Graphviz.Attributes(
:rank => kind == InputPort ? "source" : "sink",
:rankdir => dir,
),
node_attrs=Graphviz.Attributes(
:style => "invis",
:shape => "none",
:label => "",
:width => dir == "LR" ? port_width : "0",
:height => dir == "TB" ? port_width : "0",
),
edge_attrs=Graphviz.Attributes(
:style => "invis",
),
)
end
port_node_name(v::Int, port::Int) = string(box_id([v]), "p", port)
""" Graphviz anchor for port.
"""
function port_anchor(kind::PortKind, vertical::Bool)
if vertical
kind == InputPort ? "n" : "s"
else
kind == InputPort ? "w" : "e"
end
end
""" Create a label for the main content of a box.
"""
node_label(box_value::Any) = string(box_value)
node_label(::Nothing) = ""
""" Create a label for an edge.
"""
edge_label(port_value::Any) = string(port_value)
edge_label(::Nothing) = ""
""" Encode attributes for Graphviz HTML-like labels.
"""
function html_attributes(attrs::Graphviz.Attributes)::String
join(["$(uppercase(string(k)))=\"$v\"" for (k,v) in attrs], " ")
end
""" Escape special HTML characters: &, <, >, ", '
Borrowed from HttpCommon package: https://github.com/JuliaWeb/HttpCommon.jl
"""
function escape_html(s::AbstractString)
s = replace(s, "&" => "&")
s = replace(s, "\"" => """)
s = replace(s, "'" => "'")
s = replace(s, "<" => "<")
s = replace(s, ">" => ">")
return s
end
end
|
{"hexsha": "e11ece1f585272b967a97a3479ae51cb8ca14df3", "size": 11852, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/graphics/GraphvizWiringDiagrams.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Catlab.jl-134e5e36-593f-5add-ad60-77f754baafbe", "max_stars_repo_head_hexsha": "b8e5e1eab26b53ec7e53c503c1dd5b256e37460b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/graphics/GraphvizWiringDiagrams.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Catlab.jl-134e5e36-593f-5add-ad60-77f754baafbe", "max_issues_repo_head_hexsha": "b8e5e1eab26b53ec7e53c503c1dd5b256e37460b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/graphics/GraphvizWiringDiagrams.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Catlab.jl-134e5e36-593f-5add-ad60-77f754baafbe", "max_forks_repo_head_hexsha": "b8e5e1eab26b53ec7e53c503c1dd5b256e37460b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9598853868, "max_line_length": 91, "alphanum_fraction": 0.6833445832, "num_tokens": 3214}
|
import torch
import cv2
import lib.dataset_handler
import lib.generate_gt_anchor
import lib.tag_anchor
import Net
import numpy as np
import os
import time
import random
import copy
def val(net, criterion, batch_num, using_cuda, logger, img_list):
random_list = random.sample(img_list, batch_num)
total_loss = 0
total_cls_loss = 0
total_v_reg_loss = 0
total_o_reg_loss = 0
start_time = time.time()
for im in random_list:
root, file_name = os.path.split(im)
root, _ = os.path.split(root)
name, _ = os.path.splitext(file_name)
gt_name = 'gt_' + name + '.txt'
gt_path = os.path.join(root, "test_gt", gt_name)
if not os.path.exists(gt_path):
print('Ground truth file of image {0} not exists.'.format(gt_path))
continue
gt_txt = lib.dataset_handler.read_gt_file(gt_path, have_BOM=True)
img = cv2.imread(im)
if img is None:
batch_num -= 1
continue
img, gt_txt = lib.dataset_handler.scale_img(img, gt_txt)
tensor_img = img[np.newaxis, :, :, :]
tensor_img = tensor_img.transpose((0, 3, 1, 2))
if using_cuda:
tensor_img = torch.FloatTensor(tensor_img).cuda()
else:
tensor_img = torch.FloatTensor(tensor_img)
vertical_pred, score, side_refinement = net(tensor_img)
del tensor_img
positive = []
negative = []
vertical_reg = []
side_refinement_reg = []
visual_img = copy.deepcopy(img)
try:
for box in gt_txt:
gt_anchor, visual_img = lib.generate_gt_anchor.generate_gt_anchor(img, box, draw_img_gt=visual_img)
positive1, negative1, vertical_reg1, side_refinement_reg1 = lib.tag_anchor.tag_anchor(gt_anchor, score, box)
positive += positive1
negative += negative1
vertical_reg += vertical_reg1
side_refinement_reg += side_refinement_reg1
except:
print("warning: img %s raise error!" % im)
batch_num -= 1
continue
if len(vertical_reg) == 0 or len(positive) == 0 or len(side_refinement_reg) == 0:
batch_num -= 1
continue
loss, cls_loss, v_reg_loss, o_reg_loss = criterion(score, vertical_pred, side_refinement, positive,
negative, vertical_reg, side_refinement_reg)
total_loss += float(loss)
total_cls_loss += float(cls_loss)
total_v_reg_loss += float(v_reg_loss)
total_o_reg_loss += float(o_reg_loss)
end_time = time.time()
total_time = end_time - start_time
print('#################### Start evaluate ####################')
print('loss: {0}'.format(total_loss / float(batch_num)))
logger.info('Evaluate loss: {0}'.format(total_loss / float(batch_num)))
print('classification loss: {0}'.format(total_cls_loss / float(batch_num)))
logger.info('Evaluate vertical regression loss: {0}'.format(total_v_reg_loss / float(batch_num)))
print('vertical regression loss: {0}'.format(total_v_reg_loss / float(batch_num)))
logger.info('Evaluate side-refinement regression loss: {0}'.format(total_o_reg_loss / float(batch_num)))
print('side-refinement regression loss: {0}'.format(total_o_reg_loss / float(batch_num)))
logger.info('Evaluate side-refinement regression loss: {0}'.format(total_o_reg_loss / float(batch_num)))
print('{1} iterations for {0} seconds.'.format(total_time, batch_num))
print('##################### Evaluate end #####################')
print('\n')
return total_loss
|
{"hexsha": "5bcd8a8f9ad964048335dfcb35bf59fc2f9fa225", "size": 3697, "ext": "py", "lang": "Python", "max_stars_repo_path": "detector/ctpn/evaluate.py", "max_stars_repo_name": "qiu9yu/Lets_OCR", "max_stars_repo_head_hexsha": "62d68b044250d02a9d5ac8c4fbd08cec83faa0d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 671, "max_stars_repo_stars_event_min_datetime": "2018-12-03T01:59:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T08:57:45.000Z", "max_issues_repo_path": "detector/ctpn/evaluate.py", "max_issues_repo_name": "sushuzhi/Lets_OCR", "max_issues_repo_head_hexsha": "b2af7120a34d785434c96e820b6eb1aa69269d20", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2018-12-03T12:56:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T07:23:40.000Z", "max_forks_repo_path": "detector/ctpn/evaluate.py", "max_forks_repo_name": "sushuzhi/Lets_OCR", "max_forks_repo_head_hexsha": "b2af7120a34d785434c96e820b6eb1aa69269d20", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 357, "max_forks_repo_forks_event_min_datetime": "2018-11-07T00:40:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T04:09:35.000Z", "avg_line_length": 38.9157894737, "max_line_length": 124, "alphanum_fraction": 0.6213145794, "include": true, "reason": "import numpy", "num_tokens": 843}
|
import sys
import pyqtgraph as pg
import datetime
import time
import numpy as np
import logging
from PyQt5.QtWidgets import QDialog
from .gui.chartWindowGui import *
from .pg_time_axis import DateAxisItem
"""
Charts are plotted using pyqtgrpah library.
Data are read directly from the image list
model (imageListModel) so charts plot exactly
what is shown in the image list table view.
Improvements needed:
- Filter colors should be moved in Settings
so that users can customize them.
- A factory to build plots, they are now
build directly.
"""
class ChartWindow(QDialog):
logger = logging.getLogger(__name__)
def __init__(self, app):
super().__init__()
self.ui = Ui_Dialog2()
self.ui.setupUi(self)
self.setWindowTitle("Charts")
self.app = app
def closeEvent(self, event):
self.app.settings.setValue("sizeChartW", self.size())
self.app.settings.setValue("posChartW", self.pos())
try:
self.close()
except Exception as e:
self.logger.debug(f"Closing not existing window {e}")
event.accept()
def plot(self, imageListModel):
self.imageListModel = imageListModel
try:
self.resize(self.app.settings.value("sizeChartW"))
self.move(self.app.settings.value("posChartW"))
except Exception as e:
self.logger.error(f"{e}")
self.show()
self.ui.labelColorL.setStyleSheet("color:rgb(244, 244, 244);font-weight:bold")
self.ui.labelColorR.setStyleSheet("color:rgb(255,0,0);font-weight:bold")
self.ui.labelColorG.setStyleSheet("color:rgb(0, 140, 55);font-weight:bold")
self.ui.labelColorB.setStyleSheet("color:rgb(0,0,255);font-weight:bold")
self.ui.labelColorHa.setStyleSheet("color:rgb(190, 255, 0);font-weight:bold")
self.ui.labelColorOiii.setStyleSheet(
"color:rgb(150, 200, 255);font-weight:bold"
)
self.ui.labelColorSii.setStyleSheet("color:rgb(255, 120, 190);font-weight:bold")
self.ui.labelColorN.setStyleSheet("color:rgb(120,120,120);font-weight:bold")
frame = []
filters = []
alt = []
az = []
fwhm = []
eccentricity = []
noise = []
snrweight = []
datetimestr = []
timestampObj = []
colors = []
g1 = []
g2 = []
g3 = []
g4 = []
g5 = []
g6 = []
g7 = []
g8 = []
g9 = []
g10 = []
g11 = []
g12 = []
g13 = []
g14 = []
g15 = []
for row in range(imageListModel.rowCount()):
indexFilters = imageListModel.index(row, 5)
indexAlt = imageListModel.index(row, 14)
indexAz = imageListModel.index(row, 15)
indexDatetimestr = imageListModel.index(row, 16)
indexFwhm = imageListModel.index(row, 25)
indexEccentricity = imageListModel.index(row, 26)
indexSnrweight = imageListModel.index(row, 27)
indexNoise = imageListModel.index(row, 28)
filters.append((str(imageListModel.data(indexFilters))))
alt.append((float(imageListModel.data(indexAlt))))
az.append((float(imageListModel.data(indexAz))))
datetimestr.append((str(imageListModel.data(indexDatetimestr))))
fwhm.append((float(imageListModel.data(indexFwhm))))
eccentricity.append((float(imageListModel.data(indexEccentricity))))
snrweight.append((float(imageListModel.data(indexSnrweight))))
noise.append((float(imageListModel.data(indexNoise))))
# pg only works with timestamps
date_time_obj = datetime.datetime.strptime(
datetimestr[row], "%Y-%m-%dT%H:%M:%S"
)
timestampObj.append(datetime.datetime.timestamp(date_time_obj))
# colors
if filters[row] in self.app.confFilters["L"]:
colors.append(pg.mkBrush(244, 244, 244, 255))
elif filters[row] in self.app.confFilters["R"]:
colors.append(pg.mkBrush(255, 0, 0, 255))
elif filters[row] in self.app.confFilters["B"]:
colors.append(pg.mkBrush(0, 0, 255, 255))
elif filters[row] in self.app.confFilters["G"]:
colors.append(pg.mkBrush(0, 140, 55, 255))
elif filters[row] in self.app.confFilters["Ha"]:
colors.append(pg.mkBrush(190, 255, 0, 255))
elif filters[row] in self.app.confFilters["Oiii"]:
colors.append(pg.mkBrush(150, 200, 255, 255))
elif filters[row] in self.app.confFilters["Sii"]:
colors.append(pg.mkBrush(255, 120, 190, 255))
else:
colors.append(pg.mkBrush(120, 120, 120, 255))
# create data sets
g1.append({"pos": (alt[row], az[row]), "brush": colors[row]})
g2.append({"pos": (az[row], alt[row]), "brush": colors[row]})
g3.append({"pos": (timestampObj[row], alt[row]), "brush": colors[row]})
g4.append({"pos": (alt[row], fwhm[row]), "brush": colors[row]})
g5.append({"pos": (az[row], fwhm[row]), "brush": colors[row]})
g6.append({"pos": (timestampObj[row], fwhm[row]), "brush": colors[row]})
g7.append({"pos": (alt[row], eccentricity[row]), "brush": colors[row]})
g8.append({"pos": (az[row], eccentricity[row]), "brush": colors[row]})
g9.append(
{"pos": (timestampObj[row], eccentricity[row]), "brush": colors[row]}
)
g10.append({"pos": (alt[row], noise[row]), "brush": colors[row]})
g11.append({"pos": (az[row], noise[row]), "brush": colors[row]})
g12.append({"pos": (timestampObj[row], noise[row]), "brush": colors[row]})
g13.append({"pos": (alt[row], snrweight[row]), "brush": colors[row]})
g14.append({"pos": (az[row], snrweight[row]), "brush": colors[row]})
g15.append(
{"pos": (timestampObj[row], snrweight[row]), "brush": colors[row]}
)
pg.setConfigOption("background", "k")
pg.setConfigOption("foreground", "w")
pg.setConfigOption("antialias", True)
pg.setConfigOptions(imageAxisOrder="row-major")
print(g3)
# Graph1: Alt-Az
self.ui.graphWidget1.setLabel("left", "Az (deg)", color="white", size=30)
self.ui.graphWidget1.setLabel("bottom", "Alt (deg)", color="white", size=30)
self.ui.graphWidget1.showGrid(x=True, y=True)
scatter1 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget1.addItem(scatter1)
scatter1.setData(g1)
self.lrAlt = pg.LinearRegionItem([100, 200])
self.lrAlt.setZValue(-10)
self.ui.graphWidget1.addItem(self.lrAlt)
# Graph2: AZ-Alt
self.ui.graphWidget2.setLabel("left", "Alt (deg)", color="white", size=30)
self.ui.graphWidget2.setLabel("bottom", "Az (deg)", color="white", size=30)
self.ui.graphWidget2.showGrid(x=True, y=True)
scatter2 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget2.addItem(scatter2)
scatter2.setData(g2)
self.lrAz = pg.LinearRegionItem([100, 200])
self.lrAz.setZValue(-10)
self.ui.graphWidget2.addItem(self.lrAz)
# Graph3: Time-Alt
self.ui.graphWidget3.setLabel("left", "Alt (deg)", color="white", size=30)
self.ui.graphWidget3.showGrid(x=True, y=True)
axis3 = DateAxisItem(orientation="bottom")
axis3.attachToPlotItem(self.ui.graphWidget3.getPlotItem())
axis3.setLabel("Time", units="h")
scatter3 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget3.addItem(scatter3)
scatter3.setData(g3)
self.lrDate = pg.LinearRegionItem([min(timestampObj), max(timestampObj)])
self.lrDate.setZValue(-10)
self.ui.graphWidget3.addItem(self.lrDate)
# Graph4: Alt-FWHM
self.ui.graphWidget4.setLabel("bottom", "Alt (deg)", color="white", size=30)
self.ui.graphWidget4.setLabel("left", "FWHM", color="white", size=30)
self.ui.graphWidget4.showGrid(x=True, y=True)
scatter4 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget4.addItem(scatter4)
scatter4.setData(g4)
# Graph5: Az-FWHM
self.ui.graphWidget5.setLabel("bottom", "Az (deg)", color="white", size=30)
self.ui.graphWidget5.setLabel("left", "FWHM", color="white", size=30)
self.ui.graphWidget5.showGrid(x=True, y=True)
scatter5 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget5.addItem(scatter5)
scatter5.setData(g5)
# Graph6: Time-FWHM
self.ui.graphWidget6.setLabel("left", "FWHM", color="white", size=30)
self.ui.graphWidget6.showGrid(x=True, y=True)
axis6 = DateAxisItem(orientation="bottom")
axis6.attachToPlotItem(self.ui.graphWidget6.getPlotItem())
axis6.setLabel("Time", units="h")
scatter6 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget6.addItem(scatter6)
scatter6.setData(g6)
# Graph7: Alt-Eccentricity
self.ui.graphWidget7.setLabel("bottom", "Alt (deg)", color="white", size=30)
self.ui.graphWidget7.setLabel("left", "Eccentricity", color="white", size=30)
self.ui.graphWidget7.showGrid(x=True, y=True)
scatter7 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget7.addItem(scatter7)
scatter7.setData(g7)
# Graph8: Az-Eccentricity
self.ui.graphWidget8.setLabel("bottom", "Az (deg)", color="white", size=30)
self.ui.graphWidget8.setLabel("left", "Eccentricity", color="white", size=30)
self.ui.graphWidget8.showGrid(x=True, y=True)
scatter8 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget8.addItem(scatter8)
scatter8.setData(g8)
# Graph9: Time-Eccentricity
self.ui.graphWidget9.setLabel("left", "Eccentricity", color="white", size=30)
self.ui.graphWidget9.showGrid(x=True, y=True)
axis9 = DateAxisItem(orientation="bottom")
axis9.attachToPlotItem(self.ui.graphWidget9.getPlotItem())
axis9.setLabel("Time", units="h")
scatter9 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget9.addItem(scatter9)
scatter9.setData(g9)
# Graph10: Alt-Noise
self.ui.graphWidget10.setLabel("bottom", "Alt (deg)", color="white", size=30)
self.ui.graphWidget10.setLabel("left", "Noise", color="white", size=30)
self.ui.graphWidget10.showGrid(x=True, y=True)
scatter10 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget10.addItem(scatter10)
scatter10.setData(g10)
# Graph11: Az-Noise
self.ui.graphWidget11.setLabel("bottom", "Az (deg)", color="white", size=30)
self.ui.graphWidget11.setLabel("left", "Noise", color="white", size=30)
self.ui.graphWidget11.showGrid(x=True, y=True)
scatter11 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget11.addItem(scatter11)
scatter11.setData(g11)
# Graph12: Time-Noise
self.ui.graphWidget12.setLabel("left", "Noise", color="white", size=30)
self.ui.graphWidget12.showGrid(x=True, y=True)
axis12 = DateAxisItem(orientation="bottom")
axis12.attachToPlotItem(self.ui.graphWidget12.getPlotItem())
axis12.setLabel("Time", units="h")
scatter12 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget12.addItem(scatter12)
scatter12.setData(g12)
# Graph13: Alt-SNRWeight
self.ui.graphWidget13.setLabel("bottom", "Alt (deg)", color="white", size=30)
self.ui.graphWidget13.setLabel("left", "SNRWeight", color="white", size=30)
self.ui.graphWidget13.showGrid(x=True, y=True)
scatter13 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget13.addItem(scatter13)
scatter13.setData(g13)
# Graph14: Az-SNRWeight
self.ui.graphWidget14.setLabel("bottom", "Az (deg)", color="white", size=30)
self.ui.graphWidget14.setLabel("left", "SNRWeight", color="white", size=30)
self.ui.graphWidget14.showGrid(x=True, y=True)
scatter14 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget14.addItem(scatter14)
scatter14.setData(g14)
# Graph15: Time-SNRWeight
self.ui.graphWidget15.setLabel("left", "SNRWeight", color="white", size=30)
self.ui.graphWidget15.showGrid(x=True, y=True)
axis15 = DateAxisItem(orientation="bottom")
axis15.attachToPlotItem(self.ui.graphWidget15.getPlotItem())
axis15.setLabel("Time", units="h")
scatter15 = pg.ScatterPlotItem(brush=pg.mkBrush(width=5, color="w"), symbol="o")
self.ui.graphWidget15.addItem(scatter15)
scatter15.setData(g15)
self.lrAlt.sigRegionChanged.connect(self.updatePlotAlt)
self.ui.graphWidget4.sigXRangeChanged.connect(self.updateRegionAlt)
self.ui.graphWidget7.sigXRangeChanged.connect(self.updateRegionAlt)
self.ui.graphWidget10.sigXRangeChanged.connect(self.updateRegionAlt)
self.ui.graphWidget13.sigXRangeChanged.connect(self.updateRegionAlt)
self.lrAz.sigRegionChanged.connect(self.updatePlotAz)
self.ui.graphWidget5.sigXRangeChanged.connect(self.updateRegionAz)
self.ui.graphWidget8.sigXRangeChanged.connect(self.updateRegionAz)
self.ui.graphWidget11.sigXRangeChanged.connect(self.updateRegionAz)
self.ui.graphWidget14.sigXRangeChanged.connect(self.updateRegionAz)
self.lrDate.sigRegionChanged.connect(self.updatePlotDate)
self.ui.graphWidget6.sigXRangeChanged.connect(self.updateRegionDate)
self.ui.graphWidget9.sigXRangeChanged.connect(self.updateRegionDate)
self.ui.graphWidget12.sigXRangeChanged.connect(self.updateRegionDate)
self.ui.graphWidget15.sigXRangeChanged.connect(self.updateRegionDate)
def updatePlotAlt(self):
self.ui.graphWidget4.setXRange(*self.lrAlt.getRegion(), padding=0)
self.ui.graphWidget7.setXRange(*self.lrAlt.getRegion(), padding=0)
self.ui.graphWidget10.setXRange(*self.lrAlt.getRegion(), padding=0)
self.ui.graphWidget13.setXRange(*self.lrAlt.getRegion(), padding=0)
def updateRegionAlt(self, region):
self.lrAlt.setRegion(self.ui.graphWidget4.getViewBox().viewRange()[0])
def updatePlotAz(self):
self.ui.graphWidget5.setXRange(*self.lrAz.getRegion(), padding=0)
self.ui.graphWidget8.setXRange(*self.lrAz.getRegion(), padding=0)
self.ui.graphWidget11.setXRange(*self.lrAz.getRegion(), padding=0)
self.ui.graphWidget14.setXRange(*self.lrAz.getRegion(), padding=0)
def updateRegionAz(self, region):
self.lrAz.setRegion(self.ui.graphWidget5.getViewBox().viewRange()[0])
def updatePlotDate(self):
self.ui.graphWidget6.setXRange(*self.lrDate.getRegion(), padding=0)
self.ui.graphWidget9.setXRange(*self.lrDate.getRegion(), padding=0)
self.ui.graphWidget12.setXRange(*self.lrDate.getRegion(), padding=0)
self.ui.graphWidget15.setXRange(*self.lrDate.getRegion(), padding=0)
def updateRegionDate(self):
self.lrDate.setRegion(self.ui.graphWidget6.getViewBox().viewRange()[0])
|
{"hexsha": "7c6be6cf967db9ec28aef0f9a1ff9df2b29f12d7", "size": 15964, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrodom/chartWindow.py", "max_stars_repo_name": "fenriques/AstroDom", "max_stars_repo_head_hexsha": "84b54d3299cf591c39b214248339a201ae8ae6ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-05-17T14:57:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-20T12:29:43.000Z", "max_issues_repo_path": "astrodom/chartWindow.py", "max_issues_repo_name": "fenriques/AstroDom", "max_issues_repo_head_hexsha": "84b54d3299cf591c39b214248339a201ae8ae6ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-04T20:49:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-04T12:35:07.000Z", "max_forks_repo_path": "astrodom/chartWindow.py", "max_forks_repo_name": "fenriques/AstroDom", "max_forks_repo_head_hexsha": "84b54d3299cf591c39b214248339a201ae8ae6ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8571428571, "max_line_length": 88, "alphanum_fraction": 0.6336757705, "include": true, "reason": "import numpy", "num_tokens": 4024}
|
import git
import numpy as np
import os
import argparse
import sys
import json
import torch
from utils.spec_reader import SpecReader
from policy_gradients import models
from policy_gradients.agent import Trainer
from cox.store import Store, schema_from_dict
# Tee object allows for logging to both stdout and to file
class Tee(object):
def __init__(self, file_path, stream_type, mode='a'):
assert stream_type in ['stdout', 'stderr']
self.file = open(file_path, mode)
self.stream_type = stream_type
self.errors = 'chill'
if stream_type == 'stdout':
self.stream = sys.stdout
sys.stdout = self
else:
self.stream = sys.stderr
sys.stderr = self
def write(self, data):
self.file.write(data)
self.stream.write(data)
def flush(self):
self.file.flush()
self.stream.flush()
def main(params):
for k, v in zip(params.keys(), params.values()):
assert v is not None, f"Value for {k} is None"
# #
# Setup logging
# #
metadata_schema = schema_from_dict(params)
base_directory = params['out_dir']
store = Store(base_directory)
# redirect stderr, stdout to file
"""
def make_err_redirector(stream_name):
tee = Tee(os.path.join(store.path, stream_name + '.txt'), stream_name)
return tee
stderr_tee = make_err_redirector('stderr')
stdout_tee = make_err_redirector('stdout')
"""
# Store the experiment path and the git commit for this experiment
metadata_schema.update({
'store_path':str,
'git_commit':str
})
repo = git.Repo(path=os.path.dirname(os.path.realpath(__file__)),
search_parent_directories=True)
metadata_table = store.add_table('metadata', metadata_schema)
metadata_table.update_row(params)
metadata_table.update_row({
'store_path':store.path,
'git_commit':repo.head.object.hexsha
})
metadata_table.flush_row()
# Table for checkpointing models and envs
if params['save_iters'] > 0:
store.add_table('checkpoints', {
'val_model':store.PYTORCH_STATE,
'policy_model':store.PYTORCH_STATE,
'envs':store.PICKLE,
'policy_opt': store.PYTORCH_STATE,
'val_opt': store.PYTORCH_STATE,
'iteration':int
})
# The trainer object is in charge of sampling trajectories and
# taking PPO/TRPO optimization steps
p = Trainer.agent_from_params(params, store=store)
rewards = []
# Table for final results
final_table = store.add_table('final_results', {
'iteration':int,
'5_rewards':float,
'terminated_early':bool
})
def finalize_table(iteration, terminated_early, rewards):
final_5_rewards = np.array(rewards)[-5:].mean()
final_table.append_row({
'iteration':iteration,
'5_rewards':final_5_rewards,
'terminated_early':terminated_early
})
# Try-except so that we save if the user interrupts the process
try:
for i in range(params['train_steps']):
print('Step %d' % (i,))
if params['save_iters'] > 0 and i % params['save_iters'] == 0:
store['checkpoints'].append_row({
'iteration':i,
'val_model': p.val_model.state_dict(),
'policy_model': p.policy_model.state_dict(),
'policy_opt': p.POLICY_ADAM.state_dict(),
'val_opt': p.val_opt.state_dict(),
'envs':p.envs
})
mean_reward = p.train_step()
rewards.append(mean_reward)
finalize_table(i, False, rewards)
except KeyboardInterrupt:
torch.save(p.val_model, 'saved_experts/%s-expert-vf' % (params['game'],))
torch.save(p.policy_model, 'saved_experts/%s-expert-pol' % (params['game'],))
finalize_table(i, True, rewards)
store.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate experiments to be run.')
#Added argument:
parser.add_argument('--default_config_path', type=str, default='specs/train_defaut_config.py')
# Basic setup
# parser.add_argument('--config-path', type=str, required=True,
# help='json for this config')
parser.add_argument('--config-path', type=str, default='BabyAI.json',
help='json for this config')
parser.add_argument('--game', type=str, help='gym game')
parser.add_argument('--mode', type=str, choices=['ppo', 'trpo'],
help='pg alg')
parser.add_argument('--out-dir', type=str,
help='out dir for store + logging')
parser.add_argument('--advanced-logging', type=bool, const=True, nargs='?')
parser.add_argument('--kl-approximation-iters', type=int,
help='how often to do kl approx exps')
parser.add_argument('--log-every', type=int)
parser.add_argument('--policy-net-type', type=str,
choices=models.POLICY_NETS.keys())
parser.add_argument('--value-net-type', type=str,
choices=models.VALUE_NETS.keys())
parser.add_argument('--train-steps', type=int,
help='num agent training steps')
parser.add_argument('--cpu', type=bool, const=True, nargs='?')
# Which value loss to use
parser.add_argument('--value-calc', type=str,
help='which value calculation to use')
parser.add_argument('--initialization', type=str)
# General Policy Gradient parameters
parser.add_argument('--num-actors', type=int, help='num actors (serial)',
choices=[1])
parser.add_argument('--t', type=int,
help='num timesteps to run each actor for')
parser.add_argument('--gamma', type=float, help='discount on reward')
parser.add_argument('--lambda', type=float, help='GAE hyperparameter')
parser.add_argument('--val-lr', type=float, help='value fn learning rate')
parser.add_argument('--val-epochs', type=int, help='value fn epochs')
# PPO parameters
parser.add_argument('--adam-eps', type=float, choices=[0, 1e-5], help='adam eps parameter')
parser.add_argument('--num-minibatches',type=int,
help='num minibatches in ppo per epoch')
parser.add_argument('--ppo-epochs', type=int)
parser.add_argument('--ppo-lr', type=float,
help='if nonzero, use gradient descent w this lr')
parser.add_argument('--ppo-lr-adam', type=float,
help='if nonzero, use adam with this lr')
parser.add_argument('--anneal-lr', type=bool,
help='if we should anneal lr linearly from start to finish')
parser.add_argument('--clip-eps', type=float, help='ppo clipping')
parser.add_argument('--entropy-coeff', type=float,
help='entropy weight hyperparam')
parser.add_argument('--value-clipping', type=bool,
help='should clip values (w/ ppo eps)')
parser.add_argument('--value-multiplier', type=float,
help='coeff for value loss in combined step ppo loss')
parser.add_argument('--share-weights', type=bool,
help='share weights in valnet and polnet')
parser.add_argument('--clip-grad-norm', type=float,
help='gradient norm clipping (-1 for no clipping)')
# TRPO parameters
parser.add_argument('--max-kl', type=float, help='trpo max kl hparam')
parser.add_argument('--max-kl-final', type=float, help='trpo max kl final')
parser.add_argument('--fisher-frac-samples', type=float,
help='frac samples to use in fisher vp estimate')
parser.add_argument('--cg-steps', type=int,
help='num cg steps in fisher vp estimate')
parser.add_argument('--damping', type=float, help='damping to use in cg')
parser.add_argument('--max-backtrack', type=int, help='max bt steps in fvp')
# Normalization parameters
parser.add_argument('--norm-rewards', type=str, help='type of rewards normalization',
choices=['rewards', 'returns', 'none'])
parser.add_argument('--norm-states', type=bool, help='should norm states')
parser.add_argument('--clip-rewards', type=float, help='clip rews eps')
parser.add_argument('--clip-observations', type=float, help='clips obs eps')
# Saving
parser.add_argument('--save-iters', type=int, help='how often to save model (0 = no saving)')
args = parser.parse_args()
args_tmp = vars(args)
SpecReader(args_tmp['default_config_path'])
from utils.spec_reader import spec
spec.set_vals(args_tmp)
# For grid searches only
# parser.add_argument('--cox-experiment-path', type=str, default='')
json_params = json.load(open(args.config_path))
# Override the JSON config with the argparse config
params = vars(args)
json_params.update({k: params[k] for k in params if params[k] is not None})
params = json_params
missing_keys = []
for key in json_params:
if key not in params:
missing_keys.append(key)
assert not missing_keys, "Following keys not in args: " + str(missing_keys)
missing_keys = []
for key in params:
if key not in json_params and key != "config_path":
missing_keys.append(key)
assert not missing_keys, "Following keys not in JSON: " + str(missing_keys)
main(params)
|
{"hexsha": "6a167e0df80b4dcc0003c7c9ed215c4ad80e5683", "size": 9699, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/run.py", "max_stars_repo_name": "tristan-ka/ppo-wmg", "max_stars_repo_head_hexsha": "e26ab78ab77cc6f42cb24e03f71a3315489478f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/run.py", "max_issues_repo_name": "tristan-ka/ppo-wmg", "max_issues_repo_head_hexsha": "e26ab78ab77cc6f42cb24e03f71a3315489478f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/run.py", "max_forks_repo_name": "tristan-ka/ppo-wmg", "max_forks_repo_head_hexsha": "e26ab78ab77cc6f42cb24e03f71a3315489478f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-24T15:58:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-24T15:58:16.000Z", "avg_line_length": 37.7392996109, "max_line_length": 98, "alphanum_fraction": 0.6195484071, "include": true, "reason": "import numpy", "num_tokens": 2149}
|
(* Title: OInvariants.thy
License: BSD 2-Clause. See LICENSE.
Author: Timothy Bourke
*)
section "Open reachability and invariance"
theory OInvariants
imports Invariants
begin
subsection "Open reachability"
text \<open>
By convention, the states of an open automaton are pairs. The first component is considered
to be the global state and the second is the local state.
A state is `open reachable' under @{term S} and @{term U} if it is the initial state, or it
is the destination of a transition---where the global components satisfy @{term S}---from an
open reachable state, or it is the destination of an interleaved environment step where the
global components satisfy @{term U}.
\<close>
inductive_set oreachable
:: "('g \<times> 'l, 'a) automaton
\<Rightarrow> ('g \<Rightarrow> 'g \<Rightarrow> 'a \<Rightarrow> bool)
\<Rightarrow> ('g \<Rightarrow> 'g \<Rightarrow> bool)
\<Rightarrow> ('g \<times> 'l) set"
for A :: "('g \<times> 'l, 'a) automaton"
and S :: "'g \<Rightarrow> 'g \<Rightarrow> 'a \<Rightarrow> bool"
and U :: "'g \<Rightarrow> 'g \<Rightarrow> bool"
where
oreachable_init: "s \<in> init A \<Longrightarrow> s \<in> oreachable A S U"
| oreachable_local: "\<lbrakk> s \<in> oreachable A S U; (s, a, s') \<in> trans A; S (fst s) (fst s') a \<rbrakk>
\<Longrightarrow> s' \<in> oreachable A S U"
| oreachable_other: "\<lbrakk> s \<in> oreachable A S U; U (fst s) \<sigma>' \<rbrakk>
\<Longrightarrow> (\<sigma>', snd s) \<in> oreachable A S U"
lemma oreachable_local' [elim]:
assumes "(\<sigma>, p) \<in> oreachable A S U"
and "((\<sigma>, p), a, (\<sigma>', p')) \<in> trans A"
and "S \<sigma> \<sigma>' a"
shows "(\<sigma>', p') \<in> oreachable A S U"
using assms by (metis fst_conv oreachable.oreachable_local)
lemma oreachable_other' [elim]:
assumes "(\<sigma>, p) \<in> oreachable A S U"
and "U \<sigma> \<sigma>'"
shows "(\<sigma>', p) \<in> oreachable A S U"
proof -
from \<open>U \<sigma> \<sigma>'\<close> have "U (fst (\<sigma>, p)) \<sigma>'" by simp
with \<open>(\<sigma>, p) \<in> oreachable A S U\<close> have "(\<sigma>', snd (\<sigma>, p)) \<in> oreachable A S U"
by (rule oreachable_other)
thus "(\<sigma>', p) \<in> oreachable A S U" by simp
qed
lemma oreachable_pair_induct [consumes, case_names init other local]:
assumes "(\<sigma>, p) \<in> oreachable A S U"
and "\<And>\<sigma> p. (\<sigma>, p) \<in> init A \<Longrightarrow> P \<sigma> p"
and "(\<And>\<sigma> p \<sigma>'. \<lbrakk> (\<sigma>, p) \<in> oreachable A S U; P \<sigma> p; U \<sigma> \<sigma>' \<rbrakk> \<Longrightarrow> P \<sigma>' p)"
and "(\<And>\<sigma> p \<sigma>' p' a. \<lbrakk> (\<sigma>, p) \<in> oreachable A S U; P \<sigma> p;
((\<sigma>, p), a, (\<sigma>', p')) \<in> trans A; S \<sigma> \<sigma>' a \<rbrakk> \<Longrightarrow> P \<sigma>' p')"
shows "P \<sigma> p"
using assms (1) proof (induction "(\<sigma>, p)" arbitrary: \<sigma> p)
fix \<sigma> p
assume "(\<sigma>, p) \<in> init A"
with assms(2) show "P \<sigma> p" .
next
fix s \<sigma>'
assume "s \<in> oreachable A S U"
and "U (fst s) \<sigma>'"
and IH: "\<And>\<sigma> p. s = (\<sigma>, p) \<Longrightarrow> P \<sigma> p"
from this(1) obtain \<sigma> p where "s = (\<sigma>, p)"
and "(\<sigma>, p) \<in> oreachable A S U"
by (metis surjective_pairing)
note this(2)
moreover from IH and \<open>s = (\<sigma>, p)\<close> have "P \<sigma> p" .
moreover from \<open>U (fst s) \<sigma>'\<close> and \<open>s = (\<sigma>, p)\<close> have "U \<sigma> \<sigma>'" by simp
ultimately have "P \<sigma>' p" by (rule assms(3))
with \<open>s = (\<sigma>, p)\<close> show "P \<sigma>' (snd s)" by simp
next
fix s a \<sigma>' p'
assume "s \<in> oreachable A S U"
and tr: "(s, a, (\<sigma>', p')) \<in> trans A"
and "S (fst s) (fst (\<sigma>', p')) a"
and IH: "\<And>\<sigma> p. s = (\<sigma>, p) \<Longrightarrow> P \<sigma> p"
from this(1) obtain \<sigma> p where "s = (\<sigma>, p)"
and "(\<sigma>, p) \<in> oreachable A S U"
by (metis surjective_pairing)
note this(2)
moreover from IH \<open>s = (\<sigma>, p)\<close> have "P \<sigma> p" .
moreover from tr and \<open>s = (\<sigma>, p)\<close> have "((\<sigma>, p), a, (\<sigma>', p')) \<in> trans A" by simp
moreover from \<open>S (fst s) (fst (\<sigma>', p')) a\<close> and \<open>s = (\<sigma>, p)\<close> have "S \<sigma> \<sigma>' a" by simp
ultimately show "P \<sigma>' p'" by (rule assms(4))
qed
lemma oreachable_weakenE [elim]:
assumes "s \<in> oreachable A PS PU"
and PSQS: "\<And>s s' a. PS s s' a \<Longrightarrow> QS s s' a"
and PUQU: "\<And>s s'. PU s s' \<Longrightarrow> QU s s'"
shows "s \<in> oreachable A QS QU"
using assms(1)
proof (induction)
fix s assume "s \<in> init A"
thus "s \<in> oreachable A QS QU" ..
next
fix s a s'
assume "s \<in> oreachable A QS QU"
and "(s, a, s') \<in> trans A"
and "PS (fst s) (fst s') a"
from \<open>PS (fst s) (fst s') a\<close> have "QS (fst s) (fst s') a" by (rule PSQS)
with \<open>s \<in> oreachable A QS QU\<close> and \<open>(s, a, s') \<in> trans A\<close> show "s' \<in> oreachable A QS QU" ..
next
fix s g'
assume "s \<in> oreachable A QS QU"
and "PU (fst s) g'"
from \<open>PU (fst s) g'\<close> have "QU (fst s) g'" by (rule PUQU)
with \<open>s \<in> oreachable A QS QU\<close> show "(g', snd s) \<in> oreachable A QS QU" ..
qed
definition
act :: "('a \<Rightarrow> bool) \<Rightarrow> 's \<Rightarrow> 's \<Rightarrow> 'a \<Rightarrow> bool"
where
"act I \<equiv> (\<lambda>_ _. I)"
lemma act_simp [iff]: "act I s s' a = I a"
unfolding act_def ..
lemma reachable_in_oreachable [elim]:
fixes s
assumes "s \<in> reachable A I"
shows "s \<in> oreachable A (act I) U"
unfolding act_def using assms proof induction
fix s
assume "s \<in> init A"
thus "s \<in> oreachable A (\<lambda>_ _. I) U" ..
next
fix s a s'
assume "s \<in> oreachable A (\<lambda>_ _. I) U"
and "(s, a, s') \<in> trans A"
and "I a"
thus "s' \<in> oreachable A (\<lambda>_ _. I) U"
by (rule oreachable_local)
qed
subsection "Open Invariance"
definition oinvariant
:: "('g \<times> 'l, 'a) automaton
\<Rightarrow> ('g \<Rightarrow> 'g \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('g \<Rightarrow> 'g \<Rightarrow> bool)
\<Rightarrow> (('g \<times> 'l) \<Rightarrow> bool) \<Rightarrow> bool"
("_ \<Turnstile> (1'((1_),/ (1_) \<rightarrow>')/ _)" [100, 0, 0, 9] 8)
where
"(A \<Turnstile> (S, U \<rightarrow>) P) = (\<forall>s\<in>oreachable A S U. P s)"
lemma oinvariantI [intro]:
fixes T TI S U P
assumes init: "\<And>s. s \<in> init A \<Longrightarrow> P s"
and other: "\<And>g g' l.
\<lbrakk> (g, l) \<in> oreachable A S U; P (g, l); U g g' \<rbrakk> \<Longrightarrow> P (g', l)"
and local: "\<And>s a s'.
\<lbrakk> s \<in> oreachable A S U; P s; (s, a, s') \<in> trans A; S (fst s) (fst s') a \<rbrakk> \<Longrightarrow> P s'"
shows "A \<Turnstile> (S, U \<rightarrow>) P"
unfolding oinvariant_def
proof
fix s
assume "s \<in> oreachable A S U"
thus "P s"
proof induction
fix s assume "s \<in> init A"
thus "P s" by (rule init)
next
fix s a s'
assume "s \<in> oreachable A S U"
and "P s"
and "(s, a, s') \<in> trans A"
and "S (fst s) (fst s') a"
thus "P s'" by (rule local)
next
fix s g'
assume "s \<in> oreachable A S U"
and "P s"
and "U (fst s) g'"
thus "P (g', snd s)"
by - (rule other [where g="fst s"], simp_all)
qed
qed
lemma oinvariant_oreachableI:
assumes "\<And>\<sigma> s. (\<sigma>, s)\<in>oreachable A S U \<Longrightarrow> P (\<sigma>, s)"
shows "A \<Turnstile> (S, U \<rightarrow>) P"
using assms unfolding oinvariant_def by auto
lemma oinvariant_pairI [intro]:
assumes init: "\<And>\<sigma> p. (\<sigma>, p) \<in> init A \<Longrightarrow> P (\<sigma>, p)"
and local: "\<And>\<sigma> p \<sigma>' p' a.
\<lbrakk> (\<sigma>, p) \<in> oreachable A S U; P (\<sigma>, p); ((\<sigma>, p), a, (\<sigma>', p')) \<in> trans A;
S \<sigma> \<sigma>' a \<rbrakk> \<Longrightarrow> P (\<sigma>', p')"
and other: "\<And>\<sigma> \<sigma>' p.
\<lbrakk> (\<sigma>, p) \<in> oreachable A S U; P (\<sigma>, p); U \<sigma> \<sigma>' \<rbrakk> \<Longrightarrow> P (\<sigma>', p)"
shows "A \<Turnstile> (S, U \<rightarrow>) P"
by (rule oinvariantI)
(clarsimp | erule init | erule(3) local | erule(2) other)+
lemma oinvariantD [dest]:
assumes "A \<Turnstile> (S, U \<rightarrow>) P"
and "s \<in> oreachable A S U"
shows "P s"
using assms unfolding oinvariant_def
by clarsimp
lemma oinvariant_initD [dest, elim]:
assumes invP: "A \<Turnstile> (S, U \<rightarrow>) P"
and init: "s \<in> init A"
shows "P s"
proof -
from init have "s \<in> oreachable A S U" ..
with invP show ?thesis ..
qed
lemma oinvariant_weakenE [elim!]:
assumes invP: "A \<Turnstile> (PS, PU \<rightarrow>) P"
and PQ: "\<And>s. P s \<Longrightarrow> Q s"
and QSPS: "\<And>s s' a. QS s s' a \<Longrightarrow> PS s s' a"
and QUPU: "\<And>s s'. QU s s' \<Longrightarrow> PU s s'"
shows "A \<Turnstile> (QS, QU \<rightarrow>) Q"
proof
fix s
assume "s \<in> init A"
with invP have "P s" ..
thus "Q s" by (rule PQ)
next
fix \<sigma> p \<sigma>' p' a
assume "(\<sigma>, p) \<in> oreachable A QS QU"
and "((\<sigma>, p), a, (\<sigma>', p')) \<in> trans A"
and "QS \<sigma> \<sigma>' a"
from this(3) have "PS \<sigma> \<sigma>' a" by (rule QSPS)
from \<open>(\<sigma>, p) \<in> oreachable A QS QU\<close> and QSPS QUPU have "(\<sigma>, p) \<in> oreachable A PS PU" ..
hence "(\<sigma>', p') \<in> oreachable A PS PU" using \<open>((\<sigma>, p), a, (\<sigma>', p')) \<in> trans A\<close> and \<open>PS \<sigma> \<sigma>' a\<close> ..
with invP have "P (\<sigma>', p')" ..
thus "Q (\<sigma>', p')" by (rule PQ)
next
fix \<sigma> \<sigma>' p
assume "(\<sigma>, p) \<in> oreachable A QS QU"
and "Q (\<sigma>, p)"
and "QU \<sigma> \<sigma>'"
from \<open>QU \<sigma> \<sigma>'\<close> have "PU \<sigma> \<sigma>'" by (rule QUPU)
from \<open>(\<sigma>, p) \<in> oreachable A QS QU\<close> and QSPS QUPU have "(\<sigma>, p) \<in> oreachable A PS PU" ..
hence "(\<sigma>', p) \<in> oreachable A PS PU" using \<open>PU \<sigma> \<sigma>'\<close> ..
with invP have "P (\<sigma>', p)" ..
thus "Q (\<sigma>', p)" by (rule PQ)
qed
lemma oinvariant_weakenD [dest]:
assumes "A \<Turnstile> (S', U' \<rightarrow>) P"
and "(\<sigma>, p) \<in> oreachable A S U"
and weakenS: "\<And>s s' a. S s s' a \<Longrightarrow> S' s s' a"
and weakenU: "\<And>s s'. U s s' \<Longrightarrow> U' s s'"
shows "P (\<sigma>, p)"
proof -
from \<open>(\<sigma>, p) \<in> oreachable A S U\<close> have "(\<sigma>, p) \<in> oreachable A S' U'"
by (rule oreachable_weakenE)
(erule weakenS, erule weakenU)
with \<open>A \<Turnstile> (S', U' \<rightarrow>) P\<close> show "P (\<sigma>, p)" ..
qed
lemma close_open_invariant:
assumes oinv: "A \<Turnstile> (act I, U \<rightarrow>) P"
shows "A \<TTurnstile> (I \<rightarrow>) P"
proof
fix s
assume "s \<in> init A"
with oinv show "P s" ..
next
fix \<xi> p \<xi>' p' a
assume sr: "(\<xi>, p) \<in> reachable A I"
and step: "((\<xi>, p), a, (\<xi>', p')) \<in> trans A"
and "I a"
hence "(\<xi>', p') \<in> reachable A I" ..
hence "(\<xi>', p') \<in> oreachable A (act I) U" ..
with oinv show "P (\<xi>', p')" ..
qed
definition local_steps :: "((('i \<Rightarrow> 's1) \<times> 'l1) \<times> 'a \<times> ('i \<Rightarrow> 's2) \<times> 'l2) set \<Rightarrow> 'i set \<Rightarrow> bool"
where "local_steps T J \<equiv>
(\<forall>\<sigma> \<zeta> s a \<sigma>' s'. ((\<sigma>, s), a, (\<sigma>', s')) \<in> T \<and> (\<forall>j\<in>J. \<zeta> j = \<sigma> j)
\<longrightarrow> (\<exists>\<zeta>'. (\<forall>j\<in>J. \<zeta>' j = \<sigma>' j) \<and> ((\<zeta>, s), a, (\<zeta>', s')) \<in> T))"
lemma local_stepsI [intro!]:
assumes "\<And>\<sigma> \<zeta> s a \<sigma>' \<zeta>' s'. \<lbrakk> ((\<sigma>, s), a, (\<sigma>', s')) \<in> T; \<forall>j\<in>J. \<zeta> j = \<sigma> j \<rbrakk>
\<Longrightarrow> (\<exists>\<zeta>'. (\<forall>j\<in>J. \<zeta>' j = \<sigma>' j) \<and> ((\<zeta>, s), a, (\<zeta>', s')) \<in> T)"
shows "local_steps T J"
unfolding local_steps_def using assms by clarsimp
lemma local_stepsE [elim, dest]:
assumes "local_steps T J"
and "((\<sigma>, s), a, (\<sigma>', s')) \<in> T"
and "\<forall>j\<in>J. \<zeta> j = \<sigma> j"
shows "\<exists>\<zeta>'. (\<forall>j\<in>J. \<zeta>' j = \<sigma>' j) \<and> ((\<zeta>, s), a, (\<zeta>', s')) \<in> T"
using assms unfolding local_steps_def by blast
definition other_steps :: "(('i \<Rightarrow> 's) \<Rightarrow> ('i \<Rightarrow> 's) \<Rightarrow> bool) \<Rightarrow> 'i set \<Rightarrow> bool"
where "other_steps U J \<equiv> \<forall>\<sigma> \<sigma>'. U \<sigma> \<sigma>' \<longrightarrow> (\<forall>j\<in>J. \<sigma>' j = \<sigma> j)"
lemma other_stepsI [intro!]:
assumes "\<And>\<sigma> \<sigma>' j. \<lbrakk> U \<sigma> \<sigma>'; j \<in> J \<rbrakk> \<Longrightarrow> \<sigma>' j = \<sigma> j"
shows "other_steps U J"
using assms unfolding other_steps_def by simp
lemma other_stepsE [elim]:
assumes "other_steps U J"
and "U \<sigma> \<sigma>'"
shows "\<forall>j\<in>J. \<sigma>' j = \<sigma> j"
using assms unfolding other_steps_def by simp
definition subreachable
where "subreachable A U J \<equiv> \<forall>I. \<forall>s \<in> oreachable A (\<lambda>s s'. I) U.
(\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s) j) \<and> (\<sigma>, snd s) \<in> reachable A I)"
lemma subreachableI [intro]:
assumes "local_steps (trans A) J"
and "other_steps U J"
shows "subreachable A U J"
unfolding subreachable_def
proof (rule, rule)
fix I s
assume "s \<in> oreachable A (\<lambda>s s'. I) U"
thus "(\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s) j) \<and> (\<sigma>, snd s) \<in> reachable A I)"
proof induction
fix s
assume "s \<in> init A"
hence "(fst s, snd s) \<in> reachable A I"
by simp (rule reachable_init)
moreover have "\<forall>j\<in>J. (fst s) j = (fst s) j"
by simp
ultimately show "\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s) j) \<and> (\<sigma>, snd s) \<in> reachable A I"
by auto
next
fix s a s'
assume "\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s) j) \<and> (\<sigma>, snd s) \<in> reachable A I"
and "(s, a, s') \<in> trans A"
and "I a"
then obtain \<zeta> where "\<forall>j\<in>J. \<zeta> j = (fst s) j"
and "(\<zeta>, snd s) \<in> reachable A I" by auto
from \<open>(s, a, s') \<in> trans A\<close> have "((fst s, snd s), a, (fst s', snd s')) \<in> trans A"
by simp
with \<open>local_steps (trans A) J\<close> obtain \<zeta>' where "\<forall>j\<in>J. \<zeta>' j = (fst s') j"
and "((\<zeta>, snd s), a, (\<zeta>', snd s')) \<in> trans A"
using \<open>\<forall>j\<in>J. \<zeta> j = (fst s) j\<close> by - (drule(2) local_stepsE, clarsimp)
from \<open>(\<zeta>, snd s) \<in> reachable A I\<close>
and \<open>((\<zeta>, snd s), a, (\<zeta>', snd s')) \<in> trans A\<close>
and \<open>I a\<close>
have "(\<zeta>', snd s') \<in> reachable A I" ..
with \<open>\<forall>j\<in>J. \<zeta>' j = (fst s') j\<close>
show "\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s') j) \<and> (\<sigma>, snd s') \<in> reachable A I" by auto
next
fix s \<sigma>'
assume "\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s) j) \<and> (\<sigma>, snd s) \<in> reachable A I"
and "U (fst s) \<sigma>'"
then obtain \<sigma> where "\<forall>j\<in>J. \<sigma> j = (fst s) j"
and "(\<sigma>, snd s) \<in> reachable A I" by auto
from \<open>other_steps U J\<close> and \<open>U (fst s) \<sigma>'\<close> have "\<forall>j\<in>J. \<sigma>' j = (fst s) j"
by - (erule(1) other_stepsE)
with \<open>\<forall>j\<in>J. \<sigma> j = (fst s) j\<close> have "\<forall>j\<in>J. \<sigma> j = \<sigma>' j"
by clarsimp
with \<open>(\<sigma>, snd s) \<in> reachable A I\<close>
show "\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = fst (\<sigma>', snd s) j) \<and> (\<sigma>, snd (\<sigma>', snd s)) \<in> reachable A I"
by auto
qed
qed
lemma subreachableE [elim]:
assumes "subreachable A U J"
and "s \<in> oreachable A (\<lambda>s s'. I) U"
shows "\<exists>\<sigma>. (\<forall>j\<in>J. \<sigma> j = (fst s) j) \<and> (\<sigma>, snd s) \<in> reachable A I"
using assms unfolding subreachable_def by simp
lemma subreachableE_pair [elim]:
assumes "subreachable A U J"
and "(\<sigma>, s) \<in> oreachable A (\<lambda>s s'. I) U"
shows "\<exists>\<zeta>. (\<forall>j\<in>J. \<zeta> j = \<sigma> j) \<and> (\<zeta>, s) \<in> reachable A I"
using assms unfolding subreachable_def by (metis fst_conv snd_conv)
lemma subreachable_otherE [elim]:
assumes "subreachable A U J"
and "(\<sigma>, l) \<in> oreachable A (\<lambda>s s'. I) U"
and "U \<sigma> \<sigma>'"
shows "\<exists>\<zeta>'. (\<forall>j\<in>J. \<zeta>' j = \<sigma>' j) \<and> (\<zeta>', l) \<in> reachable A I"
proof -
from \<open>(\<sigma>, l) \<in> oreachable A (\<lambda>s s'. I) U\<close> and \<open>U \<sigma> \<sigma>'\<close>
have "(\<sigma>', l) \<in> oreachable A (\<lambda>s s'. I) U"
by - (rule oreachable_other')
with \<open>subreachable A U J\<close> show ?thesis
by auto
qed
lemma oinvariant_anyact:
assumes "A \<Turnstile> (act TT, U \<rightarrow>) P"
shows "A \<Turnstile> (S, U \<rightarrow>) P"
using assms by rule auto
definition
ostep_invariant
:: "('g \<times> 'l, 'a) automaton
\<Rightarrow> ('g \<Rightarrow> 'g \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('g \<Rightarrow> 'g \<Rightarrow> bool)
\<Rightarrow> (('g \<times> 'l, 'a) transition \<Rightarrow> bool) \<Rightarrow> bool"
("_ \<Turnstile>\<^sub>A (1'((1_),/ (1_) \<rightarrow>')/ _)" [100, 0, 0, 9] 8)
where
"(A \<Turnstile>\<^sub>A (S, U \<rightarrow>) P) =
(\<forall>s\<in>oreachable A S U. (\<forall>a s'. (s, a, s') \<in> trans A \<and> S (fst s) (fst s') a \<longrightarrow> P (s, a, s')))"
lemma ostep_invariant_def':
"(A \<Turnstile>\<^sub>A (S, U \<rightarrow>) P) = (\<forall>s\<in>oreachable A S U.
(\<forall>a s'. (s, a, s') \<in> trans A \<and> S (fst s) (fst s') a \<longrightarrow> P (s, a, s')))"
unfolding ostep_invariant_def by auto
lemma ostep_invariantI [intro]:
assumes *: "\<And>\<sigma> s a \<sigma>' s'. \<lbrakk> (\<sigma>, s)\<in>oreachable A S U; ((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A; S \<sigma> \<sigma>' a \<rbrakk>
\<Longrightarrow> P ((\<sigma>, s), a, (\<sigma>', s'))"
shows "A \<Turnstile>\<^sub>A (S, U \<rightarrow>) P"
unfolding ostep_invariant_def
using assms by auto
lemma ostep_invariantD [dest]:
assumes "A \<Turnstile>\<^sub>A (S, U \<rightarrow>) P"
and "(\<sigma>, s)\<in>oreachable A S U"
and "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A"
and "S \<sigma> \<sigma>' a"
shows "P ((\<sigma>, s), a, (\<sigma>', s'))"
using assms unfolding ostep_invariant_def' by clarsimp
lemma ostep_invariantE [elim]:
assumes "A \<Turnstile>\<^sub>A (S, U \<rightarrow>) P"
and "(\<sigma>, s)\<in>oreachable A S U"
and "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A"
and "S \<sigma> \<sigma>' a"
and "P ((\<sigma>, s), a, (\<sigma>', s')) \<Longrightarrow> Q"
shows "Q"
using assms by auto
lemma ostep_invariant_weakenE [elim!]:
assumes invP: "A \<Turnstile>\<^sub>A (PS, PU \<rightarrow>) P"
and PQ: "\<And>t. P t \<Longrightarrow> Q t"
and QSPS: "\<And>\<sigma> \<sigma>' a. QS \<sigma> \<sigma>' a \<Longrightarrow> PS \<sigma> \<sigma>' a"
and QUPU: "\<And>\<sigma> \<sigma>'. QU \<sigma> \<sigma>' \<Longrightarrow> PU \<sigma> \<sigma>'"
shows "A \<Turnstile>\<^sub>A (QS, QU \<rightarrow>) Q"
proof
fix \<sigma> s \<sigma>' s' a
assume "(\<sigma>, s) \<in> oreachable A QS QU"
and "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A"
and "QS \<sigma> \<sigma>' a"
from \<open>QS \<sigma> \<sigma>' a\<close> have "PS \<sigma> \<sigma>' a" by (rule QSPS)
from \<open>(\<sigma>, s) \<in> oreachable A QS QU\<close> have "(\<sigma>, s) \<in> oreachable A PS PU" using QSPS QUPU ..
with invP have "P ((\<sigma>, s), a, (\<sigma>', s'))" using \<open>((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A\<close> \<open>PS \<sigma> \<sigma>' a\<close> ..
thus "Q ((\<sigma>, s), a, (\<sigma>', s'))" by (rule PQ)
qed
lemma ostep_invariant_weaken_with_invariantE [elim]:
assumes pinv: "A \<Turnstile> (S, U \<rightarrow>) P"
and qinv: "A \<Turnstile>\<^sub>A (S, U \<rightarrow>) Q"
and wr: "\<And>\<sigma> s a \<sigma>' s'. \<lbrakk> P (\<sigma>, s); P (\<sigma>', s'); Q ((\<sigma>, s), a, (\<sigma>', s')); S \<sigma> \<sigma>' a \<rbrakk>
\<Longrightarrow> R ((\<sigma>, s), a, (\<sigma>', s'))"
shows "A \<Turnstile>\<^sub>A (S, U \<rightarrow>) R"
proof
fix \<sigma> s a \<sigma>' s'
assume sr: "(\<sigma>, s) \<in> oreachable A S U"
and tr: "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A"
and "S \<sigma> \<sigma>' a"
hence "(\<sigma>', s') \<in> oreachable A S U" ..
with pinv have "P (\<sigma>', s')" ..
from pinv and sr have "P (\<sigma>, s)" ..
from qinv sr tr \<open>S \<sigma> \<sigma>' a\<close> have "Q ((\<sigma>, s), a, (\<sigma>', s'))" ..
with \<open>P (\<sigma>, s)\<close> and \<open>P (\<sigma>', s')\<close> show "R ((\<sigma>, s), a, (\<sigma>', s'))" using \<open>S \<sigma> \<sigma>' a\<close> by (rule wr)
qed
lemma ostep_to_invariantI:
assumes sinv: "A \<Turnstile>\<^sub>A (S, U \<rightarrow>) Q"
and init: "\<And>\<sigma> s. (\<sigma>, s) \<in> init A \<Longrightarrow> P (\<sigma>, s)"
and local: "\<And>\<sigma> s \<sigma>' s' a.
\<lbrakk> (\<sigma>, s) \<in> oreachable A S U;
P (\<sigma>, s);
Q ((\<sigma>, s), a, (\<sigma>', s'));
S \<sigma> \<sigma>' a \<rbrakk> \<Longrightarrow> P (\<sigma>', s')"
and other: "\<And>\<sigma> \<sigma>' s. \<lbrakk> (\<sigma>, s) \<in> oreachable A S U; U \<sigma> \<sigma>'; P (\<sigma>, s) \<rbrakk> \<Longrightarrow> P (\<sigma>', s)"
shows "A \<Turnstile> (S, U \<rightarrow>) P"
proof
fix \<sigma> s assume "(\<sigma>, s) \<in> init A" thus "P (\<sigma>, s)" by (rule init)
next
fix \<sigma> s \<sigma>' s' a
assume "(\<sigma>, s) \<in> oreachable A S U"
and "P (\<sigma>, s)"
and "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A"
and "S \<sigma> \<sigma>' a"
show "P (\<sigma>', s')"
proof -
from sinv and \<open>(\<sigma>, s)\<in>oreachable A S U\<close> and \<open>((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A\<close> and \<open>S \<sigma> \<sigma>' a\<close>
have "Q ((\<sigma>, s), a, (\<sigma>', s'))" ..
with \<open>(\<sigma>, s)\<in>oreachable A S U\<close> and \<open>P (\<sigma>, s)\<close> show "P (\<sigma>', s')"
using \<open>S \<sigma> \<sigma>' a\<close> by (rule local)
qed
next
fix \<sigma> \<sigma>' l
assume "(\<sigma>, l) \<in> oreachable A S U"
and "U \<sigma> \<sigma>'"
and "P (\<sigma>, l)"
thus "P (\<sigma>', l)" by (rule other)
qed
lemma open_closed_step_invariant:
assumes "A \<TTurnstile>\<^sub>A (I \<rightarrow>) P"
and "local_steps (trans A) J"
and "other_steps U J"
and localp: "\<And>\<sigma> \<zeta> a \<sigma>' \<zeta>' s s'.
\<lbrakk> \<forall>j\<in>J. \<sigma> j = \<zeta> j; \<forall>j\<in>J. \<sigma>' j = \<zeta>' j; P ((\<sigma>, s), a, (\<sigma>', s')) \<rbrakk>
\<Longrightarrow> P ((\<zeta>, s), a, (\<zeta>', s'))"
shows "A \<Turnstile>\<^sub>A (act I, U \<rightarrow>) P"
proof
fix \<sigma> s a \<sigma>' s'
assume or: "(\<sigma>, s) \<in> oreachable A (act I) U"
and tr: "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans A"
and "act I \<sigma> \<sigma>' a"
from \<open>act I \<sigma> \<sigma>' a\<close> have "I a" ..
from \<open>local_steps (trans A) J\<close> and \<open>other_steps U J\<close> have "subreachable A U J" ..
then obtain \<zeta> where "\<forall>j\<in>J. \<zeta> j = \<sigma> j"
and "(\<zeta>, s) \<in> reachable A I"
using or unfolding act_def
by (auto dest!: subreachableE_pair)
from \<open>local_steps (trans A) J\<close> and tr and \<open>\<forall>j\<in>J. \<zeta> j = \<sigma> j\<close>
obtain \<zeta>' where "\<forall>j\<in>J. \<zeta>' j = \<sigma>' j"
and "((\<zeta>, s), a, (\<zeta>', s')) \<in> trans A"
by auto
from \<open>A \<TTurnstile>\<^sub>A (I \<rightarrow>) P\<close> and \<open>(\<zeta>, s) \<in> reachable A I\<close>
and \<open>((\<zeta>, s), a, (\<zeta>', s')) \<in> trans A\<close>
and \<open>I a\<close>
have "P ((\<zeta>, s), a, (\<zeta>', s'))" ..
with \<open>\<forall>j\<in>J. \<zeta> j = \<sigma> j\<close> and \<open>\<forall>j\<in>J. \<zeta>' j = \<sigma>' j\<close> show "P ((\<sigma>, s), a, (\<sigma>', s'))"
by (rule localp)
qed
lemma oinvariant_step_anyact:
assumes "p \<Turnstile>\<^sub>A (act TT, U \<rightarrow>) P"
shows "p \<Turnstile>\<^sub>A (S, U \<rightarrow>) P"
using assms by rule auto
subsection "Standard assumption predicates "
text \<open>otherwith\<close>
definition otherwith :: "('s \<Rightarrow> 's \<Rightarrow> bool)
\<Rightarrow> 'i set
\<Rightarrow> (('i \<Rightarrow> 's) \<Rightarrow> 'a \<Rightarrow> bool)
\<Rightarrow> ('i \<Rightarrow> 's) \<Rightarrow> ('i \<Rightarrow> 's) \<Rightarrow> 'a \<Rightarrow> bool"
where "otherwith Q I P \<sigma> \<sigma>' a \<equiv> (\<forall>i. i\<notin>I \<longrightarrow> Q (\<sigma> i) (\<sigma>' i)) \<and> P \<sigma> a"
lemma otherwithI [intro]:
assumes other: "\<And>j. j\<notin>I \<Longrightarrow> Q (\<sigma> j) (\<sigma>' j)"
and sync: "P \<sigma> a"
shows "otherwith Q I P \<sigma> \<sigma>' a"
unfolding otherwith_def using assms by simp
lemma otherwithE [elim]:
assumes "otherwith Q I P \<sigma> \<sigma>' a"
and "\<lbrakk> P \<sigma> a; \<forall>j. j\<notin>I \<longrightarrow> Q (\<sigma> j) (\<sigma>' j) \<rbrakk> \<Longrightarrow> R \<sigma> \<sigma>' a"
shows "R \<sigma> \<sigma>' a"
using assms unfolding otherwith_def by simp
lemma otherwith_actionD [dest]:
assumes "otherwith Q I P \<sigma> \<sigma>' a"
shows "P \<sigma> a"
using assms by auto
lemma otherwith_syncD [dest]:
assumes "otherwith Q I P \<sigma> \<sigma>' a"
shows "\<forall>j. j\<notin>I \<longrightarrow> Q (\<sigma> j) (\<sigma>' j)"
using assms by auto
lemma otherwithEI [elim]:
assumes "otherwith P I PO \<sigma> \<sigma>' a"
and "\<And>\<sigma> a. PO \<sigma> a \<Longrightarrow> QO \<sigma> a"
shows "otherwith P I QO \<sigma> \<sigma>' a"
using assms(1) unfolding otherwith_def
by (clarsimp elim!: assms(2))
lemma all_but:
assumes "\<And>\<xi>. S \<xi> \<xi>"
and "\<sigma>' i = \<sigma> i"
and "\<forall>j. j \<noteq> i \<longrightarrow> S (\<sigma> j) (\<sigma>' j)"
shows "\<forall>j. S (\<sigma> j) (\<sigma>' j)"
using assms by metis
lemma all_but_eq [dest]:
assumes "\<sigma>' i = \<sigma> i"
and "\<forall>j. j \<noteq> i \<longrightarrow> \<sigma> j = \<sigma>' j"
shows "\<sigma> = \<sigma>'"
using assms by - (rule ext, metis)
text \<open>other\<close>
definition other :: "('s \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> 'i set \<Rightarrow> ('i \<Rightarrow> 's) \<Rightarrow> ('i \<Rightarrow> 's) \<Rightarrow> bool"
where "other P I \<sigma> \<sigma>' \<equiv> \<forall>i. if i\<in>I then \<sigma>' i = \<sigma> i else P (\<sigma> i) (\<sigma>' i)"
lemma otherI [intro]:
assumes local: "\<And>i. i\<in>I \<Longrightarrow> \<sigma>' i = \<sigma> i"
and other: "\<And>j. j\<notin>I \<Longrightarrow> P (\<sigma> j) (\<sigma>' j)"
shows "other P I \<sigma> \<sigma>'"
using assms unfolding other_def by clarsimp
lemma otherE [elim]:
assumes "other P I \<sigma> \<sigma>'"
and "\<lbrakk> \<forall>i\<in>I. \<sigma>' i = \<sigma> i; \<forall>j. j\<notin>I \<longrightarrow> P (\<sigma> j) (\<sigma>' j) \<rbrakk> \<Longrightarrow> R \<sigma> \<sigma>'"
shows "R \<sigma> \<sigma>'"
using assms unfolding other_def by simp
lemma other_localD [dest]:
"other P {i} \<sigma> \<sigma>' \<Longrightarrow> \<sigma>' i = \<sigma> i"
by auto
lemma other_otherD [dest]:
"other P {i} \<sigma> \<sigma>' \<Longrightarrow> \<forall>j. j\<noteq>i \<longrightarrow> P (\<sigma> j) (\<sigma>' j)"
by auto
lemma other_bothE [elim]:
assumes "other P {i} \<sigma> \<sigma>'"
obtains "\<sigma>' i = \<sigma> i" and "\<forall>j. j\<noteq>i \<longrightarrow> P (\<sigma> j) (\<sigma>' j)"
using assms by auto
lemma weaken_local [elim]:
assumes "other P I \<sigma> \<sigma>'"
and PQ: "\<And>\<xi> \<xi>'. P \<xi> \<xi>' \<Longrightarrow> Q \<xi> \<xi>'"
shows "other Q I \<sigma> \<sigma>'"
using assms unfolding other_def by auto
definition global :: "((nat \<Rightarrow> 's) \<Rightarrow> bool) \<Rightarrow> (nat \<Rightarrow> 's) \<times> 'local \<Rightarrow> bool"
where "global P \<equiv> (\<lambda>(\<sigma>, _). P \<sigma>)"
lemma globalsimp [simp]: "global P s = P (fst s)"
unfolding global_def by (simp split: prod.split)
definition globala :: "((nat \<Rightarrow> 's, 'action) transition \<Rightarrow> bool)
\<Rightarrow> ((nat \<Rightarrow> 's) \<times> 'local, 'action) transition \<Rightarrow> bool"
where "globala P \<equiv> (\<lambda>((\<sigma>, _), a, (\<sigma>', _)). P (\<sigma>, a, \<sigma>'))"
lemma globalasimp [simp]: "globala P s = P (fst (fst s), fst (snd s), fst (snd (snd s)))"
unfolding globala_def by (simp split: prod.split)
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/AWN/OInvariants.thy"}
|
#!/usr/bin/env python
import math
import average_vector
from scipy import spatial # Cosine similarity calculation
def getDistanceAverageEpsilonNeighborhoodAndNegative( source_word, eps_plus, eps_minus, model, np ):
"""
Get distance (angle by cosine similarity)
between 1. epsilon-neighborhood of word w (vector v)
and 2. epsilon-neighborhood of vector -v (negative mirror of word w)
Parameters
----------
source_word : String
Source word to be the center of epsilon-neighborhood of similar words.
eps_plus: float
filter to include into the positive neighborhood set only words, where dist(source_word, word) < Eps+, 0.3 too noisy... try 0.45
eps_minus: float
filter to include into the negative neighborhood set only words, where dist(source_word, word) < Eps-, try 0.3
model : object
Word2Vec model.
np : object
numpy library.
Returns
-------
float
Cosine (distance) between average vectors of two sets: positive set near vector v (i.e. word w) and negative set around -v.
0.0, if one of neighborhood sets is empty
"""
# 1. Find epsilon-neighborhood of word w (vector v)
# -> eps(w) = word_1, ... word_n1 (gets model.most_similar == top_n1 similar words, distance from w <= Epsilon)
# 2. Word w -> vector v -> vector -v -> word -w.
# 3. Find epsilon-neighborhood of word -w (vector -v)
# -> eps(-w) = -word_1, ... -word_n2 (gets model.most_similar == top_n similar words, distance from -w <= Epsilon)
# 4. sim( eps(w), eps(-w) ) =
# = model.n_similarity ( word_1, ... word_n1, -word_1, ... -word_n2) = result
# 1. Find epsilon-neighborhood of word w (vector v)
# -> eps(w) = word_1, ... word_n1 (gets model.most_similar == top_n1 similar words, distance from w <= Epsilon)
topn = 1; # 10;
most_similar_words_source = model.most_similar( source_word, [ ], topn)
#most_similar_words = lib.filter_vocab_words.filterVocabWordSimilarity( most_similar_words_source, model.vocab )
#print string_util.joinUtf8( ",", words ) # after filter, now there are only words with vectors
# debug: print similarity (to source_word) and word itself
most_similar_words = []
for sim_w in most_similar_words_source:
word = sim_w[0]
sim = sim_w[1]
if abs(sim) > eps_plus:
most_similar_words.append( sim_w )
# sim( eps(w), eps(-w) ) == 0, if one of neighborhood sets is empty
if 0 == len( most_similar_words ):
return 0.0
# 2. Word w -> vector v -> vector -v -> word -w.
# 3. Find epsilon-neighborhood of word -w (vector -v)
# -> eps(-w) = -word_1, ... -word_n2 (gets model.most_similar == top_n similar words, distance from -w <= Epsilon)
negative_similar_words = []
for positive_word in most_similar_words:
vector = model [ positive_word[0] ]
negative_v = np.negative( vector )
# debug: print huge nn-model vector
#print "vector = model[ word ] = {}".format( vector )
#print
#print "vector = model[ word ] = {}".format( negative_v )
negative_similar_words_source = model.most_similar( [ negative_v ], [], topn)
for sim_w in negative_similar_words_source:
word = sim_w[0]
sim = sim_w[1]
if abs(sim) > eps_minus:
negative_similar_words.append( sim_w )
# sim( eps(w), eps(-w) ) == 0, if one of neighborhood sets is empty
if 0 == len( negative_similar_words ):
return 0.0
# Print section
print
print u"Nearest words to the word: '{}'".format( source_word )
for sim_w in most_similar_words:
word = sim_w[0]
sim = sim_w[1]
print u"{} '{}'".format( sim, word )
print
print u"--- Nearest words to the negative vector (for each word in positive set):"
for sim_w in negative_similar_words:
word = sim_w[0]
sim = sim_w[1]
print u"{} '{}'".format( sim, word )
# 4. sim( eps(w), eps(-w) ) =
# = model.n_similarity ( word_1, ... word_n1, -word_1, ... -word_n2) = result
average_eps_positive = average_vector.getAverageVectorForModelWords( most_similar_words, model, np )
average_eps_negative = average_vector.getAverageVectorForModelWords( negative_similar_words, model, np )
result = 1 - spatial.distance.cosine( average_eps_positive, average_eps_negative )
print
print "Similarity from positive to negative set sim( eps(w), eps(-w) ) = {}".format( result )
print "---------------------------------------------------------------------\n"
return result
def getDistanceToNearestNegative( source_word, model, np, word_syn ):
"""
Get distance (angle by cosine similarity)
between 1. word w (vector v)
and 2. nearest vector (word) to vector -v (negative mirror of word w)
Parameters
----------
source_word : String it should be presented in RusVectores
model : object Word2Vec model
np : object numpy library
Returns
-------
float
Cosine (distance) between vector v (i.e. word w) and a word nearest to the negative vector -v.
"""
# 1. Get the word w (vector v)
# 2. Word w -> vector v -> vector -v -> word -w.
# 3. Find a word which has vector nearest to the vector -v (vector v of word w)
# -> v_near_negative = model.most_similar (top_n = 1, similar words, distance from -w <= Epsilon)
# 4. result = sim( v, v_near_negative )
v_word = model [ source_word.lower()] # +"_NOUN"
# debug: print huge nn-model vector
#print "vector = model[ word ] = {}".format( vector )
#print
#print "vector = model[ word ] = {}".format( negative_v )
# 2. Word w -> vector v -> vector -v -> word -w.
negative_v = np.negative( v_word )
# 3. Find a word which has vector nearest to the vector -v (vector v of word w)
# -> v_near_negative = model.most_similar (top_n = 1, similar words, distance from -w <= Epsilon)
# negative_similar_words = []
topn = 1
negative_similar_words = model.most_similar( [ negative_v ], [], topn)
# sim( v, similar( negative_v )) == 0, if one of neighborhood sets is empty
if 0 == len( negative_similar_words ):
return 0.0
# therefore len > 0
negative_nearest_word = negative_similar_words[0][0]
sim = negative_similar_words[0][1]
if negative_nearest_word not in word_syn: # if negative_nearest_word is normal word, then it is presented in synonyms of Russian Wiktionary
return 0.0
negative_nearest_v = model [ negative_nearest_word ]
#result = 1 - spatial.distance.cosine( v_word, negative_nearest_v )
gr1 = [source_word] * 1
gr2 = [negative_nearest_word] * 1
result = model.n_similarity(gr1, gr2 )
# Print section
#print u"Nearest({})=({}), sim(-v, -near word) = {}, sim(v, -near) = {}".format( source_word, negative_nearest_word, sim, result )
#print u"v near sim(-v, near) sim(v, near)" # .format( source_word, negative_nearest_word, sim, result )
print u"{} {} {} {}".format( source_word, negative_nearest_word, sim, result )
return result
|
{"hexsha": "01e82042c5ee93417c5e64979c5ae290c413a8a8", "size": 7467, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lib/epsilon_neighborhood.py", "max_stars_repo_name": "componavt/wcorpus.py", "max_stars_repo_head_hexsha": "4433c8de62ffb8e3c9cec6eb8a23dd64e5349700", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib/epsilon_neighborhood.py", "max_issues_repo_name": "componavt/wcorpus.py", "max_issues_repo_head_hexsha": "4433c8de62ffb8e3c9cec6eb8a23dd64e5349700", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/epsilon_neighborhood.py", "max_forks_repo_name": "componavt/wcorpus.py", "max_forks_repo_head_hexsha": "4433c8de62ffb8e3c9cec6eb8a23dd64e5349700", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5079365079, "max_line_length": 145, "alphanum_fraction": 0.6109548681, "include": true, "reason": "from scipy", "num_tokens": 2010}
|
import random
import cv2
import numpy as np
import math
def ellipse_bbox(h, k, a, b, theta):
ux = a * math.cos(theta)
uy = a * math.sin(theta)
vx = b * math.cos(theta + math.pi / 2)
vy = b * math.sin(theta + math.pi / 2)
box_halfwidth = np.ceil(math.sqrt(ux ** 2 + vx ** 2))
box_halfheight = np.ceil(math.sqrt(uy ** 2 + vy ** 2))
return (int(h + box_halfwidth), int(k + box_halfheight))
# Rotated elliptical gradient - faster, vectorized numpy approach
def make_gradient_v2(width, height, h, k, a, b, theta):
# Precalculate constants
st, ct = math.sin(theta), math.cos(theta)
aa, bb = a ** 2, b ** 2
# Generate (x,y) coordinate arrays
y, x = np.mgrid[-k:height - k, -h:width - h]
# Calculate the weight for each pixel
weights = (((x * ct + y * st) ** 2) / aa) + (((x * st - y * ct) ** 2) / bb)
return np.clip(1.0 - weights, 0, 1)
def draw_snow(a, b, theta, channels):
# Calculate the image size needed to draw this and center the ellipse
(h, k) = ellipse_bbox(0, 0, a, b, theta) # Ellipse center
width, height = (h * 2, k * 2) # Canvas size
# Generate the gradient and scale it to 8bit grayscale range
intensity = np.uint8(make_gradient_v2(width, height, h, k, a, b, theta) * 255 * (0.2 + random.random() * 0.8))
# Turn it into a BGRA image
result = cv2.merge([intensity] * channels)
return result
def add_snow(img, pixels_per_snow_max=900, pixels_per_snow_min=700, max_size=3, min_size=1):
# reading the image
image = cv2.imread(img)
# resizing the image according to our need resize() function takes 2 parameters,
# the image and the dimensions
# Extracting the height and width of an image
rows, cols, channels = image.shape
area = rows * cols
snows = area // random.randint(pixels_per_snow_min, pixels_per_snow_max)
output = image.copy()
for _ in range(snows):
a, b = (random.randint(min_size, max_size), random.randint(min_size, max_size)) # Semi-major and semi-minor axis
theta = math.radians(90.0 * random.random()) # Ellipse rotation (radians)
snow = draw_snow(a, b, theta, channels)
# displaying the orignal image
x, y = random.randint(0, cols - snow.shape[1]), random.randint(0, rows - snow.shape[0])
w = np.ones_like(snow)
weighted_square = output[y:y + snow.shape[0], x:x + snow.shape[1], :] * (w - snow/255)
output[y:y + snow.shape[0], x:x + snow.shape[1], :] = np.add(weighted_square.astype(np.uint8), snow, dtype=np.uint8)
print(np.max(image))
# displaying the orignal image
cv2.imshow('Original', image)
# displaying the vignette filter image
cv2.imshow('VIGNETTE', output)
cv2.waitKey(0)
if __name__ == "__main__":
add_snow('0.png')
add_snow('0.png')
add_snow('0.png')
add_snow('0.png')
add_snow('1.png')
add_snow('1.png')
add_snow('1.png')
add_snow('1.png')
add_snow('2.png')
add_snow('2.png')
add_snow('2.png')
add_snow('2.png')
add_snow('3.png')
add_snow('3.png')
add_snow('3.png')
add_snow('3.png')
|
{"hexsha": "4e42172ee1b68c5e0b9d4f450dabf11fa2c6f4d1", "size": 3131, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/snow.py", "max_stars_repo_name": "LeikvollE/pytorch-superpoint", "max_stars_repo_head_hexsha": "52144a760e0cc46259e57397a5a55f0585fe6d0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/snow.py", "max_issues_repo_name": "LeikvollE/pytorch-superpoint", "max_issues_repo_head_hexsha": "52144a760e0cc46259e57397a5a55f0585fe6d0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/snow.py", "max_forks_repo_name": "LeikvollE/pytorch-superpoint", "max_forks_repo_head_hexsha": "52144a760e0cc46259e57397a5a55f0585fe6d0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2783505155, "max_line_length": 124, "alphanum_fraction": 0.6237623762, "include": true, "reason": "import numpy", "num_tokens": 957}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/5 10:42 AM
# @Author : Ject.Y
# @Site :
# @File : create_h5_cls.py
# @Software: PyCharm
# @Copyright: BSD2.0
# @Function : Genarate HDF5 data for Face classification
# How to run : run
import h5py, os
import numpy as np
import random
import cv2
# ============================= To be configure ==================================
POS_TXT = 'pos.txt' # postive label file path
NEG_TXT = 'neg.txt' # negative file path
HDF5_SAVE_PATH = 'HDF5' # path to save HDF5 files
SIZE = 24 # fixed size to all images
image_base_path = '' # image base path, image path = image base path + label path
counts = 2 #the number of training data files
counts2 = 2 #the number of validation data files
#==================================================================================
# read the pos txt
with open(POS_TXT, 'r') as T:
pos_lines = T.readlines()
# read neg txt
with open(NEG_TXT, 'r') as T_:
neg_lines = T_.readlines()
lines = []
pos_count = len(pos_lines)
neg_count = len(neg_lines)
if neg_count > (pos_count * 3):
neg_count = pos_count * 3
neg_lines = neg_lines[:neg_count]
lines.extend(pos_lines)
lines.extend(neg_lines)
# get the train and validate data size
random.shuffle(lines)
random.shuffle(lines)
random.shuffle(lines)
random.shuffle(lines)
random.shuffle(lines)
random.shuffle(lines)
# cout the training and validaition data
train_data = lines[:int((len(lines) * 0.8))]
val_data = lines[len(train_data):]
f_train = open('train_cls_h5.txt','w')
f_val = open('val_cls_h5.txt','w')
signCout = int(len(train_data) / float(counts))
numCount = 0
while numCount < counts:
startCount = numCount * signCout
endCount = startCount + signCout
if numCount == (counts - 1):
endCount = len(train_data)
train_datas = train_data[startCount:endCount]
train_X = np.zeros((len(train_datas), 3, SIZE, SIZE), dtype='f4')
train_y = np.zeros((len(train_datas), 1), dtype='f4')
for i, l in enumerate(train_datas):
if i % 1000 == 0:
print "Processing %dth image of training dataset %d" % ((i + 1), len(train_datas))
sp = l.strip().split(' ')
img = cv2.imread(
image_base_path + sp[0])
img = cv2.resize(img,(SIZE,SIZE))
img = (img - 127.5) / 127.5
transposed_img = img.transpose((2, 0, 1)) # RGB->BGR
train_X[i] = transposed_img
train_y[i] = float(sp[1])
print "Generate the training HDF5 images Dataset..."
with h5py.File(HDF5_SAVE_PATH + '/train_cls%d.h5'%numCount, 'w') as H:
H.create_dataset('X', data=train_X) # note the name X given to the dataset!
H.create_dataset('y', data=train_y) # note the name y given to the dataset!
f_train.write(HDF5_SAVE_PATH + '/train_cls%d.h5\n'%numCount)
numCount += 1
signCout = int(len(val_data) / float(counts2))
numCount = 0
while numCount < counts2:
startCount = numCount * signCout
endCount = startCount + signCout
if numCount == (counts2 - 1):
endCount = len(val_data)
val_datas = val_data[startCount:endCount]
val_X = np.zeros((len(val_datas), 3, SIZE, SIZE), dtype='f4')
val_y = np.zeros((len(val_datas), 1), dtype='f4')
for i, l in enumerate(val_datas):
if i % 1000 == 0:
print "Processing %dth image of validata dataset %d" % ((i + 1), len(val_datas))
sp = l.strip().split(' ')
img = cv2.imread(
image_base_path + sp[0])
img = cv2.resize(img, (SIZE, SIZE))
img = (img - 127.5) / 127.5
transposed_img = img.transpose((2, 0, 1)) # RGB->BGR
val_X[i] = transposed_img
val_y[i] = float(sp[1])
print "Generate the validation HDF5 images Dataset..."
with h5py.File(HDF5_SAVE_PATH + '/validation_cls%d.h5' % numCount, 'w') as H:
H.create_dataset('X', data=val_X) # note the name X given to the dataset!
H.create_dataset('y', data=val_y) # note the name y given to the dataset!
f_val.write(HDF5_SAVE_PATH + '/validation_cls%d.h5\n' % numCount)
numCount += 1
print 'Done'
quit()
|
{"hexsha": "31c5fd6fb2b0eeb00d9efaf7fa3d3a8cf33c7abf", "size": 4148, "ext": "py", "lang": "Python", "max_stars_repo_path": "train/Fstage/create_h5_cls.py", "max_stars_repo_name": "yungs2017/CNNFaceDetection", "max_stars_repo_head_hexsha": "38aa7d052beb8993bf3b5282d03d68ab8cca4439", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train/Fstage/create_h5_cls.py", "max_issues_repo_name": "yungs2017/CNNFaceDetection", "max_issues_repo_head_hexsha": "38aa7d052beb8993bf3b5282d03d68ab8cca4439", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train/Fstage/create_h5_cls.py", "max_forks_repo_name": "yungs2017/CNNFaceDetection", "max_forks_repo_head_hexsha": "38aa7d052beb8993bf3b5282d03d68ab8cca4439", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.40625, "max_line_length": 95, "alphanum_fraction": 0.6166827387, "include": true, "reason": "import numpy", "num_tokens": 1206}
|
function compute_derivatives(obj::ComplexOrdinaryDifferentialEquation, arg0::jdouble, arg1::Vector{JComplex})
return jcall(obj, "computeDerivatives", Vector{JComplex}, (jdouble, Vector{JComplex}), arg0, arg1)
end
function get_dimension(obj::ComplexOrdinaryDifferentialEquation)
return jcall(obj, "getDimension", jint, ())
end
function init(obj::ComplexOrdinaryDifferentialEquation, arg0::jdouble, arg1::Vector{JComplex}, arg2::jdouble)
return jcall(obj, "init", void, (jdouble, Vector{JComplex}, jdouble), arg0, arg1, arg2)
end
|
{"hexsha": "56a644edd3b97cff15305b767c97193058a71ca4", "size": 543, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gen/HipparchusWrapper/OdeWrapper/complex_ordinary_differential_equation.jl", "max_stars_repo_name": "JuliaAstrodynamics/Orekit.jl", "max_stars_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-07T12:26:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T16:02:35.000Z", "max_issues_repo_path": "gen/HipparchusWrapper/OdeWrapper/complex_ordinary_differential_equation.jl", "max_issues_repo_name": "JuliaSpace/Orekit.jl", "max_issues_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-05T10:16:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T05:17:19.000Z", "max_forks_repo_path": "gen/HipparchusWrapper/OdeWrapper/complex_ordinary_differential_equation.jl", "max_forks_repo_name": "JuliaSpace/Orekit.jl", "max_forks_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7692307692, "max_line_length": 109, "alphanum_fraction": 0.7716390424, "num_tokens": 156}
|
# Splitting the data
# December 22nd 2020
'''This script splits the data.
Usage: split_data.py --clean_train_path=<clean_train_path>
Options:
--clean_train_path=<clean_train_path> : Relative file path for the cleaned train csv
'''
import numpy as np
import pandas as pd
from docopt import docopt
from sklearn.dummy import DummyRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# parse/define command line arguments here
opt = docopt(__doc__)
def main(clean_train_path):
df = pd.read_csv(clean_train_path)
X = df.drop(columns = "SalePrice")
y = df[["SalePrice"]]
# split data with a random state to ensure reproducibility
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=10)
X_train.to_csv("data/X_train.csv", index = False)
X_valid.to_csv("data/X_valid.csv", index = False)
y_train.to_csv("data/y_train.csv", index = False)
y_valid.to_csv("data/y_valid.csv", index = False)
# call main function
if __name__ == "__main__":
main(opt["--clean_train_path"])
|
{"hexsha": "204ce36f09338e5045f7f6c94221e81a558c78e0", "size": 1118, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/split_data.py", "max_stars_repo_name": "nphaterp/house_price_prediction", "max_stars_repo_head_hexsha": "376c9a052e5de552e431210ead548d0a5b238377", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/split_data.py", "max_issues_repo_name": "nphaterp/house_price_prediction", "max_issues_repo_head_hexsha": "376c9a052e5de552e431210ead548d0a5b238377", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-12-21T22:16:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-07T02:58:05.000Z", "max_forks_repo_path": "src/split_data.py", "max_forks_repo_name": "nphaterp/house_price_prediction", "max_forks_repo_head_hexsha": "376c9a052e5de552e431210ead548d0a5b238377", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-21T17:05:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T17:05:04.000Z", "avg_line_length": 23.7872340426, "max_line_length": 95, "alphanum_fraction": 0.7334525939, "include": true, "reason": "import numpy", "num_tokens": 287}
|
From iris.proofmode Require Import proofmode.
From iris.program_logic Require Import weakestpre adequacy lifting.
From stdpp Require Import base.
From cap_machine Require Export logrel.
From cap_machine.ftlr Require Import ftlr_base.
From cap_machine.rules Require Import rules_Jmp.
Section fundamental.
Context {Σ:gFunctors} {memg:memG Σ} {regg:regG Σ} {sealsg: sealStoreG Σ}
{nainv: logrel_na_invs Σ}
`{MachineParameters}.
Notation D := ((leibnizO Word) -n> iPropO Σ).
Notation R := ((leibnizO Reg) -n> iPropO Σ).
Implicit Types w : (leibnizO Word).
Implicit Types interp : (D).
Lemma jmp_case (r : leibnizO Reg) (p : Perm)
(b e a : Addr) (w : Word) (r0 : RegName) (P : D):
ftlr_instr r p b e a w (Jmp r0) P.
Proof.
intros Hp Hsome i Hbae Hi.
iIntros "#IH #Hinv #Hinva #Hreg #Hread Hown Ha HP Hcls HPC Hmap".
rewrite delete_insert_delete.
destruct (reg_eq_dec PC r0).
* subst r0.
iApply (wp_jmp_successPC with "[HPC Ha]"); eauto; first iFrame.
iNext. iIntros "[HPC Ha] /=".
(* reconstruct invariant *)
iMod ("Hcls" with "[Ha HP]") as "_";[iExists w;iFrame|].
iModIntro.
iApply wp_pure_step_later; auto.
(* reconstruct registers *)
iNext.
iDestruct ((big_sepM_delete _ _ PC) with "[HPC Hmap]") as "Hmap /=";
[apply lookup_insert|rewrite delete_insert_delete;iFrame|]. simpl.
(* apply IH *)
iIntros "_".
iApply ("IH" $! _ _ b e a with "[] [] [Hmap] [$Hown]"); eauto.
{ iPureIntro. apply Hsome. }
destruct Hp as [-> | ->]; iFrame.
* specialize Hsome with r0 as Hr0.
destruct Hr0 as [wsrc Hsomesrc].
iDestruct ((big_sepM_delete _ _ r0) with "Hmap") as "[Hsrc Hmap]"; eauto.
rewrite (lookup_delete_ne r PC r0); eauto.
iApply (wp_jmp_success with "[$HPC $Ha $Hsrc]"); eauto.
iNext. iIntros "[HPC [Ha Hsrc]] /=".
iApply wp_pure_step_later; auto.
(* reconstruct regions *)
iDestruct ((big_sepM_delete _ _ r0) with "[Hsrc Hmap]") as "Hmap /=";
[apply lookup_insert|rewrite delete_insert_delete;iFrame|]. simpl.
rewrite -delete_insert_ne // insert_id; auto.
iMod ("Hcls" with "[HP Ha]");[iExists w;iFrame|iModIntro].
(* Needed because IH disallows non-capability values *)
destruct wsrc as [ | [p' b' e' a' | ] | ]; cycle 1.
{
rewrite /updatePcPerm.
(* Special case for E-values*)
destruct (decide (p' = E)) as [-> | HneE].
-
unshelve iDestruct ("Hreg" $! r0 _ _ Hsomesrc) as "HPCv"; auto.
iClear "Hinv".
rewrite fixpoint_interp1_eq; simpl.
iDestruct (big_sepM_insert _ _ PC with "[$Hmap $HPC]") as "Hmap"; [apply lookup_delete|]. rewrite insert_delete_insert; auto.
iDestruct ("HPCv" with "[$Hmap $Hown]") as "Hcont"; auto.
- iAssert (PC ↦ᵣ WCap p' b' e' a')%I with "[HPC]" as "HPC".
{ destruct p'; auto. congruence. }
iDestruct (big_sepM_insert _ _ PC with "[$Hmap $HPC]") as "Hmap"; [apply lookup_delete|]. rewrite insert_delete_insert; auto.
iNext; iIntros "_".
iApply ("IH" $! (<[PC:=WCap p' b' e' a']> r) with "[%] [] [Hmap] [$Hown]").
{ cbn. intros. by repeat (rewrite lookup_insert_is_Some'; right). }
{ iIntros (ri v Hri Hvs).
rewrite lookup_insert_ne in Hvs; auto.
destruct (decide (ri = r0)).
{ subst ri.
rewrite Hsomesrc in Hvs; inversion Hvs; subst; clear Hvs.
unshelve iSpecialize ("Hreg" $! r0 _ _ Hsomesrc); eauto. }
{ repeat (rewrite lookup_insert_ne in Hvs); auto.
iApply "Hreg"; auto. } }
{ rewrite insert_insert. iApply "Hmap". }
iModIntro.
unshelve iSpecialize ("Hreg" $! r0 _ _ Hsomesrc); eauto.
}
(* Non-capability cases *)
all: iNext; iIntros "_".
all: rewrite /updatePcPerm; iApply (wp_bind (fill [SeqCtx]));
iApply (wp_notCorrectPC with "HPC"); [intros HFalse; inversion HFalse| ].
all: repeat iNext; iIntros "HPC /=".
all: iApply wp_pure_step_later; auto.
all: iNext; iIntros "_".
all: iApply wp_value.
all: iIntros; discriminate.
Qed.
End fundamental.
|
{"author": "logsem", "repo": "cerise", "sha": "a578f42e55e6beafdcdde27b533db6eaaef32920", "save_path": "github-repos/coq/logsem-cerise", "path": "github-repos/coq/logsem-cerise/cerise-a578f42e55e6beafdcdde27b533db6eaaef32920/theories/ftlr/Jmp.v"}
|
import gym
import numpy as np
import matplotlib.pyplot as plt
def value_iteration():
V_states = np.zeros(n_states) # init values as zero
theta = 1e-8
gamma = 0.8
# TODO: implement the value iteration algorithm and return the policy
# Hint: env.P[state][action] gives you tuples (p, n_state, r, is_terminal), which tell you the probability p that you end up in the next state n_state and receive reward r
iterations = 0
policy = np.zeros(n_states, dtype=np.int)
while True:
delta = 0.0
iterations += 1
for state in range(n_states):
v = V_states[state]
max_action_val = -9999
for action in range(n_actions):
summation = 0.0
for p, n_state, r, is_terminal in env.P[state][action]:
summation += p * (r + gamma* V_states[n_state])
if summation > max_action_val:
max_action_val = summation
policy[state] = action
V_states[state] = max_action_val
delta = max(delta, abs(v-V_states[state]))
if delta < theta:
break
print("steps to converge", iterations)
print("optimal value function",V_states)
## computing optimal policy
return V_states
def choose_abs_greedy_action(state, Q, epsilon):
action = None
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(env.action_space.n)
else:
result = np.where(Q[state,:] == np.amax(Q[state,:]))
#m = max(Q[state,:])
#max_indices = [i for i, j in enumerate(Q[state,:]) if j == m]
action = np.random.choice(result[0])
return action
def nstep_sarsa(env, n=1, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
""" TODO: implement the n-step sarsa algorithm """
Q = np.zeros((env.observation_space.n, env.action_space.n))
# TODO: implement the sarsa algorithm
# This is some starting point performing random walks in the environment:
for i in range(num_ep):
s = env.reset()
done = False
t = 0
T = np.inf
a = choose_abs_greedy_action(s, Q, epsilon)
actions = [a]
states = [s]
rewards = [0]
while True:
if t < T:
s_, r, done, _ = env.step(a)
states.append(s_)
rewards.append(r)
if done:
T = t + 1
else:
a = choose_abs_greedy_action(s_, Q, epsilon)
actions.append(a)
# tau -which timestamp to update if t=5 than tau=2 nd time stamp to be updated
tau = t - n + 1
if tau >= 0:
G = 0
for i in range(tau + 1, min(tau + n + 1, T + 1)):
G += np.power(gamma, i - tau - 1) * rewards[i]
if tau + n < T:
state_action = (states[tau + n], actions[tau + n])
G += np.power(gamma, n) * Q[state_action[0]][state_action[1]]
state_action = (states[tau], actions[tau])
Q[state_action[0]][state_action[1]] += alpha * (G - Q[state_action[0]][state_action[1]])
if tau == T - 1:
break
t += 1
return Q
pass
env=gym.make('FrozenLake-v0', map_name="8x8")
n_states = env.observation_space.n
n_actions = env.action_space.n
# getting actual state values from dp
actual_state_values = value_iteration()
#print(actual_state_values)
# TODO: run multiple times, evaluate the performance for different n and alpha
#Q = nstep_sarsa(env)
#print("####")
#print(Q)
alpha_range = np.linspace(0, 1, 6)
n_range = np.power(2, range(10))
sq_errors = {}
for n in n_range:
ers = []
for alpha in alpha_range:
print("running estimation for alpha={} and n={}".format(alpha, n))
current_Q = nstep_sarsa(env, n=n, alpha=alpha)
print("*****")
#print(current_Q)
#estimate_state_values = [np.mean(list(v.values())) for v in current_Q.values()]
estimate_state_values = [np.mean(v) for v in current_Q]
ers.append(np.mean([er ** 2 for er in actual_state_values - np.array(estimate_state_values)]))
sq_errors[n] = ers
plt.figure(figsize=[10, 6])
for n in n_range:
plt.plot(alpha_range, sq_errors[n], label="n={}".format(n))
plt.xlabel('learning rate')
plt.ylabel('RMS error')
plt.legend()
plt.show()
|
{"hexsha": "3ee52a30e4f470b92f22d246f47b05171ac778ac", "size": 4143, "ext": "py", "lang": "Python", "max_stars_repo_path": "ex06-nstep/ex06-nstep.py", "max_stars_repo_name": "vijaykumarprabhu/rl-course", "max_stars_repo_head_hexsha": "cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ex06-nstep/ex06-nstep.py", "max_issues_repo_name": "vijaykumarprabhu/rl-course", "max_issues_repo_head_hexsha": "cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex06-nstep/ex06-nstep.py", "max_forks_repo_name": "vijaykumarprabhu/rl-course", "max_forks_repo_head_hexsha": "cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-26T20:11:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-26T20:11:21.000Z", "avg_line_length": 29.176056338, "max_line_length": 175, "alphanum_fraction": 0.6150132754, "include": true, "reason": "import numpy", "num_tokens": 1158}
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import sys
i = sys.argv[1]
uf = sys.argv[2]
size = sys.argv[3]
page = sys.argv[4]
latency_path = "./log/latency_" + str(i) + "_" + str(uf) + "k_" + str(size) + "_" + str(page) + "_0.log"
prepare_path = "./log/" + str(i) + "_" + str(uf) + "k_" + str(size) + "_" + str(page) + "_overhead.log"
latency_file = open(latency_path)
prepare_file = open(prepare_path)
latencys = []
prepares = []
overheads = []
for eachline in latency_file.readlines():
latencys.append(float(eachline))
for el in prepare_file.readlines():
el.replace(' ', '')
prepare, overhead, total = el.split("\t")
prepares.append(float(prepare))
overheads.append(float(overhead))
mean1 = (np.sum(latencys) - np.sum(prepares) / 1000.0) / (len(latencys) - len(prepares))
mean2 = np.mean(prepares)
mean3 = np.mean(overheads)
str2 = "%d\t%d\t%d" % (mean1, mean2, mean3)
print(str2)
|
{"hexsha": "92853417f679d78bfa38c1ee138b4f26f189512c", "size": 999, "ext": "py", "lang": "Python", "max_stars_repo_path": "cmake-build-debug/result.py", "max_stars_repo_name": "bombework/FrequentSnapshot", "max_stars_repo_head_hexsha": "cd1266a2c7dbc44b0ab38637d1704b54175da895", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-28T05:25:57.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-28T05:25:57.000Z", "max_issues_repo_path": "cmake-build-debug/result.py", "max_issues_repo_name": "bombe-org/FrequentSnapshot", "max_issues_repo_head_hexsha": "cd1266a2c7dbc44b0ab38637d1704b54175da895", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmake-build-debug/result.py", "max_forks_repo_name": "bombe-org/FrequentSnapshot", "max_forks_repo_head_hexsha": "cd1266a2c7dbc44b0ab38637d1704b54175da895", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-12T04:37:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-12T04:37:16.000Z", "avg_line_length": 27.0, "max_line_length": 105, "alphanum_fraction": 0.6156156156, "include": true, "reason": "import numpy", "num_tokens": 303}
|
import pandas as pd
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
# import seaborn as sns
from tqdm import tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
# nltk.download('averaged_perceptron_tagger')
import spacy
import math
import string
import sys
import random
import pickle
from collections import Counter
from itertools import chain
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
from sklearn.metrics.pairwise import cosine_similarity
from numba import jit, cuda
words=pd.read_csv("./words.csv")
snlitrain_l_list = []
pandas_json_attempt = []
with open('./snli_1.0_test.jsonl') as snlitrain_file_pointer:
for item in snlitrain_file_pointer:
snlitrain_l_list.append(item)
data = []
for item in snlitrain_l_list:
data.append(json.loads(item))
df_snlitrain = pd.DataFrame.from_dict(data)
nlp = spacy.load("en_trf_bertbaseuncased_lg")
@jit
def func():
x=[]
for l in tqdm(range(len(data))):
f=0
token_l=nlp(data[l])
for m in tqdm(range(len(data))):
if(m!=l):
token_m=nlp(data[m])
slm=token_l.similarity(token_m)
diff=SIML-slm
if(diff<=0):
f=f+1
x.append(f)
t = pd.Series(x)
tf1=t.var()
return tf1
@jit
def amaxelements(list1, N):
sa=0
for i in range(0, N):
max1 = 0
for j in range(len(list1)):
if list1[j] > max1:
max1 = list1[j];
list1.remove(max1);
sa=sa+max1
return sa
@jit
def func2():
a=3
sl=0
for l in tqdm(range(len(data))):
x=[]
token_l=nlp(data[l])
for m in tqdm(range(len(data))):
if(m!=l):
token_m=nlp(data[m])
slm=token_l.similarity(token_m)
diff1=SIML-slm
diff2=abs(diff1)
diff=abs(diff1-diff2)
x.append(diff)
sl=sl+amaxelements(x,a)
tf2=sl/5
data=words['fullnopunc']
SIML=0.8
tf1 = func()
tf2 = func2()
print(tf1)
print(tf2)
dqic3=tf1+tf2
print(dqic3)
|
{"hexsha": "e6acef970b439f71bc0061f34b020b67a66415d6", "size": 2323, "ext": "py", "lang": "Python", "max_stars_repo_path": "p3.py", "max_stars_repo_name": "swarooprm/DQI", "max_stars_repo_head_hexsha": "8de54cc60e489af49d063fb6a14235b9abcc2839", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-03T09:49:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-03T09:49:32.000Z", "max_issues_repo_path": "p3.py", "max_issues_repo_name": "swarooprm/DQI", "max_issues_repo_head_hexsha": "8de54cc60e489af49d063fb6a14235b9abcc2839", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "p3.py", "max_forks_repo_name": "swarooprm/DQI", "max_forks_repo_head_hexsha": "8de54cc60e489af49d063fb6a14235b9abcc2839", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5092592593, "max_line_length": 71, "alphanum_fraction": 0.6026689625, "include": true, "reason": "import numpy,from numba", "num_tokens": 627}
|
program t
external a,b,c
print *,'ok'
end program t
|
{"hexsha": "314a3d957ed0bde0b4ae6e7ca2e8cbf96fd6e448", "size": 56, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/t0172r/t.f90", "max_stars_repo_name": "maddenp/ppp", "max_stars_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-13T16:32:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T12:37:58.000Z", "max_issues_repo_path": "tests/t0172r/t.f90", "max_issues_repo_name": "maddenp/ppp", "max_issues_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/t0172r/t.f90", "max_forks_repo_name": "maddenp/ppp", "max_forks_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-07-30T17:02:27.000Z", "max_forks_repo_forks_event_max_datetime": "2015-08-03T16:29:41.000Z", "avg_line_length": 11.2, "max_line_length": 16, "alphanum_fraction": 0.6607142857, "num_tokens": 19}
|
from random import choice
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_gradient_magnitude
from wordcloud import WordCloud, ImageColorGenerator
COLORMAP = 'ocean'
COLORS = (
'#0F468C',
'#1665CC',
'#072040'
)
BACKGROUND_COLOR = '#ffffff'
HEIGHT = 768
WIDTH = 1536
PREFER_HORIZONTAL = 1
# RANDOM_STATE = 50
REPEAT = True
def generate_mask(image_path):
colors = np.array(Image.open(image_path))
mask = colors.copy()
edges = np.mean(
[gaussian_gradient_magnitude(colors[:, :, i] / 255.0, 2)
for i in range(3)], axis=0)
mask[edges > 0.8] = 255
return colors, mask
def main(word_dic, image_path, font_path, use_image_colors):
colors, mask = generate_mask(image_path)
wordcloud = WordCloud(mask=mask,
font_path=font_path,
regexp=r'\w+( [\w]+)?',
# colormap=COLORMAP,
color_func=lambda *args, **kwargs: choice(COLORS),
background_color=BACKGROUND_COLOR,
height=HEIGHT,
width=WIDTH,
prefer_horizontal=PREFER_HORIZONTAL,
# random_state=RANDOM_STATE,
repeat=REPEAT).generate_from_frequencies(word_dic)
if use_image_colors:
image_colors = ImageColorGenerator(colors)
wordcloud.recolor(color_func=image_colors)
return wordcloud
|
{"hexsha": "701a0d013a4bf65357540cbbd771ece4139df880", "size": 1482, "ext": "py", "lang": "Python", "max_stars_repo_path": "wordcloud_generator/generate.py", "max_stars_repo_name": "liviakuhn/wordcloud-generator", "max_stars_repo_head_hexsha": "b0f28f57361fa7f801b9179afb5c6b2a1cd2d37c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wordcloud_generator/generate.py", "max_issues_repo_name": "liviakuhn/wordcloud-generator", "max_issues_repo_head_hexsha": "b0f28f57361fa7f801b9179afb5c6b2a1cd2d37c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wordcloud_generator/generate.py", "max_forks_repo_name": "liviakuhn/wordcloud-generator", "max_forks_repo_head_hexsha": "b0f28f57361fa7f801b9179afb5c6b2a1cd2d37c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.875, "max_line_length": 76, "alphanum_fraction": 0.6032388664, "include": true, "reason": "import numpy,from scipy", "num_tokens": 331}
|
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE UniqueIdMapTest
#include <boost/test/unit_test.hpp>
#include <iostream>
#include "utils/UniqueIdMap.hpp"
using namespace pcw;
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(UniqueIds) {
UniqueIdMap<std::string> ids;
BOOST_CHECK_EQUAL(ids["first"].first, 1);
BOOST_CHECK_EQUAL(ids["second"].first, 2);
BOOST_CHECK_EQUAL(ids["third"].first, 3);
BOOST_CHECK_EQUAL(ids["first"].first, 1);
BOOST_CHECK_EQUAL(ids["second"].first, 2);
BOOST_CHECK_EQUAL(ids["third"].first, 3);
}
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(NewIds) {
UniqueIdMap<std::string> ids;
BOOST_CHECK_EQUAL(ids["first"].second, true);
BOOST_CHECK_EQUAL(ids["second"].second, true);
BOOST_CHECK_EQUAL(ids["third"].second, true);
BOOST_CHECK_EQUAL(ids["first"].second, false);
BOOST_CHECK_EQUAL(ids["second"].second, false);
BOOST_CHECK_EQUAL(ids["third"].second, false);
}
|
{"hexsha": "feeff9953bc669f1fa398da02e2a0527301fbf57", "size": 1024, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "rest/src/utils/tests/TestUniqueIdMap.cpp", "max_stars_repo_name": "cisocrgroup/pocoweb", "max_stars_repo_head_hexsha": "93546d026321744602f6ee90fd82503da56da3b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-04-09T20:46:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-07T17:29:02.000Z", "max_issues_repo_path": "rest/src/utils/tests/TestUniqueIdMap.cpp", "max_issues_repo_name": "cisocrgroup/pocoweb", "max_issues_repo_head_hexsha": "93546d026321744602f6ee90fd82503da56da3b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 61.0, "max_issues_repo_issues_event_min_datetime": "2018-01-03T09:49:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T12:26:11.000Z", "max_forks_repo_path": "rest/src/utils/tests/TestUniqueIdMap.cpp", "max_forks_repo_name": "cisocrgroup/pocoweb", "max_forks_repo_head_hexsha": "93546d026321744602f6ee90fd82503da56da3b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-01-10T15:44:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-19T13:39:53.000Z", "avg_line_length": 33.0322580645, "max_line_length": 80, "alphanum_fraction": 0.6376953125, "num_tokens": 218}
|
#!/usr/bin/env python
# Black-Scholes PDE solving using DGM paper
# __author__ = "Abdollah Rida"
# __email__ = "abdollah.rida@berkeley.edu"
# Import needed packages
import numpy as np
import scipy.stats as spstats
from __params__ import *
# Black-Scholes European call price
# Analytical known solution
def lambd_H(t):
'''
lambda_H term for EU Call price under fBS
Args:
----
t: time
'''
global H
return 2*H*t**(2*H - 1)
def dp(S, K, r, sigma, t):
global H
log = np.log(S/K)
num = (r + lambd_H(t)/2 * sigma**2) * (T - t)
denom = sigma * np.sqrt(lambd_H(t) * (T - t))
return (log + num)/denom
def dm(S, K, r, sigma, t):
global H
log = np.log(S/K)
num = (r - lambd_H(t)/2 * sigma**2) * (T - t)
denom = sigma * np.sqrt(lambd_H(t) * (T - t))
return (log + num)/denom
def BlackScholesCall(S, K, r, sigma, t):
'''
Analytical solution for European call option price under
Black-Scholes model
Args:
----
S: spot price
K: strike price
r: risk-free interest rate
sigma: volatility
t: time
'''
global H
# first term
ft = S * spstats.norm.cdf(dp(S, K, r, sigma,t))
# second term
st = K * np.exp(-r * (T-t)) * spstats.norm.cdf(dm(S, K, r, sigma,t))
callPrice = ft - st
return callPrice
|
{"hexsha": "f8f94efcbb50d5b5463569475d72e381672969f8", "size": 1378, "ext": "py", "lang": "Python", "max_stars_repo_path": "f_BS/f_BS/f_Call.py", "max_stars_repo_name": "AbdollahRida/MathFi", "max_stars_repo_head_hexsha": "bf392e76793940c477c73016f44c5192e902c6b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-15T12:54:27.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-15T12:54:27.000Z", "max_issues_repo_path": "f_BS/f_BS/f_Call.py", "max_issues_repo_name": "AbdollahRida/MathFi", "max_issues_repo_head_hexsha": "bf392e76793940c477c73016f44c5192e902c6b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "f_BS/f_BS/f_Call.py", "max_forks_repo_name": "AbdollahRida/MathFi", "max_forks_repo_head_hexsha": "bf392e76793940c477c73016f44c5192e902c6b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.2647058824, "max_line_length": 72, "alphanum_fraction": 0.564586357, "include": true, "reason": "import numpy,import scipy", "num_tokens": 444}
|
from PIL import Image
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Generate pixel portraits from an image.')
parser.add_argument('file', type=str, help='Input file.')
parser.add_argument('-out', type=str, default='', help='Output file.')
parser.add_argument('-compression', type=int, default=20, help='Intensity of color compression. 10-50 recommended.')
args = parser.parse_args()
COMPRESSION = args.compression
im = Image.open(args.file)
im = im.resize((64, 64))
r, g, b = np.array(im).T
r //= COMPRESSION
r *= COMPRESSION
g //= COMPRESSION
g *= COMPRESSION
b //= COMPRESSION
b *= COMPRESSION
im = Image.fromarray(np.dstack((r.T, g.T, b.T)))
im = im.resize((512, 512))
im.save(args.out)
|
{"hexsha": "4d39902eeaf64a8217231f06d910b55e8b2f62ef", "size": 730, "ext": "py", "lang": "Python", "max_stars_repo_path": "run.py", "max_stars_repo_name": "ErikBoesen/portrify", "max_stars_repo_head_hexsha": "ab14ab6112b915364b50dd3b35c223cb79f50376", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-11-05T22:25:22.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-12T12:30:12.000Z", "max_issues_repo_path": "run.py", "max_issues_repo_name": "ErikBoesen/portrify", "max_issues_repo_head_hexsha": "ab14ab6112b915364b50dd3b35c223cb79f50376", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run.py", "max_forks_repo_name": "ErikBoesen/portrify", "max_forks_repo_head_hexsha": "ab14ab6112b915364b50dd3b35c223cb79f50376", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.037037037, "max_line_length": 116, "alphanum_fraction": 0.7178082192, "include": true, "reason": "import numpy", "num_tokens": 187}
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Class definitions for declaritive (vs imperative) `Tensors` & `Variables`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import name_util
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'DeferredTensor',
'TransformedVariable',
]
JAX_MODE = False
NUMPY_MODE = False
_identity = lambda x: x
def _numpy_text(tensor):
"""Human readable representation of a tensor's numpy value."""
if dtype_util.is_numpy_compatible(tensor.dtype):
value = np.array(tensor)
if value.shape:
text = repr(value)
else:
text = str(value)
else:
text = '<unprintable>'
if '\n' in text:
text = '\n' + text
return text
def _wrap_method(attr):
"""Wraps a method to operate on the concretized value.
Args:
attr: Python `str` representing the `attr` to inject a new notion of `self`.
Returns:
dependency_injected_function: Python `callable`
corresponding to `type(self).attr` but implemented by `new_fn`.
"""
def new_fn_like_old_fn(self, *args, **kwargs):
value = self._value() # pylint: disable=protected-access
old_fn = getattr(type(value), attr)
return old_fn(value, *args, **kwargs)
return new_fn_like_old_fn
def _tensorize(d, dtype=None, name=None, as_ref=False):
"""Tensor conversion function presuming `hasattr(d, '_value')`."""
return d._value(dtype, name, as_ref) # pylint: disable=protected-access
class TensorMetaClass(type):
"""A type of class which will make objects which act like Tensors."""
def __new__(mcs, name, bases, attrs):
operators = set(tf.Tensor.OVERLOADABLE_OPERATORS)
operators.difference_update({'__eq__', '__ne__'})
operators.update({'__iter__'})
attrs.update((attr, _wrap_method(attr)) for attr in operators)
# Support methods for __iter__ and __bool__
private_methods = {
name for name in dir(tf.Tensor) if name.startswith('_disallow')
}
attrs.update(
(attr, _wrap_method(attr))
for attr in private_methods)
if JAX_MODE or NUMPY_MODE:
other_attrs = {'__array_priority__'}
if six.PY2:
other_attrs.add('__nonzero__')
else:
other_attrs.add('__bool__')
attrs.update((attr, getattr(np.ndarray, attr)) for attr in other_attrs)
else:
attrs.update(
(attr, getattr(tf.Tensor, attr))
for attr in {'__bool__', '__array_priority__', '__nonzero__'})
cls = super(TensorMetaClass, mcs).__new__(mcs, name, bases, attrs)
tf.register_tensor_conversion_function(cls, conversion_func=_tensorize)
return cls
NONE_SPECIFIED = 'None'
class DeferredTensor(six.with_metaclass(TensorMetaClass, tf.Module)):
"""Variable tracking object which applies function upon `convert_to_tensor`.
#### Example
```python
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
# Note: it'd be better to use `tfp.util.TransformedVariable`;
# this example is for illustration only.
trainable_normal = tfd.Normal(
loc=tf.Variable(0.),
scale=tfp.util.DeferredTensor(tf.Variable(0.), tf.math.exp))
trainable_normal.loc
# ==> <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>
trainable_normal.scale
# ==> <DeferredTensor: dtype=float32, shape=[], fn=exp>
# Operators work with `DeferredTensor`.
trainable_normal.scale + 1.
# ==> 2.
with tf.GradientTape() as tape:
negloglik = -trainable_normal.log_prob(0.5)
g = tape.gradient(negloglik, trainable_normal.trainable_variables)
# ==> (-0.5, 0.75)
```
Which we could then fit as:
```python
opt = tf.optimizers.Adam(learning_rate=0.05)
loss = tf.function(lambda: -trainable_normal.log_prob(0.5), autograph=True)
for _ in range(int(1e3)):
opt.minimize(loss, trainable_normal.trainable_variables)
trainable_normal.mean()
# ==> 0.5
trainable_normal.stddev()
# ==> (approximately) 0.0075
```
It is also possible to parameterize a `DeferredTensor` with a bijector, e.g.:
```python
# Note: it'd be better to use `tfp.util.TransformedVariable`;
# this example is for illustration only.
d = tfd.Normal(loc=0.,
scale=tfp.util.DeferredTensor(tf.Variable([0.54, 1.85]),
tfb.Softplus()))
d.stddev()
# ==> [1., 2.]
tf.convert_to_tensor(d.scale)
# ==> [1., 2.]
```
"""
def __init__(self, pretransformed_input, transform_fn, dtype=None,
shape=NONE_SPECIFIED, also_track=None, name=None):
"""Creates the `DeferredTensor` object.
Args:
pretransformed_input: object with `shape`, `dtype` properties (typically a
`tf.Variable`) passed into `transform_fn` when this object is acted upon
in a `Tensor` context, eg, `tf.convert_to_tensor`, `+`, `tf.math.exp`,
etc.
transform_fn: Python `callable` or `tfp.bijectors.Bijector`-like instance.
When `callable`, should take `pretransformed_input` and
return a `Tensor` (representing by this object).
dtype: Equivalent to what would otherwise be
`transform_fn(pretransformed_input).dtype`.
Default value: `None` (i.e.,
`getattr(transform_fn, 'dtype', None) or pretransformed_input.dtype`).
shape: Equivalent to what would otherwise be
`transform_fn(pretransformed_input).shape`.
Default value: `'None'` (i.e.,
`getattr(transform_fn, 'forward_event_shape', lambda x: x)(
pretransformed_input.shape)`).
also_track: Optional instance or structure of instances of `tf.Variable`
and/or `tf.Module`, containing any additional trainable variables that
the `transform_fn` may access beyond the given
`pretransformed_input`. This ensures that such variables
will be correctly tracked in `self.trainable_variables`.
Default value: `None`.
name: Python `str` representing this object's `name`; used only in graph
mode.
Default value: `None` (i.e.,
`(getattr(transform_fn, 'name', None) or
transform_fn.__name__ + '_' + pretransformed_input.name)`).
Raises:
TypeError: if `transform_fn` is not `callable`.
TypeError: if `pretransformed_input` lacks `dtype` and/or `shape`
properties (and `dtype` and/or `shape` arguments are unspecified).
"""
pretransformed_input = tensor_util.convert_nonref_to_tensor(
pretransformed_input,
name='pretransformed_input')
if dtype is None:
dtype = (getattr(transform_fn, 'dtype', None) or
dtype_util.base_dtype(pretransformed_input.dtype))
try:
dtype = None if dtype is None else tf.as_dtype(dtype)
except TypeError:
raise TypeError('Argument `dtype` must be convertible to a '
'`tf.dtypes.DType`; saw "{}" of type "{}".'.format(
repr(dtype), type(dtype)))
if shape == NONE_SPECIFIED:
shape = getattr(transform_fn, 'forward_event_shape', _identity)
shape = shape(pretransformed_input.shape)
try:
shape = tf.TensorShape(shape)
except TypeError:
raise TypeError('Argument `shape` must be convertible to a '
'`tf.TensorShape`; saw "{}".'.format(shape))
name = name or getattr(transform_fn, 'name', None)
if not name:
name = '_'.join([
transform_fn.__name__,
getattr(pretransformed_input, 'name', '')])
name = name_util.strip_invalid_chars(name)
name = name_util.camel_to_lower_snake(name)
name = name_util.get_name_scope_name(name)
name = name_util.strip_invalid_chars(name)
if hasattr(transform_fn, 'forward'):
fwd_name = '"{}"'.format(transform_fn.name)
else:
fwd_name = transform_fn.__name__
if not callable(transform_fn):
raise TypeError('Argument `transform_fn` must be `callable`.')
super(DeferredTensor, self).__init__(name=name)
self._pretransformed_input = pretransformed_input
self._transform_fn = transform_fn
self._dtype = dtype
self._shape = shape
self._also_track = also_track
self._name = name
self._fwd_name = fwd_name
# Secret handshake with tf.is_tensor to return True for DT.
#
# Works around an exception in LinearOperator (which in 2.0.0 checks only
# `tf.is_tensor`, not also `linear_operator_util.is_ref`:
# ValueError: Graph parent item 0 is not a Tensor;
# <DeferredTensor: dtype=float32, shape=[2], fn=exp>.
# TODO(b/140157055): Remove this shim after LinOp is patched in 2.0.
self.is_tensor_like = True
@property
def transform_fn(self):
"""Function which characterizes the `Tensor`ization of this object."""
if hasattr(self._transform_fn, 'forward'):
return self._transform_fn.forward
return self._transform_fn
@property
def pretransformed_input(self):
"""Input to `transform_fn`."""
return self._pretransformed_input
@property
def dtype(self):
"""Represents the type of the elements in a `Tensor`."""
return self._dtype
@property
def shape(self):
"""Represents the shape of a `Tensor`."""
return self._shape
# TODO(b/140157055): Remove this shim.
def get_shape(self):
"""Legacy means of getting Tensor shape, for compat with 2.0.0 LinOp."""
return self._shape
@property
def also_track(self):
"""Additional variables tracked by tf.Module in self.trainable_variables."""
return self._also_track
@property
def name(self):
"""The string name of this object."""
return self._name
def numpy(self):
"""Returns (copy of) deferred values as a NumPy array or scalar."""
value = self._value()
if not tf.executing_eagerly():
raise NotImplementedError(
'DeferredTensor.numpy() is only supported in eager execution mode.')
return np.array(value)
def set_shape(self, shape):
"""Updates the shape of this pretransformed_input.
This method can be called multiple times, and will merge the given `shape`
with the current shape of this object. It can be used to provide additional
information about the shape of this object that cannot be inferred from the
graph alone.
Args:
shape: A `TensorShape` representing the shape of this
`pretransformed_input`, a `TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of this
`pretransformed_input`.
"""
self._shape = self._shape.merge_with(shape)
def __repr__(self):
if tf.executing_eagerly():
try:
value = self._value()
except Exception as e: # pylint: disable=broad-except
value = e
value_str = ', numpy={}'.format(value if isinstance(value, Exception)
else _numpy_text(value))
else:
value_str = ''
return '<{}: dtype={}, shape={}, fn={}{}>'.format(
type(self).__name__,
dtype_util.name(self.dtype) if self.dtype else '?',
str(
tensorshape_util.as_list(self.shape)
if tensorshape_util.rank(self.shape) is not None else '?').replace(
'None', '?'), self._fwd_name, value_str)
def __getitem__(self, i):
return self._value()[i]
def _value(self, dtype=None, name=None, as_ref=False):
y = self.transform_fn(self.pretransformed_input) # pylint: disable=not-callable
if dtype_util.base_dtype(y.dtype) != self.dtype:
raise TypeError(
'Actual dtype ({}) does not match deferred dtype ({}).'.format(
dtype_util.name(dtype_util.base_dtype(y.dtype)),
dtype_util.name(self.dtype)))
if not tensorshape_util.is_compatible_with(y.shape, self.shape):
raise TypeError(
'Actual shape ({}) is incompatible with deferred shape ({}).'.format(
y.shape, self.shape))
return tf.convert_to_tensor(y, dtype=dtype, name=name)
def __array__(self, dtype=None):
if not tf.executing_eagerly():
raise NotImplementedError(
'Cannot convert a symbolic (graph mode) `DeferredTensor` to a '
'numpy array.')
return np.array(self._value(dtype=dtype))
class TransformedVariable(DeferredTensor):
"""Variable tracking object which applies a bijector upon `convert_to_tensor`.
#### Example
```python
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
trainable_normal = tfd.Normal(
loc=tf.Variable(0.),
scale=tfp.util.TransformedVariable(1., bijector=tfb.Exp()))
trainable_normal.loc
# ==> <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>
trainable_normal.scale
# ==> <TransformedVariable: dtype=float32, shape=[], fn=exp>
tf.convert_to_tensor(trainable_normal.scale)
# ==> 1.
# Operators work with `TransformedVariable`.
trainable_normal.scale + 1.
# ==> 2.
with tf.GradientTape() as tape:
negloglik = -trainable_normal.log_prob(0.5)
g = tape.gradient(negloglik, trainable_normal.trainable_variables)
# ==> (-0.5, 0.75)
```
Which we could then fit as:
```python
opt = tf.optimizers.Adam(learning_rate=0.05)
loss = tf.function(lambda: -trainable_normal.log_prob(0.5))
for _ in range(int(1e3)):
opt.minimize(loss, trainable_normal.trainable_variables)
trainable_normal.mean()
# ==> 0.5
trainable_normal.stddev()
# ==> (approximately) 0.0075
```
It is also possible to assign values to a TransformedVariable, e.g.,
```python
d = tfd.Normal(
loc=tf.Variable(0.),
scale=tfp.util.TransformedVariable([1., 2.], bijector=tfb.Softplus()))
d.stddev()
# ==> [1., 2.]
with tf.control_dependencies([x.scale.assign_add([0.5, 1.])]):
d.stddev()
# ==> [1.5, 3.]
```
"""
def __init__(self, initial_value, bijector, dtype=None, name=None, **kwargs):
"""Creates the `TransformedVariable` object.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. Can also be a callable with
no argument that returns the initial value when called. Note: if
`initial_value` is a `TransformedVariable` then the instantiated object
does not create a new `tf.Variable`, but rather points to the underlying
`Variable` and chains the `bijector` arg with the underlying bijector as
`tfb.Chain([bijector, initial_value.bijector])`.
bijector: A `Bijector`-like instance which defines the transformations
applied to the underlying `tf.Variable`.
dtype: `tf.dtype.DType` instance or otherwise valid `dtype` value to
`tf.convert_to_tensor(..., dtype)`.
Default value: `None` (i.e., `bijector.dtype`).
name: Python `str` representing the underlying `tf.Variable`'s name.
Default value: `None`.
**kwargs: Keyword arguments forward to `tf.Variable`.
"""
# Check if `bijector` is "`Bijector`-like".
for attr in {'forward', 'forward_event_shape',
'inverse', 'inverse_event_shape',
'name', 'dtype'}:
if not hasattr(bijector, attr):
raise TypeError('Argument `bijector` missing required `Bijector` '
'attribute "{}".'.format(attr))
if callable(initial_value):
initial_value = initial_value()
initial_value = tf.convert_to_tensor(
initial_value, dtype_hint=bijector.dtype, dtype=dtype)
super(TransformedVariable, self).__init__(
pretransformed_input=tf.Variable(
initial_value=bijector.inverse(initial_value),
name=name,
dtype=dtype,
**kwargs),
transform_fn=bijector,
shape=initial_value.shape,
name=bijector.name)
self._bijector = bijector
@property
def bijector(self):
return self._bijector
@property
def initializer(self):
"""The initializer operation for the underlying variable."""
return self.pretransformed_input.initializer
@functools.wraps(tf.Variable.assign)
def assign(self, value, use_locking=False, name=None, read_value=True):
return self.pretransformed_input.assign(
self.bijector.inverse(value),
use_locking=use_locking,
name=name,
read_value=read_value)
@functools.wraps(tf.Variable.assign_add)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
value = tf.convert_to_tensor(value, self.dtype)
new_value = self.transform_fn(self.pretransformed_input) + value # pylint: disable=not-callable
return self.pretransformed_input.assign(
self.bijector.inverse(new_value),
use_locking=use_locking,
name=name,
read_value=read_value)
@functools.wraps(tf.Variable.assign_sub)
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
value = tf.convert_to_tensor(value, self.dtype)
new_value = self.transform_fn(self.pretransformed_input) - value # pylint: disable=not-callable
return self.pretransformed_input.assign(
self.bijector.inverse(new_value),
use_locking=use_locking,
name=name,
read_value=read_value)
|
{"hexsha": "f8f026f826335371632ca881609701f49c924fc5", "size": 18235, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_probability/python/util/deferred_tensor.py", "max_stars_repo_name": "chrism0dwk/probability", "max_stars_repo_head_hexsha": "ab260f15cae94c6802c2f2769fb448ad213b79cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-21T06:30:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-08T19:29:15.000Z", "max_issues_repo_path": "tensorflow_probability/python/util/deferred_tensor.py", "max_issues_repo_name": "chrism0dwk/probability", "max_issues_repo_head_hexsha": "ab260f15cae94c6802c2f2769fb448ad213b79cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_probability/python/util/deferred_tensor.py", "max_forks_repo_name": "chrism0dwk/probability", "max_forks_repo_head_hexsha": "ab260f15cae94c6802c2f2769fb448ad213b79cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-31T13:08:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-31T13:08:33.000Z", "avg_line_length": 35.0, "max_line_length": 100, "alphanum_fraction": 0.6717301892, "include": true, "reason": "import numpy", "num_tokens": 4429}
|
#! /usr/bin/env python
# standard library imports
import argparse
import textwrap
import sys # NOQA importing sys so I can mock sys.argv in tests
from pandashells.lib import module_checker_lib, arg_lib
module_checker_lib.check_for_modules(['pandas'])
from pandashells.lib import io_lib
import pandas as pd
import numpy as np
# want different default mu values for normal and poisson distributions
def fill_default_mu(args):
if args.type[0] == 'normal':
args.mu = [0.] if args.mu is None else args.mu
elif args.type[0] == 'poisson':
args.mu = [1.] if args.mu is None else args.mu
return args
def get_samples(args):
"""
Return samples from selected distribution
"""
# dictionary to hold numpy arguments for different distributions
distribution_for = {
'uniform': {
'function': np.random.uniform,
'kwargs': {
'low': args.min[0],
'high': args.max[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'normal': {
'function': np.random.normal,
'kwargs': {
'loc': args.mu[0] if args.mu else None,
'scale': args.sigma[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'poisson': {
'function': np.random.poisson,
'kwargs': {
'lam': args.mu[0] if args.mu else None,
'size': (args.num_samples[0], args.columns[0]),
},
},
'beta': {
'function': np.random.beta,
'kwargs': {
'a': args.alpha[0],
'b': args.beta[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'gamma': {
'function': np.random.gamma,
'kwargs': {
'shape': args.alpha[0],
'scale': 1. / args.beta[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'binomial': {
'function': np.random.binomial,
'kwargs': {
'n': args.N[0],
'p': args.p[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
}
# grab the function for generating proper distribution
dist = distribution_for[args.type[0]]
# call the random generating function with the proper kwargs
values = dist['function'](**dist['kwargs'])
# set column names of output dataframe
columns = ['c{}'.format(c) for c in range(args.columns[0])]
# framify and return results
return pd.DataFrame(values, columns=columns)
def main():
msg = textwrap.dedent(
"""
Return random samples from common probability distrubtions.
-----------------------------------------------------------------------
Examples:
uniform: p.rand -n 1000 -t uniform --min=0 --max=1 | p.hist
normal: p.rand -n 1000 -t normal --mu=0 --sigma=1 | p.hist
poisson: p.rand -n 1000 -t poisson --mu=1 | p.hist
beta: p.rand -n 1000 -t beta --alpha=2 --beta=6 | p.hist
gamma: p.rand -n 1000 -t gamma --alpha=1 --beta=1 | p.hist
binomial: p.rand -n 1000 -t binomial --N=10 --p=0.4 | p.hist
-----------------------------------------------------------------------
"""
)
# read command line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
parser.add_argument(
'-t', '--type', nargs=1, type=str, default=['uniform'],
choices=['uniform', 'normal', 'beta', 'gamma', 'binomial', 'poisson'],
help='type of distribution (default=\'uniform\')')
parser.add_argument(
'-n', '--num_samples', nargs=1, default=[10], type=int,
help='The number of rows to generate (default=10)')
parser.add_argument(
'-c', '--columns', nargs=1, default=[1], type=int,
help='The number of columns to generate per row (default=1)')
parser.add_argument(
'--N', nargs=1, default=[10], type=int,
help=(
'(Binomial Dist) Largest possible value for random variable. '
'(default=10)'
)
)
parser.add_argument(
'--p', nargs=1, default=[.5], type=float,
help=(
'(Binomial Dist) Bernoulli probability for each trial'
'(default=.5)'
)
)
parser.add_argument(
'--mu', nargs=1, type=float,
help='(Normal, Poisson) Mean (defaults: normal:0, poisson:1')
parser.add_argument(
'--sigma', nargs=1, default=[1.], type=float,
help='(Normal) standard deviation, (default: 1)')
parser.add_argument(
'--min', nargs=1, default=[0.], type=float,
help='(Uniform) Minimum value of range, (default: 0)')
parser.add_argument(
'--max', nargs=1, default=[1.], type=float,
help='(Uniform) Maximum value of range, (default: 1)')
parser.add_argument(
'--alpha', nargs=1, default=[2.], type=float,
help='(Beta, Gamma) (default: 2)')
parser.add_argument(
'--beta', nargs=1, default=[2.], type=float,
help='(Beta, Gamma) (default: 2)')
arg_lib.add_args(parser, 'io_out')
# parse arguments
args = parser.parse_args()
# set some defaults
args = fill_default_mu(args)
# get the samples
df = get_samples(args)
# write dataframe to output
io_lib.df_to_output(args, df)
if __name__ == '__main__': # pragma: no cover
main()
|
{"hexsha": "3d7fcc62ce3475a6c94465402a6be948ccd05ed0", "size": 5729, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandashells/bin/p_rand.py", "max_stars_repo_name": "timgates42/pandashells", "max_stars_repo_head_hexsha": "4b565435a25ac713eeeacf28c3e5b52fe94530d8", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 878, "max_stars_repo_stars_event_min_datetime": "2015-08-02T02:07:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T19:06:47.000Z", "max_issues_repo_path": "pandashells/bin/p_rand.py", "max_issues_repo_name": "timgates42/pandashells", "max_issues_repo_head_hexsha": "4b565435a25ac713eeeacf28c3e5b52fe94530d8", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2015-05-12T15:56:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-13T20:58:29.000Z", "max_forks_repo_path": "pandashells/bin/p_rand.py", "max_forks_repo_name": "timgates42/pandashells", "max_forks_repo_head_hexsha": "4b565435a25ac713eeeacf28c3e5b52fe94530d8", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2015-08-02T22:48:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-13T20:54:58.000Z", "avg_line_length": 32.3672316384, "max_line_length": 79, "alphanum_fraction": 0.5264444057, "include": true, "reason": "import numpy", "num_tokens": 1406}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% [velo2, tmax]= SINCHRONIZE(qini, qfinal, velocity) Finds a mean speed and the required
% time to perform a movement between the joint coordinates qini and qfinal.
% If the speed of each joint is different, the maximum time to perform the movement
% by the slower joint is taken as a basis.
%
% Inputs:
% Qini: initial position in joint coordinates.
% Qfinal: final position in joint coordinates.
% Velocity: stores the maximum velocity of each joint.
% Outputs:
% velo2: new maximum speed for each joint.
% tmax: time needed to perform the movement.
%
% See also: MOVEJ, COMPUTE_JOINT_TRAJECTORY_INDEP
%
% Author: Arturo Gil
% Date: 29/03/2012
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright (C) 2012, by Arturo Gil Aparicio
%
% This file is part of ARTE (A Robotics Toolbox for Education).
%
% ARTE is free software: you can redistribute it and/or modify
% it under the terms of the GNU Lesser General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% ARTE is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU Lesser General Public License for more details.
%
% You should have received a copy of the GNU Leser General Public License
% along with ARTE. If not, see <http://www.gnu.org/licenses/>.
function [actual_speed, maxtime]=synchronize(qini, qfinal, speed, accel)
tacel = speed./accel;
tcte = (abs(qfinal(:)-qini(:))-accel(:).*tacel.^2)./speed(:);
time_total = tcte + 2*tacel;
maxtime=max(time_total);
actual_speed = (qfinal(:)-qini(:)-accel(:).*tacel.^2)/maxtime(:);
|
{"author": "4rtur1t0", "repo": "ARTE", "sha": "6e836f3156bb36af63b70bd93375c8ff4ee643c4", "save_path": "github-repos/MATLAB/4rtur1t0-ARTE", "path": "github-repos/MATLAB/4rtur1t0-ARTE/ARTE-6e836f3156bb36af63b70bd93375c8ff4ee643c4/RAPID/functions/synchronize.m"}
|
"""
Standalone script to load all bad odometers in an astropy table.
This can also be used for any other google sheet by changing the id and
the tab.
"""
import requests
from astropy.table import Table
URL_BASE = ('https://docs.google.com/spreadsheets/d/'
'{}/gviz/tq?tqx=out:csv&sheet={}')
SHEET_ID = '1gvMp1nHmEcKCUpxsTxkx-5m115mLuQIGHhxJCyVoZCM'
WORKSHEET = 0
# fetch data
url = URL_BASE.format(SHEET_ID, WORKSHEET)
data = requests.get(url)
tbl = Table.read(data.text, format='ascii')
# Convert boolean
tbl['PP'] = tbl['PP'] == 'TRUE'
tbl['RV'] = tbl['RV'] == 'TRUE'
|
{"hexsha": "8ca958cea2b5e6a969777fc6386e405ab9ffa357", "size": 586, "ext": "py", "lang": "Python", "max_stars_repo_path": "spirou/sandbox/bad_odo_list/load_bad_odo.py", "max_stars_repo_name": "njcuk9999/apero-utils", "max_stars_repo_head_hexsha": "f77de4c9123874e5bb6ed6bd03a7de3b27057402", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-08T17:03:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T17:49:44.000Z", "max_issues_repo_path": "spirou/sandbox/bad_odo_list/load_bad_odo.py", "max_issues_repo_name": "njcuk9999/apero-utils", "max_issues_repo_head_hexsha": "f77de4c9123874e5bb6ed6bd03a7de3b27057402", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2020-09-24T17:35:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-11T16:10:13.000Z", "max_forks_repo_path": "spirou/sandbox/bad_odo_list/load_bad_odo.py", "max_forks_repo_name": "njcuk9999/apero-utils", "max_forks_repo_head_hexsha": "f77de4c9123874e5bb6ed6bd03a7de3b27057402", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-04-10T06:41:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-16T21:09:14.000Z", "avg_line_length": 25.4782608696, "max_line_length": 71, "alphanum_fraction": 0.7013651877, "include": true, "reason": "from astropy", "num_tokens": 182}
|
"""
VariantMap plot
Author: CY THAM
Version: 1.0.0
"""
import math
import numpy as np
import pandas as pd
import plotly.graph_objects as go
def VariantMap(
dataframe,
entries_per_batch=2500,
batch_no=1,
annotation=None,
filter_sample=None,
filter_file=None,
sample_order=None,
title="",
sample_names=None,
color_list=None,
colorbar_thick=25,
rangeslider=True,
height=500,
width=600,
):
"""Returns a Dash Bio VariantMap figure.
Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe generated by VariantBreak.
Please pre-process your VCF files with VariantBreak and load the output object here.
- entries_per_batch (number; default 2500): Number of SV entries to display
in a batch.
- batch_no (number; default 1): Batch number to display in the plot.
SVs are grouped by batches and the batches are labeled numerically and
chronologically with descending SV prevalence. Only a single batch is
allowed to be displayed in an instance, unless a slider is used in an app
to switch between each batch. Number of total batches = total number of
SV entries / entries_per_batch, rounded up.
- annotation (dict; optional): A dictionary where the keys are annotation
labels and the values are list of respective annotations. Only SVs with
the selected annotations will be displayed in the plot. The keys are:
'Gene_id', 'Transcript_id', 'Gene_name', 'Gene_type' and 'Gene_feature'
for GTF/GFF. For BED annotation files, the key will be their 4th column
label if present, or else they will be 'BED1', 'BED2' and so on. Please
refer to the legend.txt file.
- filter_sample (list; optional): The list of default sample names
(e.g. 'S1', 'S2') to be removed from the plot together with the SVs they
possessed. For example, a non-diseased sample can be selected by this
argument to omit non-diseased associated SVs in the remaining diseased sample.
- filter_file (list; optional): The list of default filter names
(e.g. 'Filter1', 'Filter2') for filter activation. SVs that overlapped with
the respective filter BED files will be excluded from the plot.
- sample_order (list, optional): The list of default sample names
(e.g. 'S1', 'S2') with the order intended for plotting. Samples can also be
omitted from the plot using this argument.
- title (string; optional): Title of plot.
- sample_names (dict; optional): If provided, sample labels will follow this
dict rather than the default labels (e.g. 'S1', 'S2') extracted from the
VariantBreak object. The keys should be: 'S1', 'S2', 'S3' and so on,
depending on how many samples you have.
- color_list (dict; optional): The list of colors to use for different SV classes.
The keys are: 'DEL' (deletion), 'INV' (inversion), 'INS' (insertion),
'BND' (translocation or transposition), 'DUP' (tandem duplication), 'UKN' (unknown),
'NIL' (SV not detected).
- colorbar_thick (number; optional): The thickness of the colorbar, in px.
- rangeslider (bool; default True): Whether or not to show the range slider.
- height (number; default 500): The height of the graph, in px.
- width (number; default 700): The width of the graph, in px.
Usage example:
import pandas as pd
import dash_bio
# Load dataframe and metadata
file_path = "/path/to/sample.h5"
with pd.HDFStore(file_path, mode="r") as store:
df = store['dataset']
metadata = store.get_storer('dataset').attrs.metadata
# Add metadata to dataframe
df.metadata = ''
df.metadata = metadata
# Plot VariantMap
fig = dash_bio.VariantMap(df)
"""
# Get labels of samples to display
if sample_order is None:
# All samples to be displayed and default order
samples = dataframe.metadata["sample_names"]
else:
samples = sample_order
sv_classes = ["NIL", "DEL", "INV", "INS", "BND", "DUP", "UKN"]
color_dict = {
"DEL": "#4daf4a",
"INV": "#377eb8",
"INS": "#e41a1c",
"BND": "#984ea3",
"DUP": "#ff7f00",
"UKN": "#000000",
"NIL": "#d1d9e0",
}
colors = []
# Generate color list for colorbar
if color_list is None:
for _class in sv_classes:
colors.append(color_dict[_class])
else:
for _class in sv_classes:
try:
colors.append(color_list[_class])
except KeyError:
colors.append(color_dict[_class])
vm = _VariantMap(
dataframe,
entries_per_batch,
batch_no,
annotation,
filter_sample,
filter_file,
title,
samples,
sample_names,
colors,
colorbar_thick,
rangeslider,
height,
width,
)
return vm.figure()
class _VariantMap:
"""Returns a Dash Bio VariantMap object.
Methods:
- figure: Returns a VariantMap plotly graph object.
"""
def __init__(
self,
df,
entries_per_batch,
batch_no_for_display,
annotation,
filter_sample,
filter_file,
title,
samples,
sample_names,
colors,
colorbar_thick,
rangeslider,
height,
width,
):
self.title = title
self.colorbar_thick = colorbar_thick
self.rangeslider = rangeslider
self.height = height
self.width = width
# Generating discrete colorscale
markers = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
self.dcolorsc = discrete_colorscale(markers, colors)
self.tickvals = [0.071, 0.214, 0.357, 0.500, 0.643, 0.786, 0.929]
self.ticktext = ["NIL", "DEL", "INV", "INS", "BND", "DUP", "UKN"]
# Subset dataframe by gene name and SV index
if annotation:
if "Gene_name" in annotation and "index_list" in annotation:
if annotation["Gene_name"] and annotation["index_list"]:
df_genes = df[
df["Gene_name"].str.contains(
"|".join([x + ";" for x in annotation["Gene_name"]])
)
].copy()
df_indexes = df.loc[annotation["index_list"], :].copy()
df = pd.concat([df_genes, df_indexes])
else:
if annotation["Gene_name"]:
df = df[
df["Gene_name"].str.contains(
"|".join([x + ";" for x in annotation["Gene_name"]])
)
]
if annotation["index_list"]:
df = df.loc[annotation["index_list"], :]
else:
if "Gene_name" in annotation:
if annotation["Gene_name"]:
df = df[
df["Gene_name"].str.contains(
"|".join([x + ";" for x in annotation["Gene_name"]])
)
]
if "index_list" in annotation:
if annotation["index_list"]:
df = df.loc[annotation["index_list"], :]
# Subset dataframe by annotation
if annotation:
for _key in annotation:
if annotation[_key]:
if _key in ["Gene_name", "index_list"]:
pass
else:
df = df[df[_key].str.contains("|".join(annotation[_key]))]
# Subset dataframe by sample filter
if filter_sample:
for sample in filter_sample:
df = df[df[sample] == 0.0]
# Subtset dataframe by filter file
if filter_file:
for _filter in filter_file:
df = df[df[_filter] != "1"]
# Make a copy of dataframe
df_new = df.copy()
# Get actual sample order list
sample_order = [x for x in samples if x in df_new.columns]
# Calculate number of divisions
div = math.ceil(len(df_new) / entries_per_batch) + 0.001
# Calculate actual batch size
self.batch_size = math.ceil(len(df_new) / div)
# Add batch number to dataframe
df_new.loc[:, "Group"] = (
np.divmod(np.arange(len(df_new)), self.batch_size)[0] + 1
)
# Subset dataframe by batch label
df_new = df_new[df_new["Group"].isin([int(batch_no_for_display)])]
# Transpose dataframe
df_new = df_new.T
# Subset sample rows from dataframe and convert to list of lists
z = df_new.loc[sample_order, :].values.tolist()
# Reverse list
self.z = z[::-1]
# Subset hover-text row from dataframe and convert to list of lists
hover_list = ["Hover_" + x for x in sample_order]
hover_text = df_new.loc[hover_list, :].values.tolist()
# Reverse list
self.hover = hover_text[::-1]
# Change sample labels if provided
if sample_names is None:
names = sample_order
else:
names = []
for name in sample_order:
try:
names.append(sample_names[name])
except KeyError:
names.append(name)
# Reverse sample name list
names.reverse()
self.names = names
def figure(self):
"""
:return: a go.Figure object
"""
trace1 = go.Heatmap(
z=self.z,
y=self.names,
colorscale=self.dcolorsc,
colorbar=dict(
title=dict(
text="SV classes",
font=dict(family="Open Sans", size=14, color="#ffffff"),
),
thickness=self.colorbar_thick,
tickvals=self.tickvals,
ticktext=self.ticktext,
tickfont=dict(family="Open Sans", size=14, color="#ffffff"),
),
zmin=0.0,
zmax=1.0,
hovertext=self.hover,
hoverinfo="text",
xgap=2,
ygap=2,
)
layout = go.Layout(
title=dict(
text="<b>" + self.title + "<b>",
font=dict(family="Open Sans", size=18, color="#ffffff"),
x=0.48,
),
xaxis=dict(
title=dict(
text="Variants",
font=dict(family="Open Sans", size=16, color="#ffffff"),
standoff=3,
),
rangeslider=dict(visible=self.rangeslider),
showticklabels=False,
side="top",
type="-",
),
yaxis=dict(
title=dict(
text="Samples",
font=dict(family="Open Sans", size=16, color="#ffffff"),
standoff=3,
),
tickfont=dict(family="Open Sans", size=14, color="#ffffff"),
),
height=self.height,
width=self.width,
paper_bgcolor="rgba(10,43,77,255)",
plot_bgcolor="rgba(255,255,255,255)",
)
return go.Figure(data=[trace1], layout=layout)
def discrete_colorscale(markers, colors):
"""
:param markers:
:param colors:
:return: color scale
"""
markers = sorted(markers)
norm_mark = [
round((v - markers[0]) / (markers[-1] - markers[0]), 3) for v in markers
]
dcolorscale = []
for k in enumerate(colors):
dcolorscale.extend(
[[norm_mark[k[0]], colors[k[0]]], [norm_mark[k[0] + 1], colors[k[0]]]]
)
return dcolorscale
|
{"hexsha": "0aad4105f98e73380d24b53fa95cdd8e98a6b87e", "size": 11917, "ext": "py", "lang": "Python", "max_stars_repo_path": "dash_bio/component_factory/_variant.py", "max_stars_repo_name": "cytham/dash-bio", "max_stars_repo_head_hexsha": "331c8b3c80b5243c3cfa5261583a3595072edf15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dash_bio/component_factory/_variant.py", "max_issues_repo_name": "cytham/dash-bio", "max_issues_repo_head_hexsha": "331c8b3c80b5243c3cfa5261583a3595072edf15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dash_bio/component_factory/_variant.py", "max_forks_repo_name": "cytham/dash-bio", "max_forks_repo_head_hexsha": "331c8b3c80b5243c3cfa5261583a3595072edf15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5601092896, "max_line_length": 88, "alphanum_fraction": 0.5503901989, "include": true, "reason": "import numpy", "num_tokens": 2727}
|
import numpy
import networkx as nx
def map_step(p1, p2):
u = numpy.unique(p1)
splt = [p2[p1 == _u] for _u in u]
counter = [numpy.unique(_s, return_counts=True)
for _s in splt]
counter = [_x[0][numpy.argsort(_x[1])].tolist()
for _x in counter]
idx = numpy.max(u) + 1
mp = {}
for _c, _u in zip(counter, u):
n = _c.pop()
if n not in mp:
mp[n] = _u
while(True):
if numpy.sum(map(len, counter)) == 0:
break
for _c, _u in zip(counter, u):
if len(_c) == 0:
continue
n = _c.pop()
if n not in mp:
mp[n] = idx
idx += 1
return mp
def adjust_groups(rr, P):
valid = numpy.nonzero(numpy.diff(P, axis=0).sum(axis=1))[0] + 1
P = P[numpy.hstack([0, valid])]
rr = rr[numpy.hstack([0, valid])]
for i in range(len(P) - 1):
mp = map_step(P[i], P[i + 1])
for j in range(P.shape[1]):
P[i + 1, j] = mp[P[i + 1, j]]
return rr, P
def make_tree(rr, P):
def group_contents(lst, p, G):
return [[p[i] for i in G.nodes[g]['contents']]
for g in lst]
# noinspection PyUnresolvedReferences
def merge_list(lst, p, G):
groups = group_contents(lst, p, G)
merge_groups = numpy.arange(len(groups))
for i, g1 in enumerate(groups):
for j, g2 in enumerate(groups[i + 1:]):
if numpy.in1d(g1, g2).sum() > (0.5 * len(g1)) and\
numpy.in1d(g2, g1).sum() > (0.5 * len(g2)):
merge_groups[merge_groups == merge_groups[i + 1 + j]] = merge_groups[i]
merge_sets = [[l for grp, l in zip(merge_groups, lst) if grp == u_grp]
for u_grp in numpy.unique(merge_groups) if numpy.sum(merge_groups == u_grp) > 1]
return merge_sets
def merge_step(lst, p, r, G, idx):
to_merge = merge_list(lst, p, G)
for m in to_merge:
for _m in m:
lst.remove(_m)
n = [G.nodes[_m] for _m in m]
contents = numpy.unique(numpy.hstack([_n['contents'] for _n in n])).tolist()
G.add_node(idx, born=r, contents=contents)
for _m, _n in zip(m, n):
G.add_edge(idx, _m, length=_n['born'] - r, type='down')
lst.append(idx)
idx += 1
return idx
G = nx.DiGraph()
lst_nodes = []
for i in range(P.shape[1]):
G.add_node(i, born=rr[-1], contents=[i])
lst_nodes.append(i)
idx = P.shape[1]
for p, r in zip(P[-1::-1], rr[-1::-1]):
idx = merge_step(lst_nodes, p, r, G, idx)
print(lst_nodes)
return G
# noinspection PyTypeChecker,PyDefaultArgument
def layout_tree(G, root, pos_dict=None, x=0, y=[0, 10], length=['length']):
def make_splt(suc):
L = [len(G.nodes[_s]['contents']) for _s in suc]
L = numpy.hstack([0, numpy.cumsum(L)]).astype(float) / numpy.sum(L)
dy = numpy.diff(y)[0]
return [y[0] + L[i:i + 2] * dy for i in range(len(suc))]
if pos_dict is None:
pos_dict = {}
pos_dict[root] = (x, numpy.mean(y))
suc = list(G.successors(root))
suc = [_suc for _suc in suc if len(G.nodes[_suc]['contents']) < len(G.nodes[root]['contents'])]
splt = make_splt(suc)
for n, yy in zip(suc, splt):
l = numpy.mean([G.edges[(root, n)][_l] for _l in length])
layout_tree(G, n, pos_dict=pos_dict, x=x+l, y=yy, length=length)
return pos_dict
# noinspection PyTypeChecker,PyDefaultArgument
def layout_radial_tree(G, root, pos_dict=None, l=0, angle=[0, 2*numpy.pi], length=['length'], bidirectional=True):
def make_splt(suc):
L = [len(G.nodes[_s]['contents']) for _s in suc]
L = numpy.hstack([0, numpy.cumsum(L)]).astype(float) / numpy.sum(L)
da = numpy.diff(angle)[0]
return [angle[0] + L[i:i + 2] * da for i in range(len(suc))]
def polar2cart(pl, pa):
return (pl * numpy.cos(pa), pl * numpy.sin(pa))
if pos_dict is None:
pos_dict = {}
pos_dict[root] = polar2cart(l, numpy.mean(angle))
suc = list(G.successors(root))
suc = [_suc for _suc in suc if len(G.nodes[_suc]['contents']) < len(G.nodes[root]['contents'])]
splt = make_splt(suc)
for n, splt_a in zip(suc, splt):
el = [G.edges[(root, n)][_l] for _l in length]
if bidirectional:
el += [G.edges[(n, root)][_l] for _l in length]
el = numpy.mean(el)
layout_radial_tree(G, n, pos_dict=pos_dict, l=l+el, angle=splt_a, length=length, bidirectional=bidirectional)
return pos_dict
def get_leaves(T):
return sorted([_n for _n in T.nodes if T.out_degree[_n] == 1]) # 1 because we made edges bidirectional
def get_root(T):
return sorted(T.nodes)[-1]
def get_out_edges(T, node, edge_type='down'):
return [_e for _e in T.out_edges(node)
if T.edges[_e]['type'] == edge_type]
def make_bidirectional(T):
for e in T.edges:
tmp = T.edges[e].copy()
tmp['type'] = 'up'
T.add_edge(e[1], e[0], **tmp)
def con_mat2cluster_tree(M, radial=True):
import community
gamma = numpy.linspace(0, 12.75, 2001)
rr = 1.0 / gamma[1:-1]
G = nx.from_numpy_array(M + M.transpose(), create_using=nx.Graph())
partitions = [community.best_partition(G, resolution=_r) for _r in rr]
P = numpy.vstack([numpy.array([_part[i] for i in range(M.shape[0])]) for _part in partitions])
P = numpy.vstack([numpy.zeros(P.shape[1], dtype=int), P, numpy.arange(P.shape[1], dtype=int)])
T = make_tree(gamma, P)
make_bidirectional(T)
if radial:
pos_dict = layout_radial_tree(T, get_root(T))
else:
pos_dict = layout_tree(T, get_root(T))
return T, pos_dict
def tree2dist_mat(T, weight='length'):
leaves = get_leaves(T)
D = [[nx.algorithms.shortest_path_length(T, i, j, weight=weight)
for j in leaves] for i in leaves]
return numpy.array(D)
def _get_pairs(T, node=None):
if node is None:
node = get_root(T)
o_e = [_e for _e in T.out_edges(node)
if T.edges[_e]['type'] == 'down' and 'log_p' not in T.edges[_e]]
ret = []
for i, e1 in enumerate(o_e):
if 'w_out' in T.nodes[e1[1]]:
for e2 in o_e[(i + 1):]:
if 'w_out' in T.nodes[e2[1]]:
ret.append((e1[1], e2[1]))
else:
ret.extend(_get_pairs(T, e1[1]))
return ret
def _merge_w(p1, p2, r, tpl_out, tpl_in, W, ND):
ttl_w = W[p1] + W[p2]
w_out = (W[p1] * (ND[p1, :] + tpl_out[0])
+ W[p2] * (ND[p2, :] + tpl_out[1])) / ttl_w
w_in = (W[p1] * (ND[:, p1] + tpl_in[0])
+ W[p2] * (ND[:, p2] + tpl_in[1])) / ttl_w
W[r] = ttl_w
ND[r, :] = w_out
ND[:, r] = w_in
def fit_and_merge_pair(T, pair, W, ND, L):
N = numpy.array([[1, -1, 0, 0], [0, 0, 1, -1], [1, 0, 0, 1], [0, 1, 1, 0]], dtype=float)
path = nx.algorithms.shortest_path(T, pair[0], pair[1])
assert len(path) == 3
r = path[1]
x_out = ND[pair[0], :L].mean() - ND[pair[1], :L].mean()
x_in = ND[:L, pair[0]].mean() - ND[:L, pair[1]].mean()
a1 = ND[pair[0], pair[1]]
a2 = ND[pair[1], pair[0]]
b = numpy.array([x_out, x_in, a1, a2])
ir, jr, ri, rj = numpy.linalg.lstsq(N, b, rcond=None)[0]
def _updater(e, val):
val = numpy.maximum(val, 0.0)
e['log_p'] = numpy.nanmean([e.get('log_p', numpy.NaN), val])
_updater(T.edges[(pair[0], r)], -ir)
_updater(T.edges[(pair[1], r)], -jr)
_updater(T.edges[(r, pair[0])], -ri)
_updater(T.edges[(r, pair[1])], -rj)
tpl_out = (T.edges[(pair[0], r)]['log_p'], T.edges[(pair[1], r)]['log_p'])
tpl_in = (T.edges[(r, pair[0])]['log_p'], T.edges[(r, pair[1])]['log_p'])
_merge_w(pair[0], pair[1], r, tpl_out, tpl_in, W, ND)
def fit_tree_to_mat(T, M):
node = get_root(T)
L = M.shape[0]
ND = numpy.NaN * numpy.ones((len(T.nodes), len(T.nodes)))
ND[:L, :L] = numpy.log10(M)
W = numpy.NaN * numpy.ones(len(T.nodes))
W[:L] = 1
touched = set(range(M.shape[0]))
def _recursion(T, node):
edges = get_out_edges(T, node)
for i, e1 in enumerate(edges):
if e1[1] not in touched:
_recursion(T, e1[1])
for e2 in edges[(i + 1):]:
if e2[1] not in touched:
_recursion(T, e2[1])
fit_and_merge_pair(T, (e1[1], e2[1]), W, ND, L)
_recursion(T, node)
return W, ND
class TreeInnervationModel(object):
def __init__(self, T, p_func=lambda x: 10**-x, val_mask=None, mpr=None):
if mpr is None:
from white_matter.wm_recipe.parcellation import RegionMapper
self.mpr = RegionMapper()
else:
self.mpr = mpr
self.T = T
self.p_func = p_func
self.leaves = get_leaves(self.T)
self._M1 = None
if val_mask is None:
self._val_mask = numpy.ones((len(self.leaves), len(self.leaves)), dtype=bool)
else:
self._val_mask = val_mask
# noinspection PyDefaultArgument
def grow_from(self, idx, coming_from=[], valids=None):
if isinstance(idx, str) or isinstance(idx, unicode):
idx = self.mpr.region2idx(idx)
if valids is None:
valids = numpy.nonzero(self._val_mask[idx])[0]
elif idx in self.leaves:
if idx in valids:
return [idx]
else:
return []
edges = [e for e in self.T.out_edges(idx)
if (e[1], e[0]) not in coming_from]
ret = []
for e in edges:
p = self.p_func(self.T.edges[e]['log_p'])
if numpy.random.rand() < p:
ret.extend(self.grow_from(e[1], coming_from=[e], valids=valids))
return ret
def get_interaction_strength(self, axon_from, r1, r2, weight='log_p'):
T = self.T
if isinstance(axon_from, str) or isinstance(axon_from, unicode):
axon_from = self.mpr.region2idx(axon_from)
p1 = nx.algorithms.shortest_path(T, axon_from, r1, weight=weight)
p2 = nx.algorithms.shortest_path(T, axon_from, r2, weight=weight)
idxx = numpy.nonzero([_p in p2 for _p in p1])[0][-1]
dl = nx.algorithms.shortest_path_length(T, p1[idxx], r2, weight=weight) \
- nx.algorithms.shortest_path_length(T, axon_from, r2, weight=weight)
return self.p_func(dl)
def interaction_mat(self, axon_from, no_redundant=False):
T = self.T
leaves = get_leaves(T)
M = numpy.zeros((len(leaves), len(leaves)))
for i, l1 in enumerate(leaves):
if no_redundant:
for j, l2 in enumerate(leaves[(i + 1):]):
M[i, j + i + 1] = self.get_interaction_strength(axon_from, l1, l2)
else:
for j, l2 in enumerate(leaves):
M[i, j] = self.get_interaction_strength(axon_from, l1, l2)
return M
def idx2region_hemi(self, idxx):
if idxx > len(self.mpr.region_names):
return (self.mpr.idx2region(idxx - len(self.mpr.region_names)), 'contra')
return (self.mpr.idx2region(idxx), 'ipsi')
def region_hemi_names(self):
return [(_reg, 'ipsi') for _reg in self.mpr.region_names] +\
[(_reg, 'contra') for _reg in self.mpr.region_names]
def _first_order_mat(self):
M = self.p_func(tree2dist_mat(self.T, weight='log_p'))
M[numpy.eye(M.shape[0]) == 1] = numpy.NaN
return M
def first_order_mat(self):
if self._M1 is None:
self._M1 = self._first_order_mat()
self._M1[~self._val_mask] = 0.0
return self._M1
def to_json(self, fn, overwrite=False):
import json, os
import networkx as nx
if not overwrite and os.path.exists(fn):
raise Exception("File exists: " + fn)
with open(fn, 'w') as fid:
json.dump(nx.node_link_data(self.T), fid)
def draw(self, **kwargs):
from matplotlib import pyplot as plt
ax = plt.figure(figsize=(9, 9)).add_axes([0, 0, 1, 1])
mpr = self.mpr
pos = layout_radial_tree(self.T, get_root(self.T), length=['log_p'])
lbls = dict(enumerate(mpr.region_names))
lbls.update(dict([(i + len(mpr.region_names), v) for i, v in enumerate(mpr.region_names)]))
cols = [[0.95, 0.5, 0.5] for _ in range(len(mpr.region_names))]
cols.extend([[0.5, 0.5, 1.0] for _ in range(len(mpr.region_names))])
cols.extend([[0.7, 0.7, 0.7] for _ in range(len(self.T.nodes) - 2 * len(mpr.region_names))])
szs = [50.0] * 2 * len(mpr.region_names) + [20.0] * (len(self.T.nodes) - 2 * len(mpr.region_names))
nx.draw_networkx(self.T, pos, font_size=8, labels=lbls, node_color=cols, node_size=szs, ax=ax, **kwargs)
plt.axis('equal')
plt.axis('off')
@classmethod
def from_con_mats(cls, mat_topology, mat_weights, optimize=False, **kwargs):
mat_topology[numpy.isnan(mat_topology)] = 0.0 # TODO: Instead mask out
mat_weights[numpy.isnan(mat_weights)] = 0.0
T, pos_dict = con_mat2cluster_tree(mat_topology, radial=True)
epsilon = mat_weights[mat_weights > 0].min()
fit_tree_to_mat(T, mat_weights + epsilon)
if optimize:
for n in get_leaves(T):
for e in T.out_edges(n):
T.edges[e]['log_p'] = 0.0
mdl_tmp = cls(T)
M1 = mdl_tmp.first_order_mat()
M1[mat_weights == 0] = 0.0
sbtrct = numpy.log10(numpy.polyfit(M1[~numpy.isnan(M1)],
mat_weights[~numpy.isnan(M1)], 1)[0])
for e in T.edges:
if T.edges[e]['log_p'] > sbtrct and T.edges[e]['log_p'] > 0.0:
T.edges[e]['log_p'] = T.edges[e]['log_p'] - sbtrct
return cls(T, val_mask=(mat_weights > 0), **kwargs)
@classmethod
def from_json(cls, fn, **kwargs):
import networkx as nx
import json
with open(fn, 'r') as fid:
data = json.load(fid)
T = nx.node_link_graph(data)
return cls(T, **kwargs)
@classmethod
def from_config(cls, cfg, **kwargs):
import os, h5py
if not os.path.exists(cfg["json_cache"]) or not os.path.exists(cfg["h5_cache"]):
raise NotImplementedError("I will implement this later!")
h5 = h5py.File(str(cfg["h5_cache"]), 'r')
val_mask = h5[str(cfg["h5_dset"])][:]
ret = cls.from_json(cfg["json_cache"], val_mask=val_mask, **kwargs) #TODO: read p_func from cfg
ret.cfg = cfg
return ret
class TreeInnervationModelCollection(object):
def __init__(self, mdl_dict):
self._mdl_dict = mdl_dict
def __getitem__(self, item):
return self._mdl_dict[item]
@classmethod
def from_config_file(cls, cfg_file=None):
import os
from white_matter.utils.paths_in_config import path_local_to_path
from white_matter.utils.data_from_config import read_config
from white_matter.wm_recipe.parcellation import RegionMapper
if cfg_file is None:
cfg_file = os.path.join(os.path.split(__file__)[0], 'default.json')
mpr = RegionMapper()
else:
mpr = RegionMapper(cfg_file=cfg_file)
cfg = read_config(cfg_file)
cfg_root = cfg["cfg_root"]
cfg = cfg["PTypes"]
mdl_dict = {}
for k in cfg.keys():
path_local_to_path(cfg[k], cfg_root, ["json_cache", "h5_cache"])
mdl_dict[k] = TreeInnervationModel.from_config(cfg[k], mpr=mpr)
return cls(mdl_dict)
# VALIDATION OF TREE MODEL
def _naive_model(val_data, smpls=1000):
N = val_data.shape[1]
mn_data = val_data.mean(axis=0)
return numpy.vstack([numpy.random.rand(N) <= mn_data
for _ in range(smpls)])
def _make_bins(v):
if len(numpy.unique(v)) < 1000:
bins = numpy.unique(v)
else:
bins = numpy.linspace(numpy.min(v), numpy.max(v), 999)
db = numpy.mean(numpy.diff(bins))
epsilon = db * 1E-9
bins = numpy.hstack([bins[0] - db, bins, bins[-1] + db])
return bins, bins[:-1] + db / 2 - epsilon
def distance_func(V, dist='cityblock'):
from scipy.spatial import distance
return distance.pdist(V, dist)
def plot_hamming_distances(D_data, D_model, D_naive):
from matplotlib import pyplot as plt
bins, bin_c = _make_bins(numpy.hstack([D_data, D_model, D_naive]))
H_data = numpy.histogram(D_data, bins=bins, density=True)[0]
H_model = numpy.histogram(D_model, bins=bins, density=True)[0]
H_naive = numpy.histogram(D_naive, bins=bins, density=True)[0]
ax = plt.figure().add_axes([0.15, 0.15, 0.8, 0.8])
ax.plot(bin_c, H_data, label='Data')
ax.plot(bin_c, H_model, label='Tree-based model')
ax.plot(bin_c, H_naive, label='Naive model')
ax.set_xlabel('Hamming distance')
ax.set_ylabel('Fraction')
plt.legend()
def to_cdf(v):
from scipy import interpolate
bins, bin_c = _make_bins(v)
H = numpy.histogram(v, bins=bins, density=True)[0]
H = numpy.cumsum(H) / H.sum()
return interpolate.interp1d(bin_c, H, 'nearest',
bounds_error=False, fill_value='extrapolate')
def to_rvs(smpls):
def rvs(**kwargs):
if 'size' not in kwargs:
return numpy.random.choice(smpls)
return numpy.random.choice(smpls, kwargs['size'], replace=True)
return rvs
def validate_tree_model(tree_mdl, val_idx, val_data, smpls=10000, dist='cityblock'):
from scipy.stats import kstest
N = val_data.shape[1]
def idx2bc(idx):
ret = numpy.zeros(N, dtype=bool)
ret[idx] = True
return ret
grown = [tree_mdl.grow_from(val_idx)
for _ in range(smpls)]
grown = numpy.vstack([idx2bc(_x) for _x in grown])
D_data = distance_func(val_data, dist=dist)
D_model = distance_func(grown, dist=dist)
D_naive = distance_func(_naive_model(val_data), dist=dist)
plot_hamming_distances(D_data, D_model, D_naive)
# Distances are strongly non-independent samples. Need to use the ORIGINAL number of samples for the "N" kwarg.
return kstest(to_rvs(D_data), to_cdf(D_model), N=val_data.shape[0]),\
kstest(to_rvs(D_data), to_cdf(D_naive), N=val_data.shape[0])
|
{"hexsha": "9def99f7517aaa6bb25278b19cac23e5814151a4", "size": 18477, "ext": "py", "lang": "Python", "max_stars_repo_path": "white_matter/wm_recipe/p_types/ptype_tree_model.py", "max_stars_repo_name": "alex4200/Long-range-micro-connectome", "max_stars_repo_head_hexsha": "833aad78bc71e49a5059b276e65d3fef21686f9d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-05-01T13:12:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T10:34:56.000Z", "max_issues_repo_path": "white_matter/wm_recipe/p_types/ptype_tree_model.py", "max_issues_repo_name": "alex4200/Long-range-micro-connectome", "max_issues_repo_head_hexsha": "833aad78bc71e49a5059b276e65d3fef21686f9d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-03T13:56:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T07:16:37.000Z", "max_forks_repo_path": "white_matter/wm_recipe/p_types/ptype_tree_model.py", "max_forks_repo_name": "alex4200/Long-range-micro-connectome", "max_forks_repo_head_hexsha": "833aad78bc71e49a5059b276e65d3fef21686f9d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-03T12:05:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T12:05:12.000Z", "avg_line_length": 36.880239521, "max_line_length": 117, "alphanum_fraction": 0.5772582129, "include": true, "reason": "import numpy,from scipy,import networkx", "num_tokens": 5439}
|
// Copyright Carl Philipp Reh 2009 - 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <fcppt/make_ref.hpp>
#include <fcppt/noncopyable.hpp>
#include <fcppt/reference_comparison.hpp>
#include <fcppt/reference_output.hpp>
#include <fcppt/optional/comparison.hpp>
#include <fcppt/optional/map.hpp>
#include <fcppt/optional/object.hpp>
#include <fcppt/optional/output.hpp>
#include <fcppt/optional/reference.hpp>
#include <fcppt/preprocessor/disable_gcc_warning.hpp>
#include <fcppt/preprocessor/pop_warning.hpp>
#include <fcppt/preprocessor/push_warning.hpp>
#include <fcppt/config/external_begin.hpp>
#include <boost/test/unit_test.hpp>
#include <string>
#include <fcppt/config/external_end.hpp>
FCPPT_PP_PUSH_WARNING
FCPPT_PP_DISABLE_GCC_WARNING(-Weffc++)
BOOST_AUTO_TEST_CASE(
optional_map
)
{
FCPPT_PP_POP_WARNING
typedef
fcppt::optional::object<
std::string::size_type
>
optional_size;
typedef
fcppt::optional::object<
std::string
>
optional_string;
auto const conversion(
[](
std::string const &_val
)
{
return
_val.size();
}
);
BOOST_CHECK_EQUAL(
fcppt::optional::map(
optional_string(),
conversion
),
optional_size()
);
BOOST_CHECK_EQUAL(
fcppt::optional::map(
optional_string(
"test"
),
conversion
),
optional_size(
4u
)
);
}
namespace
{
class noncopyable
{
FCPPT_NONCOPYABLE(
noncopyable
);
public:
noncopyable()
{
}
~noncopyable()
{
}
};
}
FCPPT_PP_PUSH_WARNING
FCPPT_PP_DISABLE_GCC_WARNING(-Weffc++)
BOOST_AUTO_TEST_CASE(
optional_map_ref
)
{
FCPPT_PP_POP_WARNING
typedef
fcppt::optional::object<
std::string
>
optional_string;
noncopyable test{};
typedef
fcppt::optional::reference<
noncopyable
>
optional_noncopyable_ref;
BOOST_CHECK_EQUAL(
fcppt::optional::map(
optional_string(
"42"
),
[
&test
](
std::string
)
{
return
fcppt::make_ref(
test
);
}
),
optional_noncopyable_ref{
fcppt::make_ref(
test
)
}
);
}
|
{"hexsha": "d8bc53e30291eed631749e32d5e04d5397b876e1", "size": 2153, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/optional/map.cpp", "max_stars_repo_name": "vinzenz/fcppt", "max_stars_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/optional/map.cpp", "max_issues_repo_name": "vinzenz/fcppt", "max_issues_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/optional/map.cpp", "max_forks_repo_name": "vinzenz/fcppt", "max_forks_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.9513888889, "max_line_length": 61, "alphanum_fraction": 0.6985601486, "num_tokens": 656}
|
# -*- coding: utf-8 -*-
"""RegressionTorchModel Base class for model with no cell specific parameters"""
import matplotlib.pyplot as plt
# +
import numpy as np
import pandas as pd
from cell2location.models.base.torch_model import TorchModel
class RegressionTorchModel(TorchModel):
r"""Base class for regression models with no cell-specific parameters (enable minibatch training).
:param sample_id: str with column name in cell2covar that denotes sample
:param cell2covar: pd.DataFrame with covariates in columns and cells in rows, rows should be named.
:param X_data: Numpy array of gene expression (cols) in cells (rows)
:param n_iter: number of iterations, when using minibatch, the number of epochs (passes through all data),
supersedes self.n_iter
:param (data_type, learning_rate, total_grad_norm_constraint, verbose, var_names, var_names_read, obs_names, fact_names):
arguments for parent class :func:`~cell2location.models.BaseModel`
:param minibatch_size: if None all data is used for training,
if not None - the number of cells / observations per batch. For best results use 1024 cells per batch.
:param minibatch_seed: order of cells in minibatch is chose randomly, so a seed for each traning restart
should be provided
:param prior_eps: numerical stability constant added to initial values
:param nb_param_conversion_eps: NB distribution numerical stability constant, see :func:`~cell2location.models.TorchModel.nb_log_prob`
:param use_cuda: boolean, telling pytorch to use the GPU (if true).
:param use_average_as_initial_value: boolean, use average gene expression for each categorical covariate as initial value?
:param stratify_cv: when using cross-validation on cells (selected in the training method), this is a pd.Series that
tells :func:`~sklearn.model_selection.train_test_split` how to stratify when creating a split.
"""
def __init__(
self,
sample_id,
cell2covar: pd.DataFrame,
X_data: np.ndarray,
data_type="float32",
n_iter=200000,
learning_rate=0.001,
total_grad_norm_constraint=200,
verbose=True,
var_names=None,
var_names_read=None,
obs_names=None,
fact_names=None,
minibatch_size=None,
minibatch_seed=[41, 56, 345],
prior_eps=1e-8,
nb_param_conversion_eps=1e-8,
use_cuda=False,
use_average_as_initial_value=True,
stratify_cv=None,
):
############# Initialise parameters ################
# convert covariates to binary matrix
# test for column types, get dummies for categorical / character, and just copy over continous
cell2covar_df = pd.get_dummies(cell2covar.loc[:, ~cell2covar.columns.isin([sample_id])])
cell2sample_df = pd.get_dummies(cell2covar[[sample_id]])
cell2sample_covar_df = pd.concat([cell2sample_df, cell2covar_df], axis=1)
fact_names = cell2sample_covar_df.columns
n_fact = cell2sample_covar_df.shape[1]
# extract obs names and sample id
obs_names = cell2covar.index
sample_id = cell2covar[sample_id]
super().__init__(
X_data,
n_fact,
data_type,
n_iter,
learning_rate,
total_grad_norm_constraint,
verbose,
var_names,
var_names_read,
obs_names,
fact_names,
sample_id,
use_cuda,
)
self.nb_param_conversion_eps = nb_param_conversion_eps
self.cell_factors_df = None
self.minibatch_size = minibatch_size
self.minibatch_seed = minibatch_seed
self.n_cells_total = self.n_obs
self.which_sample = self.fact_names.isin(cell2sample_df.columns)
self.n_samples = np.sum(self.which_sample)
self.n_covar = self.n_fact - self.n_samples
self.prior_eps = prior_eps
self.cell2sample_df = cell2sample_df
self.cell2sample_covar_df = cell2sample_covar_df
# convert to np.ndarray
self.cell2sample_mat = cell2sample_df.values
self.cell2sample_covar_mat = cell2sample_covar_df.values
# find mean and variance for each gene
self.gene_mean = (self.X_data + self.prior_eps).mean(0).astype(self.data_type).reshape((1, self.n_var))
self.noise_gene_mean = (self.gene_mean / 10).astype(self.data_type).reshape((1, self.n_var))
self.prior_gene_mean = np.concatenate([self.noise_gene_mean, self.gene_mean], axis=0)
self.stratify_cv = stratify_cv
self.extra_data["cell2sample_covar"] = self.cell2sample_covar_mat
if use_average_as_initial_value:
# compute initial value for parameters: cluster averages
self.cell2sample_covar_sig_mat = self.cell2sample_covar_mat / self.cell2sample_covar_mat.sum(0)
self.clust_average_mat = np.dot(self.cell2sample_covar_sig_mat.T, self.X_data) + self.prior_eps
self.clust_average_mat[self.which_sample, :] = self.clust_average_mat[self.which_sample, :] / 10
# aver = get_cluster_averages(adata_snrna_raw, 'annotation_1') + self.prior_eps
# variances = get_cluster_variances(adata_snrna_raw, 'annotation_1') + self.prior_eps
# shape = aver ** 2 / variances
# shape = shape.mean(1).values
# overdisp_mean = shape.reshape((1, adata_snrna_raw.shape[1]))
self.gene_E_mat = None # np.sqrt(1 / overdisp_mean) # get gene_E ~ Exponential()
else:
self.clust_average_mat = None
self.gene_E_mat = None
# =====================Other functions======================= #
def plot_gene_budget(self):
plt.hist(np.log10(self.samples["post_sample_means"]["gene_level"][:, 0]), bins=50)
plt.xlabel("Gene expression level (hierarchical)")
plt.title("Gene expression level (hierarchical)")
plt.tight_layout()
def sample2df(self, gene_node_name="gene_factors", sample_type="means"):
r"""Export cell factors as Pandas data frames.
:param node_name: name of the cell factor model parameter to be exported
:param gene_node_name: name of the gene factor model parameter to be exported
:param sample_type: type of posterior sample (means, q05, q95, sds)
:return: 8 Pandas dataframes added to model object:
.covariate_effects, .covariate_effects_sd, .covariate_effects_q05, .covariate_effects_q95
.sample_effects, .sample_effects_sd, .sample_effects_q05, .sample_effects_q95
"""
# export parameters for covariate effects
cov_ind = ~self.which_sample
self.covariate_effects = pd.DataFrame.from_records(
self.samples["post_sample_" + sample_type][gene_node_name][cov_ind, :].T,
index=self.var_names,
columns=[sample_type + "_cov_effect_" + i for i in self.fact_names[cov_ind]],
)
# export parameters for sample effects
sample_ind = self.which_sample
self.sample_effects = pd.DataFrame.from_records(
self.samples["post_sample_" + sample_type][gene_node_name][sample_ind, :].T,
index=self.var_names,
columns=[sample_type + "_sample_effect" + i for i in self.fact_names[sample_ind]],
)
def annotate_cell_adata(self, adata, use_raw=True):
r"""Add covariate and sample coefficients to anndata.var
:param adata: anndata object to annotate
:return: updated anndata object
"""
if self.cell_factors_df is None:
self.sample2df()
if use_raw is True:
var_index = adata.raw.var.index
### Covariate effect
# add gene factors to adata
adata.raw.var[self.covariate_effects.columns] = self.covariate_effects.loc[var_index, :]
### Sample effects
# add gene factors to adata
adata.raw.var[self.sample_effects.columns] = self.sample_effects.loc[var_index, :]
else:
var_index = adata.var.index
### Covariate effect
# add gene factors to adata
adata.var[self.covariate_effects.columns] = self.covariate_effects.loc[var_index, :]
### Sample effects
# add gene factors to adata
adata.var[self.sample_effects.columns] = self.sample_effects.loc[var_index, :]
return adata
|
{"hexsha": "863b6d52d93164d18b9b68bea616aaad12fb1a5b", "size": 8540, "ext": "py", "lang": "Python", "max_stars_repo_path": "cell2location/models/base/regression_torch_model.py", "max_stars_repo_name": "nadavyayon/cell2location", "max_stars_repo_head_hexsha": "54141fb85d4b0d64825dfdb6d1bf147b025c856b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 127, "max_stars_repo_stars_event_min_datetime": "2020-06-22T16:50:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T09:48:30.000Z", "max_issues_repo_path": "cell2location/models/base/regression_torch_model.py", "max_issues_repo_name": "nadavyayon/cell2location", "max_issues_repo_head_hexsha": "54141fb85d4b0d64825dfdb6d1bf147b025c856b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2020-06-24T01:31:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T13:40:05.000Z", "max_forks_repo_path": "cell2location/models/base/regression_torch_model.py", "max_forks_repo_name": "nadavyayon/cell2location", "max_forks_repo_head_hexsha": "54141fb85d4b0d64825dfdb6d1bf147b025c856b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 36, "max_forks_repo_forks_event_min_datetime": "2020-06-19T16:41:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T02:40:51.000Z", "avg_line_length": 42.4875621891, "max_line_length": 138, "alphanum_fraction": 0.6641686183, "include": true, "reason": "import numpy", "num_tokens": 1980}
|
"""Read, Write, and Convert between different word vector serialization formats."""
__version__ = "4.0.0"
from typing import Dict, Tuple
from enum import Enum
import numpy as np
#: A mapping of word to integer index. This index is used pull the this words
#: vector from the matrix of word vectors.
Vocab = Dict[str, int]
#: The actual word vectors. These are always of rank 2 and have the shape ``[vocab size, vector size]``
Vectors = np.ndarray
class FileType(Enum):
"""An Enumeration of the Word Vector file types supported."""
#: The format used by Glove. See :py:func:`~word_vectors.read.read_glove` for a
#: description of file format and common pre-trained embeddings that use this format.
GLOVE = "glove"
#: The text format introduced by Word2Vec. See :py:func:`~word_vectors.read.read_w2v_text`
#: for a description of the file format and common pre-trained embeddings that use this format.
W2V_TEXT = "w2v-text"
#: The binary format used by Word2Vec and pre-trained GoogleNews vectors. See
#: :py:func:`~word_vectors.read.read_w2v` for a description of the file format and common
#: pre-trained embeddings that use this format.
W2V = "w2v"
#: Our new Leader file format. See :py:func:`~word_vectors.read.read_leader` for a description of the file format.
LEADER = "leader"
#: The file format used to distribute FastText vectors, it is just the word2vec text format.
#: See :py:func:`~word_vectors.read.read_w2v_text` for a description of the file format.
FASTTEXT = "w2v-text"
#: The file format used to distribute Numberbatch vectors, it is just the word2vec text format.
#: See :py:func:`~word_vectors.read.read_w2v_text` for a description of the file format.
NUMBERBATCH = "w2v-text"
@classmethod
def from_string(cls, value: str) -> "FileType":
"""Convert a string into the Enum value.
Args:
value: The string specifying the file type.
Returns:
The Enum value parsed from the string.
Raises:
ValueError: If the string wasn't able to be parsed into
an Enum value.
"""
value = value.lower()
if value == "glove":
return cls.GLOVE
if value == "w2v_text" or value == "w2v-text":
return cls.W2V_TEXT
if value == "w2v":
return cls.W2V
if value == "leader":
return cls.LEADER
if value == "numberbatch":
return cls.NUMBERBATCH
if value in ("fasttext", "fast-text", "fast_text"):
return cls.FASTTEXT
raise ValueError(f"Unable to understand file type, got: {value}")
def __str__(self) -> str:
"""When calling ``str`` on an enum member output a value suitable for filenames"""
return self.value
INT_SIZE = 4 #: The size of an int32 in bytes used when reading binary files.
FLOAT_SIZE = 4 #: The size of a float32 in bytes when reading a binary file.
LONG_SIZE = 8 #: The size of an int64 in bytes when reading binary files.
LEADER_HEADER = 3 #: The number of elements in the Leader format header.
LEADER_MAGIC_NUMBER = 38941 #: A magic number used to identify a Leader format file.
import word_vectors.read as read_module
import word_vectors.write as write_module
import word_vectors.convert as convert_module
from word_vectors.read import (
read,
read_with_vocab,
read_w2v,
read_w2v_with_vocab,
read_w2v_text,
read_w2v_with_vocab,
read_glove,
read_glove_with_vocab,
read_leader,
read_leader_with_vocab,
verify_leader,
)
from word_vectors.convert import (
convert,
w2v_to_leader,
w2v_to_glove,
w2v_to_w2v_text,
glove_to_leader,
glove_to_w2v,
glove_to_w2v_text,
w2v_text_to_leader,
w2v_text_to_w2v,
w2v_text_to_glove,
leader_to_glove,
leader_to_w2v,
leader_to_w2v_text,
)
from word_vectors.write import write, write_w2v, write_w2v_text, write_glove, write_leader
|
{"hexsha": "e1cc2a2ad53d37b074b6a0c162feb68c71caaa9f", "size": 4012, "ext": "py", "lang": "Python", "max_stars_repo_path": "word_vectors/__init__.py", "max_stars_repo_name": "blester125/word-vectors", "max_stars_repo_head_hexsha": "4f6d8b2b6d8b87fad453a37000c6d0d236a6cb96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-06T08:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-06T08:37:34.000Z", "max_issues_repo_path": "word_vectors/__init__.py", "max_issues_repo_name": "blester125/word-vectors", "max_issues_repo_head_hexsha": "4f6d8b2b6d8b87fad453a37000c6d0d236a6cb96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-04-24T13:21:10.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-23T19:45:51.000Z", "max_forks_repo_path": "word_vectors/__init__.py", "max_forks_repo_name": "blester125/word_vectors", "max_forks_repo_head_hexsha": "4f6d8b2b6d8b87fad453a37000c6d0d236a6cb96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8214285714, "max_line_length": 118, "alphanum_fraction": 0.685443669, "include": true, "reason": "import numpy", "num_tokens": 1010}
|
import cv2
import numpy as np
import pytesseract
from PIL import Image
# Path of working folder on Disk
src_path = "Gamer"
def get_string(img_path):
# Read image with opencv
img = cv2.imread(img_path)
# Convert to gray
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Apply dilation and erosion to remove some noise
kernel = np.ones((1, 1), np.uint8)
img = cv2.dilate(img, kernel, iterations=1)
img = cv2.erode(img, kernel, iterations=1)
# Write image after removed noise
cv2.imwrite(src_path + "removed_noise.png", img)
# Apply threshold to get image with only black and white
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
# Write the image after apply opencv to do some ...
cv2.imwrite(src_path + "thres.png", img)
# Recognize text with tesseract for python
result = pytesseract.image_to_string(Image.open(src_path + "thres.png"))
# Remove template file
#os.remove(temp)
return result
print ('--- Start recognize text from image ---')
st=get_string(src_path+'.png')
print(st)
print ("------ Done -------")
|
{"hexsha": "e2380b79d726a6eb3da7035140e365d620688224", "size": 1187, "ext": "py", "lang": "Python", "max_stars_repo_path": "untitled0.py", "max_stars_repo_name": "Vrittik/PyPiTess", "max_stars_repo_head_hexsha": "498e3a97f5148f6c85344a68d058e51465fa49ef", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "untitled0.py", "max_issues_repo_name": "Vrittik/PyPiTess", "max_issues_repo_head_hexsha": "498e3a97f5148f6c85344a68d058e51465fa49ef", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "untitled0.py", "max_forks_repo_name": "Vrittik/PyPiTess", "max_forks_repo_head_hexsha": "498e3a97f5148f6c85344a68d058e51465fa49ef", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2619047619, "max_line_length": 100, "alphanum_fraction": 0.6613310868, "include": true, "reason": "import numpy", "num_tokens": 307}
|
program ut_dyn_ipert
use m_dyn, only: dyn_init
use m_dyn, only: dyn_vect
use m_dyn, only: dyn_get
use m_dyn, only: dyn_put
use m_dyn, only: dyn_clean
use m_set_eta, only: set_eta
use m_mapz_pert, only: mapz_pert_set
use m_mapz_pert, only: mapz_pert_interp
implicit none
type(dyn_vect) :: xpi ! input vector
type(dyn_vect) :: xpo ! output vector
integer ll,kmi,kmo
integer nymd, nhms, ks, freq, rc
real ptop,pint
integer :: dyntype=5
character(len=255) :: ipfname
character(len=255) :: opfname
real,allocatable,dimension(:) :: ak,bk
real,allocatable,dimension(:) :: plevi,plevo
kmo = 132
ipfname = 'old.nc4'
opfname = 'new.nc4'
call dyn_get ( ipfname, nymd, nhms, xpi, rc, timidx=1, freq=freq, vectype=dyntype, pncf=.true. )
kmi=xpi%grid%km
allocate(ak(kmo+1),bk(kmo+1))
call set_eta ( kmo,ks,ptop,pint,ak,bk )
call dyn_init ( xpi%grid%im, xpi%grid%jm, kmo, xpi%grid%lm, xpo, rc, &
vectype=dyntype, ptop=ptop, ks=ks, ak=ak, bk=bk )
if (rc/=0) then
print *, 'main: Error initializing dyn vector(xpo), rc=', rc
call exit(1)
endif
deallocate(ak,bk)
! set pressure levels
allocate(plevi(kmi),plevo(kmo))
call mapz_pert_set (kmi,plevi)
call mapz_pert_set (kmo,plevo)
! interpolate vertically
call mapz_pert_interp ( plevi, plevo, xpi, xpo, rc)
if (rc/=0) then
print *, 'main: Error from mapz_pert_interp(xpo), rc=', rc
call exit(1)
endif
! write out result
call dyn_put ( trim(opfname), nymd, nhms, 0, xpo, rc, freq=freq, vectype=dyntype )
! clean up
deallocate(plevi,plevo)
call dyn_clean(xpi)
call dyn_clean(xpo)
end program ut_dyn_ipert
|
{"hexsha": "c96afa967972268067fe0df0018409678560e783", "size": 1610, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Shared/GMAO_hermes/ut_dyn_ipert.f90", "max_stars_repo_name": "joeylamcy/gchp", "max_stars_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3", "max_stars_repo_licenses": ["NCSA", "Apache-2.0", "MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-02T14:23:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:39:30.000Z", "max_issues_repo_path": "Shared/GMAO_hermes/ut_dyn_ipert.f90", "max_issues_repo_name": "joeylamcy/gchp", "max_issues_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3", "max_issues_repo_licenses": ["NCSA", "Apache-2.0", "MIT"], "max_issues_count": 105, "max_issues_repo_issues_event_min_datetime": "2019-07-08T19:27:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T02:12:16.000Z", "max_forks_repo_path": "Shared/GMAO_hermes/ut_dyn_ipert.f90", "max_forks_repo_name": "joeylamcy/gchp", "max_forks_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3", "max_forks_repo_licenses": ["NCSA", "Apache-2.0", "MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-07-05T18:00:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T16:26:29.000Z", "avg_line_length": 25.15625, "max_line_length": 96, "alphanum_fraction": 0.701242236, "num_tokens": 590}
|
module mod_settings
use iso_fortran_env, only: wp=>real64
implicit none
private
public :: t_settings
type t_settings
integer :: length = 0
integer :: width = 0
end type t_settings
end module mod_settings
|
{"hexsha": "6d8d463d303617074af81e5a0171ba88d19da3da", "size": 275, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/settings/mod_settings.f90", "max_stars_repo_name": "cbcoutinho/learn_dg", "max_stars_repo_head_hexsha": "b22bf91d1a0daedb6b48590c7361c3a9c3c7f371", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-03-08T09:26:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T01:25:12.000Z", "max_issues_repo_path": "src/settings/mod_settings.f90", "max_issues_repo_name": "cbcoutinho/learn_dg", "max_issues_repo_head_hexsha": "b22bf91d1a0daedb6b48590c7361c3a9c3c7f371", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/settings/mod_settings.f90", "max_forks_repo_name": "cbcoutinho/learn_dg", "max_forks_repo_head_hexsha": "b22bf91d1a0daedb6b48590c7361c3a9c3c7f371", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-01-03T05:51:10.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-03T05:51:10.000Z", "avg_line_length": 19.6428571429, "max_line_length": 43, "alphanum_fraction": 0.5890909091, "num_tokens": 66}
|
[STATEMENT]
lemma gen_in_free_hull: "x \<in> G \<Longrightarrow> x \<in> \<langle>\<BB>\<^sub>F G\<rangle>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> G \<Longrightarrow> x \<in> \<langle>\<BB>\<^sub>F G\<rangle>
[PROOF STEP]
using free_hull.free_gen_in[folded basis_gen_hull_free]
[PROOF STATE]
proof (prove)
using this:
?w \<in> ?G \<Longrightarrow> ?w \<in> \<langle>\<BB>\<^sub>F ?G\<rangle>
goal (1 subgoal):
1. x \<in> G \<Longrightarrow> x \<in> \<langle>\<BB>\<^sub>F G\<rangle>
[PROOF STEP]
.
|
{"llama_tokens": 212, "file": "Combinatorics_Words_Submonoids", "length": 2}
|
SUBROUTINE INVP2 (*)
C
C INVP2 INITIALIZES THEN CALLS EITHER SDCOMP OR DECOMP DEPENDING ON
C THE OPTION SELECTED ON THE EIGR CARD
C
INTEGER FILEA ,FILEL ,FILEU ,SCR1 ,
1 SCR2 ,SCR3 ,SCR4 ,SCR5 ,
2 SR1FIL ,SR2FIL ,DUM ,SCR6 ,
3 RDP ,UPRTRI ,
4 SWITCH ,SCR7 ,SCR8 ,OPTION ,
5 OPT2 ,PREC ,Q(1)
DOUBLE PRECISION DET ,DETDET ,DETC ,MINDD
COMMON /SFACT / FILEA(7) ,FILEL(7) ,FILEU(7) ,SR1FIL ,
1 SR2FIL ,NZ ,DET ,DETC ,
2 POWER ,ISR3FL ,MINDD ,ICHL
COMMON /INVPXX/ DUMM(12) ,SWITCH
COMMON /INVPWX/ DUM(14) ,SCR1(7) ,SCR2(7) ,SCRX ,
1 SCRXX ,SCR3 ,SCR4 ,SCR5 ,
2 SCR6 ,SCR7 ,SCR8
COMMON /NAMES / IJ(8) ,RDP ,IK(5) ,LOWTRI ,
1 UPRTRI
COMMON /DCOMPX/ IA(7) ,IL(7) ,IU(7) ,ISCR1 ,
1 ISCR2 ,ISCR3 ,DETDET ,IPOWR ,
2 MZ ,MIND
COMMON /SYSTEM/ KSYSTM(63)
COMMON /REIGKR/ OPTION
COMMON /ZZZZZZ/ Z(1)
EQUIVALENCE (Q(1),Z(1))
EQUIVALENCE (KSYSTM(55),PREC)
DATA OPT2 / 4HUINV/
C
FILEA(1) = SCR1(1)
IF (SWITCH .EQ. 1) GO TO 10
FILEL(1) = SCR2(1)
FILEU(1) = SCR3
GO TO 20
10 FILEL(1) = SCR7
FILEU(1) = SCR8
20 CONTINUE
SR1FIL = SCR4
SR2FIL = SCR5
ISR3FL = SCR6
ICHL = 0
FILEA(2) = DUM(2)
FILEA(3) = DUM(3)
FILEA(4) = DUM(4)
FILEA(5) = PREC
FILEA(6) = 0
FILEA(7) = 0
FILEL(5) = PREC
IF (OPTION .EQ. OPT2) GO TO 40
C
C SYMMETRIC DECOMPOSITION SELECTED.
C
NZ = KORSZ(Z)
CALL SDCOMP (*30,Z,Z,Z)
FILEL(3) = FILEL(2)
FILEL(4) = LOWTRI
CALL WRTTRL (FILEL)
RETURN
30 RETURN 1
C
C UNSYMMETRIC DECOMPOSITION SELECTED.
C
40 DO 50 I = 1,21
IA(I) = FILEA(I)
50 CONTINUE
ISCR1 = SCR4
ISCR2 = SCR5
ISCR3 = SCR6
MZ = KORSZ(Q)
CALL DECOMP (*30,Q,Q,Q)
IL(3) = IL(2)
IL(4) = LOWTRI
CALL WRTTRL (IL)
IU(3) = IU(2)
IU(4) = UPRTRI
IU(5) = IL(5)
CALL WRTTRL (IU)
RETURN
END
|
{"hexsha": "3e042b8778516f409dab2dd4b092fae77d3df84c", "size": 2611, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/invp2.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/invp2.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/invp2.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 31.8414634146, "max_line_length": 72, "alphanum_fraction": 0.418613558, "num_tokens": 979}
|
# Autogenerated wrapper script for Exodus_jll for x86_64-linux-gnu
export libexodus
using Zlib_jll
using NetCDF_jll
using HDF5_jll
JLLWrappers.@generate_wrapper_header("Exodus")
JLLWrappers.@declare_library_product(libexodus, "libexodus.so.2")
function __init__()
JLLWrappers.@generate_init_header(Zlib_jll, NetCDF_jll, HDF5_jll)
JLLWrappers.@init_library_product(
libexodus,
"lib/libexodus.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
{"hexsha": "54e65572a2a12d5bcd438ea1f8958e85a3758115", "size": 521, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/x86_64-linux-gnu.jl", "max_stars_repo_name": "JuliaBinaryWrappers/Exodus_jll.jl", "max_stars_repo_head_hexsha": "738e0ef23ee095ae1d0818453088caf9a2151f3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/x86_64-linux-gnu.jl", "max_issues_repo_name": "JuliaBinaryWrappers/Exodus_jll.jl", "max_issues_repo_head_hexsha": "738e0ef23ee095ae1d0818453088caf9a2151f3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/x86_64-linux-gnu.jl", "max_forks_repo_name": "JuliaBinaryWrappers/Exodus_jll.jl", "max_forks_repo_head_hexsha": "738e0ef23ee095ae1d0818453088caf9a2151f3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4210526316, "max_line_length": 69, "alphanum_fraction": 0.763915547, "num_tokens": 153}
|
import os
import cv2
import time
import numpy as np
import pyautogui
import matplotlib.pyplot as plt
from input_feeder import InputFeeder
from mouse_controller import MouseController
from face_detection import Model_fd
from gaze_estimation import Model_ge
from facial_landmarks_detection import Model_fld
from head_pose_estimation import Model_hpe
from mouse_controller import MouseController
from argparse import ArgumentParser
def build_parser():
parser = ArgumentParser()
required = parser.add_argument_group('required', 'These are must provide arguments for the main.py script')
optional = parser.add_argument_group('optional', 'These are optional arguments as there is default values set in the app itself')
optional.add_argument("-d", "--device", type=str, default="CPU", help="Specify the target device to infer on: CPU, GPU, FPGA or MYRIAD is acceptable. Sample will look for a suitable plugin for device specified (CPU by default)")
optional.add_argument("-c", "--prob_threshold", type=float, default=0.5, help="This specifies the probability threshold value for face detection model")
optional.add_argument("-FDO", type=int, default=0, help="to toggle displaying face detector bounding boxes")
optional.add_argument("-FLD", type=int, default=0, help="to toggle displaying eyes bounding boxes")
required.add_argument("-t", "--input_type", required=True, type=str, help="This specifies the type of input whether it can be an image, or pre-saved videos, or the feed from a webcam")
required.add_argument("-f", "--model_fd", required=True, type=str, help="Path to model's directory with a trained model for face detection.")
required.add_argument("-g", "--model_ge", required=True, type=str, help="Path to to model's directory with a trained model for gaze estimation.")
required.add_argument("-p", "--model_hpe", required=True, type=str, help="Path to to model's directory with a trained model for head pose estimation.")
required.add_argument("-l", "--model_fld", required=True, type=str, help="Path to to model's directory with a trained model for facial landmarks detection.")
required.add_argument("-i", "--input", required=True, type=str, help="Path to image or video file")
return parser
def main(args):
device = args.device
input_type = args.input_type
input_file = args.input
model_fd = args.model_fd
model_ge = args.model_ge
model_hpe = args.model_hpe
model_fld = args.model_fld
conf = args.prob_threshold
flag_fd = args.FDO
flag_fld = args.FLD
'''Initializing all the classes and checking the different model classes for any unsupported layers'''
Face_Det = Model_fd(model_fd)
start_lt_fd = time.time()
Face_Det.load_model(device)
total_lt_fd = round((time.time() - start_lt_fd), 2)
Face_Det.check_model(device)
Head_Pose = Model_hpe(model_hpe)
start_lt_hpe = time.time()
Head_Pose.load_model(device)
total_lt_hpe = round((time.time() - start_lt_hpe), 2)
Head_Pose.check_model(device)
Landmarks_Det = Model_fld(model_fld)
start_lt_fld = time.time()
Landmarks_Det.load_model(device)
total_lt_fld = round((time.time() - start_lt_fld), 2)
Landmarks_Det.check_model(device)
Gaze_Det = Model_ge(model_ge)
start_lt_ge = time.time()
Gaze_Det.load_model(device)
total_lt_ge = round((time.time() - start_lt_ge), 2)
Gaze_Det.check_model(device)
mouse = MouseController('medium', 'medium')
'''Reading the input in a loop and passing it through the pipline of all the model's and then using their output
to move the pointer on screen using the pyautogui python library'''
feed=InputFeeder(input_type=input_type, input_file=input_file)
feed.load_data()
(width, height, fps) = feed.get_dim()
out_video = cv2.VideoWriter(os.path.join('/home/workspace/CPC_project/results/', 'output_video.mp4'), 0x00000021, fps, (width, height))
start_inference_time = time.time()
counter = 0
for batch in feed.next_batch():
if np.shape(batch) != ():
counter+=1
ppi_fd = Face_Det.preprocess_input(batch)
outputs_fd = Face_Det.predict(ppi_fd)
ppo_fd, ymin, ymax, xmin, xmax = Face_Det.preprocess_output(batch, width, height, conf, outputs_fd)
ppi_hpe = Head_Pose.preprocess_input(ppo_fd)
(yaw_a, pitch_a, roll_a) = Head_Pose.predict(ppi_hpe)
(yaw, pitch, roll) = Head_Pose.preprocess_output(yaw_a, pitch_a, roll_a)
(ppi_fld, height_fd, width_fd) = Landmarks_Det.preprocess_input(ppo_fd)
outputs_fld = Landmarks_Det.predict(ppi_fld)
(left_eye, right_eye, batch, ppo_fd_) = Landmarks_Det.preprocess_output(batch, ppo_fd, height_fd, width_fd, outputs_fld, ymin, ymax, xmin, xmax, flag_fld)
if np.shape(left_eye) != () and np.shape(right_eye) != () and np.sum(left_eye) != 0 and np.sum(right_eye) != 0:
(ppi_ge_left, ppi_ge_right) = Gaze_Det.preprocess_input(left_eye, right_eye)
outputs_ge = Gaze_Det.predict(ppi_ge_left, ppi_ge_right, yaw, pitch, roll)
(x,y,z) = Gaze_Det.preprocess_output(outputs_ge)
print(x,y,z)
print(counter)
else:
continue
(screen_width, screen_height) = pyautogui.size()
mouse.move(x, y)
(xx, yy) = pyautogui.position()
xx = int((width/screen_width)*xx)
yy = int((height/screen_height)*yy)
batch[(yy-14):(yy+14),(xx-7):(xx+7)]=[0,0,255]
if flag_fd:
cv2.rectangle(batch, (xmin, ymin), (xmax, ymax), (0,0,255), 3)
out_video.write(batch)
else:
break
feed.close()
total_time=time.time()-start_inference_time
total_inference_time=round(total_time, 1)
fps_avg=counter/total_inference_time
with open(os.path.join('/home/workspace/CPC_project/results/', 'stats.txt'), 'w') as f:
f.write(str(total_lt_fd)+'\n')
f.write(str(total_lt_hpe)+'\n')
f.write(str(total_lt_fld)+'\n')
f.write(str(total_lt_ge)+'\n')
f.write(str(total_inference_time)+'\n')
f.write(str(fps_avg)+'\n')
print(f"Load_Time-Face-Detection-Model:{total_lt_fd}")
print(f"Load_Time-Head-Pose-Estimation-Model:{total_lt_hpe}")
print(f"Load_Time-Facial-Landmarks-Detection-Model:{total_lt_fld}")
print(f"Load_Time-Gaze-Estimation-Model:{total_lt_ge}")
print(f"Total_Inference_Time:{total_inference_time}")
print(f"FPS average:{fps_avg}")
print(f"Total no. of frames:{counter}")
cv2.destroyAllWindows()
if __name__ == '__main__':
args = build_parser().parse_args()
main(args)
|
{"hexsha": "df67b484bc799e1c36c9aac365dd46cca21d5596", "size": 7200, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "ojsindher/Computer-Pointer-Controller-OPENVINO", "max_stars_repo_head_hexsha": "7e6c286d0eb90005cc5fc881439f09776c31755e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "ojsindher/Computer-Pointer-Controller-OPENVINO", "max_issues_repo_head_hexsha": "7e6c286d0eb90005cc5fc881439f09776c31755e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "ojsindher/Computer-Pointer-Controller-OPENVINO", "max_forks_repo_head_hexsha": "7e6c286d0eb90005cc5fc881439f09776c31755e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.3684210526, "max_line_length": 279, "alphanum_fraction": 0.6477777778, "include": true, "reason": "import numpy", "num_tokens": 1728}
|
// Copyright (C) 2012-2016 Internet Systems Consortium, Inc. ("ISC")
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include <config.h>
#include <cstddef>
#include <fstream>
#include <gtest/gtest.h>
#include <stdint.h>
#include <string>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <dhcp/iface_mgr.h>
#include <exceptions/exceptions.h>
#include "command_options_helper.h"
using namespace std;
using namespace isc;
using namespace isc::perfdhcp;
using namespace boost::posix_time;
// Verify that default constructor sets lease type to the expected value.
TEST(LeaseTypeTest, defaultConstructor) {
CommandOptions::LeaseType lease_type;
EXPECT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS));
}
// Verify that the constructor sets the lease type to the specified value.
TEST(LeaseTypeTest, constructor) {
CommandOptions::LeaseType
lease_type1(CommandOptions::LeaseType::ADDRESS);
EXPECT_TRUE(lease_type1.is(CommandOptions::LeaseType::ADDRESS));
CommandOptions::LeaseType
lease_type2(CommandOptions::LeaseType::PREFIX);
EXPECT_TRUE(lease_type2.is(CommandOptions::LeaseType::PREFIX));
}
// Verify that the lease type can be modified using set() function.
TEST(LeaseTypeTest, set) {
CommandOptions::LeaseType
lease_type(CommandOptions::LeaseType::ADDRESS);
EXPECT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS));
lease_type.set(CommandOptions::LeaseType::PREFIX);
EXPECT_TRUE(lease_type.is(CommandOptions::LeaseType::PREFIX));
}
// Verify that the includes() function returns true when the lease type
// specified with the function argument is the same as the lease type
// encapsulated by the LeaseType object on which include function is called
// or when the lease type value encapsulated by this object is
// ADDRESS_AND_PREFIX.
TEST(LeaseTypeTest, includes) {
// Lease type: ADDRESS
CommandOptions::LeaseType lease_type(CommandOptions::LeaseType::ADDRESS);
// Lease type IS ADDRESS.
ASSERT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS));
// Lease type includes the ADDRESS.
EXPECT_TRUE(lease_type.includes(CommandOptions::LeaseType::ADDRESS));
// Lease type does not include PREFIX.
EXPECT_FALSE(lease_type.includes(CommandOptions::LeaseType::PREFIX));
// Lease type does not include ADDRESS_AND_PREFIX.
EXPECT_FALSE(
lease_type.includes(CommandOptions::LeaseType::ADDRESS_AND_PREFIX)
);
// Do the same check for PREFIX.
lease_type.set(CommandOptions::LeaseType::PREFIX);
EXPECT_FALSE(lease_type.includes(CommandOptions::LeaseType::ADDRESS));
EXPECT_TRUE(lease_type.includes(CommandOptions::LeaseType::PREFIX));
EXPECT_FALSE(
lease_type.includes(CommandOptions::LeaseType::ADDRESS_AND_PREFIX)
);
// When lease type is set to 'address-and-prefix' it means that client
// requests both address and prefix (IA_NA and IA_PD). Therefore, the
// LeaseType::includes() function should return true for both ADDRESS
// and PREFIX.
lease_type.set(CommandOptions::LeaseType::ADDRESS_AND_PREFIX);
EXPECT_TRUE(lease_type.includes(CommandOptions::LeaseType::ADDRESS));
EXPECT_TRUE(lease_type.includes(CommandOptions::LeaseType::PREFIX));
EXPECT_TRUE(
lease_type.includes(CommandOptions::LeaseType::ADDRESS_AND_PREFIX)
);
}
// Verify that the LeaseType::fromCommandLine() function parses the lease-type
// argument specified as -e<lease-type>.
TEST(LeaseTypeTest, fromCommandLine) {
CommandOptions::LeaseType
lease_type(CommandOptions::LeaseType::ADDRESS);
ASSERT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS));
lease_type.fromCommandLine("prefix-only");
ASSERT_TRUE(lease_type.is(CommandOptions::LeaseType::PREFIX));
lease_type.fromCommandLine("address-only");
EXPECT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS));
lease_type.fromCommandLine("address-and-prefix");
EXPECT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS_AND_PREFIX));
EXPECT_THROW(lease_type.fromCommandLine("bogus-parameter"),
isc::InvalidParameter);
}
// Verify that the LeaseType::toText() function returns the textual
// representation of the lease type specified.
TEST(LeaseTypeTest, toText) {
CommandOptions::LeaseType lease_type;
ASSERT_TRUE(lease_type.is(CommandOptions::LeaseType::ADDRESS));
EXPECT_EQ("address-only (IA_NA option added to the client's request)",
lease_type.toText());
lease_type.set(CommandOptions::LeaseType::PREFIX);
EXPECT_EQ("prefix-only (IA_PD option added to the client's request)",
lease_type.toText());
lease_type.set(CommandOptions::LeaseType::ADDRESS_AND_PREFIX);
EXPECT_EQ("address-and-prefix (Both IA_NA and IA_PD options added to the"
" client's request)", lease_type.toText());
}
/// \brief Test Fixture Class
///
/// This test fixture class is used to perform
/// unit tests on perfdhcp CommandOptions class.
class CommandOptionsTest : public virtual ::testing::Test
{
public:
/// \brief Default Constructor
CommandOptionsTest() { }
protected:
/// \brief Parse command line and cleanup
///
/// The method tokenizes command line to array of C-strings,
/// parses arguments using CommandOptions class to set
/// its data members and de-allocates array of C-strings.
///
/// \param cmdline Command line to parse.
/// \throws std::bad allocation if tokenization failed.
/// \return true if program has been run in help or version mode ('h' or 'v' flag).
bool process(const std::string& cmdline) {
return (CommandOptionsHelper::process(cmdline));
}
/// \brief Get full path to a file in testdata directory.
///
/// \param filename filename being appended to absolute
/// path to testdata directory
///
/// \return full path to a file in testdata directory.
std::string getFullPath(const std::string& filename) const {
std::ostringstream stream;
stream << TEST_DATA_DIR << "/" << filename;
return (stream.str());
}
/// \brief Check default initialized values
///
/// Check if initialized values are correct
void checkDefaults() {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp 192.168.0.1"));
EXPECT_EQ(4, opt.getIpVersion());
EXPECT_EQ(CommandOptions::DORA_SARR, opt.getExchangeMode());
EXPECT_TRUE(opt.getLeaseType().is(CommandOptions::LeaseType::ADDRESS));
EXPECT_EQ(0, opt.getRate());
EXPECT_EQ(0, opt.getRenewRate());
EXPECT_EQ(0, opt.getReleaseRate());
EXPECT_EQ(0, opt.getReportDelay());
EXPECT_EQ(0, opt.getClientsNum());
// default mac
const uint8_t mac[6] = { 0x00, 0x0C, 0x01, 0x02, 0x03, 0x04 };
std::vector<uint8_t> v1 = opt.getMacTemplate();
ASSERT_EQ(6, v1.size());
EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac));
// Check if DUID is initialized. The DUID-LLT is expected
// to start with DUID_LLT value of 1 and hardware ethernet
// type equal to 1 (HWETHER_TYPE).
const uint8_t duid_llt_and_hw[4] = { 0x0, 0x1, 0x0, 0x1 };
// We assume DUID-LLT length 14. This includes 4 octets of
// DUID_LLT value, two octets of hardware type, 4 octets
// of time value and 6 octets of variable link layer (MAC)
// address.
const int duid_llt_size = 14;
// DUID is not given from the command line but it is supposed
// to be initialized by the CommandOptions private method
// generateDuidTemplate().
std::vector<uint8_t> v2 = opt.getDuidTemplate();
ASSERT_EQ(duid_llt_size, opt.getDuidTemplate().size());
EXPECT_TRUE(std::equal(v2.begin(), v2.begin() + 4,
duid_llt_and_hw));
// Check time field contents.
ptime now = microsec_clock::universal_time();
ptime duid_epoch(from_iso_string("20000101T000000"));
time_period period(duid_epoch, now);
uint32_t duration_sec = period.length().total_seconds();
// Read time from the template generated.
uint32_t duration_from_template = 0;
memcpy(&duration_from_template, &v2[4], 4);
duration_from_template = htonl(duration_from_template);
// In special cases, we may have overflow in time field
// so we give ourselves the margin of 10 seconds here.
// If time value has been set more then 10 seconds back
// it is safe to compare it with the time value generated
// from now.
if (duration_from_template > 10) {
EXPECT_GE(duration_sec, duration_from_template);
}
EXPECT_EQ(0, opt.getBase().size());
EXPECT_EQ(0, opt.getNumRequests().size());
EXPECT_EQ(0, opt.getPeriod());
for (size_t i = 0; i < opt.getDropTime().size(); ++i) {
EXPECT_DOUBLE_EQ(1, opt.getDropTime()[i]);
}
ASSERT_EQ(opt.getMaxDrop().size(), opt.getMaxDropPercentage().size());
for (size_t i = 0; i < opt.getMaxDrop().size(); ++i) {
EXPECT_EQ(0, opt.getMaxDrop()[i]);
EXPECT_EQ(0, opt.getMaxDropPercentage()[i]);
}
EXPECT_EQ("", opt.getLocalName());
EXPECT_FALSE(opt.isInterface());
EXPECT_EQ(0, opt.getPreload());
EXPECT_EQ(1, opt.getAggressivity());
EXPECT_EQ(0, opt.getLocalPort());
EXPECT_FALSE(opt.isSeeded());
EXPECT_EQ(0, opt.getSeed());
EXPECT_FALSE(opt.isBroadcast());
EXPECT_FALSE(opt.isRapidCommit());
EXPECT_FALSE(opt.isUseFirst());
EXPECT_EQ(0, opt.getTemplateFiles().size());
EXPECT_EQ(0, opt.getTransactionIdOffset().size());
EXPECT_EQ(0, opt.getRandomOffset().size());
EXPECT_GT(0, opt.getElapsedTimeOffset());
EXPECT_GT(0, opt.getServerIdOffset());
EXPECT_GT(0, opt.getRequestedIpOffset());
EXPECT_EQ("", opt.getDiags());
EXPECT_EQ("", opt.getWrapped());
EXPECT_EQ("192.168.0.1", opt.getServerName());
}
};
TEST_F(CommandOptionsTest, Defaults) {
EXPECT_NO_THROW(process("perfdhcp all"));
checkDefaults();
}
TEST_F(CommandOptionsTest, HelpVersion) {
// The parser is supposed to return true if 'h' or 'v' options
// are specified.
EXPECT_TRUE(process("perfdhcp -h"));
EXPECT_TRUE(process("perfdhcp -v"));
EXPECT_TRUE(process("perfdhcp -h -v"));
EXPECT_TRUE(process("perfdhcp -6 -l ethx -h all"));
EXPECT_TRUE(process("perfdhcp -l ethx -v all"));
// No 'h' or 'v' option specified. The false value
// should be returned.
EXPECT_FALSE(process("perfdhcp -l ethx all"));
}
TEST_F(CommandOptionsTest, UseFirst) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -1 -B -l ethx all"));
EXPECT_TRUE(opt.isUseFirst());
}
TEST_F(CommandOptionsTest, UseRelayV6) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -6 -A1 -l ethx all"));
EXPECT_TRUE(opt.isUseRelayedV6());
// -4 and -A must not coexist
EXPECT_THROW(process("perfdhcp -4 -A1 -l ethx all"), isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, IpVersion) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -6 -l ethx -c -i all"));
EXPECT_EQ(6, opt.getIpVersion());
EXPECT_EQ("ethx", opt.getLocalName());
EXPECT_TRUE(opt.isRapidCommit());
EXPECT_FALSE(opt.isBroadcast());
process("perfdhcp -4 -B -l ethx all");
EXPECT_EQ(4, opt.getIpVersion());
EXPECT_TRUE(opt.isBroadcast());
EXPECT_FALSE(opt.isRapidCommit());
// Negative test cases
// -4 and -6 must not coexist
EXPECT_THROW(process("perfdhcp -4 -6 -l ethx all"), isc::InvalidParameter);
// -6 and -B must not coexist
EXPECT_THROW(process("perfdhcp -6 -B -l ethx all"), isc::InvalidParameter);
// -c and -4 (default) must not coexist
EXPECT_THROW(process("perfdhcp -c -l ethx all"), isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, LeaseType) {
CommandOptions& opt = CommandOptions::instance();
// Check that the -e address-only works for IPv6.
ASSERT_NO_THROW(process("perfdhcp -6 -l etx -e address-only all"));
EXPECT_EQ(6, opt.getIpVersion());
EXPECT_EQ("etx", opt.getLocalName());
EXPECT_TRUE(opt.getLeaseType().is(CommandOptions::LeaseType::ADDRESS));
// Check that the -e address-only works for IPv4.
ASSERT_NO_THROW(process("perfdhcp -4 -l etx -e address-only all"));
EXPECT_EQ(4, opt.getIpVersion());
EXPECT_EQ("etx", opt.getLocalName());
EXPECT_TRUE(opt.getLeaseType().is(CommandOptions::LeaseType::ADDRESS));
// Check that the -e prefix-only works.
ASSERT_NO_THROW(process("perfdhcp -6 -l etx -e prefix-only all"));
EXPECT_EQ(6, opt.getIpVersion());
EXPECT_EQ("etx", opt.getLocalName());
EXPECT_TRUE(opt.getLeaseType().is(CommandOptions::LeaseType::PREFIX));
// Check that -e prefix-only must not coexist with -4 option.
EXPECT_THROW(process("perfdhcp -4 -l ethx -e prefix-only all"),
InvalidParameter);
// Check that -e prefix-only must not coexist with -T options.
EXPECT_THROW(process("perfdhcp -6 -l ethx -e prefix-only -T file1.hex"
" -T file2.hex -E 4 all"), InvalidParameter);
}
TEST_F(CommandOptionsTest, Rate) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -4 -r 10 -l ethx all"));
EXPECT_EQ(10, opt.getRate());
// Negative test cases
// Rate must not be 0
EXPECT_THROW(process("perfdhcp -4 -r 0 -l ethx all"),
isc::InvalidParameter);
// -r must be specified to use -n, -p and -D
EXPECT_THROW(process("perfdhcp -6 -t 5 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -4 -n 150 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -6 -p 120 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -4 -D 1400 -l ethx all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, RenewRate) {
CommandOptions& opt = CommandOptions::instance();
// If -f is specified together with -r the command line should
// be accepted and the renew rate should be set.
EXPECT_NO_THROW(process("perfdhcp -6 -r 10 -f 10 -l ethx all"));
EXPECT_EQ(10, opt.getRenewRate());
// Check that the release rate can be set to different value than
// rate specified as -r<rate>. Also, swap -f and -r to make sure
// that order doesn't matter.
EXPECT_NO_THROW(process("perfdhcp -6 -f 5 -r 10 -l ethx all"));
EXPECT_EQ(5, opt.getRenewRate());
// Renew rate should also be accepted for DHCPv4 case.
EXPECT_NO_THROW(process("perfdhcp -4 -f 5 -r 10 -l ethx all"));
EXPECT_EQ(5, opt.getRenewRate());
// The renew rate should not be greater than the rate.
EXPECT_THROW(process("perfdhcp -6 -r 10 -f 11 -l ethx all"),
isc::InvalidParameter);
// The renew-rate of 0 is invalid.
EXPECT_THROW(process("perfdhcp -6 -r 10 -f 0 -l ethx all"),
isc::InvalidParameter);
// The negative renew-rate is invalid.
EXPECT_THROW(process("perfdhcp -6 -r 10 -f -5 -l ethx all"),
isc::InvalidParameter);
// If -r<rate> is not specified the -f<renew-rate> should not
// be accepted.
EXPECT_THROW(process("perfdhcp -6 -f 10 -l ethx all"),
isc::InvalidParameter);
// Renew rate should be specified.
EXPECT_THROW(process("perfdhcp -6 -r 10 -f -l ethx all"),
isc::InvalidParameter);
// -f and -i are mutually exclusive
EXPECT_THROW(process("perfdhcp -6 -r 10 -f 10 -l ethx -i all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, ReleaseRate) {
CommandOptions& opt = CommandOptions::instance();
// If -F is specified together with -r the command line should
// be accepted and the release rate should be set.
EXPECT_NO_THROW(process("perfdhcp -6 -r 10 -F 10 -l ethx all"));
EXPECT_EQ(10, opt.getReleaseRate());
// Check that the release rate can be set to different value than
// rate specified as -r<rate>. Also, swap -F and -r to make sure
// that order doesn't matter.
EXPECT_NO_THROW(process("perfdhcp -6 -F 5 -r 10 -l ethx all"));
EXPECT_EQ(5, opt.getReleaseRate());
// The release rate should not be greater than the rate.
EXPECT_THROW(process("perfdhcp -6 -r 10 -F 11 -l ethx all"),
isc::InvalidParameter);
// The release-rate of 0 is invalid.
EXPECT_THROW(process("perfdhcp -6 -r 10 -F 0 -l ethx all"),
isc::InvalidParameter);
// The negative release-rate is invalid.
EXPECT_THROW(process("perfdhcp -6 -r 10 -F -5 -l ethx all"),
isc::InvalidParameter);
// If -r<rate> is not specified the -F<release-rate> should not
// be accepted.
EXPECT_THROW(process("perfdhcp -6 -F 10 -l ethx all"),
isc::InvalidParameter);
// Currently the -F<release-rate> can be specified for IPv6 mode
// only.
EXPECT_THROW(process("perfdhcp -4 -r 10 -F 10 -l ethx all"),
isc::InvalidParameter);
// Release rate should be specified.
EXPECT_THROW(process("perfdhcp -6 -r 10 -F -l ethx all"),
isc::InvalidParameter);
// -F and -i are mutually exclusive
EXPECT_THROW(process("perfdhcp -6 -r 10 -F 10 -l ethx -i all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, ReleaseRenew) {
CommandOptions& opt = CommandOptions::instance();
// It should be possible to specify the -F, -f and -r options.
EXPECT_NO_THROW(process("perfdhcp -6 -r 10 -F 3 -f 5 -l ethx all"));
EXPECT_EQ(10, opt.getRate());
EXPECT_EQ(3, opt.getReleaseRate());
EXPECT_EQ(5, opt.getRenewRate());
// It should be possible to specify the -F and -f with the values which
// sum is equal to the rate specified as -r<rate>.
EXPECT_NO_THROW(process("perfdhcp -6 -r 8 -F 3 -f 5 -l ethx all"));
EXPECT_EQ(8, opt.getRate());
EXPECT_EQ(3, opt.getReleaseRate());
EXPECT_EQ(5, opt.getRenewRate());
// Check that the sum of the release and renew rate is not greater
// than the rate specified as -r<rate>.
EXPECT_THROW(process("perfdhcp -6 -F 6 -f 5 -r 10 -l ethx all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, ReportDelay) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -r 100 -t 17 -l ethx all"));
EXPECT_EQ(17, opt.getReportDelay());
// Negative test cases
// -t must be positive integer
EXPECT_THROW(process("perfdhcp -r 10 -t -8 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -r 10 -t 0 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -r 10 -t s -l ethx all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, ClientsNum) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -R 200 -l ethx all"));
EXPECT_EQ(200, opt.getClientsNum());
process("perfdhcp -R 0 -l ethx all");
EXPECT_EQ(0, opt.getClientsNum());
// Negative test cases
// Number of clients must be non-negative integer
EXPECT_THROW(process("perfdhcp -R -5 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -R gs -l ethx all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Base) {
CommandOptions& opt = CommandOptions::instance();
uint8_t mac[6] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60 };
uint8_t duid[14] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x10, 0x11, 0x1F, 0x14 };
// Test DUID and MAC together.
EXPECT_NO_THROW(process("perfdhcp -b DUID=0101010101010101010110111F14"
" -b MAC=10::20::30::40::50::60"
" -l 127.0.0.1 all"));
std::vector<uint8_t> v1 = opt.getMacTemplate();
std::vector<uint8_t> v2 = opt.getDuidTemplate();
EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac));
EXPECT_TRUE(std::equal(v2.begin(), v2.end(), duid));
// Test valid DUID.
EXPECT_NO_THROW(
process("perfdhcp -b duid=0101010101010101010110111F14 -l 127.0.0.1 all")
);
ASSERT_EQ(sizeof(duid) / sizeof(uint8_t), v2.size());
EXPECT_TRUE(std::equal(v2.begin(), v2.end(), duid));
// Test mix of upper/lower case letters.
EXPECT_NO_THROW(process("perfdhcp -b DuiD=0101010101010101010110111F14"
" -b Mac=10::20::30::40::50::60"
" -l 127.0.0.1 all"));
v1 = opt.getMacTemplate();
v2 = opt.getDuidTemplate();
EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac));
EXPECT_TRUE(std::equal(v2.begin(), v2.end(), duid));
// Use "ether" instead of "mac".
EXPECT_NO_THROW(process("perfdhcp -b ether=10::20::30::40::50::60"
" -l 127.0.0.1 all"));
v1 = opt.getMacTemplate();
EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac));
// Use "ETHER" in upper case.
EXPECT_NO_THROW(process("perfdhcp -b ETHER=10::20::30::40::50::60"
" -l 127.0.0.1 all"));
v1 = opt.getMacTemplate();
EXPECT_TRUE(std::equal(v1.begin(), v1.end(), mac));
// "t" is invalid character in DUID
EXPECT_THROW(process("perfdhcp -6 -l ethx -b "
"duid=010101010101010101t110111F14 all"),
isc::InvalidParameter);
// "3x" is invalid value in MAC address
EXPECT_THROW(process("perfdhcp -b mac=10::2::3x::4::5::6 -l ethx all"),
isc::InvalidParameter);
// Base is not specified
EXPECT_THROW(process("perfdhcp -b -l ethx all"),
isc::InvalidParameter);
// Typo: should be mac= instead of mc=
EXPECT_THROW(process("perfdhcp -l ethx -b mc=00:01:02:03::04:05 all"),
isc::InvalidParameter);
// Too short DUID (< 6).
EXPECT_THROW(process("perfdhcp -l ethx -b duid=00010203 all"),
isc::InvalidParameter);
// Odd number of digits.
EXPECT_THROW(process("perfdhcp -l ethx -b duid=000102030405060 all"),
isc::InvalidParameter);
// Too short MAC (!= 6).
EXPECT_THROW(process("perfdhcp -l ethx -b mac=00:01:02:04 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, DropTime) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -l ethx -d 12 all"));
ASSERT_EQ(2, opt.getDropTime().size());
EXPECT_DOUBLE_EQ(12, opt.getDropTime()[0]);
EXPECT_DOUBLE_EQ(1, opt.getDropTime()[1]);
EXPECT_NO_THROW(process("perfdhcp -l ethx -d 2 -d 4.7 all"));
ASSERT_EQ(2, opt.getDropTime().size());
EXPECT_DOUBLE_EQ(2, opt.getDropTime()[0]);
EXPECT_DOUBLE_EQ(4.7, opt.getDropTime()[1]);
// Negative test cases
// Drop time must not be negative
EXPECT_THROW(process("perfdhcp -l ethx -d -2 -d 4.7 all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -l ethx -d -9.1 -d 0 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, TimeOffset) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -l ethx -T file1.x -T file2.x -E 4 all"));
EXPECT_EQ(4, opt.getElapsedTimeOffset());
// Negative test cases
// Argument -E must be used with -T
EXPECT_THROW(process("perfdhcp -l ethx -E 3 -i all"),
isc::InvalidParameter);
// Value in -E not specified
EXPECT_THROW(process("perfdhcp -l ethx -T file.x -E -i all"),
isc::InvalidParameter);
// Value for -E must not be negative
EXPECT_THROW(process("perfdhcp -l ethx -E -3 -T file.x all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, ExchangeMode) {
CommandOptions& opt = CommandOptions::instance();
process("perfdhcp -l ethx -i all");
EXPECT_EQ(CommandOptions::DO_SA, opt.getExchangeMode());
// Negative test cases
// No template file specified
EXPECT_THROW(process("perfdhcp -i -l ethx -X 3 all"),
isc::InvalidParameter);
// Offsets can't be used in simple exchanges (-i)
EXPECT_THROW(process("perfdhcp -i -l ethx -O 2 -T file.x all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -i -l ethx -E 3 -T file.x all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -i -l ethx -S 1 -T file.x all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -i -l ethx -I 2 -T file.x all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Offsets) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -E5 -4 -I 2 -S3 -O 30 -X7 -l ethx "
"-X3 -T file1.x -T file2.x all"));
EXPECT_EQ(2, opt.getRequestedIpOffset());
EXPECT_EQ(5, opt.getElapsedTimeOffset());
EXPECT_EQ(3, opt.getServerIdOffset());
ASSERT_EQ(2, opt.getRandomOffset().size());
EXPECT_EQ(30, opt.getRandomOffset()[0]);
EXPECT_EQ(30, opt.getRandomOffset()[1]);
ASSERT_EQ(2, opt.getTransactionIdOffset().size());
EXPECT_EQ(7, opt.getTransactionIdOffset()[0]);
EXPECT_EQ(3, opt.getTransactionIdOffset()[1]);
// Negative test cases
// IP offset/IA_NA offset must be positive
EXPECT_THROW(process("perfdhcp -6 -I 0 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -6 -I -4 -l ethx all"),
isc::InvalidParameter);
// TODO - other negative cases
}
TEST_F(CommandOptionsTest, LocalPort) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -l ethx -L 2000 all"));
EXPECT_EQ(2000, opt.getLocalPort());
// Negative test cases
// Local port must be between 0..65535
EXPECT_THROW(process("perfdhcp -l ethx -L -2 all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -l ethx -L all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -l ethx -L 65540 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Preload) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -1 -P 3 -l ethx all"));
EXPECT_EQ(3, opt.getPreload());
// Negative test cases
// Number of preload packages must not be negative integer
EXPECT_THROW(process("perfdhcp -P -1 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -P -3 -l ethx all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Seed) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -6 -P 2 -s 23 -l ethx all"));
EXPECT_EQ(23, opt.getSeed());
EXPECT_TRUE(opt.isSeeded());
EXPECT_NO_THROW(process("perfdhcp -6 -P 2 -s 0 -l ethx all"));
EXPECT_EQ(0, opt.getSeed());
EXPECT_FALSE(opt.isSeeded());
// Negative test cases
// Seed must be non-negative integer
EXPECT_THROW(process("perfdhcp -6 -P 2 -s -5 -l ethx all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -6 -P 2 -s -l ethx all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, TemplateFiles) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -T file1.x -l ethx all"));
ASSERT_EQ(1, opt.getTemplateFiles().size());
EXPECT_EQ("file1.x", opt.getTemplateFiles()[0]);
EXPECT_NO_THROW(process("perfdhcp -T file1.x -s 12 -w start -T file2.x -4 -l ethx all"));
ASSERT_EQ(2, opt.getTemplateFiles().size());
EXPECT_EQ("file1.x", opt.getTemplateFiles()[0]);
EXPECT_EQ("file2.x", opt.getTemplateFiles()[1]);
// Negative test cases
// No template file specified
EXPECT_THROW(process("perfdhcp -s 12 -T -l ethx all"),
isc::InvalidParameter);
// Too many template files specified
EXPECT_THROW(process("perfdhcp -s 12 -l ethx -T file.x "
"-T file.x -T file.x all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Wrapped) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -B -w start -i -l ethx all"));
EXPECT_EQ("start", opt.getWrapped());
// Negative test cases
// Missing command after -w, expected start/stop
EXPECT_THROW(process("perfdhcp -B -i -l ethx -w all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Diagnostics) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -l ethx -i -x asTe all"));
EXPECT_EQ("asTe", opt.getDiags());
// Negative test cases
// No diagnostics string specified
EXPECT_THROW(process("perfdhcp -l ethx -i -x all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Aggressivity) {
CommandOptions& opt = CommandOptions::instance();
process("perfdhcp -a 10 -l 192.168.0.1 all");
EXPECT_EQ(10, opt.getAggressivity());
// Negative test cases
// Aggressivity must be non negative integer
EXPECT_THROW(process("perfdhcp -l ethx -a 0 all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -l ethx -a all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -a -2 -l ethx -a 3 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, MaxDrop) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -D 25 -l ethx -r 10 all"));
EXPECT_EQ(25, opt.getMaxDrop()[0]);
EXPECT_NO_THROW(process("perfdhcp -D 25 -l ethx -D 15 -r 10 all"));
EXPECT_EQ(25, opt.getMaxDrop()[0]);
EXPECT_EQ(15, opt.getMaxDrop()[1]);
EXPECT_NO_THROW(process("perfdhcp -D 15% -l ethx -r 10 all"));
EXPECT_EQ(15, opt.getMaxDropPercentage()[0]);
EXPECT_NO_THROW(process("perfdhcp -D 15% -D25% -l ethx -r 10 all"));
EXPECT_EQ(15, opt.getMaxDropPercentage()[0]);
EXPECT_EQ(25, opt.getMaxDropPercentage()[1]);
EXPECT_NO_THROW(process("perfdhcp -D 1% -D 99% -l ethx -r 10 all"));
EXPECT_EQ(1, opt.getMaxDropPercentage()[0]);
EXPECT_EQ(99, opt.getMaxDropPercentage()[1]);
// Negative test cases
// Too many -D<value> options
EXPECT_THROW(process("perfdhcp -D 0% -D 1 -l ethx -r20 -D 3 all"),
isc::InvalidParameter);
// Too many -D<value%> options
EXPECT_THROW(process("perfdhcp -D 99% -D 13% -l ethx -r20 -D 10% all"),
isc::InvalidParameter);
// Percentage is out of bounds
EXPECT_THROW(process("perfdhcp -D101% -D 13% -l ethx -r20 all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -D0% -D 13% -l ethx -r20 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, NumRequest) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -n 1000 -r 10 -l ethx all"));
EXPECT_EQ(1000, opt.getNumRequests()[0]);
EXPECT_NO_THROW(process("perfdhcp -n 5 -r 10 -n 500 -l ethx all"));
EXPECT_EQ(5, opt.getNumRequests()[0]);
EXPECT_EQ(500, opt.getNumRequests()[1]);
// Negative test cases
// Too many -n<value> parameters, expected maximum 2
EXPECT_THROW(process("perfdhcp -n 1 -n 2 -l ethx -n3 -r 20 all"),
isc::InvalidParameter);
// Num request must be positive integer
EXPECT_THROW(process("perfdhcp -n 1 -n -22 -l ethx -r 10 all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -n 0 -l ethx -r 10 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Period) {
CommandOptions& opt = CommandOptions::instance();
EXPECT_NO_THROW(process("perfdhcp -p 120 -l ethx -r 100 all"));
EXPECT_EQ(120, opt.getPeriod());
// Negative test cases
// Test period must be positive integer
EXPECT_THROW(process("perfdhcp -p 0 -l ethx -r 50 all"),
isc::InvalidParameter);
EXPECT_THROW(process("perfdhcp -p -3 -l ethx -r 50 all"),
isc::InvalidParameter);
}
TEST_F(CommandOptionsTest, Interface) {
// In order to make this test portable we need to know
// at least one interface name on OS where test is run.
// Interface Manager has ability to detect interfaces.
// Although we don't call initIsInterface explicitly
// here it is called by CommandOptions object internally
// so this function is covered by the test.
dhcp::IfaceMgr& iface_mgr = dhcp::IfaceMgr::instance();
const dhcp::IfaceMgr::IfaceCollection& ifaces = iface_mgr.getIfaces();
std::string iface_name;
CommandOptions& opt = CommandOptions::instance();
// The local loopback interface should be available.
// If no interface have been found for any reason we should
// not fail this test.
if (!ifaces.empty()) {
// Get the name of the interface we detected.
iface_name = (*ifaces.begin())->getName();
// Use the name in the command parser.
ASSERT_NO_THROW(process("perfdhcp -4 -l " + iface_name + " abc"));
// We expect that command parser will detect that argument
// specified along with '-l' is the interface name.
EXPECT_TRUE(opt.isInterface());
// If neither interface nor server is specified then
// exception is expected to be thrown.
EXPECT_THROW(process("perfdhcp -4"), isc::InvalidParameter);
}
}
TEST_F(CommandOptionsTest, Server) {
CommandOptions& opt = CommandOptions::instance();
// There is at least server parameter needed. If server is not
// specified the local interface must be specified.
// The server value equal to 'all' means use broadcast.
ASSERT_NO_THROW(process("perfdhcp all"));
// Once command line is parsed we expect that server name is
// set to broadcast address because 'all' was specified.
EXPECT_TRUE(opt.isBroadcast());
// The broadcast address is 255.255.255.255.
EXPECT_EQ(DHCP_IPV4_BROADCAST_ADDRESS, opt.getServerName());
// When all is specified for DHCPv6 mode we expect
// FF02::1:2 as a server name which means All DHCP
// servers and relay agents in local network segment
ASSERT_NO_THROW(process("perfdhcp -6 all"));
EXPECT_EQ(ALL_DHCP_RELAY_AGENTS_AND_SERVERS, opt.getServerName());
// When server='servers' in DHCPv6 mode we expect
// FF05::1:3 as server name which means All DHCP
// servers in local network.
ASSERT_NO_THROW(process("perfdhcp -6 servers"));
EXPECT_EQ(ALL_DHCP_SERVERS, opt.getServerName());
// If server name is neither 'all' nor 'servers'
// the given argument value is expected to be
// returned.
ASSERT_NO_THROW(process("perfdhcp -6 abc"));
EXPECT_EQ("abc", opt.getServerName());
}
TEST_F(CommandOptionsTest, LoadMacsFromFile) {
CommandOptions &opt = CommandOptions::instance();
std::string mac_list_full_path = getFullPath("mac-list.txt");
std::ostringstream cmd;
cmd << "perfdhcp -M " << mac_list_full_path << " abc";
EXPECT_NO_THROW(process(cmd.str()));
EXPECT_EQ(mac_list_full_path, opt.getMacListFile());
const CommandOptions::MacAddrsVector& m = opt.getMacsFromFile();
EXPECT_EQ(4, m.size());
}
TEST_F(CommandOptionsTest, LoadMacsFromFileNegativeCases) {
// Negative test cases
// Too many -M parameters, expected only 1
EXPECT_THROW(process("perfdhcp -M foo -M foo1 all"), isc::InvalidParameter);
// -M option can't use with -b option
EXPECT_THROW(process("perfdhcp -M foo -b mac=1234 all"),
isc::InvalidParameter);
}
|
{"hexsha": "607a395712cd205ed69d8b133100b3c8a6a71468", "size": 36166, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/bin/perfdhcp/tests/command_options_unittest.cc", "max_stars_repo_name": "nchaigne/kea", "max_stars_repo_head_hexsha": "2badfd4d9b4f2420b0e9683db5da16a3ab90dd81", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-08-24T19:55:21.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-24T19:55:21.000Z", "max_issues_repo_path": "src/bin/perfdhcp/tests/command_options_unittest.cc", "max_issues_repo_name": "nchaigne/kea", "max_issues_repo_head_hexsha": "2badfd4d9b4f2420b0e9683db5da16a3ab90dd81", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/bin/perfdhcp/tests/command_options_unittest.cc", "max_forks_repo_name": "nchaigne/kea", "max_forks_repo_head_hexsha": "2badfd4d9b4f2420b0e9683db5da16a3ab90dd81", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6179516686, "max_line_length": 93, "alphanum_fraction": 0.6535696511, "num_tokens": 9513}
|
from keras.layers import Dense, Dropout, Conv2D, Flatten
from keras.models import Sequential
from snake import NUM_CHANNELS, NUM_ACTIONS
from collections import deque
import random
import numpy as np
import keras
class DQNAgent:
def __init__(self, field_size, gamma, batch_size, min_replay_memory_size, replay_memory_size, target_update_freq):
self.gamma = gamma
self.field_height, self.field_width = field_size
self.batch_size = batch_size
self.min_replay_memory_size = min_replay_memory_size
self.target_update_freq = target_update_freq
self.model = self._create_model()
self.target_model = self._create_model()
self.target_model.set_weights(self.model.get_weights())
self.model.summary()
self.replay_memory = deque(maxlen=replay_memory_size)
self.target_update_counter = 0
def _create_model(self):
model = Sequential([
Conv2D(32, (3, 3), input_shape=(self.field_height, self.field_width, NUM_CHANNELS), activation='relu'),
Dropout(0.1),
Conv2D(32, (3, 3), activation='relu'),
Dropout(0.1),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.1),
Dense(NUM_ACTIONS)
])
model.compile(optimizer='rmsprop', loss='mse')
return model
def update_replay_memory(self, current_state, action, reward, next_state, done):
self.replay_memory.append((current_state, action, reward, next_state, done))
def get_q_values(self, x):
return self.model.predict(x)
def train(self):
# guarantee the minimum number of samples
if len(self.replay_memory) < self.min_replay_memory_size:
return
# get current q values and next q values
samples = random.sample(self.replay_memory, self.batch_size)
current_input = np.stack([sample[0] for sample in samples])
current_q_values = self.model.predict(current_input)
next_input = np.stack([sample[3] for sample in samples])
next_q_values = self.target_model.predict(next_input)
# update q values
for i, (current_state, action, reward, _, done) in enumerate(samples):
if done:
next_q_value = reward
else:
next_q_value = reward + self.gamma * np.max(next_q_values[i])
current_q_values[i, action] = next_q_value
# fit model
hist = self.model.fit(current_input, current_q_values, batch_size=self.batch_size, verbose=0, shuffle=False)
loss = hist.history['loss'][0]
return loss
def increase_target_update_counter(self):
self.target_update_counter += 1
if self.target_update_counter >= self.target_update_freq:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
def save(self, model_filepath, target_model_filepath):
self.model.save(model_filepath)
self.target_model.save(target_model_filepath)
def load(self, model_filepath, target_model_filepath):
self.model = keras.models.load_model(model_filepath)
self.target_model = keras.models.load_model(target_model_filepath)
|
{"hexsha": "07fa5552af3800bd3c915959f8293a217bbbd67c", "size": 3259, "ext": "py", "lang": "Python", "max_stars_repo_path": "dqn_agent.py", "max_stars_repo_name": "choyi0521/snake-reinforcement-learning", "max_stars_repo_head_hexsha": "4881dfc163378f615654d85262901480858e5e65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-11-12T04:09:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T01:35:58.000Z", "max_issues_repo_path": "dqn_agent.py", "max_issues_repo_name": "choyi0521/snake-reinforcement-learning", "max_issues_repo_head_hexsha": "4881dfc163378f615654d85262901480858e5e65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dqn_agent.py", "max_forks_repo_name": "choyi0521/snake-reinforcement-learning", "max_forks_repo_head_hexsha": "4881dfc163378f615654d85262901480858e5e65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-09-30T10:22:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-01T03:06:31.000Z", "avg_line_length": 38.7976190476, "max_line_length": 118, "alphanum_fraction": 0.670144216, "include": true, "reason": "import numpy", "num_tokens": 699}
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: work_gga_x *)
theta0 := 1.0008:
theta1 := 0.1926:
theta2 := 1.8962:
f0 := s -> s^2/(1 + s)^2:
f := x -> theta0 + f0(X2S*x)* (theta1 + f0(X2S*x) * theta2):
|
{"hexsha": "5c6446a1796123e920f015c157fb5624e6789362", "size": 406, "ext": "mpl", "lang": "Maple", "max_stars_repo_path": "libxc-4.2.3/maple/gga_x_bayesian.mpl", "max_stars_repo_name": "rdietric/lsms", "max_stars_repo_head_hexsha": "8d0d5f01186abf9a1cc54db3f97f9934b422cf92", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-04-03T15:35:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T03:19:23.000Z", "max_issues_repo_path": "libxc-4.2.3/maple/gga_x_bayesian.mpl", "max_issues_repo_name": "rdietric/lsms", "max_issues_repo_head_hexsha": "8d0d5f01186abf9a1cc54db3f97f9934b422cf92", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-30T13:59:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:43:35.000Z", "max_forks_repo_path": "libxc-4.2.3/maple/gga_x_bayesian.mpl", "max_forks_repo_name": "rdietric/lsms", "max_forks_repo_head_hexsha": "8d0d5f01186abf9a1cc54db3f97f9934b422cf92", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-06-30T00:30:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T09:14:29.000Z", "avg_line_length": 23.8823529412, "max_line_length": 68, "alphanum_fraction": 0.6305418719, "num_tokens": 149}
|
[STATEMENT]
lemma fv_assignment_rhs_subset_fv_st'[simp]: "fv\<^sub>s\<^sub>e\<^sub>t (assignment_rhs\<^sub>s\<^sub>t S) \<subseteq> fv\<^sub>s\<^sub>t S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fv\<^sub>s\<^sub>e\<^sub>t (assignment_rhs\<^sub>s\<^sub>t S) \<subseteq> fv\<^sub>s\<^sub>t S
[PROOF STEP]
by (induct S rule: assignment_rhs\<^sub>s\<^sub>t.induct) auto
|
{"llama_tokens": 172, "file": "Stateful_Protocol_Composition_and_Typing_Strands_and_Constraints", "length": 1}
|
#ifndef CANARD_NET_OFP_DETAIL_ANY_TYPE_HPP
#define CANARD_NET_OFP_DETAIL_ANY_TYPE_HPP
#include <canard/net/ofp/detail/config.hpp>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#include <boost/mpl/contains.hpp>
#include <boost/mpl/deref.hpp>
#include <boost/mpl/integral_c.hpp>
#include <boost/mpl/min_element.hpp>
#include <boost/mpl/placeholders.hpp>
#include <boost/mpl/transform_view.hpp>
#include <boost/operators.hpp>
#include <canard/net/ofp/detail/variant.hpp>
#include <canard/net/ofp/detail/visitors.hpp>
#include <canard/net/ofp/type_traits/type_list.hpp>
namespace canard {
namespace net {
namespace ofp {
namespace detail {
template <class Derived> class empty_any_type_base {};
template <class Decoder, template <class> class Base = empty_any_type_base>
class any_type
: public Base<any_type<Decoder, Base>>
, private boost::equality_comparable<any_type<Decoder, Base>>
{
public:
using header_type = typename Decoder::header_type;
using type_id = typename Decoder::type_id;
using type_list = typename Decoder::decode_type_list;
static constexpr std::uint16_t header_size = Decoder::header_size;
private:
using inner_type_list = type_traits::to_type_list_t<type_list>;
template <class T>
using containable_if_t = typename std::enable_if<
boost::mpl::contains<inner_type_list, typename std::decay<T>::type>::value
>::type;
public:
static constexpr auto min_length() noexcept
-> std::uint16_t
{
return min_element_t<min_length_t>::value;
}
static constexpr auto min_byte_length() noexcept
-> std::uint16_t
{
return min_element_t<min_byte_length_t>::value;
}
template <class T, class = containable_if_t<T>>
any_type(T&& t)
: variant_(std::forward<T>(t))
{
}
template <class T, class = containable_if_t<T>>
auto operator=(T&& t)
-> any_type&
{
variant_ = std::forward<T>(t);
return *this;
}
CANARD_NET_OFP_DECL auto type() const noexcept
-> type_id;
CANARD_NET_OFP_DECL auto length() const noexcept
-> std::uint16_t;
CANARD_NET_OFP_DECL auto byte_length() const noexcept
-> std::uint16_t;
CANARD_NET_OFP_DECL auto index() const noexcept
-> std::size_t;
template <class Visitor>
auto visit(Visitor&& visitor)
-> typename std::remove_reference<Visitor>::type::result_type
{
return detail::apply_visitor(std::forward<Visitor>(visitor), variant_);
}
template <class Visitor>
auto visit(Visitor&& visitor) const
-> typename std::remove_reference<Visitor>::type::result_type
{
return detail::apply_visitor(std::forward<Visitor>(visitor), variant_);
}
template <class Validator>
void validate(Validator validator) const
{
visit(detail::validation_visitor<Validator>{validator});
}
template <class Container>
auto encode(Container& container) const
-> Container&
{
return visit(detail::encoding_visitor<Container>{container});
}
template <class Iterator>
static auto decode(Iterator& first, Iterator last)
-> any_type
{
return Decoder::template decode<any_type>(first, last, to_any{});
}
friend auto operator==(any_type const& lhs, any_type const& rhs) noexcept
-> bool
{
return lhs.equal_impl(rhs);
}
template <class T, class = containable_if_t<T>>
friend auto operator==(any_type const& lhs, T const& rhs) noexcept
-> bool
{
if (auto const v = lhs.template ptr_any_cast<T>()) {
return *v == rhs;
}
return false;
}
template <class T, class = containable_if_t<T>>
friend auto operator==(T const& lhs, any_type const& rhs) noexcept
-> bool
{
return rhs == lhs;
}
template <class T, class = containable_if_t<T>>
friend auto operator!=(any_type const& lhs, T const& rhs) noexcept
-> bool
{
return !(lhs == rhs);
}
template <class T, class = containable_if_t<T>>
friend auto operator!=(T const& lhs, any_type const& rhs) noexcept
-> bool
{
return !(rhs == lhs);
}
friend auto equivalent(any_type const& lhs, any_type const& rhs) noexcept
-> bool
{
return lhs.equivalent_impl(rhs);
}
template <class T, class = containable_if_t<T>>
friend auto equivalent(any_type const& lhs, T const& rhs) noexcept
-> bool
{
if (auto const v = lhs.template ptr_any_cast<T>()) {
return equivalent(*v, rhs);
}
return false;
}
template <class T, class = containable_if_t<T>>
friend auto equivalent(T const& lhs, any_type const& rhs) noexcept
-> bool
{
return equivalent(rhs, lhs);
}
template <class T, class D, template <class> class B>
friend auto any_cast(any_type<D, B>&)
-> T&;
template <class T, class D, template <class> class B>
friend auto any_cast(any_type<D, B> const&)
-> T const&;
template <class T, class D, template <class> class B>
friend auto any_cast(any_type<D, B>*)
-> T*;
template <class T, class D, template <class> class B>
friend auto any_cast(any_type<D, B> const*)
-> T const*;
private:
template <template <class> class F>
using min_element_t = typename boost::mpl::deref<
typename boost::mpl::min_element<
typename boost::mpl::transform_view<
inner_type_list, F<boost::mpl::placeholders::_>
>::type
>::type
>::type;
template <class T>
struct min_length_t
: boost::mpl::integral_c<std::uint16_t, T::min_length()>
{};
template <class T>
struct min_byte_length_t
: boost::mpl::integral_c<std::uint16_t, T::min_byte_length()>
{};
CANARD_NET_OFP_DECL auto equal_impl(any_type const&) const noexcept
-> bool;
CANARD_NET_OFP_DECL auto equivalent_impl(any_type const&) const noexcept
-> bool;
template <class T>
auto ref_any_cast()
-> T&
{
return detail::get<T>(variant_);
}
template <class T>
auto ref_any_cast() const
-> T const&
{
return detail::get<T>(variant_);
}
template <class T>
auto ptr_any_cast()
-> T*
{
return detail::get<T>(std::addressof(variant_));
}
template <class T>
auto ptr_any_cast() const
-> T const*
{
return detail::get<T>(std::addressof(variant_));
}
struct to_any
{
template <class T>
auto operator()(T&& t) const
-> any_type
{
return any_type{std::forward<T>(t)};
}
};
private:
using variant_t = typename detail::make_variant_over<inner_type_list>::type;
variant_t variant_;
};
template <class T, class Decoder, template <class> class Base>
auto any_cast(any_type<Decoder, Base>& any)
-> T&
{
return any.template ref_any_cast<T>();
}
template <class T, class Decoder, template <class> class Base>
auto any_cast(any_type<Decoder, Base> const& any)
-> T const&
{
return any.template ref_any_cast<T>();
}
template <class T, class Decoder, template <class> class Base>
auto any_cast(any_type<Decoder, Base>* const any)
-> T*
{
return any->template ptr_any_cast<T>();
}
template <class T, class Decoder, template <class> class Base>
auto any_cast(any_type<Decoder, Base> const* const any)
-> T const*
{
return any->template ptr_any_cast<T>();
}
} // namespace detail
} // namespace ofp
} // namespace net
} // namespace canard
#if defined(CANARD_NET_OFP_HEADER_ONLY) || !defined(CANARD_NET_OFP_USE_EXPLICIT_INSTANTIATION)
# include <canard/net/ofp/detail/impl/any_type.hpp>
#endif
#endif // CANARD_NET_OFP_DETAIL_ANY_TYPE_HPP
|
{"hexsha": "9f1381f82eced5938e86577444c07c94a7374b3a", "size": 7850, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/canard/net/ofp/detail/any_type.hpp", "max_stars_repo_name": "amedama41/bulb", "max_stars_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/canard/net/ofp/detail/any_type.hpp", "max_issues_repo_name": "amedama41/bulb", "max_issues_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2016-07-21T11:29:13.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-03T05:16:42.000Z", "max_forks_repo_path": "include/canard/net/ofp/detail/any_type.hpp", "max_forks_repo_name": "amedama41/bulb", "max_forks_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9075907591, "max_line_length": 94, "alphanum_fraction": 0.6480254777, "num_tokens": 1979}
|
from __future__ import print_function
from sklearn.feature_extraction.text import CountVectorizer
import argparse
import logging
from time import time
import numpy as np
import codecs
from gensim import corpora, matutils
from gensim.models import TfidfModel, LsiModel
import os
import ntpath
from pathlib import Path
from sys import stderr
from scipy.sparse import csc_matrix
from scipy.sparse import hstack
from six import iteritems
import shutil
from joblib import Parallel, delayed
from pdb import set_trace as st
# Display progress logs on stdout
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)s %(message)s')
mark = "%%%_"
def rm_words(user_input, stop_words, out_file):
"""Sanitize using intersection and list.remove()"""
# Downsides:
# - Looping over list while removing from it?
# http://stackoverflow.com/questions/1207406/remove-items-from-a-list-while-iterating-in-python
stop_words = set(stop_words)
with codecs.open(out_file, mode = "a", encoding = 'latin-1', errors = 'substitute') as f:
for sw in stop_words.intersection(user_input):
while sw in user_input:
user_input.remove(sw)
f.write("%s\n" % " ".join(user_input))
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
import subprocess
return subprocess.check_output(['du','-sh', path]).split()[0].decode('utf-8')
def sublinear(x):
return np.log2(x) + 1
def binary(x):
return 1.0
def freq(x):
return x
class windowStreamer(object):
def __init__(self, dictionary, input_file, vectorizer, wsize=10):
self.file_name = input_file
self.analyzer = vectorizer.build_analyzer()
self.tokenizer = vectorizer.build_tokenizer()
self.wsize = wsize
def __iter__(self):
for line in codecs.open(self.file_name, mode = "r", encoding = 'latin-1', errors = 'substitute'):
ln = self.tokenizer(line)
try:
for i, _ in enumerate(ln):
try:
#word = ln[i + self.wsize]
word = mark + ln[i]
except KeyError:
continue
#s=" ".join(ln[i:i + self.wsize] + ln[i + self.wsize + 1:i + self.wsize*2 + 1])
w = ln[i - self.wsize:i] + ln[i + 1:i + (self.wsize + 1)]
s = " ".join(w)
wi = [word] + self.tokenizer(" ".join(self.analyzer(s)))
bow = dictionary.doc2bow(wi)
if len(wi) < 2:
#stderr.write("%s\n" % wi)
continue
yield bow
except IndexError:
break
class streamer(object):
def __init__(self, file_name, vectorizer = None, only_tokens=False):
self.file_name=file_name
self.analyzer=vectorizer.build_analyzer()
self.tokenizer=vectorizer.build_tokenizer()
self.only_tokens=only_tokens
def __iter__(self):
if self.only_tokens:
for s in open(self.file_name, mode = 'r', encoding = 'latin-1', errors = 'substitute'):
yield self.tokenizer(s)
else:
for s in open(self.file_name, mode = 'r', encoding = 'latin-1', errors = 'substitute'):
yield self.tokenizer(" ".join(self.analyzer(s))) + [mark + w for w in self.tokenizer(s)]
def save_sample(word, context, dictionary, out_dataset="word_dataset", fsubsamp=50, op="sum", verbose=False):
if word in ["", " "] or not word.isalpha():
return None
sshape=(max(dictionary.keys()) + 1, 1)
path = out_dataset + "/" + word
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if verbose: stderr.write("Sample couldn't be stored: %s\n" % word)
return None
rows=np.array([i for i, f in context])
data=np.array([f for i, f in context])
D=np.memmap(path + "/vector", dtype='float32', mode='w+', shape=data.shape)
R=np.memmap(path + "/rows", dtype='int32', mode='w+', shape=data.shape)
K=np.memmap(path + "/n_samples", dtype='int32', mode='w+', shape=(1,))
Cs=np.memmap(path + "/c_shape", dtype='int32', mode='w+', shape=(1,))
D[:] = data[:]
R[:] = rows[:]
Cs[:] = rows.shape[0]
K+=1
del Cs, D, R, K
return None
else:
K=np.memmap(path + "/n_samples", dtype='int32', mode='r+', shape=(1,))
# limit the number of samples stored for each word to 'fmax'
if K[0] >= fsubsamp:
return None
else:
rows=np.array([i for i, f in context])
data=np.array([f for i, f in context])
cols=np.array([0]*int(data.shape[0]))
sample=csc_matrix((data, (rows, cols)), shape=sshape)
Cs = np.memmap(path + "/c_shape", dtype='int32', mode='r+', shape=(1,))
cshape=(Cs[0], )
D=np.memmap(path + "/vector", dtype='float32', mode='r', shape=cshape)
R=np.memmap(path + "/rows", dtype='int32', mode='r', shape=cshape)
C=np.array([0]*int(R.shape[0]))
centroid=csc_matrix((D, (R, C)), shape=sshape)
del D, R
if op == "ol": # On-line mean
context = centroid + (sample - centroid)/(K[0] + 1.0)
if op == "avg":
context = (sample + centroid)/(K[0] + 1.0)
if op == "sum":
context = sample + centroid
data=context.data
rows=context.indices
cshape=(rows.shape[0], )
D=np.memmap(path + "/vector", dtype='float32', mode='w+', shape=cshape)
R=np.memmap(path + "/rows", dtype='int32', mode='w+', shape=cshape)
D[:] = data[:]
R[:] = rows[:]
Cs[:] = context.data.shape[0]
K+=1
del Cs, K, D, R
class stream_vectors(object):
def __init__(self, path="word_dataset"):
self.path=path
def __iter__(self):
for word_dir, _, vector_files in os.walk(self.path):
if vector_files==[]:
continue
Cs = np.memmap(word_dir + "/c_shape", dtype='int32', mode='r', shape=(1,))
cshape=(Cs[0], )
D=np.memmap(word_dir + "/vector", dtype='float32', mode='r', shape=cshape)
R=np.memmap(word_dir + "/rows", dtype='int32', mode='r', shape=cshape)
yield [(r, d) for r, d in zip(R, D)]
def wind2lsa(doc, dim):
v=np.zeros((dim,))
try:
for index, value in doc:
v[index] = value
except:
return v
return v
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Computes Cross-Entropy (TFIDF) weights of a raw text dataset and stores the model.')
parser.add_argument("--dataset", help = "The path to the raw text dataset file", required = True)
parser.add_argument("--cout", help = "The path to the cross-entropy output model file (default='output.vec')",
default = "output.vec")
parser.add_argument("--tmp", help = "The path to the temporary files (default='./tmp')",
default = "./tmp")
parser.add_argument("--fmin", help = "The minimum word frequency considered to embed (default = 3).",
default = 3, type = int)
parser.add_argument("--fmax", help = "The maximum word frequency portion considered to embed between [0.0, 1.0] (default = 0; no limit).",
default = 0, type = float)
parser.add_argument("--fsubsamp", help = "The maximum size of context samples for each word (default=50).",
default = 50, type = int)
parser.add_argument("--wsize", help = "The size of the sliding window (default=10).", default = 10, type = int)
parser.add_argument("--tf", help = "TF normalization: frequency, binary, sublinear (default='frequency').",
default = "frequency")
parser.add_argument("--combiner", help = "Combination operation among contexts of a word {'sum':summation, 'avg': mean, 'ol': online_mean} (default='sum').",
default = "sum")
parser.add_argument("--stop", help = "Toggles stop words stripping.", action = "store_true")
parser.add_argument("--char", help = "Toggles character n-grams instead of word n-grams (the default).",
action = "store_true")
parser.add_argument("--lsa", help = "Output embeddings dimension (default = 300).", default = 300, type = int)
parser.add_argument("--n_gramI", help = "Inferiror n-gram TF--IDF computation (default = 2).", default = 2, type = int)
parser.add_argument("--n_gramS", help = "Superiror n-gram TF--IDF computation (default = 6).", default = 6, type = int)
parser.add_argument("--replace", help = "Toggles replace stored temporal files with new ones.", action = "store_true")
parser.add_argument("--keep_tmp", help = "Keep temporal files by ending the embedding.", action = "store_true")
args = parser.parse_args()
print("Creating LSA sliding windows by using following params:\n%s\n" % vars(args))
# Functions for computing TF
wlocal={"frequency": freq, "binary": binary, "sublinear": sublinear}
TEMP_FOLDER=args.tmp
if not os.path.isdir(TEMP_FOLDER):
os.makedirs(TEMP_FOLDER)
tmp_name = TEMP_FOLDER + '/' + ntpath.basename(args.dataset) + '_fmin-' + str(args.fmin) + '_fmax-' + str(args.fmax) + '_tf-' + args.tf + '_stop' + str(args.stop) + \
'_char-' + str(args.char) + '_dim-' + str(args.lsa) + '_wsize-' + str(args.wsize) + '_n-' + str(args.n_gramI) + '_N-' + str(args.n_gramS) + '_combiner-' + args.combiner
input_ = ""
t0 = time()
ta = time()
vectorizer = CountVectorizer(analyzer = 'char', ngram_range = (args.n_gramI, args.n_gramS),
strip_accents = 'unicode')
if args.stop and (args.replace or not Path(TEMP_FOLDER + "/file_filtered.nstop").is_file()):
stderr.write("\nFiltering stop words from input file..")
with open("stop_words.txt", mode = 'r', encoding = 'latin-1', errors = 'substitute') as f:
stopwors = f.read().strip().split('\n')
stream=streamer(args.dataset, vectorizer = vectorizer, only_tokens=True)
Parallel(n_jobs=-1)(delayed(rm_words)(line, stopwors, TEMP_FOLDER + "/file_filtered.nstop") for line in stream)
#os.system(stopw_command)
if Path(TEMP_FOLDER + "/file_filtered.nstop").is_file():
input_ = TEMP_FOLDER + "/file_filtered.nstop"
stderr.write("\nInput file filtered from stop words in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
else:
stderr.write("\nNo filtered file could be created... aborting\n")
exit()
else:
input_ = args.dataset
t0 = time()
# Create vectorizer for shattering text into n-gram characters.
my_file = Path(tmp_name + '.dict')
if not my_file.is_file() or args.replace:
# This streamer returns a dictionary over the raw input file. The
# 'vectorizer' provides analyzer and tokenizer from which depends
# the resulting dictionary, e.g. tokenizing with character n-grams.
corpus = streamer(input_, vectorizer = vectorizer)
dictionary = corpora.Dictionary(corpus)
if not Path(TEMP_FOLDER + "/file_filtered.bad").is_file() or args.replace:
rare_ids = [tokenid for tokenid, docfreq in iteritems(dictionary.dfs)
if docfreq <= args.fmin
and dictionary[tokenid].startswith(mark)] if args.fmin > 0 else []
max_f = max([f for tokenid, f in iteritems(dictionary.dfs) if dictionary[tokenid].startswith(mark)])
freq_ids = [tokenid for tokenid, docfreq in iteritems(dictionary.dfs)
if docfreq >= args.fmax * max_f
and dictionary[tokenid].startswith(mark)] if args.fmax > 0 else []
if rare_ids + freq_ids != []:
bad_types = [dictionary[idx].strip(mark) for idx in rare_ids + freq_ids]
else:
bad_types = []
#with codecs.open(TEMP_FOLDER + "/badtypes", mode = "w", encoding = 'latin-1', errors = 'substitute') as f:
# for t in bad_types:
# f.write("%s\n" % t.strip(mark))
t0 = time()
stderr.write("\nRemoving frequent/rare words from input file...")
#os.system(badt_command) # Remove frequent and rare words
if bad_types != []:
stream=streamer(input_ , vectorizer = vectorizer, only_tokens=True)
Parallel(n_jobs=-1, verbose=1, backend="threading")(delayed(rm_words)(line, bad_types,
TEMP_FOLDER + "/file_filtered.bad") for line in stream)
if not os.stat(TEMP_FOLDER + "/file_filtered.bad").st_size == 0:
input_ = TEMP_FOLDER + "/file_filtered.bad"
dictionary.filter_tokens(rare_ids + freq_ids)
dictionary.compactify()
stderr.write("\nInput file filtered from frequent/rare words in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
else:
input_ = args.dataset
dictionary.save(tmp_name + '.dict')
stderr.write("\nDictionary created in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
dictionary = corpora.Dictionary.load(tmp_name + '.dict')
t0 = time()
stderr.write("\nSerializing sparse corpus\n")
my_file = Path(tmp_name + '.mm')
if not my_file.is_file() or args.replace:
# This stramer returns generator of sliding windows already vectorized
# with word counts
sdata = windowStreamer(dictionary = dictionary, input_file = input_, vectorizer = vectorizer,
wsize = args.wsize)
corpora.MmCorpus.serialize(tmp_name + '.mm', sdata)
stderr.write("\nSparse BoW corpus serialized in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
corps = corpora.MmCorpus(tmp_name + '.mm')
#st()
if not os.path.isdir(TEMP_FOLDER + "/word_dataset"):
t0 = time()
stderr.write("\nFitting entropy model for sparse word embeddings (TF-IDF)\n")
tfidf = TfidfModel(corps, normalize = True, wlocal = wlocal[args.tf])
tfidf_corpus = tfidf[corps]
stderr.write("\nSparse word embeddings created in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
t0=time()
stderr.write("\nMerging sparse entropy word embeddings (TF-IDF)\n")
for win in tfidf_corpus:
# The first item of a window in the gensim corpus contains the lexical type associated to it.
try:
word=[term for term in [dictionary[idx] for idx, weight in win] if mark in term][0].strip(mark)
except IndexError:
continue # Continue if there are empty windows that passed
save_sample(word = word, context = win, out_dataset = TEMP_FOLDER + "/word_dataset",
dictionary=dictionary, fsubsamp = args.fsubsamp, op=args.combiner, verbose=False)
# Once entropy vectors have been combined, let's stream them to a new corpus
stderr.write("\nSparse word embeddings memmaped in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
t0 = time()
tfidf_vectors=stream_vectors(path = TEMP_FOLDER + "/word_dataset")
corpora.MmCorpus.serialize(tmp_name + '_entropy.mm', tfidf_vectors)
stderr.write("\nSparse entropy matrix serialized in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
tfidf_corpus = corpora.MmCorpus(tmp_name + '_entropy.mm')
stderr.write("\nEntropy model fitted in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
t0 = time()
stderr.write("\nFitting latent orthogonal basis...\n")
lsi = LsiModel(tfidf_corpus, id2word = dictionary, num_topics = args.lsa)
corpus_lsi = lsi[tfidf_corpus]
print("Words embedded into orthogonal basis in %f min %f seg\n" % ((time() - t0)/60.0, time() - t0))
t0 = time()
print ("Saving vectors ... \n")
with codecs.open(args.cout, mode = "w", encoding = 'latin-1', errors = 'substitute') as f:
f.write("%s %s\n" % (lsi.docs_processed, lsi.num_topics))
for v, context in zip(corpus_lsi, tfidf_corpus):
word=[term for term in [dictionary[idx] for idx, weight in context] if mark in term][0].strip(mark)
f.write("%s %s\n" % (word, np.array2string(wind2lsa(v, lsi.num_topics),
formatter={'float_kind':lambda x: "%.6f" % x}, max_line_width=20000).strip(']').strip('[') ))
if not args.keep_tmp:
S=du(TEMP_FOLDER)
shutil.rmtree(TEMP_FOLDER)
print("Temporal files removed: size %s ...\n" % S)
print("Word embeddings saved at %s ...\nTotal time: %.4f min %.4f seg\n" % (args.cout, (time() - ta)/60, time() - ta))
|
{"hexsha": "3f33201df68c6aa4eb1eefea2e388fb52a64ff40", "size": 18061, "ext": "py", "lang": "Python", "max_stars_repo_path": "word2igf.py", "max_stars_repo_name": "iarroyof/discrimative_attributes", "max_stars_repo_head_hexsha": "1f18eddd5f114f45704d96955199ba686098d2e6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "word2igf.py", "max_issues_repo_name": "iarroyof/discrimative_attributes", "max_issues_repo_head_hexsha": "1f18eddd5f114f45704d96955199ba686098d2e6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "word2igf.py", "max_forks_repo_name": "iarroyof/discrimative_attributes", "max_forks_repo_head_hexsha": "1f18eddd5f114f45704d96955199ba686098d2e6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.6085858586, "max_line_length": 202, "alphanum_fraction": 0.5584408394, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4337}
|
Require ClassicalEpsilon.
Require Import Reals Psatz.
From stdpp Require Import tactics.
From mathcomp Require Import ssrfun ssreflect eqtype ssrbool seq fintype choice bigop.
From discprob.basic Require Import base sval order monad bigop_ext nify.
From discprob.prob Require Import prob countable finite stochastic_order.
From discprob.monad.idxval Require Import pival_dist pival ival_dist ival ival_pair pidist_singleton idist_pidist_pair extrema.
Import Lub.
(* This is an inductive characterization of eq_ivd_prob, as is proved later *)
Inductive irrel_ivd : ∀ X, ivdist X → ivdist X → Prop :=
| irrel_ivd_refl X : ∀ (I: ivdist X), irrel_ivd X I I
| irrel_ivd_sym X : ∀ I1 I2, irrel_ivd X I1 I2 → irrel_ivd X I2 I1
| irrel_ivd_trans X : ∀ I1 I2 I3, irrel_ivd X I1 I2 → irrel_ivd X I2 I3 → irrel_ivd X I1 I3
| irrel_ivd_proper X :
∀ I1 I1' I2 I2', eq_ivd I1 I1' → eq_ivd I2 I2' → irrel_ivd X I1 I2 → irrel_ivd X I1' I2'
| irrel_ivd_irrel X : ∀ {Y} I1 (I0: ivdist Y), irrel_ivd X I1 (x ← I0; I1)
| irrel_ivd_bind X Y: ∀ (I1 I2: ivdist X) (f1 f2: X → ivdist Y),
irrel_ivd X I1 I2 →
(∀ x, irrel_ivd Y (f1 x) (f2 x)) →
irrel_ivd Y (x ← I1; f1 x) (x ← I2; f2 x).
Arguments irrel_ivd {_}.
Definition le_pidist_irrel :=
λ {X : Type} (Is1 Is2 : pidist X), ∀ I : ivdist X, In (I: ival X) Is1 → ∃ I' : ivdist X, irrel_ivd I I' ∧ In (I': ival X) Is2.
Lemma le_pidist_irrel_refl {X: Type} (Is1: pidist X):
le_pidist_irrel Is1 Is1.
Proof.
intros I Hin. exists I; split; eauto. apply irrel_ivd_refl.
Qed.
Lemma irrel_ivd_support_coerce {X} (I1 I2: ivdist X) :
irrel_ivd I1 I2 →
∀ x, (∃ i2, ind I2 i2 = x ∧ val I2 i2 > 0) ↔ (∃ i1, ind I1 i1 = x ∧ val I1 i1 > 0).
Proof.
induction 1.
- split; intros; auto.
- intros. by rewrite (IHirrel_ivd x).
- intros. by rewrite (IHirrel_ivd2 x).
- intros.
rewrite (eq_ival_support_coerce I1 I1'); eauto.
rewrite (eq_ival_support_coerce I2 I2'); eauto.
- intros.
* split.
** intros ((i0&i1)&Heq&Hgt). exists i1.
rewrite //= in Heq Hgt.
split; auto. specialize (val_nonneg I0 i0); nra.
** intros (i1&Heq&Hgt).
edestruct (ivd_support_idx I0) as (i0&Hgt').
exists (existT i0 i1); split => //=; nra.
- intros x. split.
* intros ((i2&if2)&Hind&Hval).
rewrite //= in Hind.
edestruct (IHirrel_ivd (ind I2 i2)) as (HI2&_).
edestruct (HI2) as (i1&Hindeq&?).
{ eexists. split; eauto. rewrite //= in Hval. specialize (val_nonneg (f2 (ind I2 i2)) if2).
nra. }
edestruct (H1 (ind I2 i2)) as (Hf2&_).
edestruct Hf2 as (if1&?&?).
{ eexists. split; eauto. rewrite //= in Hval.
specialize (val_nonneg I2 i2); nra. }
unshelve (eexists).
{ exists i1. rewrite Hindeq; exact if1. }
split => //=; destruct Hindeq.
** rewrite /eq_rect_r//=.
** rewrite /eq_rect_r//=. nra.
* intros ((i2&if2)&Hind&Hval).
rewrite //= in Hind.
edestruct (IHirrel_ivd (ind I1 i2)) as (_&HI2).
edestruct (HI2) as (i1&Hindeq&?).
{ eexists. split; eauto. rewrite //= in Hval. specialize (val_nonneg (f1 (ind I1 i2)) if2).
nra. }
edestruct (H1 (ind I1 i2)) as (_&Hf2).
edestruct Hf2 as (if1&?&?).
{ eexists. split; eauto. rewrite //= in Hval.
specialize (val_nonneg I1 i2); nra. }
unshelve (eexists).
{ exists i1. rewrite Hindeq; exact if1. }
split => //=; destruct Hindeq.
** rewrite /eq_rect_r//=.
** rewrite /eq_rect_r//=. nra.
Qed.
Lemma le_pidist_irrel_support_coerce_aux {X} (Is1 Is2: pidist X) :
le_pidist_irrel Is2 Is1 →
∀ x, In_psupport x Is2 → In_psupport x Is1.
Proof.
intros Hle x (I2&i2&Hin2&?&Hval).
destruct (Hle {| ivd_ival := I2; val_sum1 := all_sum1 Is2 _ Hin2|}) as (I1&Heq&Hin1); eauto.
exists I1. edestruct (irrel_ivd_support_coerce _ _ Heq) as (i1&?&?).
{ eauto. }
eexists; split; eauto.
Qed.
Global Instance irrel_ivd_proper_instance {X} : Proper (@eq_ivd X ==> @eq_ivd X ==> iff) (@irrel_ivd X).
Proof.
intros I1 I1' Heq1 I2 I2' Heq2.
split; intros; eapply irrel_ivd_proper; eauto; try by symmetry.
Qed.
Global Instance irrel_ivd_Transitivite {X}: Transitive (@irrel_ivd X).
Proof. intros ???. apply irrel_ivd_trans. Qed.
Global Instance irrel_ivd_Reflexive {X}: Reflexive (@irrel_ivd X).
Proof. intros ?. apply irrel_ivd_refl. Qed.
Global Instance irrel_ivd_Symmetry {X}: Symmetric (@irrel_ivd X).
Proof. intros ??. apply irrel_ivd_sym. Qed.
Lemma is_Ex_ival_irrel_proper_bind {X Y} f (f1 f2: X → ivdist Y) (I1 I2: ivdist X) v
(Hirrel_ivd : irrel_ivd I1 I2)
(Hall_irrel : ∀ x : X, irrel_ivd (f1 x) (f2 x))
(IHinner : ∀ (x : X) (f : Y → R) (v : R), is_Ex_ival f (f1 x) v ↔ is_Ex_ival f (f2 x) v)
(IHirrel_ivd : ∀ (f : X → R) (v : R), is_Ex_ival f I1 v ↔ is_Ex_ival f I2 v):
is_Ex_ival f (ivd_bind _ _ f1 I1) v → is_Ex_ival f (ivd_bind _ _ f2 I2) v.
Proof.
intros His.
assert (ex_Ex_ival f (ivd_bind _ _ f1 I1)).
{ eapply is_Ex_ival_ex; eauto. }
rewrite -(is_Ex_ival_unique _ _ _ His).
feed pose proof (ex_Ex_ival_bind_post (λ x, Rabs (f x)) I1 f1) as Hex_I1.
{ eapply ex_Ex_ival_to_Rabs, is_Ex_ival_ex. eauto. }
feed pose proof (ex_Ex_ival_bind_post f I1 f1) as Hex_I1'.
{ eapply is_Ex_ival_ex. eauto. }
rewrite Ex_ival_bind_post //=.
assert (ex_Ex_ival f (ivd_bind _ _ f2 I2)).
{
apply ex_Ex_ival_from_Rabs, ex_Ex_ival_bind_post_inv; eauto using Rabs_pos, Rle_ge.
** intros.
apply is_Ex_ival_ex, ex_Ex_ival_to_Rabs in His.
edestruct (irrel_ivd_support_coerce I1 I2) as (Hlr&Hrl); eauto.
edestruct Hlr as (i1&Heqi1&Hvali1); eauto.
eapply ex_Ex_ival_bind_inv in His; eauto.
eapply ex_Ex_ival_is in His as (v'&His).
rewrite -Heqi1.
eapply is_Ex_ival_ex. eapply IHinner; eauto.
** apply ex_Ex_ival_is in Hex_I1 as (v'&His').
eapply is_Ex_ival_ex; eapply IHirrel_ivd.
eapply is_Ex_ival_proper_fun_support; eauto.
intros x Hsupport => //=.
symmetry.
apply is_Ex_ival_unique.
eapply IHinner.
eapply Ex_ival_correct. eapply (ex_Ex_ival_bind_inv (λ x, Rabs (f x)) f1 I1); eauto.
apply ex_Ex_ival_to_Rabs. eapply is_Ex_ival_ex; eauto.
}
cut (Ex_ival f (ivd_bind _ _ f2 I2) = (Ex_ival (λ x, Ex_ival f (f1 x)) I1)).
{ intros HEx. rewrite -HEx. apply Ex_ival_correct; eauto. }
rewrite Ex_ival_bind_post //=.
apply is_Ex_ival_unique.
eapply IHirrel_ivd.
eapply is_Ex_ival_proper_fun_support; last first.
{ eapply Ex_ival_correct. eauto. }
intros => //=.
symmetry.
apply is_Ex_ival_unique.
eapply IHinner.
eapply Ex_ival_correct. eapply (ex_Ex_ival_bind_inv f f1 I1); eauto.
Qed.
Lemma is_Ex_ival_irrel_proper {A} f (I I': ivdist A) v :
irrel_ivd I I' →
is_Ex_ival f I v ↔
is_Ex_ival f I' v.
Proof.
intros irrel_ivd.
revert v.
induction irrel_ivd; auto; intros.
- symmetry. eapply IHirrel_ivd.
- rewrite IHirrel_ivd1. auto.
- rewrite /eq_ivd in H.
etransitivity; first etransitivity; try eapply IHirrel_ivd.
{ split; apply is_Ex_ival_proper; eauto. by symmetry. }
{ split; apply is_Ex_ival_proper; eauto. by symmetry. }
- split. apply is_Ex_ival_bind_irrel, val_sum1.
intros His. cut (ex_Ex_ival f I1).
{ intros Hex. apply Ex_ival_correct in Hex.
cut (Ex_ival f I1 = v); intros; subst; eauto.
eapply is_Ex_ival_unique'; last eassumption.
apply is_Ex_ivd_bind_irrel; eauto.
}
apply is_Ex_ival_ex in His.
unshelve (eapply ex_Ex_ival_bind_inv in His; eauto).
{ exact (sval (ivd_support_idx I0)). }
destruct (ivd_support_idx _) => //=.
- split; eapply is_Ex_ival_irrel_proper_bind; eauto; try (intros; by symmetry).
Qed.
Lemma ex_Ex_ival_irrel_proper {A} f (I I': ivdist A) :
irrel_ivd I I' →
ex_Ex_ival f I →
ex_Ex_ival f I'.
Proof.
intros Hirrel (v&His)%ex_Ex_ival_is.
eapply is_Ex_ival_ex.
eapply is_Ex_ival_irrel_proper; eauto.
by symmetry.
Qed.
Lemma Ex_ival_irrel_proper {A} f (I I': ivdist A) :
irrel_ivd I I' →
ex_Ex_ival f I →
Ex_ival f I = Ex_ival f I'.
Proof.
intros. symmetry. apply is_Ex_ival_unique.
eapply is_Ex_ival_irrel_proper; eauto.
* symmetry. eauto.
* apply Ex_ival_correct; eauto.
Qed.
Lemma irrel_ivd_to_eq_ivd_prob {X} (I1 I2: ivdist X):
irrel_ivd I1 I2 →
eq_ivd_prob I1 I2.
Proof.
intros Hirrel.
apply eq_ivd_prob_alt.
intros x.
transitivity ((Pr (λ v, v = x) I1)).
{ rewrite /Ex_ival/idx_eq_ind//=. eapply SeriesC_ext; intros.
destruct ClassicalEpsilon.excluded_middle_informative => //=; nra.
}
transitivity ((Pr (λ v, v = x) I2)); last first.
{ rewrite /Ex_ival/idx_eq_ind//=. eapply SeriesC_ext; intros.
destruct ClassicalEpsilon.excluded_middle_informative => //=; nra.
}
apply Ex_ival_irrel_proper; eauto.
apply ex_Pr.
Qed.
Lemma In_isupport_pr_gt_0 {X: Type} (I: ivdist X) (x: X):
In_isupport x I →
0 < Pr (eq ^~ x) I.
Proof.
rewrite /Pr/Ex_ival => Hin.
destruct Hin as (i&?&?).
eapply (Series_strict_pos _ (pickle i)).
{ intros. rewrite /countable_sum/oapp.
destruct pickle_inv; try nra.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
rewrite Rmult_1_l. apply val_nonneg.
}
{ intros. rewrite /countable_sum/oapp.
rewrite pickleK_inv.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
}
feed pose proof (ex_Pr (eq^~ x) I).
apply ex_Ex_ival_is in H1 as (v&?).
rewrite /is_Ex_ival in H1.
destruct H1 as (Hex&His).
eexists. eauto.
Qed.
Lemma pr_gt_0_In_isupport {X: Type} (I: ivdist X) (x: X):
0 < Pr (eq ^~ x) I →
In_isupport x I.
Proof.
rewrite /Pr/Ex_ival => Hin.
eapply (Series_strict_pos_inv) in Hin as (n&?).
{
destruct (@pickle_inv (idx I) n) as [i|] eqn:Heq.
- exists i. rewrite //=/countable_sum//= Heq //= in H.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
* rewrite //= in H. split; eauto. nra.
* rewrite //= in H. nra.
- rewrite //=/countable_sum//= Heq //= in H ; nra.
}
intros n. rewrite /countable_sum. destruct pickle_inv => //=; last nra.
destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra.
rewrite Rmult_1_l. apply val_nonneg.
Qed.
(* This is a kind of conditional distribution *)
Lemma ival_slice_proof1 (X : Type) (I : ivdist X) (x : X):
∀ i : idx I, (if ClassicalEpsilon.excluded_middle_informative (In_isupport x I)
then
(if ClassicalEpsilon.excluded_middle_informative (ind I i = x) then val I i else 0) /
Pr (eq^~ x) I
else val I i) ≥ 0.
Proof.
intros i.
destruct ClassicalEpsilon.excluded_middle_informative; eauto; last apply val_nonneg.
apply Rle_ge, Rdiv_le_0_compat.
{ destruct ClassicalEpsilon.excluded_middle_informative => //=; eauto; try nra.
apply Rge_le, val_nonneg. }
{ apply In_isupport_pr_gt_0; eauto. }
Qed.
Definition ival_slice {X} (I: ivdist X) (x: X) : ival X.
refine {| idx := idx I;
ind := ind I;
val := λ i,
if ClassicalEpsilon.excluded_middle_informative (In_isupport x I) then
(if ClassicalEpsilon.excluded_middle_informative (ind I i = x) then
val I i
else
0) / Pr (λ i, i = x) I
else
val I i|}.
apply ival_slice_proof1.
Defined.
Lemma ival_slice_proof2 (X : Type) (I : ivdist X) (x : X):
is_series (countable_sum (val (ival_slice I x))) 1.
Proof.
rewrite //=. destruct ClassicalEpsilon.excluded_middle_informative; last apply val_sum1.
replace 1 with (Pr (eq^~ x) I */ Pr (eq^~ x) I); last first.
{ field. apply Rgt_not_eq, In_isupport_pr_gt_0; auto. }
apply is_seriesC_scal_r.
rewrite /Pr/Ex_ival.
apply (is_seriesC_ext _ (λ i0 : idx I, (if is_left (ClassicalEpsilon.excluded_middle_informative (ind I i0 = x))
then 1
else 0) * val I i0)).
{ intros. destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra. }
{
feed pose proof (ex_Pr (eq^~ x) I) as Hpr.
apply ex_Ex_ival_is in Hpr as (v&Hpr).
rewrite /is_Ex_ival in Hpr.
destruct Hpr as (Hex&His).
eapply Series_correct; eexists; eauto. }
Qed.
Definition ivdist_slice {X} (I: ivdist X) (x: X) : ivdist X.
Proof.
exists (ival_slice I x).
apply ival_slice_proof2.
Defined.
Lemma eq_ivd_prob_Pr_eq {X} (I1 I2: ivdist X) x:
eq_ivd_prob I1 I2 →
Pr (eq^~ x) I1 = Pr (eq^~ x) I2.
Proof.
rewrite /Pr/Ex_ival => Heq.
unshelve (eapply eq_ivd_prob_alt in Heq); first exact x.
rewrite /idx_eq_ind in Heq.
setoid_rewrite Rmult_if_distrib.
setoid_rewrite Rmult_0_l.
setoid_rewrite Rmult_1_l.
eauto.
Qed.
Lemma eq_ivd_prob_In_isupport {X: Type} I1 I2 (x: X):
eq_ivd_prob I1 I2 →
In_isupport x I1 →
In_isupport x I2.
Proof.
intros Heq Hin%In_isupport_pr_gt_0.
apply pr_gt_0_In_isupport.
erewrite <-eq_ivd_prob_Pr_eq; last eassumption.
eauto.
Qed.
Lemma eq_ivd_prob_to_irrel_ivd {X} (I1 I2: ivdist X):
eq_ivd_prob I1 I2 →
irrel_ivd I1 I2.
Proof.
intros Heq.
transitivity (x ← I1; _ ← ivdist_slice I2 x; mret x).
{ transitivity (x ← I1; mret x).
{ rewrite ivd_right_id. reflexivity. }
apply irrel_ivd_bind; first reflexivity.
intros x. apply irrel_ivd_irrel.
}
transitivity (x ← I2; _ ← ivdist_slice I1 x; mret x); last first.
{ symmetry.
transitivity (x ← I2; mret x).
{ rewrite ivd_right_id. reflexivity. }
apply irrel_ivd_bind; first reflexivity.
intros x. apply irrel_ivd_irrel.
}
cut (eq_ivd (I1 ≫= (λ x : X, ivdist_slice I2 x ≫= (λ _ : X, mret x)))
(I2 ≫= (λ x : X, ivdist_slice I1 x ≫= (λ _ : X, mret x)))).
{ intros ->. reflexivity. }
apply eq_ival_nondep_inj_surj_suffice.
apply eq_ival_nondep_inj_surj'_helper.
unshelve eexists.
{ intros (i1&i2&?). exists i2. exists i1. exact tt. }
rewrite //=.
split_and!.
* intros (i1&i2&[]) (i1'&i2'&[]) _ _ => //=.
inversion 1; subst. auto.
* intros (i2&i1&[]).
unshelve (eexists).
{ exists i1. exists i2. exact tt. }
split_and!; eauto => //=.
repeat destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra; try congruence.
** intros Hgt. eapply Rge_gt_trans; last eassumption.
right. rewrite //=.
cut (Pr (eq^~ (ind I2 i2)) I1 = Pr (eq^~ (ind I1 i1)) I2).
{ intros ->. nra. }
rewrite e0; eapply eq_ivd_prob_Pr_eq; eauto.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
by symmetry.
** cut (val I2 i2 = 0).
{ intros ->. nra. }
destruct (val_nonneg I2 i2); last auto.
exfalso. eapply n.
eapply eq_ivd_prob_In_isupport; eauto.
{ by symmetry. }
eexists; eauto.
* intros (i1&i2&[]) => //=.
repeat destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra; try congruence.
cut (val I1 i1 = 0).
{ intros ->. nra. }
destruct (val_nonneg I1 i1); last auto.
exfalso. eapply n.
eapply eq_ivd_prob_In_isupport; eauto.
eexists; eauto.
* intros (i1&i2&[]) => //=.
repeat destruct ClassicalEpsilon.excluded_middle_informative => //=; try nra; try congruence.
** intros Hgt.
cut (Pr (eq^~ (ind I2 i2)) I1 = Pr (eq^~ (ind I1 i1)) I2).
{ intros ->. nra. }
rewrite e0; eapply eq_ivd_prob_Pr_eq; eauto.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
by symmetry.
** intros; exfalso. eapply n. rewrite e.
eapply eq_ivd_prob_In_isupport; eauto.
** cut (val I1 i1 = 0).
{ intros ->. nra. }
destruct (val_nonneg I1 i1); last auto.
exfalso. eapply n.
eapply eq_ivd_prob_In_isupport; eauto.
eexists; eauto.
Qed.
Lemma irrel_ivd_choice {X} (I1 I1' I2 I2': ivdist X) p Hpf Hpf':
irrel_ivd I1 I2 →
irrel_ivd I1' I2' →
irrel_ivd (ivdplus p Hpf I1 I1') (ivdplus p Hpf' I2 I2').
Proof.
intros Hirrel1 Hirrel2.
transitivity (b ← ivdplus p Hpf (mret true) (mret false);
if (b: bool) then I1 else I1').
{ rewrite ivd_plus_bind ?ivd_left_id. reflexivity. }
transitivity (b ← ivdplus p Hpf' (mret true) (mret false);
if (b: bool) then I2 else I2'); last first.
{ rewrite ivd_plus_bind ?ivd_left_id. reflexivity. }
apply irrel_ivd_bind.
{ cut (eq_ivd (ivdplus p Hpf (mret true) (mret false)) (ivdplus p Hpf' (mret true) (mret false))).
{ intros ->; reflexivity. }
apply ivdist_plus_proper; reflexivity.
}
intros [|]; eauto.
Qed.
Definition irrel_pidist {X: Type} (Is1 Is2: pidist X) :=
∀ f, bounded_fun f → Rbar_le (Ex_min f Is2) (Ex_min f Is1).
Lemma irrel_pidist_Ex_max {X: Type} (Is1 Is2: pidist X) :
irrel_pidist Is1 Is2 → ∀ f, bounded_fun f → Rbar_le (Ex_max f Is1) (Ex_max f Is2).
Proof.
intros Hirrel f Hb.
rewrite ?Ex_max_neg_min.
apply Rbar_opp_le.
apply Hirrel.
destruct Hb as (c&?).
exists c => x. rewrite Rabs_Ropp; eauto.
Qed.
Lemma Ex_max_irrel_pidist {X: Type} (Is1 Is2: pidist X) :
(∀ f, bounded_fun f → Rbar_le (Ex_max f Is1) (Ex_max f Is2)) →
irrel_pidist Is1 Is2.
Proof.
intros Hirrel f Hb.
specialize (Hirrel (λ x, (- f x))).
rewrite ?Ex_max_neg_min in Hirrel.
apply Rbar_opp_le.
setoid_rewrite Ropp_involutive in Hirrel.
eapply Hirrel. destruct Hb as (c&?). exists c.
intros x. rewrite Rabs_Ropp; eauto.
Qed.
Lemma irrel_pidist_refl {X} : ∀ I, @irrel_pidist X I I.
Proof. intros f Hb; reflexivity. Qed.
Lemma irrel_pidist_trans {X} :
∀ I1 I2 I3, @irrel_pidist X I1 I2 → @irrel_pidist X I2 I3 → @irrel_pidist X I1 I3.
Proof.
intros I1 I2 I3 Hi1 Hi2 f Hb.
specialize (Hi1 f Hb).
specialize (Hi2 f Hb).
etransitivity; eauto.
Qed.
Lemma bounded_supp_fun_le_pidist {A} f (Is Is': pidist A):
le_pidist Is Is' →
bounded_fun_on f (λ x, In_psupport x Is') →
bounded_fun_on f (λ x, In_psupport x Is).
Proof.
intros Hle Hbf.
eapply bounded_fun_on_anti; try eassumption.
intros a. eapply le_pidist_support_coerce_aux; eauto.
Qed.
Lemma Ex_min_le_pidist_irrel {X} (f: X → R) Is1 Is2:
le_pidist_irrel Is1 Is2 →
Rbar_le (Ex_min f Is2) (Ex_min f Is1).
Proof.
intros Hle.
rewrite /Ex_min.
destruct (Glb_Rbar_correct (Ex_pidist f Is1)) as (Hlb&Hglb).
apply Hglb. intros r Hex. destruct Hex as (I&Hin&Hex).
edestruct (Hle {| ivd_ival := I; val_sum1 := all_sum1 Is1 _ Hin |}) as (I2&Heq&Hin2).
{ rewrite //=. }
{ eapply (is_Ex_ival_irrel_proper f) in Heq; last eauto.
destruct (Glb_Rbar_correct (Ex_pidist f Is2)) as (Hlb2&Hglb2).
eapply Hlb2. eexists; split; eauto.
eapply Heq => //=.
}
Qed.
Lemma Ex_max_le_pidist_irrel {X} (f: X → R) Is1 Is2:
le_pidist_irrel Is1 Is2 →
Rbar_le (Ex_max f Is1) (Ex_max f Is2).
Proof.
rewrite ?Ex_max_neg_min.
intros Hle.
apply Rbar_opp_le.
apply Ex_min_le_pidist_irrel; eauto.
Qed.
Lemma irrel_pidist_proper_irrel {X} :
∀ I1 I1' I2 I2', le_pidist_irrel I1' I1 → le_pidist_irrel I2 I2' →
@irrel_pidist X I1 I2 → @irrel_pidist X I1' I2'.
Proof.
intros I1 I1' I2 I2' Hle1 Hle2 Hirrel12.
intros f Hb.
etransitivity.
{ apply Ex_min_le_pidist_irrel; eauto. }
etransitivity.
{ eapply Hirrel12; eauto. }
{ apply Ex_min_le_pidist_irrel; eauto. }
Qed.
Lemma irrel_pidist_bind1 {X Y}: ∀ (I1 I2: pidist X) (f: X → pidist Y),
@irrel_pidist X I1 I2 →
@irrel_pidist Y (x ← I1; f x) (x ← I2; f x).
Proof.
intros I1 I2 f Hirrel.
intros g Hb.
rewrite ?Ex_min_bind_post;
eauto using Ex_min_bounded_is_bounded, ex_Ex_extrema_bounded_fun,
Ex_min_bounded_fun_finite.
Qed.
Lemma irrel_pidist_bind {X Y}: ∀ (I1 I2: pidist X) (f1 f2: X → pidist Y),
@irrel_pidist X I1 I2 →
(∀ x, @irrel_pidist Y (f1 x) (f2 x)) →
@irrel_pidist Y (x ← I1; f1 x) (x ← I2; f2 x).
Proof.
intros I1 I2 f1 f2 Hirrel Hirrelfun.
eapply irrel_pidist_trans.
{ eapply irrel_pidist_bind1; eauto. }
intros f Hb. eapply Ex_min_bind_le;
eauto using Ex_min_bounded_is_bounded, ex_Ex_extrema_bounded_fun,
Ex_min_bounded_fun_finite.
intros a ?. eapply Hirrelfun; eauto.
Qed.
Lemma irrel_pidist_proper X :
∀ (I1 I1' I2 I2': pidist X), le_pidist I1' I1 → le_pidist I2 I2'
→ irrel_pidist I1 I2 → irrel_pidist I1' I2'.
Proof.
intros ???? Hle1 Hle2. eapply irrel_pidist_proper_irrel.
{ intros x Hin. edestruct (Hle1 x) as (x'&Heq&Hin'); eauto.
exists {| ivd_ival := x'; val_sum1 := all_sum1 I1 _ Hin'|}; split; auto.
eapply irrel_ivd_proper; eauto; last apply irrel_ivd_refl.
reflexivity.
}
{ intros x Hin. edestruct (Hle2 x) as (x'&Heq&Hin'); eauto.
exists {| ivd_ival := x'; val_sum1 := all_sum1 I2' _ Hin'|}; split; auto.
eapply irrel_ivd_proper; eauto; last apply irrel_ivd_refl.
reflexivity.
}
Qed.
Global Instance irrel_pidist_mono_instance {X} : Proper (@le_pidist X --> @le_pidist X ==> Coq.Program.Basics.impl) (@irrel_pidist X).
Proof.
intros I1 I1' Heq1 I2 I2' Heq2.
intros Hirrel. eapply irrel_pidist_proper; eauto.
Qed.
Global Instance irrel_pidist_proper_instance {X} : Proper (@eq_pidist X ==> @eq_pidist X ==> iff) (@irrel_pidist X).
Proof.
intros I1 I1' Heq1 I2 I2' Heq2.
split; intros Hirrel; eapply irrel_pidist_proper; eauto;
try (setoid_rewrite Heq1; reflexivity);
try (setoid_rewrite Heq2; reflexivity).
Qed.
Global Instance irrel_pidist_Transitivite {X}: Transitive (@irrel_pidist X).
Proof. intros ???. apply irrel_pidist_trans. Qed.
Global Instance irrel_pidist_Reflexive {X}: Reflexive (@irrel_pidist X).
Proof. intros ?. apply irrel_pidist_refl. Qed.
Record irrel_couplingP {A1 A2} (I1: ivdist A1) (Is2: pidist A2) (P: A1 → A2 → Prop) : Type :=
{ irrel_I : ivdist A1;
irrel_Is : pidist A2;
irrel_rel_I : irrel_ivd I1 irrel_I;
irrel_rel_Is : irrel_pidist irrel_Is Is2;
irrel_couple_wit :> idist_pidist_couplingP irrel_I irrel_Is P
}.
Definition lsupport {A1 A2 Is1 Is2 P} (Icouple: irrel_couplingP Is1 Is2 P) (y: A2) :=
{ x : A1 | ∃ i Hpf, ival.ind Icouple i = (exist _ (x, y) Hpf) ∧ ival.val Icouple i > 0 }.
Definition rsupport {A1 A2 Is1 Is2 P} (Icouple: irrel_couplingP Is1 Is2 P) (x: A1) :=
{ y : A2 | ∃ i Hpf, ival.ind Icouple i = (exist _ (x, y) Hpf) ∧ ival.val Icouple i > 0 }.
Definition irrel_coupling_propP {A1 A2} (I1: ivdist A1) (Is2: pidist A2) P : Prop :=
∃ (ic: irrel_couplingP I1 Is2 P), True.
Lemma ic_wit_to_prop {A1 A2} (I1 : ivdist A1) (Is2: pidist A2) P :
irrel_couplingP I1 Is2 P →
irrel_coupling_propP I1 Is2 P.
Proof.
intros; eexists; eauto.
Qed.
Lemma ic_prop_to_wit {A1 A2} (I1 : ivdist A1) (Is2: pidist A2) P :
irrel_coupling_propP I1 Is2 P →
irrel_couplingP I1 Is2 P.
Proof.
intros (?&_)%ClassicalEpsilon.constructive_indefinite_description; auto.
Qed.
Lemma irrel_pidist_support_coerce {X} (I1 I2: pidist X) :
irrel_pidist I2 I1 →
∀ x, In_psupport x I2 → In_psupport x I1.
Proof.
intros Hirrel x Hin.
destruct Hin as (I&i&Hin&Hind&Hval).
assert (0 < Pr (eq ^~ x) {| ivd_ival := I; val_sum1 := all_sum1 _ _ Hin|}).
{ eapply In_isupport_pr_gt_0.
eexists; eauto. }
assert (Rbar_lt 0 (Pr_max (eq^~ x) I1)) as Hmax.
{
apply (Rbar_lt_le_trans _ (Pr_max (eq^~ x) I2)); last first.
{ eapply irrel_pidist_Ex_max; eauto.
exists 1. intros. destruct (is_left); rewrite Rabs_right; nra.
}
apply (Rbar_lt_le_trans _ (Pr (eq^~ x) {| ivd_ival := I; val_sum1 := all_sum1 I2 I Hin |}));
first done.
apply Ex_max_spec1' => //=.
eapply (ex_Pr (eq^~x) {| ivd_ival := I; val_sum1 := all_sum1 I2 I Hin |}).
}
assert (∃ I' : ivdist X, In (I': ival X) I1 ∧ 0 < Pr (eq^~x) I') as (I'&Hin'&Hpr').
{
apply Classical_Pred_Type.not_all_not_ex. intros Hneg.
apply Rbar_lt_not_le in Hmax. apply Hmax.
apply Ex_max_spec2.
intros r' (I'&Hin'&Heq).
apply Rbar_not_lt_le. intros Hlt.
exfalso; eapply (Hneg {| ivd_ival := I'; val_sum1 := all_sum1 _ _ Hin'|}).
split; first done.
rewrite /Pr. erewrite is_Ex_ival_unique; last eassumption.
auto.
}
exists I'. apply pr_gt_0_In_isupport in Hpr'.
destruct Hpr' as (?&?&?). eexists; split_and!; eauto.
Qed.
Lemma irrel_pidist_choice {X} (I1 I1' I2 I2': pidist X) p Hpf Hpf':
irrel_pidist I1 I2 →
irrel_pidist I1' I2' →
irrel_pidist (pidist_plus p Hpf I1 I1') (pidist_plus p Hpf' I2 I2').
Proof.
intros Hirrel1 Hirrel2.
transitivity (b ← pidist_plus p Hpf (mret true) (mret false);
if (b: bool) then I1 else I1').
{ rewrite pidist_plus_bind ?pidist_left_id. reflexivity. }
transitivity (b ← pidist_plus p Hpf' (mret true) (mret false);
if (b: bool) then I2 else I2'); last first.
{ rewrite pidist_plus_bind ?pidist_left_id. reflexivity. }
apply irrel_pidist_bind.
{ cut (eq_pidist (pidist_plus p Hpf (mret true) (mret false))
(pidist_plus p Hpf' (mret true) (mret false))).
{ intros ->; reflexivity. }
apply pidist_plus_proper; reflexivity.
}
intros [|]; eauto.
Qed.
Lemma irrel_pidist_irrel {X Y}: ∀ I1 (I0: pidist Y), @irrel_pidist X (x ← I0; I1) I1.
Proof.
intros. intros f Hbounded.
rewrite Ex_min_bind_irrel //=; try reflexivity;
eauto using Ex_min_bounded_is_bounded, ex_Ex_extrema_bounded_fun,
Ex_min_bounded_fun_finite.
Qed.
Lemma irrel_coupling_proper {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
eq_ivd I1 I2 →
eq_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Lemma irrel_coupling_mono {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
eq_ivd I1 I2 →
le_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Lemma irrel_coupling_mono_irrel {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
eq_ivd I1 I2 →
irrel_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Lemma irrel_coupling_mono_irrel' {A1 A2} (I1 I2 : ivdist A1) (Is1 Is2: pidist A2) P:
irrel_ivd I1 I2 →
irrel_pidist Is1 Is2 →
irrel_couplingP I1 Is1 P →
irrel_couplingP I2 Is2 P.
Proof.
intros HeqI HeqIs [I1' Is1' HeqI1 HeqIs1 Hcouple].
exists I1' Is1'.
- setoid_rewrite <-HeqI. done.
- setoid_rewrite <-HeqIs. done.
- done.
Qed.
Global Instance irrel_coupling_prop_Proper {A1 A2}:
Proper (@eq_ivd A1 ==> @le_pidist A2 ==> eq ==> impl) irrel_coupling_propP.
Proof.
intros ?? Heq ?? Hle ?? ->.
intros H%ic_prop_to_wit.
apply ic_wit_to_prop.
eapply irrel_coupling_mono; eauto.
Qed.
Global Instance irrel_coupling_prop_irrel_Proper {A1 A2}:
Proper (@eq_ivd A1 ==> @irrel_pidist A2 ==> eq ==> impl) irrel_coupling_propP.
Proof.
intros ?? Heq ?? Hle ?? ->.
intros H%ic_prop_to_wit.
apply ic_wit_to_prop.
eapply irrel_coupling_mono_irrel; eauto.
Qed.
Lemma irrel_coupling_mret {A1 A2} (P: A1 → A2 → Prop) x y:
P x y →
irrel_couplingP (mret x) (mret y) P.
Proof.
intros HP. exists (mret x) (mret y); try reflexivity.
by apply ip_coupling_mret.
Qed.
Lemma irrel_coupling_prop_mret {A1 A2} (P: A1 → A2 → Prop) x y:
P x y →
irrel_coupling_propP (mret x) (mret y) P.
Proof.
intros; apply ic_wit_to_prop, irrel_coupling_mret; auto.
Qed.
Lemma irrel_coupling_bind {A1 A2 B1 B2} P (f1: A1 → ivdist B1) (f2: A2 → pidist B2)
I1 Is2 Q (Ic: irrel_couplingP I1 Is2 P):
(∀ x y, P x y → irrel_couplingP (f1 x) (f2 y) Q) →
irrel_couplingP (mbind f1 I1) (mbind f2 Is2) Q.
Proof.
intros Hfc.
destruct Ic as [I1' Is2' HeqI HeqIs Hcouple].
destruct Hcouple as [I2' ? [Ic ? ?]%ic_coupling_to_id].
unshelve (eexists).
- refine (xy ← Ic; _).
destruct xy as ((x&y)&HP).
destruct (Hfc _ _ HP).
exact irrel_I0.
- refine (xy ← singleton Ic; _).
destruct xy as ((x&y)&HP).
destruct (Hfc x y HP).
exact irrel_Is0.
- etransitivity.
{ eapply irrel_ivd_bind. eauto. reflexivity. }
etransitivity.
{ eapply irrel_ivd_bind. setoid_rewrite idc_proj1. reflexivity. reflexivity. }
setoid_rewrite ivd_assoc. eapply irrel_ivd_bind; first reflexivity.
intros ((x&y)&HP).
destruct (Hfc _ _ _) as [? ? ?]. rewrite /irrel_I.
rewrite /sval. setoid_rewrite ivd_left_id. done.
- etransitivity; last first.
{ eapply irrel_pidist_bind.
- etransitivity; last by eauto. eapply irrel_pidist_proper; first by eauto.
reflexivity. reflexivity.
- intros; reflexivity.
}
setoid_rewrite idc_proj2. setoid_rewrite singleton_bind.
setoid_rewrite pidist_assoc.
eapply irrel_pidist_bind; first reflexivity.
intros ((x&y)&HP).
destruct (Hfc _ _ _) as [? ? ?]. rewrite /irrel_I.
rewrite /sval. setoid_rewrite singleton_mret. setoid_rewrite pidist_left_id.
eauto.
- eapply (ip_coupling_bind _ _ _ _ (λ x y, x = y)).
* apply ip_coupling_singleton.
* intros ((?&?)&HP1) ((x&y)&HP2).
inversion 1; subst.
rewrite //=.
assert (HP1 = HP2). { apply classical_proof_irrelevance. }
subst.
destruct (Hfc x y HP2). eauto.
Qed.
Lemma irrel_coupling_prop_bind {A1 A2 B1 B2} P (f1: A1 → ivdist B1) (f2: A2 → pidist B2)
I1 Is2 Q (Ic: irrel_coupling_propP I1 Is2 P):
(∀ x y, P x y → irrel_coupling_propP (f1 x) (f2 y) Q) →
irrel_coupling_propP (mbind f1 I1) (mbind f2 Is2) Q.
Proof.
intros; eapply ic_wit_to_prop, irrel_coupling_bind; intros; apply ic_prop_to_wit; eauto.
Qed.
Lemma irrel_coupling_trivial {A1 A2} (I: ivdist A1) (Is: pidist A2):
irrel_couplingP I Is (λ x y, True).
Proof.
assert ({ I' : ivdist A2 | In (I': ival A2) Is}) as (I'&Hin).
{ destruct Is as [(Is&Hne) Hall] => //=.
rewrite //= in Hall.
apply ClassicalEpsilon.constructive_indefinite_description in Hne as (I'&His).
exists {| ivd_ival := I'; val_sum1 := Hall _ His |}.
auto.
}
exists (x ← I'; I) (singleton (x ← I; I')).
{ eapply irrel_ivd_irrel. }
{ eapply irrel_pidist_proper_irrel; [| apply le_pidist_irrel_refl | reflexivity ].
intros I0 Hin'. inversion Hin' as [Heq].
exists I'; split; auto.
eapply (irrel_ivd_proper _ (x ← I; I')).
{ rewrite /eq_ivd. rewrite -Heq //=. }
{ reflexivity. }
symmetry. apply irrel_ivd_irrel.
}
exists (x ← I; I').
{ intros ?. eapply In_pidist_le_singleton. eexists; split; first reflexivity.
rewrite /In/singleton//=. }
unshelve (eexists).
{ refine (ivd_ival (x ← I; y ← I'; mret _)).
exists (x, y); done. }
- setoid_rewrite ival_bind_comm. setoid_rewrite ival_assoc.
eapply ival_bind_congr; first reflexivity.
intros. setoid_rewrite ival_bind_mret_mret. setoid_rewrite ival_right_id. reflexivity.
- setoid_rewrite ival_assoc.
eapply ival_bind_congr; first reflexivity.
intros. setoid_rewrite ival_bind_mret_mret. setoid_rewrite ival_right_id. reflexivity.
Qed.
Lemma irrel_coupling_prop_trivial {A1 A2} (I: ivdist A1) (Is: pidist A2):
irrel_coupling_propP I Is (λ x y, True).
Proof.
apply ic_wit_to_prop, irrel_coupling_trivial.
Qed.
Lemma irrel_coupling_conseq {A1 A2} (P1 P2: A1 → A2 → Prop) (I: ivdist A1) (Is: pidist A2):
(∀ x y, P1 x y → P2 x y) →
irrel_couplingP I Is P1 →
irrel_couplingP I Is P2.
Proof.
intros HP Hirrel.
destruct Hirrel as [I0 Is0 ? ? ?].
exists I0 Is0; auto.
eapply ip_coupling_conseq; eauto.
Qed.
Lemma irrel_coupling_plus {A1 A2} p Hpf p' Hpf'
(P : A1 → A2 → Prop) (Is1 Is1': ivdist A1) (Is2 Is2': pidist A2) :
p = p' →
irrel_couplingP Is1 Is2 P →
irrel_couplingP Is1' Is2' P →
irrel_couplingP (ivdplus p Hpf Is1 Is1') (pidist_plus p' Hpf' Is2 Is2') P.
Proof.
intros Hpeq Hic Hic'. subst.
destruct Hic as [I1i Is2i Hirrel1i Hirrel2i Hwit].
destruct Hic' as [I1i' Is2i' Hirrel1i' Hirrel2i' Hwit'].
exists (ivdplus p' Hpf I1i I1i') (pidist_plus p' Hpf' Is2i Is2i').
{ eapply irrel_ivd_choice; eauto. }
{ eapply irrel_pidist_choice; eauto. }
apply ip_coupling_plus; eauto.
Qed.
Lemma irrel_coupling_bind_condition {A1 B1 B2} (f1: A1 → ivdist B1) (f2: A1 → pidist B2)
I Is Q x:
(le_pidist (singleton I) Is ) →
(irrel_couplingP (f1 x) (f2 x) Q) →
irrel_couplingP (x ← I; y ← f1 x; mret (x, y))
(x ← Is; y ← f2 x; mret (x, y))
(λ xy1 xy2, fst xy1 = x → fst xy2 = x → Q (snd xy1) (snd xy2)).
Proof.
intros Hle Hc.
eapply (irrel_coupling_bind (λ x y, x = y)).
{ exists I Is; try reflexivity.
exists I; eauto. apply ival_coupling_refl.
}
intros ? y ?; subst.
destruct (ClassicalEpsilon.excluded_middle_informative (x = y)).
- intros; subst. eapply irrel_coupling_bind; eauto.
intros. apply irrel_coupling_mret => ? //=.
- intros. eapply irrel_coupling_bind.
* apply irrel_coupling_trivial.
* intros. apply irrel_coupling_mret => ? //=. intros. congruence.
Qed.
Lemma irrel_coupling_support {X Y} I1 I2 (P: X → Y → Prop):
∀ (Ic: irrel_couplingP I1 I2 P),
irrel_couplingP I1 I2 (λ x y, ∃ Hpf: P x y, In_isupport x I1 ∧ In_psupport y I2 ∧
In_isupport (exist _ (x, y) Hpf) Ic).
Proof.
intros [? ? Heq1 Heq2 Ic].
specialize (ip_coupling_support _ _ _ Ic).
eexists; eauto.
eapply ip_coupling_conseq; eauto.
intros x y (Hpf&Hin1&Hin2&?); exists Hpf; repeat split; auto.
- edestruct Hin1 as (i&?&?).
edestruct (irrel_ivd_support_coerce _ _ Heq1) as (Hcoerce&_).
apply Hcoerce; eauto.
- eapply irrel_pidist_support_coerce; eauto.
Qed.
Lemma irrel_coupling_support_wit {X Y} I1 I2 (P: X → Y → Prop):
∀ (Ic: irrel_couplingP I1 I2 P),
{ xy : X * Y | ∃ Hpf : P (fst xy) (snd xy),
In_isupport (fst xy) I1 ∧ In_psupport (snd xy) I2 ∧ In_isupport (exist _ xy Hpf) Ic }.
Proof.
intros [? ? Heq1 Heq2 Ic].
specialize (ip_coupling_support_wit _ _ _ Ic).
rewrite //=.
intros ((x&y)&Hpf).
exists (x, y).
destruct Hpf as (Hpf&Hin1&Hin2&?).
exists Hpf; repeat split; auto.
- edestruct Hin1 as (i&?&?).
edestruct (irrel_ivd_support_coerce _ _ Heq1) as (Hcoerce&_).
apply Hcoerce; eauto.
- eapply irrel_pidist_support_coerce; eauto.
Qed.
Lemma rsupport_support_right {X Y} (Ix: ivdist X) (x: X) Is (P: X → Y → Prop)
(Ic: irrel_couplingP Ix Is P) (c: rsupport Ic x) :
In_psupport (proj1_sig c) Is.
Proof.
destruct c as (y'&ic&HP&Hind&Hgt).
rewrite //=. destruct Ic as [Ix' Is' Hirrel_ivd Hirrel_pidist Ic].
eapply irrel_pidist_support_coerce; eauto.
destruct Ic as [Iy Hle Ic].
rewrite //= in ic Hind Hgt.
clear Hirrel_pidist.
destruct (irrel_ivd_support_coerce _ _ Hirrel_ivd x) as (Hcoerce&_).
destruct (Hle Iy) as (Iy'&Heq&Hin); first by auto.
destruct Ic as [Ic Hproj1 Hproj2].
rewrite //= in ic Hind Hgt.
symmetry in Hproj2.
setoid_rewrite Heq in Hproj2.
destruct Hproj2 as (h1&h2&?&?&Hindic&Hvalic).
assert (val (x0 ← Ic; mret (sval x0).2) (existT ic tt) > 0) as Hgt'.
{ rewrite //= Rmult_1_r //=. }
specialize (Hindic (coerce_supp _ _ Hgt')).
specialize (Hvalic (coerce_supp _ _ Hgt')).
rewrite //= in Hindic Hvalic.
exists Iy'.
exists (sval (h1 (coerce_supp _ _ Hgt'))).
repeat split; auto.
- rewrite Hindic Hind //=.
- rewrite Hvalic //=.
Qed.
Lemma rsupport_post {X Y} (Ix: ivdist X) (x: X) Is (P: X → Y → Prop)
(Ic: irrel_couplingP Ix Is P) (c: rsupport Ic x) :
P x (proj1_sig c).
Proof.
destruct c as (y&I&i&Hind&?).
rewrite //=.
Qed.
Transparent pidist_ret.
Lemma rsupport_mret_right {X Y} (Ix: ivdist X) (x: X) (y: Y) (P: X → Y → Prop)
(Ic: irrel_couplingP Ix (mret y) P) (c: rsupport Ic x) :
proj1_sig c = y.
Proof.
edestruct (rsupport_support_right _ _ _ _ Ic c) as (Iy&iy&Hin&Hind&?).
subst; rewrite -Hind //=.
rewrite /In/mret/base.mret//= in Hin.
subst. destruct iy => //=.
Qed.
Opaque pidist_ret.
Lemma ip_irrel_coupling {A1 A2} (I: ivdist A1) (Is: pidist A2) (P: A1 → A2 → Prop):
idist_pidist_couplingP I Is P →
irrel_couplingP I Is P.
Proof.
intros.
exists I Is; try reflexivity; eauto.
Qed.
Lemma irrel_bounded_supp_fun {A} f (Is Is': pidist A):
irrel_pidist Is Is' →
bounded_fun_on f (λ x, In_psupport x Is') →
bounded_fun_on f (λ x, In_psupport x Is).
Proof.
intros Hle Hbf.
eapply bounded_fun_on_anti; try eassumption.
eapply irrel_pidist_support_coerce; eauto.
Qed.
Lemma irrel_pidist_bounded_supp_Ex_max {A} f (Is Is': pidist A):
irrel_pidist Is Is' →
bounded_fun_on f (λ x, In_psupport x Is') →
Rbar_le (Ex_max f Is) (Ex_max f Is').
Proof.
intros Hi Hb1.
feed pose proof (irrel_bounded_supp_fun f Is Is') as Hb2; eauto.
assert (bounded_fun_on f (λ x, In_psupport x Is ∨ In_psupport x Is')) as Hb.
{ destruct Hb1 as (c1&?).
destruct Hb2 as (c2&?).
exists (Rmax c1 c2).
intros x [Hin1|Hin2]; rewrite Rmax_Rle; intuition.
}
clear Hb1. clear Hb2.
edestruct (bounded_fun_on_to_bounded f) as (g'&Hb'&Heq); eauto.
feed pose proof (irrel_pidist_Ex_max Is Is' Hi g' Hb'); eauto.
erewrite (Ex_max_eq_ext_supp f g' Is'); eauto.
etransitivity; eauto.
erewrite (Ex_max_eq_ext_supp f g' Is); eauto; first reflexivity.
Qed.
Lemma Ex_min_irrel_anti {A} f (Is Is': pidist A) :
irrel_pidist Is Is' →
bounded_fun f →
Rbar_le (Ex_min f Is') (Ex_min f Is).
Proof. eauto. Qed.
Lemma irrel_coupling_eq_ex_Ex {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun g →
ex_Ex_ival f I.
Proof.
intros [Is1_irrel Is2_irrel Hirrel_ivd Hirrel_pidst Ic] Hex.
assert (idist_pidist_couplingP (x ← Is1_irrel; mret (f x))
(x ← Is2_irrel; mret (g x))
(λ x y, x = y)) as Ic'.
{ eapply ip_coupling_bind; eauto => ???.
apply ip_coupling_mret; auto. }
destruct Ic' as [I2 Hmem Ic'].
apply ival_coupling_eq in Ic'.
eapply ex_Ex_ival_irrel_proper.
{ symmetry; eauto. }
rewrite (ex_Ex_ival_fmap id f).
setoid_rewrite Ic'.
cut (ex_Ex_extrema id (x ← Is2_irrel; mret (g x))).
{ intros Hex'. edestruct (Hmem I2) as (I2'&Heq'&?); first done.
rewrite Heq'. eapply Hex'; eauto. }
rewrite -ex_Ex_extrema_fmap. eauto.
eapply ex_Ex_extrema_bounded_fun.
eauto.
Qed.
Lemma irrel_coupling_eq_Ex_min {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun g →
Rbar_le (Ex_min g Is) (Ex_ival f I).
Proof.
intros Hirrel Hb.
feed pose proof (irrel_coupling_eq_ex_Ex f g I Is) as Hex; eauto.
destruct Hirrel as [Is1_irrel Is2_irrel Hirrel_ivd Hirrel_pidst Ic].
assert (idist_pidist_couplingP (x ← Is1_irrel; mret (f x))
(x ← Is2_irrel; mret (g x))
(λ x y, x = y)) as Ic'.
{ eapply ip_coupling_bind; eauto => ???.
apply ip_coupling_mret; auto. }
destruct Ic' as [I2 Hmem Ic'].
apply ival_coupling_eq in Ic'.
etransitivity; first apply Ex_min_irrel_anti; eauto.
erewrite Ex_ival_irrel_proper; eauto.
transitivity (Ex_min (λ x, Ex_min id (mret (g x))) Is2_irrel).
{ apply Ex_min_le_ext.
* intros. rewrite Ex_min_mret. reflexivity.
* eapply ex_Ex_extrema_bounded_fun; eauto.
}
assert (ex_Ex_ival f Is1_irrel).
{ eapply ex_Ex_ival_irrel_proper; eauto. }
etransitivity; first eapply Ex_min_bind_post_aux2; last first.
- transitivity (Ex_ival (λ x, Ex_ival id (mret (f x))) Is1_irrel); last first.
{ apply Ex_ival_mono.
* intros. rewrite Ex_ival_mret. reflexivity.
* setoid_rewrite Ex_ival_mret.
eapply ex_Ex_ival_irrel_proper; eauto.
* eapply ex_Ex_ival_irrel_proper; eauto.
}
rewrite -Ex_ival_bind_post; last first.
{ rewrite -ex_Ex_ival_fmap. eauto. }
transitivity (Ex_ival id I2); last first.
{ refl_right. f_equal. symmetry. eapply Ex_ival_proper; eauto.
rewrite -ex_Ex_ival_fmap. eauto. }
apply In_pidist_le_singleton in Hmem.
destruct Hmem as (I2'&Heq22'&?).
transitivity (Ex_ival id I2'); last first.
{ refl_right. f_equal. symmetry. eapply Ex_ival_proper; eauto.
eapply ex_Ex_ival_proper; eauto.
rewrite -ex_Ex_ival_fmap. eauto. }
apply Ex_min_spec1'; auto.
eapply ex_Ex_ival_proper; eauto.
eapply ex_Ex_ival_proper; eauto.
rewrite -ex_Ex_ival_fmap. eauto.
- setoid_rewrite Ex_min_mret.
apply ex_Ex_extrema_bounded_fun; eauto.
- intros. setoid_rewrite Ex_min_mret. rewrite //=.
- apply Ex_min_bounded_fun_finite.
setoid_rewrite Ex_min_mret. eauto.
Qed.
Lemma irrel_coupling_eq_Ex_min' {A1 A2 A3} f g (h : A3 → R) (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun (λ x, h (g x)) →
Rbar_le (Ex_min (λ x, h (g x)) Is) (Ex_ival (λ x, h (f x)) I).
Proof.
intros Hic Hb.
eapply irrel_coupling_eq_Ex_min; eauto.
eapply irrel_coupling_conseq; eauto.
rewrite //=. intros x y ->. done.
Qed.
Lemma irrel_coupling_eq_Ex_max {A1 A2} f g (I: ivdist A1) (Is: pidist A2):
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun g →
Rbar_le (Ex_ival f I) (Ex_max g Is).
Proof.
intros HIc Hb.
apply Rbar_opp_le.
rewrite Ex_max_neg_min Rbar_opp_involutive.
rewrite /Rbar_opp//=.
rewrite -Ex_ival_negate.
apply irrel_coupling_eq_Ex_min; eauto.
- eapply irrel_coupling_conseq; eauto => x y ?.
nra.
- destruct Hb as (c&Hb). exists c; intros x. specialize (Hb x).
move: Hb. do 2 apply Rabs_case; nra.
Qed.
Lemma irrel_coupling_eq_ex_Ex_supp {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun_on g (λ x, In_psupport x Is) →
ex_Ex_ival f I.
Proof.
intros Hi Hex.
edestruct (bounded_fun_on_to_bounded g) as (g'&?Hb&Heq); eauto.
feed pose proof (irrel_coupling_eq_ex_Ex f g' I Is); eauto.
eapply irrel_coupling_conseq; last first.
{ unshelve (eapply @irrel_coupling_support); last eapply Hi. }
rewrite //=. intros x y (Hpf&Hin&Hinp&?).
rewrite -Heq; eauto.
Qed.
Lemma irrel_coupling_eq_Ex_min_supp {A1 A2} f g (I: ivdist A1) (Is: pidist A2) :
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun_on g (λ x, In_psupport x Is) →
Rbar_le (Ex_min g Is) (Ex_ival f I).
Proof.
intros Hi Hex.
edestruct (bounded_fun_on_to_bounded g) as (g'&?Hb&Heq); eauto.
feed pose proof (irrel_coupling_eq_Ex_min f g' I Is); eauto.
eapply irrel_coupling_conseq; last first.
{ unshelve (eapply @irrel_coupling_support); last eapply Hi. }
rewrite //=. intros x y (Hpf&Hin&Hinp&?).
rewrite -Heq; eauto.
etransitivity; last eassumption.
refl_right.
eapply Ex_min_eq_ext_supp.
eauto.
Qed.
Lemma irrel_coupling_eq_Ex_max_supp {A1 A2} f g (I: ivdist A1) (Is: pidist A2):
irrel_couplingP I Is (λ x y, f x = g y) →
bounded_fun_on g (λ x, In_psupport x Is) →
Rbar_le (Ex_ival f I) (Ex_max g Is).
Proof.
intros HIc Hb.
apply Rbar_opp_le.
rewrite Ex_max_neg_min Rbar_opp_involutive.
rewrite /Rbar_opp//=.
rewrite -Ex_ival_negate.
apply irrel_coupling_eq_Ex_min_supp; eauto.
- eapply irrel_coupling_conseq; eauto => x y ?.
nra.
- destruct Hb as (c&Hb). exists c; intros x Hin. specialize (Hb x Hin).
move: Hb. do 2 apply Rabs_case; nra.
Qed.
|
{"author": "jtassarotti", "repo": "coq-proba", "sha": "11d69b2286940ff532421252a7d9b1384c2f674a", "save_path": "github-repos/coq/jtassarotti-coq-proba", "path": "github-repos/coq/jtassarotti-coq-proba/coq-proba-11d69b2286940ff532421252a7d9b1384c2f674a/theories/monad/idxval/irrel_equiv.v"}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 21:15:20 2020
@author: lukemcculloch
"""
import os
import weakref
try:
from memory_profiler import profile
MEM_PROFILE = True
except:
print 'please install memory_profiler'
MEM_PROFILE = False
#
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from pylab import * #quiver...
#
import matplotlib.tri as tri #plot unstructured data
# see https://matplotlib.org/gallery/images_contours_and_fields/irregulardatagrid.html
pi = np.pi
from flux import roe
from System2D import Grid
from BoundaryConditions import BC_states
from Parameters import Parameters
from Utilities import default_input
from DataHandler import DataHandler
import FileTools as FT
from PlotGrids import PlotGrid
nq = 4 # Euler system size
class StencilLSQ(object):
"""
#------------------------------------------
#>> Cell-centered LSQ stencil data
#------------------------------------------
"""
def __init__(self, cell, mesh):
self.cell = cell #reference to cell
#self.mesh = mesh #reference to mesh
self._mesh = weakref.ref(mesh) if mesh else mesh
#
self.nnghbrs_lsq = None #number of lsq neighbors
self.nghbr_lsq = [] #list of lsq neighbors
self.cx = [] #LSQ coefficient for x-derivative
self.cy = [] #LSQ coefficient for y-derivative
#
#self.node = np.zeros((self.nNodes),float) #node to cell list
self.construct_vertex_stencil()
@property
def mesh(self):
if not self._mesh:
return self._mesh
_mesh = self._mesh()
if _mesh:
return _mesh
else:
raise LookupError("mesh was destroyed")
def __del__(self):
pass
#print("delete LSQ",self.cell.cid)
#print("delete", "LSQstencil")
def construct_vertex_stencil(self):
for node in self.cell.nodes:
for cell in node.parent_cells:
if cell is not self.cell:
self.nghbr_lsq.append(cell)
self.nghbr_lsq = set(self.nghbr_lsq)
self.nghbr_lsq = list(self.nghbr_lsq)
self.nnghbrs_lsq = len(self.nghbr_lsq)
# Allocate the LSQ coeffient arrays for the cell i:
self.cx = np.zeros((self.nnghbrs_lsq),float)
self.cy = np.zeros((self.nnghbrs_lsq),float)
return
def plot_lsq_reconstruction(self, canvas = None,
alpha = .1, saveit = False):
if canvas is None:
fig, ax = plt.subplots()
ax.axis('equal')
else:
ax = canvas
fig.suptitle('LSQ reconstruction stencil', fontsize=10)
ax = self.cell.plot_cell(canvas = ax,
fillcolor='green')
for cell in self.nghbr_lsq:
ax = cell.plot_cell(canvas = ax)
patch = mpatches.Patch(color='green', label='primary cell')
plt.legend(handles=[patch])
if saveit:
mytitle = '../pics/stencil_'+str(self.cell.cid)
self.save_image(filename=mytitle, ftype = '.png')
return
def save_image(self, filename = None, ftype = '.pdf', closeit=True):
""" save pdf file.
No file extension needed.
"""
if filename == None:
filename = default_input('please enter a name for the picture', 'lsq_reconstruction')
plt.savefig(filename+ftype, bbox_inches = 'tight')
if closeit:
plt.close()
return
class Solvers(object):
"""
2D Euler/NS equations = 4 equations:
(1)continuity
(2)x-momentum
(3)y-momentum
(4)energy
"""
def __init__(self, mesh):
self.solver_initialized = False
self.mesh = mesh
self.dim = mesh.dim
self.Parameters = Parameters()
self.second_order = True
self.use_limiter = True
# solution data
self.u = np.zeros((mesh.nCells,nq),float) # conservative variables at cells/nodes
self.w = np.zeros((mesh.nCells,nq),float) # primative variables at cells/nodes
self.gradw = np.zeros((mesh.nCells,nq,self.dim),float) # gradients of w at cells/nodes.
#
self.u0 = np.zeros((mesh.nCells,nq),float) #work array
#
# solution convergence
self.res = np.zeros((mesh.nCells,nq),float) #residual vector
self.res_norm = np.zeros((nq,1),float)
#
# local convergence storage saved for speed
self.gradw1 = np.zeros((nq,self.dim),float)
self.gradw2 = np.zeros((nq,self.dim),float)
# update (pseudo) time step data
#self.u0 = np.zeros((mesh.nCells,nq),float)
self.dtau = np.zeros((mesh.nCells),float)
# accessor integers for clarity
self.ir = 0 # density
self.iu = 1 # x-velocity
self.iv = 2 # y-velocity
self.ip = 3 # pressure
# fluid properties
self.gamma = 1.4 # Ratio of specific heats for air
self.rho_inf = 1.0
self.u_inf = 1.0
self.v_inf = 0.0
self.p_inf = 1./self.gamma
#flux
self.uL3d = np.zeros(5,float) #conservative variables in 3D
self.uR3d = np.zeros(5,float) #conservative variables in 3D
self.n12_3d = np.zeros(3,float) #face normal in 3D
self.num_flux3d = np.zeros(5,float) #numerical flux in 3D
self.wsn = np.zeros((self.mesh.nCells),float) # max wave speed array
#------------------------------------------
#>> Cell-centered limiter data
#------------------------------------------
self.limiter_beps = 1.0e-14
self.phi = np.zeros((mesh.nCells),float)
#------------------------------------------
#>> least squared gradient
#------------------------------------------
self.cclsq = np.asarray( [StencilLSQ(cell,mesh) for cell in mesh.cells] )
#e.g.
#self.cclsq[0].nghbr_lsq #bulk list of all cells in the 'extended cell halo'
#------------------------------------------
#>> precompute least squared gradient coefficients
#------------------------------------------
self.compute_lsq_coefficients()
self.test_lsq_coefficients()
#------------------------------------------
#>> residual data
#------------------------------------------
self.num_flux = np.zeros(4,float)
self.ub = np.zeros(4,float)
self.wave_speed = 0.
# local copies of data
self.unit_face_normal = np.zeros((2),float)
#------------------------------------------
#>> exact solution data
#------------------------------------------
self.w_initial = np.zeros(4, float)
#--------------------------------------
# for the moment, default to simple initial conditions
self.bc_type = ["freestream" for el in range(mesh.nBoundaries)] #np.zeros(mesh.nBoundaries, str)
self.BC = BC_states(solver = self, flowstate = FlowState() )
def solver_boot(self, flowtype = 'vortex'):
#self.compute_lsq_coefficients()
def NotImp():
print("not implemented yet")
return
switchdict = {
'vortex': self.initial_condition_vortex,
'freestream': NotImp #self.set_initial_solution()
}
#switchdict.get(flowtype, "not implemented, at all")
switchdict[flowtype]()
self.plot_flow_at_cell_centers()
#self.explicit_steady_solver()
#self.explicit_unsteady_solver()
self.solver_initialized = True
return
def solver_solve(self, tfinal=1.0, dt=.01):
if not self.solver_initialized :
print("You must initialize the solver first!")
print("call solver_boot() on this object to initialize solver")
return
#self.explicit_steady_solver()
self.explicit_unsteady_solver(tfinal=tfinal, dt=dt)
return
def compute_lsq_coefficients(self):
"""
compute the neighbor-stencil-coefficients such that
a gradient summed around a cell
(compact or extended stencil around the cell in questions)
will give a least squares reconstruction of the gradient
at the cell in question
"""
print "--------------------------------------------------"
print " Computing LSQ coefficients... "
ix = 0
iy = 1
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#The power to the inverse distance weight. The value 0.0 is used to avoid
#instability known for Euler solvers. So, this is the unweighted LSQ gradient.
#More accurate gradients are obtained with 1.0, and such can be used for the
#viscous terms and source terms in turbulence models.
lsq_weight_invdis_power = 1.0
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# compute the LSQ coefficients (cx, cy) in all cells
for i in range(self.mesh.nCells):
cell = self.mesh.cells[i]
#------------------------------------------------------------------
#Define the LSQ problem size
m = self.cclsq[i].nnghbrs_lsq
n = self.dim
#------------------------------------------------------------------
# Allocate LSQ matrix and the pseudo inverse, R^{-1}*Q^T.
a = np.zeros((m,n),float)
#rinvqt = np.zeros((n,m),float)
#------------------------------------------------------------------
# Build the weighted-LSQ matrix A(m,n).
#
# weight_1 * [ (x1-xi)*wxi + (y1-yi)*wyi ] = weight_1 * [ w1 - wi ]
# weight_2 * [ (x2-xi)*wxi + (y2-yi)*wyi ] = weight_2 * [ w2 - wi ]
# .
# .
# weight_m * [ (xm-xi)*wxi + (ym-yi)*wyi ] = weight_2 * [ wm - wi ]
for k, nghbr_cell in enumerate(self.cclsq[i].nghbr_lsq):
dX = nghbr_cell.centroid - cell.centroid
# note you already stored this when you implemented this
# in the mesh itself.
weight_k = 1.0/(np.linalg.norm(dX)**lsq_weight_invdis_power)
a[k,0] = weight_k*dX[0]
a[k,1] = weight_k*dX[1]
#------------------------------------------------------------------
# Perform QR factorization and compute R^{-1}*Q^T from A(m,n)
q, r = np.linalg.qr(a)
rinvqt = np.dot( np.linalg.inv(r), q.T)
#------------------------------------------------------------------
# Compute and store the LSQ coefficients: R^{-1}*Q^T*w
#
# (wx,wy) = R^{-1}*Q^T*RHS
# = sum_k (cx,cy)*(wk-wi).
for k, nghbr_cell in enumerate(self.cclsq[i].nghbr_lsq):
dX = nghbr_cell.centroid - cell.centroid
weight_k = 1.0/(np.linalg.norm(dX)**lsq_weight_invdis_power)
self.cclsq[i].cx[k] = rinvqt[ix,k] * weight_k
self.cclsq[i].cy[k] = rinvqt[iy,k] * weight_k
return
def test_lsq_coefficients(self, tol=1.e-10):
"""
Compute the gradient of w=2*x+y
to see if we get wx=2 and wy=1 correctly.
"""
verifcation_error = False
for i, cell in enumerate(self.mesh.cells):
#initialize wx and wy
wx,wy = 0.0,0.0
# (xi,yi) to be used to compute the function 2*x+y at i.
xi,yi = cell.centroid
#Loop over the vertex neighbors.
for k, nghbr_cell in enumerate(self.cclsq[i].nghbr_lsq):
#(xk,yk) to be used to compute the function 2*x+y at k.
xk,yk = nghbr_cell.centroid
# This is how we use the LSQ coefficients:
# accumulate cx*(wk-wi) and cy*(wk-wi).
wx += self.cclsq[i].cx[k] * ( (2.0*xk+yk) - (2.0*xi+yi))
wy += self.cclsq[i].cy[k] * ( (2.0*xk+yk) - (2.0*xi+yi))
if (abs(wx-2.0) > tol) or (abs(wy-1.0) > tol) :
print " wx = ", wx, " exact ux = 2.0"
print " wy = ", wy, " exact uy = 1.0"
verifcation_error = True
if verifcation_error:
print " LSQ coefficients are not correct. See above. Stop."
else:
print " Verified: LSQ coefficients are exact for a linear function."
return
#-------------------------------------------------------------------------#
# Euler solver: Explicit Unsteady Solver: Ut + Fx + Gy = S
#
# This subroutine solves an un steady problem by 2nd-order TVD-RK with a
# global time step.
#-------------------------------------------------------------------------#
def explicit_unsteady_solver(self, tfinal=1.0, dt=.01):
"""
debugging:
self.t_final = 1.0
time = 0.0
"""
time = 0.0
self.t_final = tfinal
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Physical time-stepping
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#for jj in range(1): #debugging!
while (time < self.t_final):
print time
#------------------------------------------------------------------
# Compute the residual: res(i,:)
self.compute_residual()
#------------------------------------------------------------------
# Compute the global time step, dt. One dt for all cells.
dt = self.compute_global_time_step()
#adjust time step?
#code here
#------------------------------------------------------------------
# Increment the physical time and exit if the final time is reached
time += dt #TBD dt was undefined
#-------------------------------------------------------------------
# Update the solution by 2nd-order TVD-RK.: u^n is saved as u0(:,:)
# 1. u^* = u^n - (dt/vol)*Res(u^n)
# 2. u^{n+1} = 1/2*(u^n + u^*) - 1/2*(dt/vol)*Res(u^*)
#-----------------------------
#- 1st Stage of Runge-Kutta:
#u0 = u: is solution data - conservative variables at the cell centers I think
self.u0[:] = self.u[:]
# slow test first
for i in range(self.mesh.nCells):
self.u[i,:] = self.u0[i,:] - \
(dt/self.mesh.cells[i].volume) * self.res[i,:] #This is R.K. intermediate u*.
self.w[i,:] = self.u2w( self.u[i,:] )
#-----------------------------
#- 2nd Stage of Runge-Kutta:
self.compute_residual()
for i in range(self.mesh.nCells):
self.u[i,:] = 0.5*( self.u[i,:] + self.u0[i,:] ) - \
0.5*(dt/self.mesh.cells[i].volume) * self.res[i,:]
self.w[i,:] = self.u2w( self.u[i,:] )
print(" End of Physical Time-Stepping")
print("---------------------------------------")
return
#-------------------------------------------------------------------------#
#
# compute residuals
#
#-------------------------------------------------------------------------#
#
# compute_residual: comptutes the local residual
#
#-------------------------------------------------------------------------#
def compute_residual_norm(self):
self.res_norm[:] = np.sum(np.abs(self.res)) / float(self.mesh.nCells)
return
#-------------------------------------------------------------------------#
#
# compute_residual: comptutes the residuals at cells for
# the cell-centered finite-volume discretization.
#
#-------------------------------------------------------------------------#
def compute_residual(self):
mesh = self.mesh
# Gradients of primitive variables
self.gradw1[:,:] = 0.
self.gradw2[:,:] = 0.
self.res[:,:] = 0.
self.wsn[:] = 0.0
self.gradw[:,:,:] = 0.0
#----------------------------------------------------------------------
# Compute gradients at cells
if (self.second_order): self.compute_gradients()
if (self.use_limiter): self.compute_limiter()
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Residual computation: interior faces
#----------------------------------------------------------------------
# Flux computation across internal faces (to be accumulated in res(:))
#
# v2=Left(2)
# o---o---------o face(j,:) = [i,k,v2,v1]
# . . .
# . . .
# . .normal .
# . Left .---> Right .
# . c1 . c2 .
# . . .
# o----------o----------------o
# v1=Right(1)
#
#
# 1. Extrapolate the solutions to the face-midpoint from centroids 1 and 2.
# 2. Compute the numerical flux.
# 3. Add it to the residual for 1, and subtract it from the residual for 2.
#
#----------------------------------------------------------------------
savei = 0
#print 'do interior residual'
for i,face in enumerate(mesh.faceList):
"""
#debugging:
i = self.save[0]
face = self.save[1]
"""
#for i,face in enumerate(mesh.faceList[:2]):
#TODO: make sure boundary faces are not in the
# main face list
if face.isBoundary:
pass
else:
#savei = i
adj_face = face.adjacentface
c1 = face.parentcell # Left cell of the face
c2 = adj_face.parentcell # Right cell of the face
v1 = face.nodes[0] # Left node of the face
v2 = face.nodes[1] # Right node of the face
u1 = self.u[c1.cid] #Conservative variables at c1
u2 = self.u[c2.cid] #Conservative variables at c2
self.gradw1 = self.gradw[c1.cid]
self.gradw2 = self.gradw[c2.cid]
self.unit_face_normal[:] = face.normal_vector[:]
#Face midpoint at which we compute the flux.
xm,ym = face.center
#Set limiter functions
if (self.use_limiter) :
phi1 = self.phi[c1.cid]
phi2 = self.phi[c2.cid]
else:
phi1 = 1.0
phi2 = 1.0
# Reconstruct the solution to the face midpoint and compute a numerical flux.
# (reconstruction is implemented inside "interface_flux".
#print 'i = ',i
num_flux, wave_speed = self.interface_flux(u1, u2, #<- Left/right states
self.gradw1, self.gradw2, #<- Left/right same gradients
face.normal_vector, #<- unit face normal
c1.centroid, #<- Left cell centroid
c2.centroid, #<- right cell centroid
xm, ym, #<- face midpoint
phi1, phi2, #<- Limiter functions
)
test = np.any(np.isnan(num_flux)) or np.isnan(wave_speed)
if test:
self.save = [i, face]
assert(not test), "Found a NAN in interior residual"
"""
debugging:
print u1, u2
print self.gradw1
print self.gradw2
print self.unit_face_normal
print c1.centroid
print c2.centroid
print xm,ym
print phi1,phi2
"""
#print i, num_flux, wave_speed
# Add the flux multiplied by the magnitude of the directed area vector to c1.
self.res[c1.cid,:] += num_flux * face.face_nrml_mag
self.wsn[c1.cid] += wave_speed * face.face_nrml_mag
# Subtract the flux multiplied by the magnitude of the directed area vector from c2.
# NOTE: Subtract because the outward face normal is -n for the c2.
self.res[c2.cid,:] -= num_flux * face.face_nrml_mag
self.wsn[c2.cid] += wave_speed * face.face_nrml_mag
# End of Residual computation: interior faces
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# Residual computation: boundary faces:
#
# Close the residual by looping over boundary faces and distribute a contribution
# to the corresponding cell.
# Boundary face j consists of nodes j and j+1.
#
# Interior domain /
# /
# /\ o
# / \ /
# / c1 \ / Outside the domain
# --o-------o------o
# j | j+1
# |
# v Face normal for the face j.
#
# c = bcell, the cell having the boundary face j.
#
savei = 0
#print 'do boundary residual'
for ib, bface in enumerate(self.mesh.boundaryList):
"""
ib = self.save[0]
bface = self.save[1]
"""
#Cell having a boundary face defined by the set of nodes j and j+1.
c1 = bface.parentcell
savei = ib
v1 = bface.nodes[0] # Left node of the face
v2 = bface.nodes[1] # Right node of the face
#Face midpoint at which we compute the flux.
xm,ym = bface.center
#Set limiter functions
if (self.use_limiter) :
phi1 = self.phi[c1.cid]
phi2 = 1.0
else:
phi1 = 1.0
phi2 = 1.0
u1 = self.u[c1.cid] #Conservative variables at c1
self.gradw1 = self.gradw[c1.cid]
self.unit_face_normal[:] = bface.normal_vector[:]
#---------------------------------------------------
# Get the right state (weak BC!)
#print 'ib = ',ib
self.ub = self.BC.get_right_state(xm,ym,
u1,
self.unit_face_normal,
self.bc_type[ib], #CBD (could be done): store these on the faces instead of seperate
self.ub)
self.gradw2 = self.gradw2 #<- Gradient at the right state. Give the same gradient for now.
#---------------------------------------------------
# Compute a flux at the boundary face.
#print 'ub = ',self.ub
num_flux, wave_speed = self.interface_flux(u1, self.ub, #<- Left/right states
self.gradw1, self.gradw2, #<- Left/right same gradients
self.unit_face_normal, #<- unit face normal
c1.centroid, #<- Left cell centroid
[xm, ym], #<- make up a right cell centroid
xm, ym, #<- face midpoint
phi1, phi2, #<- Limiter functions
)
test = np.any(np.isnan(self.wsn)) or np.isnan(wave_speed)
if test:
self.save = [ib, bface]
assert(not test), "Found a NAN in boundary residual"
#print ib, num_flux, wave_speed
"""
debugging:
print u1, u2
print self.gradw1
print self.gradw2
print self.unit_face_normal
print c1.centroid
print [xm, ym]
print xm,ym
print phi1,phi2
"""
#Note: No gradients available outside the domain, and use the gradient at cell c
# for the right state. This does nothing to inviscid fluxes (see below) but
# is important for viscous fluxes.
#Note: Set right centroid = (xm,ym) so that the reconstruction from the right cell
# that doesn't exist is automatically cancelled: wR=wb+gradw*(xm-xc2)=wb.
#---------------------------------------------------
# Add the boundary contributions to the residual.
self.res[c1.cid,:] += num_flux * face.face_nrml_mag
self.wsn[c1.cid] += wave_speed * face.face_nrml_mag
# no c2 on the boundary
# End of Residual computation: exterior faces
#------------------------------------------------------------------
#
#end compute_residual
#S*****************************************************************
return
#-------------------------------------------------------------------------#
#
# time stepping
#
#-------------------------------------------------------------------------#
def compute_global_time_step(self):
CFL = self.Parameters.CFL
#Initialize dt with the local time step at cell 1.
i = 1
assert(abs(self.wsn[i]) > 0.),'wsn time step initilization div by zero'
physical_time_step = CFL*self.mesh.cells[i].volume / ( 0.5*self.wsn[i] )
return physical_time_step
#-------------------------------------------------------------------------#
#
# compute w from u
# ------------------------------------------------------------------------#
# Input: u = conservative variables (rho, rho*u, rho*v, rho*E)
# Output: w = primitive variables (rho, u, v, p)
# ------------------------------------------------------------------------#
#
# Note: E = p/(gamma-1)/rho + 0.5*(u^2+v^2)
# -> p = (gamma-1)*rho*E-0.5*rho*(u^2+v^2)
#
#
#-------------------------------------------------------------------------#
def u2w(self, u):
w = np.zeros((nq),float)
iu = self.iu
iv = self.iv
ir = self.ir
ip = self.ip
w[ir] = u[0]
w[iu] = u[1]/u[0]
w[iv] = u[2]/u[0]
w[ip] = (self.gamma-1.0)*( u[3] - \
0.5*w[0]*(w[1]*w[1] + w[2]*w[2]) )
return w
#-------------------------------------------------------------------------#
#
# compute u from w
# ------------------------------------------------------------------------#
# Input: w = primitive variables (rho, u, v, p)
# Output: u = conservative variables (rho, rho*u, rho*v, rho*E)
# ------------------------------------------------------------------------#
#
# Note: E = p/(gamma-1)/rho + 0.5*(u^2+v^2)
#
#-------------------------------------------------------------------------#
def w2u(self, w):
u = np.zeros((nq),float)
gamma = self.gamma
iu = self.iu
iv = self.iv
ir = self.ir
ip = self.ip
u[0] = w[ir]
u[1] = w[ir]*w[iu]
u[2] = w[ir]*w[iv]
u[3] = w[ip]/(gamma-1.0)+0.5*w[ir]*(w[iu]*w[iu]+w[iv]*w[iv])
return u
#**************************************************************************
# Compute limiter functions
#
#**************************************************************************
def compute_limiter(self):
# loop cells
for cell in self.mesh.cells:
i = cell.cid
# loop primitive variables
for ivar in range(nq):
#----------------------------------------------------
# find the min and max values
# Initialize them with the solution at the current cell.
# which could be min or max.
wmin = self.w[cell.cid,ivar]
wmax = self.w[cell.cid,ivar]
#Loop over LSQ neighbors and find min and max
for nghbr_cell in self.cclsq[i].nghbr_lsq:
wmin = min(wmin, self.w[nghbr_cell.cid,ivar])
wmax = max(wmax, self.w[nghbr_cell.cid,ivar])
#----------------------------------------------------
# Compute phi to enforce maximum principle at vertices (MLP)
xc,yc = self.mesh.cells[i].centroid
# Loop over vertices of the cell i: 3 or 4 vertices for tria or quad.
for k,iv in enumerate(self.mesh.cells[i].nodes):
xp,yp = iv.vector
# Linear reconstruction to the vertex k
#diffx = xp-xc
#diffy = yp-yc
wf = self.w[i,ivar] + \
self.gradw[i,ivar,0]*(xp-xc) + \
self.gradw[i,ivar,1]*(yp-yc)
# compute dw^-.
dwm = wf - self.w[i,ivar]
# compute dw^+.
if ( dwm > 0.0 ):
dwp = wmax - self.w[i,ivar]
else:
dwp = wmin - self.w[i,ivar]
# Increase magnitude by 'limiter_beps' without changin sign.
# dwm = sign(one,dwm)*(abs(dwm) + limiter_beps)
# Note: We always have dwm*dwp >= 0 by the above choice! So, r=a/b>0 always
# Limiter function: Venkat limiter
phi_vertex = self.vk_limiter(dwp, dwm, self.mesh.cells[i].volume)
# Keep the minimum over the control points (vertices)
if (k==0):
phi_vertex_min = phi_vertex
else:
phi_vertex_min = min(phi_vertex_min, phi_vertex)
#end of vertex loop
# Keep the minimum over variables.
if (ivar==0) :
phi_var_min = phi_vertex_min
else:
phi_var_min = min(phi_var_min, phi_vertex_min)
#end primative variable loop
#Set the minimum phi over the control points and over the variables to be
#our limiter function. We'll use it for all variables to be on a safe side.
self.phi[i] = phi_var_min
# end cell loop
return
def vk_limiter(self, a, b, vol):
"""
***********************************************************************
* -- Venkat Limiter Function--
*
* 'Convergence to Steady State Solutions of the Euler Equations on Unstructured
* Grids with Limiters', V. Venkatakrishnan, JCP 118, 120-130, 1995.
*
* The limiter has been implemented in such a way that the difference, b, is
* limited in the form: b -> vk_limiter * b.
*
* ---------------------------------------------------------------------
* Input: a, b : two differences
*
* Output: vk_limiter : to be used as b -> vk_limiter * b.
* ---------------------------------------------------------------------
*
***********************************************************************
"""
two = 2.0
half = 0.5
Kp = 5.0 #<<<<< Adjustable parameter K
diameter = two*(vol/pi)**half
eps2 = (Kp*diameter)**3
vk_limiter = ( (a**2 + eps2) + two*b*a ) / \
(a**2 + two*b**2 + a*b + eps2)
return vk_limiter
# survey of gradient reconstruction methods
# https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20140011550.pdf
#
# compose the finite difference 1st order gradient
# from each element of the stencil.
#
# There are many more cells in the neighborhood than are needed to
# compute a gradient, so write the overdetermined
# system Ax=b
#
# where
#
# A is the matrix of spatial differences between noe centers
# (in this case an Ncells x 2D matrix)
#
# B is the vector of primative variable differences, phi_i - phi_o
# between the values at surrounding nodes, and the node in question
#
# x is just the finite difference we seek:
# [ d phi_o / d x , d phi_o / d y ] = ( A.T A ).inv A.T B
#
# Q: Why are we doing this?
#
# A: to extrapolate solutions linearly from the cell centroids
# to the faces (face midpoints)
#
# Q: Why are we doing that?
#
# A: This slope will allow us to reconstruct
# the fluxes at the cell boundaries in second order
# accurate fashion. (we will use limiters to achieve monotonicity)
#
# Then bob's your uncle, solve the Riemann problem
#
def compute_gradients(self):
"""
#*******************************************************************************
# Compute the LSQ gradients in all cells for all primitive variables.
#
# - Compute the gradient by [wx,wy] = sum_nghbrs [cx,cy]*(w_nghbr - wj),
# where [cx,cy] are the LSQ coefficients.
#
#*******************************************************************************
"""
#init gradient to zero
self.gradw[:,:,:] = 0.
# compute gradients for primative variables
for ivar in range(nq):
#compute gradients in all cells
for i, cell in enumerate(self.mesh.cells):
ci = cell.cid
wi = self.w[ci, ivar] #solution at this cell
#loop nieghbors
for k in range(self.cclsq[ci].nnghbrs_lsq):
nghbr_cell = self.cclsq[ci].nghbr_lsq[k]
wk = self.w[nghbr_cell.cid,ivar] #Solution at the neighbor cell.
self.gradw[ci,ivar,0] = self.gradw[ci,ivar,0] + self.cclsq[ci].cx[k]*(wk-wi)
self.gradw[ci,ivar,1] = self.gradw[ci,ivar,1] + self.cclsq[ci].cy[k]*(wk-wi)
return
def interface_flux(self,
u1, u2,
gradw1, gradw2,
n12, # Directed area vector (unit vector)
C1, # left centroid
C2, # right centroid
xm, ym, # face midpoint
phi1, phi2, # limiter
):
"""
outputs:
num_flux, # numerical flux (output)
wsn # max wave speed at face
#interior
gradw1 = self.gradw1
gradw2 = self.gradw2
n12 = face.normal_vector
C1 = c1.centroid
C2 = c2.centroid
#boundary
gradw1 = self.gradw1
gradw2 = self.gradw2
n12 = face.normal_vector
C1 = c1.centroid
C2 = [xm, ym]
"""
xc1, yc1 = C1
xc2, yc2 = C2
zero = 0.0
inviscid_flux = roe
# convert consertative to primitive variables
# at centroids.
w1 = self.u2w(u1)
w2 = self.u2w(u2)
# Linear Reconstruction in the primitive variables
# primitive variables reconstructed to the face wL, WR:
#Cell 1 centroid to the face midpoint:
wL = w1 + phi1 * (gradw1[:,0]*(xm-xc1) + gradw1[:,1]*(ym-yc1))
#Cell 2 centroid to the face midpoint:
wR = w2 + phi2 * ( gradw2[:,0]*(xm-xc2) + gradw2[:,1]*(ym-yc2) )
# Store the reconstructed solutions as conservative variables.
# Just becasue flux functions use conservative variables.
uL = self.w2u(wL) #conservative variables computed from wL and wR.
uR = self.w2u(wR) #conservative variables computed from wL and wR.
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Define 3D solution arrays and a 3D face normal.
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#Left state: 3D <- 2D
self.uL3d[0] = uL[0]
self.uL3d[1] = uL[1]
self.uL3d[2] = uL[2]
self.uL3d[3] = zero
self.uL3d[4] = uL[3]
#Right state: 3D <- 2D
self.uR3d[0] = uR[0]
self.uR3d[1] = uR[1]
self.uR3d[2] = uR[2]
self.uR3d[3] = zero
self.uR3d[4] = uR[3]
#Normal vector
self.n12_3d[0] = n12[0]
self.n12_3d[1] = n12[1]
self.n12_3d[2] = zero
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Compute inviscid flux by 3D flux subroutines
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#------------------------------------------------------------
# (1) Roe flux
#------------------------------------------------------------
#return inviscid_flux(nx,gamma,uL,uR,f,fL,fR)
self.num_flux3d, wsn = inviscid_flux(self.uL3d, # conservative u's left cell off the face
self.uR3d, # conservative u's right cell off the face
self.n12_3d, #normal vector
self.num_flux3d, #numerical flux
self.wsn, #wave speed
self.gamma)
self.num_flux[0] = self.num_flux3d[0] # rho flux'
self.num_flux[1] = self.num_flux3d[1] # mmtm-x flux'
self.num_flux[2] = self.num_flux3d[2] # mmtm-y flux'
self.num_flux[3] = self.num_flux3d[4] # energy flux
return self.num_flux[:], wsn
def initial_condition_vortex(self, vortex_strength=15.):
"""
#*******************************************************************************
# Set the initial solution for the inviscid vortex test case.
#
# We initialize the solution with the exact solution.
#
# Note: The grid must be generated in the square domain defined by
#
# [x,y] = [-20,10]x[-20,10]
#
# Initially, the vortex is centered at (x,y)=(-10,-10), and will be
# convected to the origin at the final time t=5.0.
#
#*******************************************************************************
"""
print( "setting: initial_condition_vortex")
#GridLen = 1.0
x0 = -10.0 #0.5*GridLen
y0 = -5.0 #0.5*GridLen
K = vortex_strength
alpha = 1.0
gamma = self.gamma
frac = 2.
# Set free stream values (the input Mach number is not used in this test).
self.rho_inf = 1.0
self.u_inf = 2.0
self.v_inf = 2.0
self.p_inf = 1.0/gamma
# Note: Speed of sound a_inf is sqrt(gamma*p_inf/rho_inf) = 1.0.
for i, cell in enumerate(self.mesh.cells):
x = cell.centroid[0] - x0
y = cell.centroid[1] - y0
r = np.sqrt(x**2 + y**2)
self.w_initial[self.iu] = self.u_inf - K/(frac*pi)*y*np.exp(alpha*0.5*(1.-r**2.))
self.w_initial[self.iv] = self.v_inf + K/(frac*pi)*x*np.exp(alpha*0.5*(1.-r**2.))
temperature = 1.0 - K*(gamma-1.0)/(8.0*alpha*pi**2.)*np.exp(alpha*(1.-r**2.))
self.w_initial[self.ir] = self.rho_inf*temperature**( 1.0/(gamma-1.0)) #Density
self.w_initial[self.ip] = self.p_inf *temperature**(gamma/(gamma-1.0)) #Pressure
#Store the initial solution
self.w[i,:] = self.w_initial[:]
# Compute and store conservative variables
self.u[i,:] = self.w2u( self.w[i,:] )
return
def write_solution(self):
self.write_flow_at_cell_centers()
return
def plot_solution(self):
self.plot_flow_at_cell_centers()
return
def write_flow_at_cell_centers(self):
self.solution_dir = '../pics/solution'
location = []
#lx = []
#ly = []
u = []
w = []
for i, cell in enumerate(self.mesh.cells):
#lx.append(str(cell.centroid[0]))
#ly.append(str(cell.centroid[1]))
location.append(' '.join(
[str(el) for el in cell.centroid])+' \n' )
u.append(' '.join(
[ str(el) for el in self.u[cell.cid]])+' \n' )
w.append(' '.join(
[str(el) for el in self.w[cell.cid]])+' \n' )
FT.WriteLines(directory=self.solution_dir,
filename='cellcenters.dat',
lines = location)
#conservative solution
FT.WriteLines(directory=self.solution_dir,
filename='u_at_cellcenters.dat',
lines = u)
#primative variables:
FT.WriteLines(directory=self.solution_dir,
filename='w_at_cellcenters.dat',
lines = w)
return
def plot_flow_at_cell_centers(self):
coords_ = []
for i, cell in enumerate(self.mesh.cells):
coords_.append(cell.centroid)
coords_ = np.asarray(coords_)
u_ = self.u
w_ = self.w
#--------------------------------------------------------------
#
# plot primative variables u,v
Mc = np.sqrt(pow(w_[:,1], 2) + pow(w_[:,2], 2))
figure()
# Q = quiver( coords_[:,0],coords_[:,1],
# w_[:,0], w_[:,1], Mc, units='x', pivot='tip',width=.005, scale=3.3/.15)
Q = quiver( coords_[:,0],coords_[:,1],
w_[:,1], w_[:,2], Mc, units='x', pivot='tip',scale=1./.15)
#--------------------------------------------------------------
#
# plot conservative u,v
Mu = np.sqrt(pow(u_[:,1], 2) + pow(u_[:,2], 2))
figure()
# Q = quiver( coords_[:,0],coords_[:,1],
# u_[:,0], u_[:,1], Mu, units='x', pivot='tip',width=.005, scale=3.3/.15)
Q = quiver( coords_[:,0],coords_[:,1],
u_[:,1], u_[:,2], Mc,
units='xy', angles='xy', pivot='tail',scale=1./.15)
# plot conservative rho
#--------------------------------------------------------------
# plot density and pressure
fig, (ax1, ax2) = plt.subplots(nrows=2)
#--------------------------------------------------------------
# plot density
# -----------------------
# Interpolation on a grid
# -----------------------
# A contour plot of irregularly spaced data coordinates
# via interpolation on a grid.
# Create grid values first.
npts = len(coords_)
ngridx = self.mesh.m
ngridy = self.mesh.n
xi = np.linspace(self.mesh.xb, self.mesh.xe, ngridx)
yi = np.linspace(self.mesh.yb, self.mesh.ye, ngridy)
# Perform linear interpolation of the data (x,y)
# on a grid defined by (xi,yi)
triang = tri.Triangulation(coords_[:,0], coords_[:,1])
interpolator = tri.LinearTriInterpolator(triang, u_[:,0])
Xi, Yi = np.meshgrid(xi, yi)
density = interpolator(Xi, Yi)
# Note that scipy.interpolate provides means to interpolate data on a grid
# as well. The following would be an alternative to the four lines above:
#from scipy.interpolate import griddata
#zi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='linear')
ax1.contour(xi, yi, density, levels=14, linewidths=0.5, colors='k')
#cntr1 = ax1.contourf(xi, yi, zi, levels=14, cmap="RdBu_r")
cntr1 = ax1.contourf(xi, yi, density, cmap="RdBu_r")
fig.colorbar(cntr1, ax=ax1)
#ax1.plot(coords_[:,0], coords_[:,1], 'ko', ms=3)
#ax1.set(xlim=(-2, 2), ylim=(-2, 2))
ax1.set_title('Density (%d points, %d grid points)' %
(npts, ngridx * ngridy))
#
#--------------------------------------------------------------
# plot pressure
# Perform linear interpolation of the data (x,y)
# on a grid defined by (xi,yi)
# triang = tri.Triangulation(coords_[:,0], coords_[:,1])
# interpolator = tri.LinearTriInterpolator(triang, u_[:,3])
# Xi, Yi = np.meshgrid(xi, yi)
# press = interpolator(Xi, Yi)
# ----------
# Tricontour
# ----------
# Directly supply the unordered, irregularly spaced coordinates
# to tricontour.
ax2.tricontour(coords_[:,0], coords_[:,1], u_[:,3],
levels=14, linewidths=0.5, colors='k')
cntr2 = ax2.tricontourf(coords_[:,0], coords_[:,1], u_[:,3],
cmap="RdBu_r") #levels=14, cmap="RdBu_r")
fig.colorbar(cntr2, ax=ax2)
#ax2.plot(coords_[:,0], coords_[:,1], 'ko', ms=3)
#ax2.set(xlim=(-2, 2), ylim=(-2, 2))
ax2.set_title('Pressure (%d points)' % npts)
plt.subplots_adjust(hspace=0.5)
plt.show()
return
def plot_flow_at_cell_centers_from_file(self):
self.solution_dir = '../pics/solution'
coords_ = np.loadtxt(self.solution_dir+'/cellcenters.dat')
u_ = np.loadtxt(self.solution_dir+'/u_at_cellcenters.dat')
w_ = np.loadtxt(self.solution_dir+'/w_at_cellcenters.dat')
Mc = np.sqrt(pow(w_[:,0], 2) + pow(w_[:,0], 2))
figure()
Q = quiver( coords_[:,0],coords_[:,1],
w_[:,0], w_[:,1], Mc, units='x', pivot='tip',width=.005, scale=3.3/.15)
Mu = np.sqrt(pow(u_[:,0], 2) + pow(u_[:,0], 2))
figure()
Q = quiver( coords_[:,0],coords_[:,1],
u_[:,0], u_[:,1], Mu, units='x', pivot='tip',width=.005, scale=3.3/.15)
return
class FlowState(object):
def __init__(self, rho_inf=1., u_inf=1., v_inf=1., p_inf=1.):
self.rho_inf = rho_inf
self.u_inf = u_inf
self.v_inf = v_inf
self.p_inf = p_inf
return
def show_LSQ_grad_area_plots(solver):
for cc in solver.cclsq[55:60]:
cc.plot_lsq_reconstruction()
return
def show_one_tri_cell(solver):
cc = solver.cclsq[57]
cc.plot_lsq_reconstruction()
cell = cc.cell
cell.plot_cell()
return
def show_ont_quad_cell():
ssolve = Solvers(mesh = gd)
cc = ssolve.cclsq[57]
cc.plot_lsq_reconstruction()
cell = cc.cell
cell.plot_cell()
return
class TestInviscidVortex(object):
def __init__(self):
# up a level
uplevel = os.path.join(os.path.dirname( os.getcwd() ))
path2vortex = uplevel+'\\cases\case_unsteady_vortex'
self.DHandler = DataHandler(project_name = 'vortex',
path_to_inputs_folder = path2vortex)
pass
if __name__ == '__main__':
# gd = Grid(type_='rect',m=10,n=10,
# winding='ccw')
mesh = Grid(type_='quad',m=42,n=21,
winding='ccw')
cell = mesh.cellList[44]
face = cell.faces[0]
#cell.plot_cell()
self = Solvers(mesh = mesh)
#cc = self.cclsq[33]
#cc.plot_lsq_reconstruction()
#----------------------------
# plot LSQ gradient stencils
#show_LSQ_grad_area_plots(self)
# cc = ssolve.cclsq[57]
# cc.plot_lsq_reconstruction()
# cell = cc.cell
# cell.plot_cell()
test_vortex = TestInviscidVortex()
#"""
self.solver_boot(flowtype = 'vortex')
#self.solver_solve( tfinal=.005, dt=.01)
self.solver_solve( tfinal=.1, dt=.01)
self.plot_solution()
#"""
|
{"hexsha": "2f25693e4278f71dafa76fac5488a105b1780180", "size": 52973, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Solvers.py", "max_stars_repo_name": "LukeMcCulloch/PyCFD", "max_stars_repo_head_hexsha": "6720e6575e25f8c274ef591d6c215de90a740935", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-04T15:42:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-04T15:42:15.000Z", "max_issues_repo_path": "src/Solvers.py", "max_issues_repo_name": "LukeMcCulloch/PyCFD", "max_issues_repo_head_hexsha": "6720e6575e25f8c274ef591d6c215de90a740935", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Solvers.py", "max_forks_repo_name": "LukeMcCulloch/PyCFD", "max_forks_repo_head_hexsha": "6720e6575e25f8c274ef591d6c215de90a740935", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.082674335, "max_line_length": 120, "alphanum_fraction": 0.4176656032, "include": true, "reason": "import numpy,from scipy", "num_tokens": 11702}
|
from zoopt import Objective
from zoopt import Parameter
from zoopt import Dimension
from zoopt import Solution
import numpy as np
def ackley(solution):
"""
Ackley function for continuous optimization
"""
x = solution.get_x()
bias = 0.2
ave_seq = sum([(i - bias) * (i - bias) for i in x]) / len(x)
ave_cos = sum([np.cos(2.0 * np.pi * (i - bias)) for i in x]) / len(x)
value = -20 * np.exp(-0.2 * np.sqrt(ave_seq)) - np.exp(ave_cos) + 20.0 + np.e
return value
class TestObjective(object):
def test_parameter_set(self):
par = Parameter(budget=1000, noise_handling=True, suppression=True)
assert 1
def test_eval(self):
dim = 100
obj = Objective(func=ackley, dim=Dimension(dim, [[-1, 1]] * dim, [True] * dim))
sol = Solution(x=[0.2] * dim)
res = obj.eval(sol)
assert abs(res) <= 1e-7
def test_resample(self):
dim = 100
obj = Objective(func=ackley, dim=Dimension(dim, [[-1, 1]] * dim, [True] * dim))
sol = Solution(x=[0.2] * dim)
res = obj.eval(sol)
obj.resample(sol, 3)
assert abs(sol.get_value()) <= 1e-7
sol.set_value(0)
obj.resample_func(sol, 3)
assert abs(sol.get_value()) <= 1e-7
def test_history_best_so_far(self):
input_data = [0.5, 0.6, 0.4, 0.7, 0.3, 0.2]
output_data = [0.5, 0.5, 0.4, 0.4, 0.3, 0.2]
obj = Objective()
obj.set_history(input_data)
best_history = obj.get_history_bestsofar()
assert best_history == output_data
|
{"hexsha": "ea7ff81f8cb60d89c03666e17d4fb78bb8f61527", "size": 1566, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_objective.py", "max_stars_repo_name": "HowardHu97/ZOOpt", "max_stars_repo_head_hexsha": "01568e8e6b0e65ac310d362af2da5245ac375e53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 403, "max_stars_repo_stars_event_min_datetime": "2017-04-19T03:01:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T04:31:27.000Z", "max_issues_repo_path": "test/test_objective.py", "max_issues_repo_name": "HowardHu97/ZOOpt", "max_issues_repo_head_hexsha": "01568e8e6b0e65ac310d362af2da5245ac375e53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2017-05-07T10:09:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-18T11:33:00.000Z", "max_forks_repo_path": "test/test_objective.py", "max_forks_repo_name": "HowardHu97/ZOOpt", "max_forks_repo_head_hexsha": "01568e8e6b0e65ac310d362af2da5245ac375e53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 97, "max_forks_repo_forks_event_min_datetime": "2017-04-19T03:52:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T13:05:02.000Z", "avg_line_length": 30.1153846154, "max_line_length": 87, "alphanum_fraction": 0.5842911877, "include": true, "reason": "import numpy", "num_tokens": 487}
|
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
def read_file(f):
best_result = (-1, -1, [])
ponctuations = []
with open(f, 'r') as f:
for line in f.readlines():
line = line.split(',')
ponct = float(line[0])
ponctuations.append(ponct)
if(ponct > best_result[0]):
best_result = (ponct, float(line[1]), [int(i)+1 for i in line[2:]])
return ponctuations, best_result
def build_histogram(raw_data):
histogram = []
data = sorted(raw_data)
for i in set(data):
histogram.append((i, data.count(i)))
return histogram
rd_data, rd_best = read_file('data/random-multistart_niteroi.txt')
gr_data, gr_best = read_file('data/greedy_niteroi.txt')
ag_data, ag_best = read_file('data/adaptive-greedy_niteroi.txt')
rd_hist = build_histogram(rd_data)
gr_hist = build_histogram(gr_data)
ag_hist = build_histogram(ag_data)
fig, ax = plt.subplots()
# ax.axvline(21.6129, color='k', alpha=0.5)
# plt.bar(21.6129, 1699, 0.1, color='k')
for rd_b, gr_b, ag_b, in zip(rd_hist, gr_hist, ag_hist):
if(rd_b[0] == gr_b[0] and rd_b[0] == ag_b[0]):
if(rd_b[1] > gr_b[1] and rd_b > ag_b[1]):
ax.bar(rd_b[0], rd_b[1], 0.1, color='b')
if(gr_b[1] > ag_b[1]):
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
else:
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
elif(rd_b[1] > gr_b[1] and rd_b[1] < ag_b[1]):
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
ax.bar(rd_b[0], rd_b[1], 0.1, color='b')
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
elif(gr_b[1] < ag_b[1]):
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
ax.bar(rd_b[0], rd_b[1], 0.1, color='b')
else:
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
ax.bar(rd_b[0], rd_b[1], 0.1, color='b')
elif(gr_b[0] == ag_b[0]):
ax.bar(rd_b[0], rd_b[1], 0.1, color='b')
if(gr_b[1] > ag_b[1]):
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
else:
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
else:
ax.bar(rd_b[0], rd_b[1], 0.1, color='b')
ax.bar(gr_b[0], gr_b[1], 0.1, color='r')
ax.bar(ag_b[0], ag_b[1], 0.1, color='g')
ax.set_xlabel('Solution Value')
ax.set_ylabel('Occurrence over 10000 trials')
# plt.xticks(np.arange(0, 23, 1))
# plt.yticks(np.arange(0, 1800, 100))
blue_patch = mpatches.Patch(color='blue', label='Random heuristic')
red_patch = mpatches.Patch(color='red', label='Greedy heuristic')
green_patch = mpatches.Patch(color='green', label='Adaptive greedy heuristic')
gray_patch = mpatches.Patch(color='gray', label='Best known solution value')
plt.legend(handles=[blue_patch, red_patch, green_patch, gray_patch])
print(rd_best)
print(gr_best)
print(ag_best)
plt.savefig("executions_niteroi.pdf", bbox_inches='tight')
plt.show()
|
{"hexsha": "1ed9b2cd2eebd5dcca1071e753c9330b946c3fc0", "size": 3296, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotter/plot.py", "max_stars_repo_name": "vitornl/pokemongo-raid", "max_stars_repo_head_hexsha": "67a35de0c67c04a0dca78a8767db4f9da8769e51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-13T15:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-13T15:23:41.000Z", "max_issues_repo_path": "plotter/plot.py", "max_issues_repo_name": "vitornl/pokemongo-raid", "max_issues_repo_head_hexsha": "67a35de0c67c04a0dca78a8767db4f9da8769e51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotter/plot.py", "max_forks_repo_name": "vitornl/pokemongo-raid", "max_forks_repo_head_hexsha": "67a35de0c67c04a0dca78a8767db4f9da8769e51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0638297872, "max_line_length": 83, "alphanum_fraction": 0.5661407767, "include": true, "reason": "import numpy", "num_tokens": 1108}
|
import re
import sys
sys.path.append('..')
import numpy as np
import scipy.special
import matplotlib.pyplot as plt
import matplotlib.colors
import palettable
import pandas as pd
import glob
import os.path
from lib import *
from lib.analytical import *
from lib.fitting import *
def growthlaw(T, d, t0, gamma):
return (-d*(T-t0) + np.log((np.exp(d * T)-1)/(np.exp(d * t0)-1))/(1 + gamma))
theta = 1e5
# C0 does not matter as it's normalized away before subsampling
C0 = 1.0
d = 0.2
gamma = 0.1
sigma0 = 0.0
sample_size = 5e5
sigma = 0.08**.5
print('exponent', 1+(d*gamma/(1+gamma))/sigma**2)
metapath = 'data/meta.csv'
if not os.path.exists(metapath):
Ts = np.random.uniform(0.0, 80.0, 500)
pd.DataFrame.from_dict(dict(Age=Ts)).to_csv(metapath)
else:
df = pd.read_csv(metapath, index_col=0)
Ts = list(df['Age'])
for i, T in enumerate(Ts):
outpath = 'data/ff_%g.csv.gz'%i
if not os.path.exists(outpath):
print(i)
# draw initial times
ts = np.random.uniform(low=0.0, high=T, size=int(theta*T))
# draw initial size
logsizes = np.log(C0)
if sigma0 > 0:
logsizes += np.random.normal(size=len(ts))*sigma0 - sigma0/2
# calculate deterministic dynamics
logsizes += growthlaw(T, d, ts, gamma)
# draw fluctuating growth
variance = 2*sigma**2*(T-ts)
logsizes += np.random.normal(size=len(ts))*variance**.5 -variance/2
sizes = np.exp(logsizes)
# subsample
sizes_sub = np.random.poisson(lam=sample_size * sizes/np.sum(sizes))
mask = sizes_sub>0
sizes_sub = sizes_sub[mask]
ts_sub = ts[mask]
pd.DataFrame.from_dict(dict(Age=ts_sub, counts=sizes_sub)).to_csv(outpath)
|
{"hexsha": "fcecc28f19db26347d00dc4b19b4eeb85b28f91f", "size": 1744, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/model_growth_and_fluctuations/run_sampling.py", "max_stars_repo_name": "andim/paper-tcellimprint", "max_stars_repo_head_hexsha": "e89605e51014fa3f347f96bab3d3d84c2b013a2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-28T10:47:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T20:07:21.000Z", "max_issues_repo_path": "code/model_growth_and_fluctuations/run_sampling.py", "max_issues_repo_name": "andim/paper-tcellimprint", "max_issues_repo_head_hexsha": "e89605e51014fa3f347f96bab3d3d84c2b013a2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/model_growth_and_fluctuations/run_sampling.py", "max_forks_repo_name": "andim/paper-tcellimprint", "max_forks_repo_head_hexsha": "e89605e51014fa3f347f96bab3d3d84c2b013a2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1290322581, "max_line_length": 82, "alphanum_fraction": 0.6393348624, "include": true, "reason": "import numpy,import scipy", "num_tokens": 530}
|
import pytest
import numpy as np
import toppra
import toppra.constraint as constraint
@pytest.fixture(params=[(0, 0)])
def vel_accel_robustaccel(request):
"Velocity + Acceleration + Robust Acceleration constraint"
dtype_a, dtype_ra = request.param
vlims = np.array([[-1, 1], [-1, 2], [-1, 4]], dtype=float)
alims = np.array([[-1, 1], [-1, 2], [-1, 4]], dtype=float)
vel_cnst = constraint.JointVelocityConstraint(vlims)
accl_cnst = constraint.JointAccelerationConstraint(alims, dtype_a)
robust_accl_cnst = constraint.RobustLinearConstraint(
accl_cnst, [0.5, 0.1, 2.0], dtype_ra)
yield vel_cnst, accl_cnst, robust_accl_cnst
@pytest.fixture
def path():
np.random.seed(1)
path = toppra.SplineInterpolator(np.linspace(0, 1, 5), np.random.randn(5, 3))
yield path
|
{"hexsha": "0d969f5f1fe9e35fc1bc366b139381680ea5a617", "size": 815, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/tests/solverwrapper/conftest.py", "max_stars_repo_name": "stevegolton/toppra", "max_stars_repo_head_hexsha": "846e2a7f5b87e0e1884b244b07d5fd661edcd9bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 342, "max_stars_repo_stars_event_min_datetime": "2017-07-26T17:37:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T19:50:27.000Z", "max_issues_repo_path": "tests/tests/solverwrapper/conftest.py", "max_issues_repo_name": "stevegolton/toppra", "max_issues_repo_head_hexsha": "846e2a7f5b87e0e1884b244b07d5fd661edcd9bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 151, "max_issues_repo_issues_event_min_datetime": "2017-11-30T06:14:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T02:06:08.000Z", "max_forks_repo_path": "tests/tests/solverwrapper/conftest.py", "max_forks_repo_name": "stevegolton/toppra", "max_forks_repo_head_hexsha": "846e2a7f5b87e0e1884b244b07d5fd661edcd9bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 134, "max_forks_repo_forks_event_min_datetime": "2017-08-18T21:35:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T03:43:08.000Z", "avg_line_length": 30.1851851852, "max_line_length": 81, "alphanum_fraction": 0.6957055215, "include": true, "reason": "import numpy", "num_tokens": 249}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# indexhandlers.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Apr 2018
'''
These are Tornado handlers for the AJAX actions.
'''
####################
## SYSTEM IMPORTS ##
####################
import logging
import json
from datetime import datetime
import numpy as np
# for generating encrypted token information
from cryptography.fernet import Fernet
class FrontendEncoder(json.JSONEncoder):
'''
This handles encoding weird things.
'''
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, bytes):
return obj.decode()
elif isinstance(obj, complex):
return (obj.real, obj.imag)
elif (isinstance(obj, (float, np.float64, np.float_)) and
not np.isfinite(obj)):
return None
elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):
return int(obj)
else:
return json.JSONEncoder.default(self, obj)
# this replaces the default encoder and makes it so Tornado will do the right
# thing when it converts dicts to JSON when a
# tornado.web.RequestHandler.write(dict) is called.
json._default_encoder = FrontendEncoder()
#############
## LOGGING ##
#############
# get a logger
LOGGER = logging.getLogger(__name__)
#####################
## TORNADO IMPORTS ##
#####################
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.escape import xhtml_escape
from tornado import web
###################
## LOCAL IMPORTS ##
###################
from .basehandler import BaseHandler
from .actionworkers import (
worker_get_object,
worker_get_objects,
worker_insert_object_comments,
)
#####################
## OBJECT HANDLERS ##
#####################
class ObjectListHandler(BaseHandler):
'''
This handles the /api/list-objects endpoint.
'''
def initialize(self,
currentdir,
templatepath,
assetpath,
executor,
basedir,
siteinfo,
authnzerver,
session_expiry,
fernetkey,
ratelimit,
cachedir):
'''
handles initial setup.
'''
self.currentdir = currentdir
self.templatepath = templatepath
self.assetpath = assetpath
self.executor = executor
self.basedir = basedir
self.siteinfo = siteinfo
self.authnzerver = authnzerver
self.session_expiry = session_expiry
self.fernetkey = fernetkey
self.ferneter = Fernet(fernetkey)
self.httpclient = AsyncHTTPClient(force_instance=True)
self.ratelimit = ratelimit
self.cachedir = cachedir
@gen.coroutine
def get(self):
'''This handles GET requests to the /api/list-objects endpoint.
Parameters
----------
review_status : str, optional, default = 'all'
Sets the type of list retrieval:
- 'all' -> all objects
- 'complete-good' -> objects that have at least 2 'good' votes
- 'complete-bad' -> objects that have at least 2 'bad' votes
- 'incomplete' -> objects that don't have 2 votes either way
- 'self-complete-good' -> this user's voted objects good-complete
- 'self-complete-bad' -> this user's voted objects bad-complete
- 'self-incomplete' -> this user's voted objects incomplete
- 'other-incomplete' -> other users' voted objects incomplete
page : int, optional, default = 0
The page number to retrieve.
'''
# check if we're actually logged in
if not self.current_user:
retdict = {'status':'failed',
'message':'You must be logged in to view objects.',
'result': None}
self.set_status(401)
self.write(retdict)
raise web.Finish()
# if the current user is anonymous or locked, ignore their request
if self.current_user and self.current_user['user_role'] in ('anonymous',
'locked'):
retdict = {'status':'failed',
'message':'You must be logged in to view objects.',
'result': None}
self.set_status(401)
self.write(retdict)
raise web.Finish()
# otherwise, go ahead and process the request
try:
# parse the args
review_status = xhtml_escape(
self.get_argument('review_status','all')
)
if review_status not in ('all',
'incomplete',
'complete-good',
'complete-bad',
'self-incomplete',
'self-complete-good',
'self-complete-bad',
'other-incomplete'):
raise ValueError("Unknown review status requested: '%s'" %
review_status)
keytype = xhtml_escape(self.get_argument('keytype', 'start'))
keyid = int(
xhtml_escape(self.get_argument('keyid', '1'))
)
max_objects = self.siteinfo['rows_per_page']
if keytype.strip() == 'start':
objectlist_info = yield self.executor.submit(
worker_get_objects,
review_status=review_status,
userid=self.current_user['user_id'],
start_keyid=keyid,
end_keyid=None,
max_objects=max_objects,
)
elif keytype.strip() == 'end':
objectlist_info = yield self.executor.submit(
worker_get_objects,
review_status=review_status,
userid=self.current_user['user_id'],
start_keyid=None,
end_keyid=keyid,
max_objects=max_objects,
)
else:
objectlist_info = yield self.executor.submit(
worker_get_objects,
review_status=review_status,
userid=self.current_user['user_id'],
start_keyid=keyid,
end_keyid=None,
max_objects=max_objects,
)
# render the result
if objectlist_info is not None:
retdict = {'status':'ok',
'message':'objectlist OK',
'result':objectlist_info}
else:
retdict = {'status':'failed',
'message':"Unable to retrieve object list.",
'result':None}
self.set_status(404)
self.write(retdict)
self.finish()
except Exception:
LOGGER.exception('Failed to retrieve the object list.')
self.set_status(400)
retdict = {'status':'failed',
'message':'Invalid request for object list.',
'result':None}
self.write(retdict)
self.finish()
class LoadObjectHandler(BaseHandler):
'''This handles the /api/load-object endpoint.
'''
def initialize(self,
currentdir,
templatepath,
assetpath,
executor,
basedir,
siteinfo,
authnzerver,
session_expiry,
fernetkey,
ratelimit,
cachedir):
'''
handles initial setup.
'''
self.currentdir = currentdir
self.templatepath = templatepath
self.assetpath = assetpath
self.executor = executor
self.basedir = basedir
self.siteinfo = siteinfo
self.authnzerver = authnzerver
self.session_expiry = session_expiry
self.fernetkey = fernetkey
self.ferneter = Fernet(fernetkey)
self.httpclient = AsyncHTTPClient(force_instance=True)
self.ratelimit = ratelimit
self.cachedir = cachedir
@gen.coroutine
def get(self, objectid):
'''This handles GET requests to the /api/load-object/<index> endpoint.
Gets catalog and comment info, plots the object if not already plotted,
and then returns JSON with everything.
'''
# check if we're actually logged in
if not self.current_user:
retdict = {'status':'failed',
'message':'You must be logged in to view objects.',
'result': None}
self.set_status(401)
self.write(retdict)
raise web.Finish()
# if the current user is anonymous or locked, ignore their request
if self.current_user and self.current_user['user_role'] in ('anonymous',
'locked'):
retdict = {'status':'failed',
'message':'You must be logged in to view objects.',
'result': None}
self.set_status(401)
self.write(retdict)
raise web.Finish()
# otherwise, go ahead and process the request
try:
objindex = int(xhtml_escape(objectid))
if objindex < 0:
objindex = 0
# get the object information
objectinfo = yield self.executor.submit(
worker_get_object,
self.current_user['user_id'],
objindex,
self.basedir,
)
if objectinfo is not None:
retdict = {'status':'ok',
'message':'object found OK',
'result':objectinfo}
else:
retdict = {'status':'failed',
'message':"Object with specified ID not found.",
'result':None}
self.set_status(404)
self.write(retdict)
self.finish()
except Exception:
LOGGER.exception('failed to get requested object ID: %r' % objectid)
self.set_status(400)
retdict = {'status':'failed',
'message':'Invalid request for object ID',
'result':None}
self.write(retdict)
self.finish()
class SaveObjectHandler(BaseHandler):
'''This handles the /api/save-object/<objectid> endpoint.
'''
def initialize(self,
currentdir,
templatepath,
assetpath,
executor,
basedir,
siteinfo,
authnzerver,
session_expiry,
fernetkey,
ratelimit,
cachedir):
'''
handles initial setup.
'''
self.currentdir = currentdir
self.templatepath = templatepath
self.assetpath = assetpath
self.executor = executor
self.basedir = basedir
self.siteinfo = siteinfo
self.authnzerver = authnzerver
self.session_expiry = session_expiry
self.fernetkey = fernetkey
self.ferneter = Fernet(fernetkey)
self.httpclient = AsyncHTTPClient(force_instance=True)
self.ratelimit = ratelimit
self.cachedir = cachedir
@gen.coroutine
def post(self, objectid):
'''This handles POST requests to /api/save-object/<objectid>.
This saves the current object.
'''
# check if we're actually logged in
if not self.current_user:
retdict = {'status':'failed',
'message':'You must be logged in to view objects.',
'result': None}
self.set_status(401)
self.write(retdict)
raise web.Finish()
# if the current user is anonymous or locked, ignore their request
if self.current_user and self.current_user['user_role'] in ('anonymous',
'locked'):
retdict = {'status':'failed',
'message':'You must be logged in to view objects.',
'result': None}
self.set_status(401)
self.write(retdict)
raise web.Finish()
# check the POST request for validity
if ((not self.keycheck['status'] == 'ok') or
(not self.xsrf_type == 'session')):
self.set_status(403)
retdict = {
'status':'failed',
'result':None,
'message':("Sorry, you don't have access. "
"API keys are not allowed for this endpoint.")
}
self.write(retdict)
raise web.Finish()
try:
objectid = int(xhtml_escape(objectid))
comment_text = self.get_argument('comment_text',None)
user_flags = self.get_argument('user_flags',None)
userid = self.current_user['user_id']
username = self.current_user['full_name']
# check if there's more than one flag selected
user_flags = json.loads(user_flags)
if sum(user_flags[k] for k in user_flags) > 1:
LOGGER.error(
"More than one flag is selected for "
"object: %s, userid: %s" %
(objectid, self.current_user['user_id'])
)
retdict = {
'status':'failed',
'result':None,
'message':(
"You can't choose more than one flag per object."
)
}
self.write(retdict)
raise web.Finish()
if comment_text is not None and len(comment_text.strip()) == 0:
comment_text = ''
if comment_text is not None or user_flags is not None:
# check if the user is allowed to comment on this object
objectinfo = yield self.executor.submit(
worker_get_object,
self.current_user['user_id'],
objectid,
self.basedir,
)
# if this object actually exists and is writable, we can do
# stuff on it
if (objectinfo is None):
LOGGER.error("Object: %s doesn't exist (userid: %s)" %
(objectid, self.current_user['user_id']))
retdict = {
'status':'failed',
'result':None,
'message':(
"You can't choose more than one flag per object."
)
}
self.write(retdict)
self.finish()
elif (objectinfo is not None and
objectinfo['already_reviewed'] is True):
LOGGER.error(
"Object: %s has been already reviewed by userid: %s" %
(objectid, self.current_user['user_id'])
)
retdict = {
'status':'failed',
'result':None,
'message':(
"You have already reviewed this object."
)
}
self.write(retdict)
self.finish()
elif (objectinfo is not None and
objectinfo['already_reviewed'] is False and
objectinfo['review_status'] == 'incomplete'):
commentdict = {'objectid':objectid,
'comment':comment_text,
'user_flags':user_flags}
updated = yield self.executor.submit(
worker_insert_object_comments,
userid,
username,
commentdict,
[x.strip() for x in
self.siteinfo['good_flag_keys'].split(',')],
self.siteinfo['max_good_votes'],
[x.strip() for x in
self.siteinfo['bad_flag_keys'].split(',')],
self.siteinfo['max_bad_votes'],
self.siteinfo['max_all_votes'],
)
if updated is not None:
retdict = {'status':'ok',
'message':'object updated OK',
'result':updated}
LOGGER.info(
"Object: %s successfully "
"reviewed by userid: %s: %r" %
(objectid,
self.current_user['user_id'],
commentdict)
)
self.write(retdict)
self.finish()
else:
retdict = {
'status':'failed',
'message':(
"Object with specified ID "
"could not be updated."
),
'result':None
}
self.write(retdict)
self.finish()
else:
retdict = {'status':'failed',
'message':(
"Object not found, or is already complete. "
"Your comments were not saved."
),
'result':None}
self.write(retdict)
self.finish()
# if no comment content was supplied, do nothing
else:
retdict = {
'status':'ok',
'message':'No comments supplied. Object is unchanged.',
'result': None
}
self.write(retdict)
self.finish()
except Exception:
LOGGER.exception('failed to save changes for object ID: %r' %
objectid)
self.set_status(400)
retdict = {'status':'failed',
'message':'Invalid save request for object ID',
'result':None}
self.write(retdict)
self.finish()
|
{"hexsha": "e289431808d23770b07d69540d3a8581c73ba23c", "size": 19567, "ext": "py", "lang": "Python", "max_stars_repo_path": "vizinspect/frontend/actionhandlers.py", "max_stars_repo_name": "johnnygreco/viz-inspect", "max_stars_repo_head_hexsha": "3fc24e00062e28ccbc5fea70c20ed76d380a4e16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-22T17:04:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-22T17:04:19.000Z", "max_issues_repo_path": "vizinspect/frontend/actionhandlers.py", "max_issues_repo_name": "johnnygreco/viz-inspect", "max_issues_repo_head_hexsha": "3fc24e00062e28ccbc5fea70c20ed76d380a4e16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-02-18T18:22:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-21T22:41:02.000Z", "max_forks_repo_path": "vizinspect/frontend/actionhandlers.py", "max_forks_repo_name": "johnnygreco/viz-inspect", "max_forks_repo_head_hexsha": "3fc24e00062e28ccbc5fea70c20ed76d380a4e16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8857142857, "max_line_length": 80, "alphanum_fraction": 0.4690550417, "include": true, "reason": "import numpy", "num_tokens": 3558}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 13:26:06 2017
@author: nblago
"""
from __future__ import print_function
import datetime
from astropy.io import votable
import numpy as np
import os
import logging
import warnings
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
from astroquery.vizier import Vizier
from astropy.coordinates import Angle
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib import request
from urllib.request import HTTPError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
from urllib import urlretrieve
from urllib2 import HTTPError
class QueryCatalogue:
def __init__(self, ra=0, dec=0, radius=0, minmag=5, maxmag=23, logger=None):
if (type(ra)==str or type(ra)==str ):
c = SkyCoord('%s %s'%(ra, dec), unit=(u.hourangle, u.deg), frame='icrs')
self.ra = c.ra.value
self.dec = c.dec.value
else:
self.ra = ra
self.dec= dec
self.rad = float(radius)
#self.rad = np.minimum(0.3, self.rad)
self.minmag = minmag
self.maxmag = maxmag
self.logger = logger
if (logger is None):
FORMAT = '%(asctime)-15s %(levelname)s [%(name)s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
self.logger = logging.getLogger('QueryCatalogue')
def query_usnob1(self):
#ra, dec = coordinates_conversor.hour2deg(f[0].header['RA'], f[0].header['DEC'])
#SEDM FoV is 6.5 arcmin, due to uncertainties in the position, 4 arcmin radius assumed.
# Download USNO-B1 catalog for the position
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow())
catalog_url = 'http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-B1&RA=%.5f&DEC=%.5f&SR=%.4f&VERB=1' % (self.ra, self.dec, self.rad)
self.logger.info( "Downloading USNO-B1 catalog...")
self.logger.info(catalog_url)
tmp_file = '/tmp/tmp_usnob1_%s.cat'%timestamp
urlretrieve(catalog_url, tmp_file)
# Read RA, Dec and magnitude from XML format USNO catalog
catalog = votable.parse_single_table(tmp_file).to_table()
#Clean temporary file.
if (os.path.isfile(tmp_file)):
os.remove(tmp_file)
return catalog.as_array().data
def query_apass(self):
'''
Queries the APASS catalogue
'''
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow())
catalog_url = 'https://www.aavso.org/cgi-bin/apass_download.pl?ra=%.5f&dec=%.5f&radius=%.4f8&outtype=1' % (self.ra, self.dec, self.rad)
self.logger.info( "Downloading APASS catalog...")
self.logger.info(catalog_url)
tmp_file = '/tmp/tmp_apass_%s.cat'%timestamp
urlretrieve(catalog_url, tmp_file)
catalog = np.genfromtxt(tmp_file, delimiter=",", names=True)
#Clean temporary file.
if (os.path.isfile(tmp_file)):
os.remove(tmp_file)
return catalog
def query_sdss(self):
'''
Queries the SDSS catalogue. The minmag and maxmag apply to the r-band.
If there is no SDSS, an empty array will be returned.
'''
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow())
catalog_url='http://skyserver.sdss.org/dr9/en/tools/search/x_radial.asp?ra=%.5f&dec=%.5f&check_type=type&type=6&radius=%.4f&check_u=u&min_u=%.2f&max_u=%.2f&check_g=g&min_g=%.2f&max_g=%.2f&check_r=r&min_r=%.2f&max_r=%.2f&check_i=i&min_i=%.2f&max_i=%.2f&check_z=z&min_z=%.2f&max_z=%.2f&entries=top&topnum=500&format=csv'%\
(self.ra, self.dec, self.rad*60,self.minmag,self.maxmag,self.minmag,self.maxmag,self.minmag,self.maxmag,self.minmag,self.maxmag,self.minmag,self.maxmag)
self.logger.info( "Downloading SDSS catalog...")
self.logger.info( "%s"%catalog_url )
tmp_file = '/tmp/tmp_sdss_%s.cat'%timestamp
urlretrieve(catalog_url, tmp_file)
catalog = np.genfromtxt(tmp_file, delimiter=",", names=True)
#Clean temporary file.
if (os.path.isfile(tmp_file)):
os.remove(tmp_file)
if len(catalog.dtype) ==1:
catalog = np.array([], dtype=[('objid', '<f8'), ('run', '<f8'), ('rerun', '<f8'), ('camcol', '<f8'), ('field', '<f8'), ('obj', '<f8'), \
('type', '<f8'), ('ra', '<f8'), ('dec', '<f8'), ('u', '<f8'), ('g', '<f8'), ('r', '<f8'), ('i', '<f8'), ('z', '<f8'), \
('Err_u', '<f8'), ('Err_g', '<f8'), ('Err_r', '<f8'), ('Err_i', '<f8'), ('Err_z', '<f8')])
return catalog
def query_catalogue(self, catalog_name="PS1V3OBJECTS", filtered=True, tmpdir="/tmp"):
'''
Sends a VO query to the PS1 catalogue.
Filters the result by mangitude.
From: http://gsss.stsci.edu/Software/WebServices.htm
General Catalog Access : http://gsss.stsci.edu/webservices/vo/CatalogSearch.aspx?Parameters...
Required Parameter List
1 of the following 3 queries - VO ConeSearch, BoxSearch, IDsearch
RA=ra(deg) &DEC=dec(deg) &SR=search radius(deg)
BBOX=raMin(deg),decMin(deg),raMax(deg),decMax(deg)
ID=catID
Optional Parameters
FORMAT= VOTABLE(default) | HTML | KML | CSV | TSV | JSON | TEXT(limited set of catalogs)
CATALOG=GSC23(default) | GSC11 | GSC12 | USNOB | SDSS | FIRST | 2MASS | IRAS | GALEX | GAIA | TGAS | WISE
| CAOM_OBSCORE | CAOM_OBSPOINTING | PS1V3OBJECTS | PS1V3DETECTIONS
FILENAME=outputname (directs output to file)
MAXOBJ=n (limits number of entries returned by brightest magnitude)
MAGRANGE=bright,faint (limits number of entries returned by limits)
MINDET=n (minimum numbr of detections PanSTARRS only)
'''
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow())
url = "http://gsss.stsci.edu/webservices/vo/CatalogSearch.aspx?CAT=%s&RA=%.5f&DEC=%.5f&SR=%.5f&MAGRANGE=%.3f,%.3f"%(catalog_name, self.ra, self.dec, self.rad, self.minmag, self.maxmag)
self.logger.info("URL queried: %s"%url)
tmp_file = os.path.join(tmpdir, 'ps1_cat_%.3f_%.3f_%.3f_%.2f_%.2f.xml'%(self.ra, self.dec, self.rad, self.minmag, self.maxmag) )
#If the file was not downloaded to the temporary file, we download it. Otherwise, we skip this step.
if not os.path.isfile(tmp_file):
with open(tmp_file, "wb") as f:
page = urlopen(url)
f.write(page.read())
self.logger.info("Saved query as file: %s"%tmp_file)
# Read RA, Dec and magnitude from XML format USNO catalog
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
catalog = votable.parse_single_table(tmp_file).to_table()
except ValueError:
self.logger.warn("The search radius was too large for the service. Reducing to 0.25 deg.")
self.rad = 0.25
os.remove(tmp_file)
return self.query_catalogue(catalog_name=catalog_name, filtered=filtered, tmpdir=tmpdir)
'''if catalog.as_array() is None:
#Clean temporary file.
if (os.path.isfile(tmp_file)):
os.remove(tmp_file)
return None'''
self.logger.info("First row catalogue: %s:"%catalog[0])
#catalog = catalog.as_array().data
#If it is PS1, we know what fields we want.
#Otherwise, we just return everything.
if (catalog_name == "PS1V3OBJECTS"):
if (filtered):
#Filter spurious sources/ Objects where the majority of pixels where not masked (QFperfect >=0.9) and likely stars (rmeanpsfmag - rmeankronmag < 0.5)
catalog = catalog[ (catalog["ng"]>3)*(catalog["nr"]>3)* (catalog["ni"]>3)\
*(catalog["gQfPerfect"]>=0.95) *(catalog["rQfPerfect"]>=0.95)*(catalog["iQfPerfect"]>=0.95) * (catalog["rMeanPSFMag"] - catalog["rMeanKronMag"] < 0.5)]
newcat = np.zeros(len(catalog), dtype=[("ra", np.double), ("dec", np.double), ("objid", np.long), ("mag", np.float), \
("g", np.float), ("r", np.float), ("i", np.float), ("z", np.float), ("y", np.float), \
("Err_g", np.float), ("Err_r", np.float), ("Err_i", np.float), ("Err_z", np.float), ("Err_y", np.float), ("distance", np.double)])
newcat["objid"] = catalog["objID"]
newcat["ra"] = catalog["RAmean"]
newcat["dec"] = catalog["DECmean"]
newcat["mag"] = catalog["rMeanPSFMag"]
newcat["g"] = catalog["gMeanPSFMag"]
newcat["r"] = catalog["rMeanPSFMag"]
newcat["i"] = catalog["iMeanPSFMag"]
newcat["z"] = catalog["zMeanPSFMag"]
newcat["y"] = catalog["yMeanPSFMag"]
newcat["Err_g"] = catalog["gMeanPSFMagErr"]
newcat["Err_r"] = catalog["rMeanPSFMagErr"]
newcat["Err_i"] = catalog["iMeanPSFMagErr"]
newcat["Err_z"] = catalog["zMeanPSFMagErr"]
newcat["Err_y"] = catalog["yMeanPSFMagErr"]
newcat["distance"] = catalog["distance"]
elif (catalog_name=="2MASS" and filtered==True):
mask = np.array([('U' not in c.decode()) and ('F' not in c.decode()) and ('E' not in c.decode()) for c in catalog['ph_qual']])
mask2 = catalog['cc_flag']=='000'.encode()
newcat = catalog[mask*mask2]
self.logger.info("Prunned bad flags from 2MASS catalogue. Rows left: %d:"%len(newcat))
print ("Prunned bad flags from 2MASS catalogue. Rows left: %d:"%len(newcat))
else:
newcat = catalog
#Clean temporary file.\
#if (os.path.isfile(tmp_file)):
# os.remove(tmp_file)
return newcat
def query_sky_mapper(self, filtered=True, tmpdir="/tmp"):
'''
Sends a VO query to the SkyMapper catalogue.
'''
url = "http://skymapper.anu.edu.au/sm-cone/public/query?RA=%.5f&DEC=%.5f&SR=%.4f&RESPONSEFORMAT=CSV"%(self.ra, self.dec, self.rad)
with open(os.path.join(tmpdir, "skymapper_cat.csv"), "wb") as f:
try:
page = urlopen(url)
content = page.read()
f.write(content)
except HTTPError:
print ("ERROR! Page %s did not load properly!"%url )
return None
# Read RA, Dec and magnitude from CSV
catalog = Table.read(os.path.join(tmpdir, "skymapper_cat.csv"), format="ascii.csv")
if (filtered):
mask = (catalog["class_star"]>0.7) * (catalog["ngood"] >5) * (catalog['r_psf']>self.minmag) * (catalog['r_psf']<self.maxmag)
catalog = catalog[mask]
catalog.rename_column("raj2000", "ra")
catalog.rename_column("dej2000", "dec")
return catalog
def query_vizier(self, catalog='APASS'):
'''
Uses the astroquery environment to get the data from Vizier.
Possible selection of catalogues:
'''
result = Vizier.query_region("%.6f %.6f"%(self.ra, self.dec), radius=Angle(self.rad, "deg"), \
catalog=catalog) #column_filters={"rmag":">%s"%self.minmag,"rmag":"<%s"%self.maxmag }
return result[0]
|
{"hexsha": "3c268e4f8083266094f8b9d84282315b5a5ef55c", "size": 11999, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/photometry/QueryCatalogue.py", "max_stars_repo_name": "nblago/utils", "max_stars_repo_head_hexsha": "862a34eb9820474d1071e5ac2eec58d66d297649", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-25T11:18:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T11:18:20.000Z", "max_issues_repo_path": "src/photometry/QueryCatalogue.py", "max_issues_repo_name": "nblago/utils", "max_issues_repo_head_hexsha": "862a34eb9820474d1071e5ac2eec58d66d297649", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/photometry/QueryCatalogue.py", "max_forks_repo_name": "nblago/utils", "max_forks_repo_head_hexsha": "862a34eb9820474d1071e5ac2eec58d66d297649", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-27T03:45:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-27T03:45:18.000Z", "avg_line_length": 41.375862069, "max_line_length": 328, "alphanum_fraction": 0.5747978998, "include": true, "reason": "import numpy,from astropy", "num_tokens": 3122}
|
# --------------------------------------------------------------------------
# ACE1.jl: Julia implementation of the Atomic Cluster Expansion
# Copyright (c) 2019 Christoph Ortner <christophortner0@gmail.com>
# Licensed under ASL - see ASL.md for terms and conditions.
# --------------------------------------------------------------------------
# prototypes for space transforms and cutoffs
function transform end
function transform_d end
function fcut end
function fcut_d end
"""
`VecOrTup = Union{AbstractVector, Tuple}`
"""
const VecOrTup = Union{AbstractVector, Tuple}
abstract type ScalarBasis{T} <: IPBasis end
abstract type OneParticleBasis{T} <: IPBasis end
abstract type OnepBasisFcn end
# ------------------------------------------------------------
# Abstract polynomial degree business
"""
`AbstractDegree` : object specifying a degree can be called via
`degree(D, arg)` or via `D(arg)`
"""
abstract type AbstractDegree end
(D::AbstractDegree)(args...) = degree(D, args...)
"""
`function degree(D::AbstractDegree, arg)` : compute some notion of degree of
the `arg` argument.
"""
function degree end
"""
interface functions for `OneParticleBasis`
"""
function add_into_A! end
"""
interface functions for `OneParticleBasis`
"""
function add_into_A_dA! end
"""
`function scaling(b, p)`:
a scaling factor for a basis functions ϕ, which gives a rought estimate on
the magnitude of ∇ᵖϕ e.g.,
```
ϕ = r^n Ylm
```
has scaling factor `n^p + l^p`, though sharper estimates are also possible.
"""
function scaling end
using LinearAlgebra: Diagonal
diagonal_regulariser(basis; diff = 0) = Diagonal(scaling(basis, diff))
"""
every scalar basis must implement this
"""
function rand_radial end
|
{"hexsha": "ea20acb890681784e6783b208b8b3ec5585cbdd2", "size": 1720, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/prototypes.jl", "max_stars_repo_name": "casv2/ACE1.jl", "max_stars_repo_head_hexsha": "40d7cdfc8141193aeea4ee666b8cedb746928489", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/prototypes.jl", "max_issues_repo_name": "casv2/ACE1.jl", "max_issues_repo_head_hexsha": "40d7cdfc8141193aeea4ee666b8cedb746928489", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/prototypes.jl", "max_forks_repo_name": "casv2/ACE1.jl", "max_forks_repo_head_hexsha": "40d7cdfc8141193aeea4ee666b8cedb746928489", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7721518987, "max_line_length": 76, "alphanum_fraction": 0.6465116279, "num_tokens": 396}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.